author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39
parent    Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
-rw-r--r--  drivers/usb/dwc3/Kconfig            |  171
-rw-r--r--  drivers/usb/dwc3/Makefile           |   56
-rw-r--r--  drivers/usb/dwc3/core.c             | 2397
-rw-r--r--  drivers/usb/dwc3/core.h             | 1660
-rw-r--r--  drivers/usb/dwc3/debug.h            |  430
-rw-r--r--  drivers/usb/dwc3/debugfs.c          | 1040
-rw-r--r--  drivers/usb/dwc3/drd.c              |  622
-rw-r--r--  drivers/usb/dwc3/dwc3-am62.c        |  328
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c      |  249
-rw-r--r--  drivers/usb/dwc3/dwc3-haps.c        |  148
-rw-r--r--  drivers/usb/dwc3/dwc3-imx8mp.c      |  427
-rw-r--r--  drivers/usb/dwc3/dwc3-keystone.c    |  226
-rw-r--r--  drivers/usb/dwc3/dwc3-meson-g12a.c  |  994
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c   |  198
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c        |  627
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c         |  587
-rw-r--r--  drivers/usb/dwc3/dwc3-qcom.c        | 1110
-rw-r--r--  drivers/usb/dwc3/dwc3-st.c          |  379
-rw-r--r--  drivers/usb/dwc3/dwc3-xilinx.c      |  405
-rw-r--r--  drivers/usb/dwc3/ep0.c              | 1211
-rw-r--r--  drivers/usb/dwc3/gadget.c           | 4628
-rw-r--r--  drivers/usb/dwc3/gadget.h           |  152
-rw-r--r--  drivers/usb/dwc3/host.c             |  139
-rw-r--r--  drivers/usb/dwc3/io.h               |   57
-rw-r--r--  drivers/usb/dwc3/trace.c            |   11
-rw-r--r--  drivers/usb/dwc3/trace.h            |  350
-rw-r--r--  drivers/usb/dwc3/ulpi.c             |  104
27 files changed, 18706 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
new file mode 100644
index 000000000..864fef540
--- /dev/null
+++ b/drivers/usb/dwc3/Kconfig
@@ -0,0 +1,171 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_DWC3
+ tristate "DesignWare USB3 DRD Core Support"
+ depends on (USB || USB_GADGET) && HAS_DMA
+ depends on (EXTCON || EXTCON=n)
+ select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
+ help
+ Say Y or M here if your system has a Dual Role SuperSpeed
+ USB controller based on the DesignWare USB3 IP Core.
+
+ If you choose to build this driver as a dynamically linked
+ module, the module will be called dwc3.ko.
+
+if USB_DWC3
+
+config USB_DWC3_ULPI
+ bool "Register ULPI PHY Interface"
+ depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_DWC3
+ help
+ Select this if you have a ULPI-type PHY attached to your DWC3
+ controller.
+
+choice
+ bool "DWC3 Mode Selection"
+ default USB_DWC3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC3_HOST if (USB && !USB_GADGET)
+ default USB_DWC3_GADGET if (!USB && USB_GADGET)
+
+config USB_DWC3_HOST
+ bool "Host only mode"
+ depends on USB=y || USB=USB_DWC3
+ help
+ Select this when you want to use DWC3 in host mode only;
+ gadget functionality will not be available.
+
+config USB_DWC3_GADGET
+ bool "Gadget only mode"
+ depends on USB_GADGET=y || USB_GADGET=USB_DWC3
+ help
+ Select this when you want to use DWC3 in gadget mode only;
+ host functionality will not be available.
+
+config USB_DWC3_DUAL_ROLE
+ bool "Dual Role mode"
+ depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
+ help
+ This is the default operating mode of the DWC3 controller, where
+ both host and gadget features are enabled.
+
+endchoice
+
+comment "Platform Glue Driver Support"
+
+config USB_DWC3_OMAP
+ tristate "Texas Instruments OMAP5 and similar Platforms"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on EXTCON || !EXTCON
+ depends on OF
+ default USB_DWC3
+ help
+ Some platforms from Texas Instruments like OMAP5, DRA7xxx and
+ AM437x use this IP for USB2/3 functionality.
+
+ Say 'Y' or 'M' here if you have one such device
+
+config USB_DWC3_EXYNOS
+ tristate "Samsung Exynos SoC Platform"
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && OF
+ default USB_DWC3
+ help
+ Recent Samsung Exynos SoCs (Exynos5250, Exynos5410, Exynos542x,
+ Exynos5800, Exynos5433, Exynos7) ship with one DesignWare Core USB3
+ IP inside. Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_PCI
+ tristate "PCIe-based Platforms"
+ depends on USB_PCI && ACPI
+ default USB_DWC3
+ help
+ If you're using the DesignWare Core IP on a PCIe-based platform
+ (other than a Synopsys HAPS platform), please say 'Y' or 'M' here.
+
+config USB_DWC3_HAPS
+ tristate "Synopsys PCIe-based HAPS Platforms"
+ depends on USB_PCI
+ default USB_DWC3
+ help
+ If you're using the DesignWare Core IP with a Synopsys PCIe HAPS
+ platform, please say 'Y' or 'M' here.
+
+config USB_DWC3_KEYSTONE
+ tristate "Texas Instruments Keystone2/AM654 Platforms"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ default USB_DWC3
+ help
+ Support of USB2/3 functionality in TI Keystone2 and AM654 platforms.
+ Say 'Y' or 'M' here if you have one such device
+
+config USB_DWC3_MESON_G12A
+ tristate "Amlogic Meson G12A Platforms"
+ depends on OF && COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ default USB_DWC3
+ select USB_ROLE_SWITCH
+ select REGMAP_MMIO
+ help
+ Support USB2/3 functionality in Amlogic G12A platforms.
+ Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_OF_SIMPLE
+ tristate "Generic OF Simple Glue Layer"
+ depends on OF && COMMON_CLK
+ default USB_DWC3
+ help
+ Support USB2/3 functionality in simple SoC integrations.
+ Currently supports Xilinx and Qualcomm DWC USB3 IP.
+ Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_ST
+ tristate "STMicroelectronics Platforms"
+ depends on (ARCH_STI || COMPILE_TEST) && OF
+ default USB_DWC3
+ help
+ STMicroelectronics SoCs with one DesignWare Core USB3 IP
+ inside (i.e. STiH407).
+ Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_QCOM
+ tristate "Qualcomm Platform"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON || !EXTCON
+ depends on (OF || ACPI)
+ default USB_DWC3
+ help
+ Some Qualcomm SoCs use DesignWare Core IP for USB2/3
+ functionality.
+ This driver also handles Qscratch wrapper which is needed
+ for peripheral mode support.
+ Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_IMX8MP
+ tristate "NXP iMX8MP Platform"
+ depends on OF && COMMON_CLK
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ default USB_DWC3
+ help
+ NXP i.MX8M Plus SoCs use the DesignWare Core IP for USB2/3
+ functionality.
+ Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_XILINX
+ tristate "Xilinx Platforms"
+ depends on (ARCH_ZYNQMP || ARCH_VERSAL) && OF
+ default USB_DWC3
+ help
+ Support Xilinx SoCs with DesignWare Core USB3 IP.
+ This driver handles both ZynqMP and Versal SoC operations.
+ Say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_AM62
+ tristate "Texas Instruments AM62 Platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_DWC3
+ help
+ Support TI's AM62 platforms with DesignWare Core USB3 IP.
+ The DesignWare Core USB3 IP is programmed to operate in
+ USB 2.0 mode only.
+ Say 'Y' or 'M' here if you have one such device.
+endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
new file mode 100644
index 000000000..9f66bd82b
--- /dev/null
+++ b/drivers/usb/dwc3/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+
+obj-$(CONFIG_USB_DWC3) += dwc3.o
+
+dwc3-y := core.o
+
+ifneq ($(CONFIG_TRACING),)
+ dwc3-y += trace.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ dwc3-y += host.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ dwc3-y += gadget.o ep0.o
+endif
+
+ifneq ($(CONFIG_USB_DWC3_DUAL_ROLE),)
+ dwc3-y += drd.o
+endif
+
+ifneq ($(CONFIG_USB_DWC3_ULPI),)
+ dwc3-y += ulpi.o
+endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+ dwc3-y += debugfs.o
+endif
+
+##
+# Platform-specific glue layers go here
+#
+# NOTICE: Make sure your glue layer doesn't depend on anything
+# which is arch-specific and that it compiles in all configurations.
+#
+# We want to keep this requirement in order to be able to compile
+# the entire driver (with all its glue layers) on several architectures
+# and make sure it compiles fine. This will also help with allmodconfig
+# and allyesconfig builds.
+##
+
+obj-$(CONFIG_USB_DWC3_AM62) += dwc3-am62.o
+obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
+obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
+obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
+obj-$(CONFIG_USB_DWC3_HAPS) += dwc3-haps.o
+obj-$(CONFIG_USB_DWC3_KEYSTONE) += dwc3-keystone.o
+obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o
+obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
+obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
+obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
+obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
+obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
new file mode 100644
index 000000000..011a3909f
--- /dev/null
+++ b/drivers/usb/dwc3/core.c
@@ -0,0 +1,2397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * core.c - DesignWare USB3 DRD Controller Core file
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/acpi.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
+#include <linux/bitfield.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
+#include "debug.h"
+
+#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
+
+/**
+ * dwc3_get_dr_mode - Validates and sets dr_mode
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_get_dr_mode(struct dwc3 *dwc)
+{
+ enum usb_dr_mode mode;
+ struct device *dev = dwc->dev;
+ unsigned int hw_mode;
+
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ dwc->dr_mode = USB_DR_MODE_OTG;
+
+ mode = dwc->dr_mode;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
+ switch (hw_mode) {
+ case DWC3_GHWPARAMS0_MODE_GADGET:
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) {
+ dev_err(dev,
+ "Controller does not support host mode.\n");
+ return -EINVAL;
+ }
+ mode = USB_DR_MODE_PERIPHERAL;
+ break;
+ case DWC3_GHWPARAMS0_MODE_HOST:
+ if (IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
+ dev_err(dev,
+ "Controller does not support device mode.\n");
+ return -EINVAL;
+ }
+ mode = USB_DR_MODE_HOST;
+ break;
+ default:
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+ mode = USB_DR_MODE_PERIPHERAL;
+
+ /*
+ * DWC_usb31 and DWC_usb3 v3.30a and higher do not support OTG
+ * mode. If the controller supports DRD but the dr_mode is not
+ * specified or set to OTG, then set the mode to peripheral.
+ */
+ if (mode == USB_DR_MODE_OTG && !dwc->edev &&
+ (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
+ !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
+ !DWC3_VER_IS_PRIOR(DWC3, 330A))
+ mode = USB_DR_MODE_PERIPHERAL;
+ }
+
+ if (mode != dwc->dr_mode) {
+ dev_warn(dev,
+ "Configuration mismatch. dr_mode forced to %s\n",
+ mode == USB_DR_MODE_HOST ? "host" : "gadget");
+
+ dwc->dr_mode = mode;
+ }
+
+ return 0;
+}
+
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ dwc->current_dr_role = mode;
+}
+
+static void __dwc3_set_mode(struct work_struct *work)
+{
+ struct dwc3 *dwc = work_to_dwc(work);
+ unsigned long flags;
+ int ret;
+ u32 reg;
+ u32 desired_dr_role;
+
+ mutex_lock(&dwc->mutex);
+ spin_lock_irqsave(&dwc->lock, flags);
+ desired_dr_role = dwc->desired_dr_role;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_get_sync(dwc->dev);
+
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+ dwc3_otg_update(dwc, 0);
+
+ if (!desired_dr_role)
+ goto out;
+
+ if (desired_dr_role == dwc->current_dr_role)
+ goto out;
+
+ if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
+ goto out;
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_gadget_exit(dwc);
+ dwc3_event_buffers_cleanup(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ dwc3_otg_exit(dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_otg_update(dwc, 1);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * When current_dr_role is not set, there's no role switching.
+ * Only perform GCTL.CoreSoftReset when there's DRD role switching.
+ */
+ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
+ DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
+ desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ /*
+ * Wait for the internal clocks to synchronize. DWC_usb31 and
+ * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
+ * keep it consistent across different IPs, let's wait up to
+ * 100ms before clearing GCTL.CORESOFTRESET.
+ */
+ msleep(100);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ dwc3_set_prtcap(dwc, desired_dr_role);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (desired_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to initialize host\n");
+ } else {
+ if (dwc->usb2_phy)
+ otg_set_vbus(dwc->usb2_phy->otg, true);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
+ if (dwc->dis_split_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
+ reg |= DWC3_GUCTL3_SPLITDISABLE;
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
+ }
+ }
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_core_soft_reset(dwc);
+
+ dwc3_event_buffers_setup(dwc);
+
+ if (dwc->usb2_phy)
+ otg_set_vbus(dwc->usb2_phy->otg, false);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
+
+ ret = dwc3_gadget_init(dwc);
+ if (ret)
+ dev_err(dwc->dev, "failed to initialize peripheral\n");
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ dwc3_otg_init(dwc);
+ dwc3_otg_update(dwc, 0);
+ break;
+ default:
+ break;
+ }
+
+out:
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
+ mutex_unlock(&dwc->mutex);
+}
+
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+{
+ unsigned long flags;
+
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->desired_dr_role = mode;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ queue_work(system_freezable_wq, &dwc->drd_work);
+}
+
+u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 reg;
+
+ dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
+ DWC3_GDBGFIFOSPACE_NUM(dep->number) |
+ DWC3_GDBGFIFOSPACE_TYPE(type));
+
+ reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
+
+ return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
+}
+
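A minimal usage sketch of the helper above, assuming the DWC3_TXFIFO type constant from core.h and a valid struct dwc3_ep (illustration only, not part of this patch):

/* Query how much space is left in an endpoint's TX FIFO. */
static u32 example_tx_fifo_space(struct dwc3_ep *dep)
{
	/* GDBGFIFOSPACE is written with the endpoint number and FIFO type,
	 * then read back; the SPACE_AVAILABLE field holds the result.
	 */
	return dwc3_core_fifo_space(dep, DWC3_TXFIFO);
}
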
+/**
+ * dwc3_core_soft_reset - Issues core soft reset and PHY reset
+ * @dwc: pointer to our context structure
+ */
+int dwc3_core_soft_reset(struct dwc3 *dwc)
+{
+ u32 reg;
+ int retries = 1000;
+
+ /*
+ * We're resetting only the device side because, if we're in host mode,
+ * XHCI driver will reset the host block. If dwc3 was configured for
+ * host-only mode, then we can return early.
+ */
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ return 0;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CSFTRST;
+ reg &= ~DWC3_DCTL_RUN_STOP;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+
+ /*
+ * For DWC_usb31 controller 1.90a and later, the DCTL.CSFTRST bit
+ * is cleared only after all the clocks are synchronized. This can
+ * take a little more than 50ms. Set the polling rate at 20ms
+ * for 10 times instead.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
+ retries = 10;
+
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ goto done;
+
+ if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
+ msleep(20);
+ else
+ udelay(1);
+ } while (--retries);
+
+ dev_warn(dwc->dev, "DWC3 controller soft reset failed.\n");
+ return -ETIMEDOUT;
+
+done:
+ /*
+ * For DWC_usb31 controller 1.80a and prior, once the DCTL.CSFTRST bit
+ * is cleared, we must wait at least 50ms before accessing the PHY
+ * domain (synchronization delay).
+ */
+ if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
+ msleep(50);
+
+ return 0;
+}
+
+/*
+ * dwc3_frame_length_adjustment - Adjusts frame length if required
+ * @dwc3: Pointer to our controller context structure
+ */
+static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
+{
+ u32 reg;
+ u32 dft;
+
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
+ return;
+
+ if (dwc->fladj == 0)
+ return;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ dft = reg & DWC3_GFLADJ_30MHZ_MASK;
+ if (dft != dwc->fladj) {
+ reg &= ~DWC3_GFLADJ_30MHZ_MASK;
+ reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
+ }
+}
+
+/**
+ * dwc3_ref_clk_period - Reference clock period configuration
+ * Default reference clock period depends on hardware
+ * configuration. For systems with a reference clock that differs
+ * from the default, this will set the clock period in the DWC3_GUCTL
+ * register.
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_ref_clk_period(struct dwc3 *dwc)
+{
+ unsigned long period;
+ unsigned long fladj;
+ unsigned long decr;
+ unsigned long rate;
+ u32 reg;
+
+ if (dwc->ref_clk) {
+ rate = clk_get_rate(dwc->ref_clk);
+ if (!rate)
+ return;
+ period = NSEC_PER_SEC / rate;
+ } else if (dwc->ref_clk_per) {
+ period = dwc->ref_clk_per;
+ rate = NSEC_PER_SEC / period;
+ } else {
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+ reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
+ reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, period);
+ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
+ return;
+
+ /*
+ * The calculation below is
+ *
+ * 125000 * (NSEC_PER_SEC / (rate * period) - 1)
+ *
+ * but rearranged for fixed-point arithmetic. The division must be
+ * 64-bit because 125000 * NSEC_PER_SEC doesn't fit in 32 bits (and
+ * neither does rate * period).
+ *
+ * Note that rate * period ~= NSEC_PER_SEC, minus the number of
+ * nanoseconds of error caused by the truncation which happened during
+ * the division when calculating rate or period (whichever one was
+ * derived from the other). We first calculate the relative error, then
+ * scale it to units of 8 ppm.
+ */
+ fladj = div64_u64(125000ULL * NSEC_PER_SEC, (u64)rate * period);
+ fladj -= 125000;
+
+ /*
+ * The documented 240MHz constant is scaled by 2 to get PLS1 as well.
+ */
+ decr = 480000000 / rate;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ reg &= ~DWC3_GFLADJ_REFCLK_FLADJ_MASK
+ & ~DWC3_GFLADJ_240MHZDECR
+ & ~DWC3_GFLADJ_240MHZDECR_PLS1;
+ reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
+ | FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
+ | FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
+
+ if (dwc->gfladj_refclk_lpm_sel)
+ reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
+}
+
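To make the fixed-point arithmetic in dwc3_ref_clk_period() concrete, here is a small standalone sketch of the same calculation, worked for an assumed 19.2 MHz reference clock (illustration only, not part of this patch):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t rate = 19200000;               /* assumed 19.2 MHz ref_clk */
	uint64_t period = NSEC_PER_SEC / rate;  /* 52 ns (52.083 ns truncated) */
	uint64_t fladj = (125000ULL * NSEC_PER_SEC) / (rate * period) - 125000;
	uint64_t decr = 480000000ULL / rate;    /* 25; documented 240 MHz constant x2 */

	/* fladj = 200, i.e. the ~1600 ppm truncation error expressed in 8 ppm units */
	printf("REFCLKPER=%llu FLADJ=%llu 240MHZDECR=%llu PLS1=%llu\n",
	       (unsigned long long)period, (unsigned long long)fladj,
	       (unsigned long long)(decr >> 1), (unsigned long long)(decr & 1));
	return 0;
}
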
+/**
+ * dwc3_free_one_event_buffer - Frees one event buffer
+ * @dwc: Pointer to our controller context structure
+ * @evt: Pointer to event buffer to be freed
+ */
+static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
+ struct dwc3_event_buffer *evt)
+{
+ dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
+}
+
+/**
+ * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
+ * @dwc: Pointer to our controller context structure
+ * @length: size of the event buffer
+ *
+ * Returns a pointer to the allocated event buffer structure on success
+ * otherwise ERR_PTR(errno).
+ */
+static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
+ unsigned int length)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
+ if (!evt)
+ return ERR_PTR(-ENOMEM);
+
+ evt->dwc = dwc;
+ evt->length = length;
+ evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
+ if (!evt->cache)
+ return ERR_PTR(-ENOMEM);
+
+ evt->buf = dma_alloc_coherent(dwc->sysdev, length,
+ &evt->dma, GFP_KERNEL);
+ if (!evt->buf)
+ return ERR_PTR(-ENOMEM);
+
+ return evt;
+}
+
+/**
+ * dwc3_free_event_buffers - frees all allocated event buffers
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_free_event_buffers(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc->ev_buf;
+ if (evt)
+ dwc3_free_one_event_buffer(dwc, evt);
+}
+
+/**
+ * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
+ * @dwc: pointer to our controller context structure
+ * @length: size of event buffer
+ *
+ * Returns 0 on success otherwise negative errno. In the error case, dwc
+ * may contain some buffers allocated but not all which were requested.
+ */
+static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc3_alloc_one_event_buffer(dwc, length);
+ if (IS_ERR(evt)) {
+ dev_err(dwc->dev, "can't allocate event buffer\n");
+ return PTR_ERR(evt);
+ }
+ dwc->ev_buf = evt;
+
+ return 0;
+}
+
+/**
+ * dwc3_event_buffers_setup - setup our allocated event buffers
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+int dwc3_event_buffers_setup(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc->ev_buf;
+ evt->lpos = 0;
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
+ lower_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
+ upper_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
+
+ return 0;
+}
+
+void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc->ev_buf;
+
+ evt->lpos = 0;
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
+ | DWC3_GEVNTSIZ_SIZE(0));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
+}
+
+static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
+{
+ if (!dwc->has_hibernation)
+ return 0;
+
+ if (!dwc->nr_scratch)
+ return 0;
+
+ dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
+ DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
+ if (!dwc->scratchbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
+{
+ dma_addr_t scratch_addr;
+ u32 param;
+ int ret;
+
+ if (!dwc->has_hibernation)
+ return 0;
+
+ if (!dwc->nr_scratch)
+ return 0;
+
+ /* should never get here */
+ if (!WARN_ON(dwc->scratchbuf))
+ return 0;
+
+ scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
+ dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+ dev_err(dwc->sysdev, "failed to map scratch buffer\n");
+ ret = -EFAULT;
+ goto err0;
+ }
+
+ dwc->scratch_addr = scratch_addr;
+
+ param = lower_32_bits(scratch_addr);
+
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
+ if (ret < 0)
+ goto err1;
+
+ param = upper_32_bits(scratch_addr);
+
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
+ if (ret < 0)
+ goto err1;
+
+ return 0;
+
+err1:
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
+ DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
+
+err0:
+ return ret;
+}
+
+static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
+{
+ if (!dwc->has_hibernation)
+ return;
+
+ if (!dwc->nr_scratch)
+ return;
+
+ /* should never get here */
+ if (!WARN_ON(dwc->scratchbuf))
+ return;
+
+ dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
+ DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dwc->scratchbuf);
+}
+
+static void dwc3_core_num_eps(struct dwc3 *dwc)
+{
+ struct dwc3_hwparams *parms = &dwc->hwparams;
+
+ dwc->num_eps = DWC3_NUM_EPS(parms);
+}
+
+static void dwc3_cache_hwparams(struct dwc3 *dwc)
+{
+ struct dwc3_hwparams *parms = &dwc->hwparams;
+
+ parms->hwparams0 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS0);
+ parms->hwparams1 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS1);
+ parms->hwparams2 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS2);
+ parms->hwparams3 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS3);
+ parms->hwparams4 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS4);
+ parms->hwparams5 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS5);
+ parms->hwparams6 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
+ parms->hwparams7 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS7);
+ parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
+
+ if (DWC3_IP_IS(DWC32))
+ parms->hwparams9 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS9);
+}
+
+static int dwc3_core_ulpi_init(struct dwc3 *dwc)
+{
+ int intf;
+ int ret = 0;
+
+ intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
+
+ if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
+ (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
+ dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "ulpi", 4)))
+ ret = dwc3_ulpi_init(dwc);
+
+ return ret;
+}
+
+/**
+ * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success. The USB PHY interfaces are configured but not
+ * initialized. The PHY interfaces and the PHYs get initialized together with
+ * the core in dwc3_core_init.
+ */
+static int dwc3_phy_setup(struct dwc3 *dwc)
+{
+ unsigned int hw_mode;
+ u32 reg;
+
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+ /*
+ * Make sure UX_EXIT_PX is cleared as that causes issues with some
+ * PHYs. Also, this bit is not supposed to be used in normal operation.
+ */
+ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+
+ /*
+ * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
+ * to '0' during coreConsultant configuration. So default value
+ * will be '0' when the core is reset. Application needs to set it
+ * to '1' after the core initialization is completed.
+ */
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+ if (dwc->u2ss_inp3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
+
+ if (dwc->dis_rxdet_inp3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
+
+ if (dwc->req_p1p2p3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
+
+ if (dwc->del_p1p2p3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_DEP1P2P3_EN;
+
+ if (dwc->del_phy_power_chg_quirk)
+ reg |= DWC3_GUSB3PIPECTL_DEPOCHANGE;
+
+ if (dwc->lfps_filter_quirk)
+ reg |= DWC3_GUSB3PIPECTL_LFPSFILT;
+
+ if (dwc->rx_detect_poll_quirk)
+ reg |= DWC3_GUSB3PIPECTL_RX_DETOPOLL;
+
+ if (dwc->tx_de_emphasis_quirk)
+ reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
+
+ if (dwc->dis_u3_susphy_quirk)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+ if (dwc->dis_del_phy_power_chg_quirk)
+ reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+
+ /* Select the HS PHY interface */
+ switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
+ case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
+ if (dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "utmi", 4)) {
+ reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
+ break;
+ } else if (dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
+ reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ } else {
+ /* Relying on default value. */
+ if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
+ break;
+ }
+ fallthrough;
+ case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
+ default:
+ break;
+ }
+
+ switch (dwc->hsphy_mode) {
+ case USBPHY_INTERFACE_MODE_UTMI:
+ reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
+ DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
+ reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_8_BIT) |
+ DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_8_BIT);
+ break;
+ case USBPHY_INTERFACE_MODE_UTMIW:
+ reg &= ~(DWC3_GUSB2PHYCFG_PHYIF_MASK |
+ DWC3_GUSB2PHYCFG_USBTRDTIM_MASK);
+ reg |= DWC3_GUSB2PHYCFG_PHYIF(UTMI_PHYIF_16_BIT) |
+ DWC3_GUSB2PHYCFG_USBTRDTIM(USBTRDTIM_UTMI_16_BIT);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
+ * '0' during coreConsultant configuration. So default value will
+ * be '0' when the core is reset. Application needs to set it to
+ * '1' after the core initialization is completed.
+ */
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+
+ /*
+ * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
+ if (dwc->dis_u2_susphy_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
+ if (dwc->dis_enblslpm_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ else
+ reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+
+ if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
+ reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ return 0;
+}
+
+static int dwc3_clk_enable(struct dwc3 *dwc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(dwc->bus_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwc->ref_clk);
+ if (ret)
+ goto disable_bus_clk;
+
+ ret = clk_prepare_enable(dwc->susp_clk);
+ if (ret)
+ goto disable_ref_clk;
+
+ return 0;
+
+disable_ref_clk:
+ clk_disable_unprepare(dwc->ref_clk);
+disable_bus_clk:
+ clk_disable_unprepare(dwc->bus_clk);
+ return ret;
+}
+
+static void dwc3_clk_disable(struct dwc3 *dwc)
+{
+ clk_disable_unprepare(dwc->susp_clk);
+ clk_disable_unprepare(dwc->ref_clk);
+ clk_disable_unprepare(dwc->bus_clk);
+}
+
+static void dwc3_core_exit(struct dwc3 *dwc)
+{
+ dwc3_event_buffers_cleanup(dwc);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+ dwc3_clk_disable(dwc);
+ reset_control_assert(dwc->reset);
+}
+
+static bool dwc3_core_is_valid(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+ dwc->ip = DWC3_GSNPS_ID(reg);
+
+ /* This should read as U3 followed by revision number */
+ if (DWC3_IP_IS(DWC3)) {
+ dwc->revision = reg;
+ } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
+ dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
+ dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+{
+ u32 hwparams4 = dwc->hwparams.hwparams4;
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
+
+ switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ /**
+ * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
+ * issue which would cause xHCI compliance tests to fail.
+ *
+ * Because of that we cannot enable clock gating on such
+ * configurations.
+ *
+ * Refers to:
+ *
+ * STAR#9000588375: Clock Gating, SOF Issues when ref_clk-Based
+ * SOF/ITP Mode Used
+ */
+ if ((dwc->dr_mode == USB_DR_MODE_HOST ||
+ dwc->dr_mode == USB_DR_MODE_OTG) &&
+ DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
+ reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
+ else
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
+ /* enable hibernation here */
+ dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+
+ /*
+ * REVISIT Enabling this bit so that host-mode hibernation
+ * will work. Device-mode hibernation is not yet implemented.
+ */
+ reg |= DWC3_GCTL_GBLHIBERNATIONEN;
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+
+ /* check if current dwc3 is on simulation board */
+ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
+ dev_info(dwc->dev, "Running with FPGA optimizations\n");
+ dwc->is_fpga = true;
+ }
+
+ WARN_ONCE(dwc->disable_scramble_quirk && !dwc->is_fpga,
+ "disable_scramble cannot be used on non-FPGA builds\n");
+
+ if (dwc->disable_scramble_quirk && dwc->is_fpga)
+ reg |= DWC3_GCTL_DISSCRAMBLE;
+ else
+ reg &= ~DWC3_GCTL_DISSCRAMBLE;
+
+ if (dwc->u2exit_lfps_quirk)
+ reg |= DWC3_GCTL_U2EXIT_LFPS;
+
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have a bug
+ * where the device can fail to connect at SuperSpeed
+ * and falls back to high-speed mode which causes
+ * the device to enter a Connect/Disconnect loop
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 190A))
+ reg |= DWC3_GCTL_U2RSTECN;
+
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+static int dwc3_core_get_phy(struct dwc3 *dwc);
+static int dwc3_core_ulpi_init(struct dwc3 *dwc);
+
+/* set global incr burst type configuration registers */
+static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ /* incrx_mode : for INCR burst type. */
+ bool incrx_mode;
+ /* incrx_size : for size of INCRX burst. */
+ u32 incrx_size;
+ u32 *vals;
+ u32 cfg;
+ int ntype;
+ int ret;
+ int i;
+
+ cfg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
+
+ /*
+ * Handle property "snps,incr-burst-type-adjustment".
+ * Get the number of value from this property:
+ * result <= 0, means this property is not supported.
+ * result = 1, means INCRx burst mode supported.
+ * result > 1, means undefined length burst mode supported.
+ */
+ ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
+ if (ntype <= 0)
+ return;
+
+ vals = kcalloc(ntype, sizeof(u32), GFP_KERNEL);
+ if (!vals)
+ return;
+
+ /* Get INCR burst type, and parse it */
+ ret = device_property_read_u32_array(dev,
+ "snps,incr-burst-type-adjustment", vals, ntype);
+ if (ret) {
+ kfree(vals);
+ dev_err(dev, "Error to get property\n");
+ return;
+ }
+
+ incrx_size = *vals;
+
+ if (ntype > 1) {
+ /* INCRX (undefined length) burst mode */
+ incrx_mode = INCRX_UNDEF_LENGTH_BURST_MODE;
+ for (i = 1; i < ntype; i++) {
+ if (vals[i] > incrx_size)
+ incrx_size = vals[i];
+ }
+ } else {
+ /* INCRX burst mode */
+ incrx_mode = INCRX_BURST_MODE;
+ }
+
+ kfree(vals);
+
+ /* Enable Undefined Length INCR Burst and Enable INCRx Burst */
+ cfg &= ~DWC3_GSBUSCFG0_INCRBRST_MASK;
+ if (incrx_mode)
+ cfg |= DWC3_GSBUSCFG0_INCRBRSTENA;
+ switch (incrx_size) {
+ case 256:
+ cfg |= DWC3_GSBUSCFG0_INCR256BRSTENA;
+ break;
+ case 128:
+ cfg |= DWC3_GSBUSCFG0_INCR128BRSTENA;
+ break;
+ case 64:
+ cfg |= DWC3_GSBUSCFG0_INCR64BRSTENA;
+ break;
+ case 32:
+ cfg |= DWC3_GSBUSCFG0_INCR32BRSTENA;
+ break;
+ case 16:
+ cfg |= DWC3_GSBUSCFG0_INCR16BRSTENA;
+ break;
+ case 8:
+ cfg |= DWC3_GSBUSCFG0_INCR8BRSTENA;
+ break;
+ case 4:
+ cfg |= DWC3_GSBUSCFG0_INCR4BRSTENA;
+ break;
+ case 1:
+ break;
+ default:
+ dev_err(dev, "Invalid property\n");
+ break;
+ }
+
+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, cfg);
+}
+
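As a concrete reading of the logic above: with a hypothetical device-tree property snps,incr-burst-type-adjustment = <4 8 16>;, ntype is 3, so undefined-length burst mode is selected together with the largest listed size, and GSBUSCFG0 ends up with INCRBRSTENA plus INCR16BRSTENA set. A compact sketch of that selection rule (values assumed for illustration, not part of this patch):

/* Pick the burst mode and the largest INCRx size from the property values. */
static void example_pick_incr_burst(const unsigned int *vals, int ntype,
				    int *undef_len, unsigned int *largest)
{
	int i;

	*undef_len = ntype > 1;		/* more than one entry => undefined length */
	*largest = vals[0];
	for (i = 1; i < ntype; i++)
		if (vals[i] > *largest)
			*largest = vals[i];	/* <4 8 16> => 16 => INCR16BRSTENA */
}
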
+static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+{
+ u32 scale;
+ u32 reg;
+
+ if (!dwc->susp_clk)
+ return;
+
+ /*
+ * The power down scale field specifies how many suspend_clk
+ * periods fit into a 16KHz clock period. When performing
+ * the division, round up the remainder.
+ *
+ * The power down scale value is calculated using the fastest
+ * frequency of the suspend_clk. If it isn't fixed (but within
+ * the accuracy requirement), the driver may not know the max
+ * rate of the suspend_clk, so only update the power down scale
+ * if the default is less than the calculated value from
+ * clk_get_rate() or if the default is questionably high
+ * (3x or more) to be within the requirement.
+ */
+ scale = DIV_ROUND_UP(clk_get_rate(dwc->susp_clk), 16000);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ if ((reg & DWC3_GCTL_PWRDNSCALE_MASK) < DWC3_GCTL_PWRDNSCALE(scale) ||
+ (reg & DWC3_GCTL_PWRDNSCALE_MASK) > DWC3_GCTL_PWRDNSCALE(scale*3)) {
+ reg &= ~(DWC3_GCTL_PWRDNSCALE_MASK);
+ reg |= DWC3_GCTL_PWRDNSCALE(scale);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ }
+}
+
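A worked example of the scale computed above, for an assumed 32.768 kHz suspend clock (illustration only, not part of this patch): GCTL.PwrDnScale counts suspend_clk periods per 16 kHz interval, rounded up.

/* DIV_ROUND_UP(rate, 16000): 32768 Hz -> 32768/16000 = 2.048 -> 3 */
static unsigned int example_pwrdn_scale(unsigned long susp_clk_rate)
{
	return (susp_clk_rate + 16000 - 1) / 16000;
}
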
+static void dwc3_config_threshold(struct dwc3 *dwc)
+{
+ u32 reg;
+ u8 rx_thr_num;
+ u8 rx_maxburst;
+ u8 tx_thr_num;
+ u8 tx_maxburst;
+
+ /*
+ * Must config both number of packets and max burst settings to enable
+ * RX and/or TX threshold.
+ */
+ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+ rx_thr_num = dwc->rx_thr_num_pkt_prd;
+ rx_maxburst = dwc->rx_max_burst_prd;
+ tx_thr_num = dwc->tx_thr_num_pkt_prd;
+ tx_maxburst = dwc->tx_max_burst_prd;
+
+ if (rx_thr_num && rx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+
+ reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+ reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+
+ reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+ reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+
+ if (tx_thr_num && tx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+ reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+
+ reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+ reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+
+ reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+ reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+ }
+ }
+
+ rx_thr_num = dwc->rx_thr_num_pkt;
+ rx_maxburst = dwc->rx_max_burst;
+ tx_thr_num = dwc->tx_thr_num_pkt;
+ tx_maxburst = dwc->tx_max_burst;
+
+ if (DWC3_IP_IS(DWC3)) {
+ if (rx_thr_num && rx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
+ reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
+
+ reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
+ reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+
+ if (tx_thr_num && tx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+ reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
+ reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
+
+ reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
+ reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+ }
+ } else {
+ if (rx_thr_num && rx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
+ reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
+
+ reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
+ reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ }
+
+ if (tx_thr_num && tx_maxburst) {
+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+ reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
+
+ reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
+ reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
+
+ reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
+ reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
+
+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+ }
+ }
+}
+
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_core_init(struct dwc3 *dwc)
+{
+ unsigned int hw_mode;
+ u32 reg;
+ int ret;
+
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
+ /*
+ * Write the Linux version code to our GUID register so it's easy to
+ * figure out in which kernel version a bug was found.
+ */
+ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
+ if (!dwc->ulpi_ready) {
+ ret = dwc3_core_ulpi_init(dwc);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ dwc3_core_soft_reset(dwc);
+ ret = -EPROBE_DEFER;
+ }
+ goto err0;
+ }
+ dwc->ulpi_ready = true;
+ }
+
+ if (!dwc->phys_ready) {
+ ret = dwc3_core_get_phy(dwc);
+ if (ret)
+ goto err0a;
+ dwc->phys_ready = true;
+ }
+
+ usb_phy_init(dwc->usb2_phy);
+ usb_phy_init(dwc->usb3_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err0a;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0) {
+ phy_exit(dwc->usb2_generic_phy);
+ goto err0a;
+ }
+
+ ret = dwc3_core_soft_reset(dwc);
+ if (ret)
+ goto err1;
+
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+ !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
+ if (!dwc->dis_u3_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ }
+
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+ }
+
+ dwc3_core_setup_global_control(dwc);
+ dwc3_core_num_eps(dwc);
+
+ ret = dwc3_setup_scratch_buffers(dwc);
+ if (ret)
+ goto err1;
+
+ /* Set power down scale of suspend_clk */
+ dwc3_set_power_down_clk_scale(dwc);
+
+ /* Adjust Frame Length */
+ dwc3_frame_length_adjustment(dwc);
+
+ /* Adjust Reference Clock Period */
+ dwc3_ref_clk_period(dwc);
+
+ dwc3_set_incr_burst_type(dwc);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err2;
+
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err3;
+
+ ret = dwc3_event_buffers_setup(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to setup event buffers\n");
+ goto err4;
+ }
+
+ /*
+ * ENDXFER polling is available on version 3.10a and later of
+ * the DWC_usb3 controller. It is NOT available in the
+ * DWC_usb31 controller.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+ reg |= DWC3_GUCTL2_RST_ACTBITLATER;
+ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+ }
+
+ /*
+ * When configured in HOST mode, after issuing a U3/L2 exit the
+ * controller fails to send the proper CRC checksum in the CRC5
+ * field. Because of this behaviour a Transaction Error is generated,
+ * resulting in a reset and re-enumeration of the attached USB
+ * device. termsel, xcvrsel and opmode all become 0 at the end of
+ * resume. Enabling bit 10 of GUCTL1 will correct this problem. This
+ * option is to support certain legacy ULPI PHYs.
+ */
+ if (dwc->resume_hs_terminations) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+ if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+
+ /*
+ * Enable hardware control of sending remote wakeup
+ * in HS when the device is in the L1 state.
+ */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
+ reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+
+ /*
+ * Decouple USB 2.0 L1 & L2 events which will allow for
+ * gadget driver to only receive U3/L2 suspend & wakeup
+ * events and prevent the more frequent L1 LPM transitions
+ * from interrupting the driver.
+ */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
+ reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
+
+ if (dwc->dis_tx_ipgap_linecheck_quirk)
+ reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
+
+ if (dwc->parkmode_disable_ss_quirk)
+ reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
+
+ if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
+ (dwc->maximum_speed == USB_SPEED_HIGH ||
+ dwc->maximum_speed == USB_SPEED_FULL))
+ reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
+
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+ dwc3_config_threshold(dwc);
+
+ return 0;
+
+err4:
+ phy_power_off(dwc->usb3_generic_phy);
+
+err3:
+ phy_power_off(dwc->usb2_generic_phy);
+
+err2:
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+
+err1:
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+err0a:
+ dwc3_ulpi_exit(dwc);
+
+err0:
+ return ret;
+}
+
+static int dwc3_core_get_phy(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ if (node) {
+ dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
+ dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
+ } else {
+ dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+ }
+
+ if (IS_ERR(dwc->usb2_phy)) {
+ ret = PTR_ERR(dwc->usb2_phy);
+ if (ret == -ENXIO || ret == -ENODEV)
+ dwc->usb2_phy = NULL;
+ else
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
+ }
+
+ if (IS_ERR(dwc->usb3_phy)) {
+ ret = PTR_ERR(dwc->usb3_phy);
+ if (ret == -ENXIO || ret == -ENODEV)
+ dwc->usb3_phy = NULL;
+ else
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
+ }
+
+ dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
+ if (IS_ERR(dwc->usb2_generic_phy)) {
+ ret = PTR_ERR(dwc->usb2_generic_phy);
+ if (ret == -ENOSYS || ret == -ENODEV)
+ dwc->usb2_generic_phy = NULL;
+ else
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
+ }
+
+ dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
+ if (IS_ERR(dwc->usb3_generic_phy)) {
+ ret = PTR_ERR(dwc->usb3_generic_phy);
+ if (ret == -ENOSYS || ret == -ENODEV)
+ dwc->usb3_generic_phy = NULL;
+ else
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
+ }
+
+ return 0;
+}
+
+static int dwc3_core_init_mode(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ int ret;
+
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+ if (dwc->usb2_phy)
+ otg_set_vbus(dwc->usb2_phy->otg, false);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
+
+ ret = dwc3_gadget_init(dwc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize gadget\n");
+ break;
+ case USB_DR_MODE_HOST:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+
+ if (dwc->usb2_phy)
+ otg_set_vbus(dwc->usb2_phy->otg, true);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
+
+ ret = dwc3_host_init(dwc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize host\n");
+ break;
+ case USB_DR_MODE_OTG:
+ INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
+ ret = dwc3_drd_init(dwc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
+ break;
+ default:
+ dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dwc3_core_exit_mode(struct dwc3 *dwc)
+{
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ dwc3_gadget_exit(dwc);
+ break;
+ case USB_DR_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc3_drd_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ /* de-assert DRVVBUS for HOST and OTG mode */
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+}
+
+static void dwc3_get_properties(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ u8 lpm_nyet_threshold;
+ u8 tx_de_emphasis;
+ u8 hird_threshold;
+ u8 rx_thr_num_pkt = 0;
+ u8 rx_max_burst = 0;
+ u8 tx_thr_num_pkt = 0;
+ u8 tx_max_burst = 0;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+ u8 tx_max_burst_prd = 0;
+ u8 tx_fifo_resize_max_num;
+ const char *usb_psy_name;
+ int ret;
+
+ /* default to highest possible threshold */
+ lpm_nyet_threshold = 0xf;
+
+ /* default to -3.5dB de-emphasis */
+ tx_de_emphasis = 1;
+
+ /*
+ * default to assert utmi_sleep_n and use maximum allowed HIRD
+ * threshold value of 0b1100
+ */
+ hird_threshold = 12;
+
+ /*
+ * default to a TXFIFO size large enough to fit 6 max packets. This
+ * allows for systems with larger bus latencies to have some headroom
+ * for endpoints that have a large bMaxBurst value.
+ */
+ tx_fifo_resize_max_num = 6;
+
+ dwc->maximum_speed = usb_get_maximum_speed(dev);
+ dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
+ dwc->dr_mode = usb_get_dr_mode(dev);
+ dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
+
+ dwc->sysdev_is_parent = device_property_read_bool(dev,
+ "linux,sysdev_is_parent");
+ if (dwc->sysdev_is_parent)
+ dwc->sysdev = dwc->dev->parent;
+ else
+ dwc->sysdev = dwc->dev;
+
+ ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
+ if (ret >= 0) {
+ dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
+ if (!dwc->usb_psy)
+ dev_err(dev, "couldn't get usb power supply\n");
+ }
+
+ dwc->has_lpm_erratum = device_property_read_bool(dev,
+ "snps,has-lpm-erratum");
+ device_property_read_u8(dev, "snps,lpm-nyet-threshold",
+ &lpm_nyet_threshold);
+ dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
+ "snps,is-utmi-l1-suspend");
+ device_property_read_u8(dev, "snps,hird-threshold",
+ &hird_threshold);
+ dwc->dis_start_transfer_quirk = device_property_read_bool(dev,
+ "snps,dis-start-transfer-quirk");
+ dwc->usb3_lpm_capable = device_property_read_bool(dev,
+ "snps,usb3_lpm_capable");
+ dwc->usb2_lpm_disable = device_property_read_bool(dev,
+ "snps,usb2-lpm-disable");
+ dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+ "snps,usb2-gadget-lpm-disable");
+ device_property_read_u8(dev, "snps,rx-thr-num-pkt",
+ &rx_thr_num_pkt);
+ device_property_read_u8(dev, "snps,rx-max-burst",
+ &rx_max_burst);
+ device_property_read_u8(dev, "snps,tx-thr-num-pkt",
+ &tx_thr_num_pkt);
+ device_property_read_u8(dev, "snps,tx-max-burst",
+ &tx_max_burst);
+ device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ &rx_thr_num_pkt_prd);
+ device_property_read_u8(dev, "snps,rx-max-burst-prd",
+ &rx_max_burst_prd);
+ device_property_read_u8(dev, "snps,tx-thr-num-pkt-prd",
+ &tx_thr_num_pkt_prd);
+ device_property_read_u8(dev, "snps,tx-max-burst-prd",
+ &tx_max_burst_prd);
+ dwc->do_fifo_resize = device_property_read_bool(dev,
+ "tx-fifo-resize");
+ if (dwc->do_fifo_resize)
+ device_property_read_u8(dev, "tx-fifo-max-num",
+ &tx_fifo_resize_max_num);
+
+ dwc->disable_scramble_quirk = device_property_read_bool(dev,
+ "snps,disable_scramble_quirk");
+ dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
+ "snps,u2exit_lfps_quirk");
+ dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
+ "snps,u2ss_inp3_quirk");
+ dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
+ "snps,req_p1p2p3_quirk");
+ dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
+ "snps,del_p1p2p3_quirk");
+ dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
+ "snps,del_phy_power_chg_quirk");
+ dwc->lfps_filter_quirk = device_property_read_bool(dev,
+ "snps,lfps_filter_quirk");
+ dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
+ "snps,rx_detect_poll_quirk");
+ dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
+ "snps,dis_u3_susphy_quirk");
+ dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
+ "snps,dis_u2_susphy_quirk");
+ dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
+ "snps,dis_enblslpm_quirk");
+ dwc->dis_u1_entry_quirk = device_property_read_bool(dev,
+ "snps,dis-u1-entry-quirk");
+ dwc->dis_u2_entry_quirk = device_property_read_bool(dev,
+ "snps,dis-u2-entry-quirk");
+ dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
+ "snps,dis_rxdet_inp3_quirk");
+ dwc->dis_u2_freeclk_exists_quirk = device_property_read_bool(dev,
+ "snps,dis-u2-freeclk-exists-quirk");
+ dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
+ "snps,dis-del-phy-power-chg-quirk");
+ dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
+ "snps,dis-tx-ipgap-linecheck-quirk");
+ dwc->resume_hs_terminations = device_property_read_bool(dev,
+ "snps,resume-hs-terminations");
+ dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
+ "snps,parkmode-disable-ss-quirk");
+ dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
+ "snps,gfladj-refclk-lpm-sel-quirk");
+
+ dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
+ "snps,tx_de_emphasis_quirk");
+ device_property_read_u8(dev, "snps,tx_de_emphasis",
+ &tx_de_emphasis);
+ device_property_read_string(dev, "snps,hsphy_interface",
+ &dwc->hsphy_interface);
+ device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
+ &dwc->fladj);
+ device_property_read_u32(dev, "snps,ref-clock-period-ns",
+ &dwc->ref_clk_per);
+
+ dwc->dis_metastability_quirk = device_property_read_bool(dev,
+ "snps,dis_metastability_quirk");
+
+ dwc->dis_split_quirk = device_property_read_bool(dev,
+ "snps,dis-split-quirk");
+
+ dwc->lpm_nyet_threshold = lpm_nyet_threshold;
+ dwc->tx_de_emphasis = tx_de_emphasis;
+
+ dwc->hird_threshold = hird_threshold;
+
+ dwc->rx_thr_num_pkt = rx_thr_num_pkt;
+ dwc->rx_max_burst = rx_max_burst;
+
+ dwc->tx_thr_num_pkt = tx_thr_num_pkt;
+ dwc->tx_max_burst = tx_max_burst;
+
+ dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
+ dwc->rx_max_burst_prd = rx_max_burst_prd;
+
+ dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
+ dwc->tx_max_burst_prd = tx_max_burst_prd;
+
+ dwc->imod_interval = 0;
+
+ dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
+}
+
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+ return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
+ DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
+ DWC3_IP_IS(DWC32);
+}
+
+static void dwc3_check_params(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ unsigned int hwparam_gen =
+ DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
+
+ /* Check for proper value of imod_interval */
+ if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+ dev_warn(dwc->dev, "Interrupt moderation not supported\n");
+ dwc->imod_interval = 0;
+ }
+
+ /*
+ * Workaround for STAR 9000961433 which affects only version
+ * 3.00a of the DWC_usb3 core. This prevents the controller
+ * interrupt from being masked while handling events. IMOD
+ * allows us to work around this issue. Enable it for the
+ * affected version.
+ */
+ if (!dwc->imod_interval &&
+ DWC3_VER_IS(DWC3, 300A))
+ dwc->imod_interval = 1;
+
+ /* Check the maximum_speed parameter */
+ switch (dwc->maximum_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ break;
+ case USB_SPEED_SUPER:
+ if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
+ dev_warn(dev, "UDC doesn't support Gen 1\n");
+ break;
+ case USB_SPEED_SUPER_PLUS:
+ if ((DWC3_IP_IS(DWC32) &&
+ hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
+ (!DWC3_IP_IS(DWC32) &&
+ hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ dev_warn(dev, "UDC doesn't support SSP\n");
+ break;
+ default:
+ dev_err(dev, "invalid maximum_speed parameter %d\n",
+ dwc->maximum_speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+ switch (hwparam_gen) {
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
+ if (DWC3_IP_IS(DWC32))
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+ else
+ dwc->maximum_speed = USB_SPEED_SUPER;
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
+ dwc->maximum_speed = USB_SPEED_HIGH;
+ break;
+ default:
+ dwc->maximum_speed = USB_SPEED_SUPER;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * Currently the controller does not have visibility into the HW
+ * parameter to determine the maximum number of lanes the HW supports.
+ * If the number of lanes is not specified in the device property, then
+ * set the default to support dual-lane for DWC_usb32 and single-lane
+ * for DWC_usb31 for super-speed-plus.
+ */
+ if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
+ switch (dwc->max_ssp_rate) {
+ case USB_SSP_GEN_2x1:
+ if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
+ dev_warn(dev, "UDC only supports Gen 1\n");
+ break;
+ case USB_SSP_GEN_1x2:
+ case USB_SSP_GEN_2x2:
+ if (DWC3_IP_IS(DWC31))
+ dev_warn(dev, "UDC only supports single lane\n");
+ break;
+ case USB_SSP_GEN_UNKNOWN:
+ default:
+ switch (hwparam_gen) {
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
+ if (DWC3_IP_IS(DWC32))
+ dwc->max_ssp_rate = USB_SSP_GEN_2x2;
+ else
+ dwc->max_ssp_rate = USB_SSP_GEN_2x1;
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
+ if (DWC3_IP_IS(DWC32))
+ dwc->max_ssp_rate = USB_SSP_GEN_1x2;
+ break;
+ }
+ break;
+ }
+ }
+}
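+
+/*
+ * Worked example of the defaults above, for illustration only: on a DWC_usb32
+ * core whose GHWPARAMS3 reports a Gen 1 SuperSpeed PHY interface and with
+ * maximum_speed left at USB_SPEED_UNKNOWN, dwc3_check_params() settles on
+ * USB_SPEED_SUPER_PLUS and, with max_ssp_rate still USB_SSP_GEN_UNKNOWN,
+ * falls back to USB_SSP_GEN_1x2 (dual-lane Gen 1); the same PHY interface on
+ * a DWC_usb31 core yields plain USB_SPEED_SUPER.
+ */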
+
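+/*
+ * dwc3_get_extcon() tries, in order: a devicetree "extcon" phandle, the
+ * ACPI-only "linux,extcon-name" property set by glue code, bailing out when
+ * "usb-role-switch" handles role changes, and finally the remote node of the
+ * USB PHY's OF graph port. Returning NULL is a valid outcome when no extcon
+ * device is involved.
+ */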
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
+ const char *name;
+
+ if (device_property_read_bool(dev, "extcon"))
+ return extcon_get_edev_by_phandle(dev, 0);
+
+ /*
+ * Device tree platforms should get extcon via phandle.
+ * On ACPI platforms, we get the name from a device property.
+ * This device property is for kernel internal use only and
+ * is expected to be set by the glue code.
+ */
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
+ return extcon_get_extcon_dev(name);
+
+ /*
+ * Check explicitly if "usb-role-switch" is used, since
+ * extcon_find_edev_by_node() cannot be used to check the absence of
+ * an extcon device. In the absence of a device it will always return
+ * EPROBE_DEFER.
+ */
+ if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
+ device_property_read_bool(dev, "usb-role-switch"))
+ return NULL;
+
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
+ np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
+
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
+ of_node_put(np_phy);
+
+ return edev;
+}
+
+static int dwc3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res, dwc_res;
+ struct dwc3 *dwc;
+
+ int ret;
+
+ void __iomem *regs;
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "missing memory resource\n");
+ return -ENODEV;
+ }
+
+ dwc->xhci_resources[0].start = res->start;
+ dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
+ DWC3_XHCI_REGS_END;
+ dwc->xhci_resources[0].flags = res->flags;
+ dwc->xhci_resources[0].name = res->name;
+
+ /*
+ * Request memory region but exclude xHCI regs,
+ * since it will be requested by the xhci-plat driver.
+ */
+ dwc_res = *res;
+ dwc_res.start += DWC3_GLOBALS_REGS_START;
+
+ regs = devm_ioremap_resource(dev, &dwc_res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(&dwc_res);
+
+ dwc3_get_properties(dwc);
+
+ dwc->reset = devm_reset_control_array_get_optional_shared(dev);
+ if (IS_ERR(dwc->reset)) {
+ ret = PTR_ERR(dwc->reset);
+ goto put_usb_psy;
+ }
+
+ if (dev->of_node) {
+ /*
+ * Clocks are optional, but new DT platforms should support all
+ * clocks as required by the DT-binding.
+ * Some devices have different clock names in legacy device trees;
+ * check for them to retain backwards compatibility.
+ */
+ dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
+ if (IS_ERR(dwc->bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ goto put_usb_psy;
+ }
+
+ if (dwc->bus_clk == NULL) {
+ dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
+ if (IS_ERR(dwc->bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ goto put_usb_psy;
+ }
+ }
+
+ dwc->ref_clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(dwc->ref_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ goto put_usb_psy;
+ }
+
+ if (dwc->ref_clk == NULL) {
+ dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
+ if (IS_ERR(dwc->ref_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ goto put_usb_psy;
+ }
+ }
+
+ dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
+ if (IS_ERR(dwc->susp_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ goto put_usb_psy;
+ }
+
+ if (dwc->susp_clk == NULL) {
+ dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
+ if (IS_ERR(dwc->susp_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ goto put_usb_psy;
+ }
+ }
+ }
+
+ ret = reset_control_deassert(dwc->reset);
+ if (ret)
+ goto put_usb_psy;
+
+ ret = dwc3_clk_enable(dwc);
+ if (ret)
+ goto assert_reset;
+
+ if (!dwc3_core_is_valid(dwc)) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ ret = -ENODEV;
+ goto disable_clks;
+ }
+
+ platform_set_drvdata(pdev, dwc);
+ dwc3_cache_hwparams(dwc);
+
+ if (!dwc->sysdev_is_parent &&
+ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_clks;
+ }
+
+ spin_lock_init(&dwc->lock);
+ mutex_init(&dwc->mutex);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
+ pm_runtime_enable(dev);
+
+ pm_runtime_forbid(dev);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+ if (ret) {
+ dev_err(dwc->dev, "failed to allocate event buffers\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ dwc->edev = dwc3_get_extcon(dwc);
+ if (IS_ERR(dwc->edev)) {
+ ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n");
+ goto err3;
+ }
+
+ ret = dwc3_get_dr_mode(dwc);
+ if (ret)
+ goto err3;
+
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret)
+ goto err3;
+
+ ret = dwc3_core_init(dwc);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to initialize core\n");
+ goto err4;
+ }
+
+ dwc3_check_params(dwc);
+ dwc3_debugfs_init(dwc);
+
+ ret = dwc3_core_init_mode(dwc);
+ if (ret)
+ goto err5;
+
+ pm_runtime_put(dev);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ return 0;
+
+err5:
+ dwc3_debugfs_exit(dwc);
+ dwc3_event_buffers_cleanup(dwc);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+ dwc3_ulpi_exit(dwc);
+
+err4:
+ dwc3_free_scratch_buffers(dwc);
+
+err3:
+ dwc3_free_event_buffers(dwc);
+
+err2:
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+disable_clks:
+ dwc3_clk_disable(dwc);
+assert_reset:
+ reset_control_assert(dwc->reset);
+put_usb_psy:
+ if (dwc->usb_psy)
+ power_supply_put(dwc->usb_psy);
+
+ return ret;
+}
+
+static int dwc3_remove(struct platform_device *pdev)
+{
+ struct dwc3 *dwc = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ dwc3_core_exit_mode(dwc);
+ dwc3_debugfs_exit(dwc);
+
+ dwc3_core_exit(dwc);
+ dwc3_ulpi_exit(dwc);
+
+ pm_runtime_allow(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ /*
+ * HACK: Clear the driver data, which is currently accessed by parent
+ * glue drivers, before allowing the parent to suspend.
+ */
+ platform_set_drvdata(pdev, NULL);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ dwc3_free_event_buffers(dwc);
+ dwc3_free_scratch_buffers(dwc);
+
+ if (dwc->usb_psy)
+ power_supply_put(dwc->usb_psy);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dwc3_core_init_for_resume(struct dwc3 *dwc)
+{
+ int ret;
+
+ ret = reset_control_deassert(dwc->reset);
+ if (ret)
+ return ret;
+
+ ret = dwc3_clk_enable(dwc);
+ if (ret)
+ goto assert_reset;
+
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ goto disable_clks;
+
+ return 0;
+
+disable_clks:
+ dwc3_clk_disable(dwc);
+assert_reset:
+ reset_control_assert(dwc->reset);
+
+ return ret;
+}
+
+static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+{
+ unsigned long flags;
+ u32 reg;
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ if (pm_runtime_suspended(dwc->dev))
+ break;
+ dwc3_gadget_suspend(dwc);
+ synchronize_irq(dwc->irq_gadget);
+ dwc3_core_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
+ dwc3_core_exit(dwc);
+ break;
+ }
+
+ /* Let the controller suspend the HSPHY before the PHY driver suspends */
+ if (dwc->dis_u2_susphy_quirk ||
+ dwc->dis_enblslpm_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_ENBLSLPM |
+ DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ /* Give some time for USB2 PHY to suspend */
+ usleep_range(5000, 6000);
+ }
+
+ phy_pm_runtime_put_sync(dwc->usb2_generic_phy);
+ phy_pm_runtime_put_sync(dwc->usb3_generic_phy);
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ /* do nothing during runtime_suspend */
+ if (PMSG_IS_AUTO(msg))
+ break;
+
+ if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_suspend(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ synchronize_irq(dwc->irq_gadget);
+ }
+
+ dwc3_otg_exit(dwc);
+ dwc3_core_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+{
+ unsigned long flags;
+ int ret;
+ u32 reg;
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ ret = dwc3_core_init_for_resume(dwc);
+ if (ret)
+ return ret;
+
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_gadget_resume(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
+ ret = dwc3_core_init_for_resume(dwc);
+ if (ret)
+ return ret;
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ break;
+ }
+ /* Restore GUSB2PHYCFG bits that were modified in suspend */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (dwc->dis_u2_susphy_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
+ if (dwc->dis_enblslpm_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ phy_pm_runtime_get_sync(dwc->usb2_generic_phy);
+ phy_pm_runtime_get_sync(dwc->usb3_generic_phy);
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ /* nothing to do on runtime_resume */
+ if (PMSG_IS_AUTO(msg))
+ break;
+
+ ret = dwc3_core_init_for_resume(dwc);
+ if (ret)
+ return ret;
+
+ dwc3_set_prtcap(dwc, dwc->current_dr_role);
+
+ dwc3_otg_init(dwc);
+ if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
+ dwc3_otg_host_init(dwc);
+ } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_resume(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+static int dwc3_runtime_checks(struct dwc3 *dwc)
+{
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ if (dwc->connected)
+ return -EBUSY;
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+static int dwc3_runtime_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
+
+ ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dwc3_runtime_resume(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
+ if (ret)
+ return ret;
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_gadget_process_pending_events(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
+
+static int dwc3_runtime_idle(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
+ if (ret)
+ return ret;
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int dwc3_resume(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = dwc3_resume_common(dwc, PMSG_RESUME);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void dwc3_complete(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ u32 reg;
+
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
+ dwc->dis_split_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
+ reg |= DWC3_GUCTL3_SPLITDISABLE;
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
+ }
+}
+#else
+#define dwc3_complete NULL
+#endif /* CONFIG_PM_SLEEP */
+
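+/*
+ * Note: SET_SYSTEM_SLEEP_PM_OPS() only fills in the suspend/resume style
+ * callbacks, so .complete is assigned by hand below; dwc3_complete() runs in
+ * the complete phase of system resume (never for runtime PM) to restore the
+ * split-boundary disable bit when that quirk is in use.
+ */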
+static const struct dev_pm_ops dwc3_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ .complete = dwc3_complete,
+ SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+ dwc3_runtime_idle)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_dwc3_match[] = {
+ {
+ .compatible = "snps,dwc3"
+ },
+ {
+ .compatible = "synopsys,dwc3"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_match);
+#endif
+
+#ifdef CONFIG_ACPI
+
+#define ACPI_ID_INTEL_BSW "808622B7"
+
+static const struct acpi_device_id dwc3_acpi_match[] = {
+ { ACPI_ID_INTEL_BSW, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
+#endif
+
+static struct platform_driver dwc3_driver = {
+ .probe = dwc3_probe,
+ .remove = dwc3_remove,
+ .driver = {
+ .name = "dwc3",
+ .of_match_table = of_match_ptr(of_dwc3_match),
+ .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
+ .pm = &dwc3_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dwc3_driver);
+
+MODULE_ALIAS("platform:dwc3");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
new file mode 100644
index 000000000..889c122da
--- /dev/null
+++ b/drivers/usb/dwc3/core.h
@@ -0,0 +1,1660 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * core.h - DesignWare USB3 DRD Core Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#ifndef __DRIVERS_USB_DWC3_CORE_H
+#define __DRIVERS_USB_DWC3_CORE_H
+
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+#include <linux/ulpi/interface.h>
+
+#include <linux/phy/phy.h>
+
+#include <linux/power_supply.h>
+
+#define DWC3_MSG_MAX 500
+
+/* Global constants */
+#define DWC3_PULL_UP_TIMEOUT 500 /* ms */
+#define DWC3_BOUNCE_SIZE 1024 /* size of a superspeed bulk */
+#define DWC3_EP0_SETUP_SIZE 512
+#define DWC3_ENDPOINTS_NUM 32
+#define DWC3_XHCI_RESOURCES_NUM 2
+#define DWC3_ISOC_MAX_RETRIES 5
+
+#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */
+#define DWC3_EVENT_BUFFERS_SIZE 4096
+#define DWC3_EVENT_TYPE_MASK 0xfe
+
+#define DWC3_EVENT_TYPE_DEV 0
+#define DWC3_EVENT_TYPE_CARKIT 3
+#define DWC3_EVENT_TYPE_I2C 4
+
+#define DWC3_DEVICE_EVENT_DISCONNECT 0
+#define DWC3_DEVICE_EVENT_RESET 1
+#define DWC3_DEVICE_EVENT_CONNECT_DONE 2
+#define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE 3
+#define DWC3_DEVICE_EVENT_WAKEUP 4
+#define DWC3_DEVICE_EVENT_HIBER_REQ 5
+#define DWC3_DEVICE_EVENT_SUSPEND 6
+#define DWC3_DEVICE_EVENT_SOF 7
+#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9
+#define DWC3_DEVICE_EVENT_CMD_CMPL 10
+#define DWC3_DEVICE_EVENT_OVERFLOW 11
+
+/* Controller's role while using the OTG block */
+#define DWC3_OTG_ROLE_IDLE 0
+#define DWC3_OTG_ROLE_HOST 1
+#define DWC3_OTG_ROLE_DEVICE 2
+
+#define DWC3_GEVNTCOUNT_MASK 0xfffc
+#define DWC3_GEVNTCOUNT_EHB BIT(31)
+#define DWC3_GSNPSID_MASK 0xffff0000
+#define DWC3_GSNPSREV_MASK 0xffff
+#define DWC3_GSNPS_ID(p) (((p) & DWC3_GSNPSID_MASK) >> 16)
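+
+/*
+ * Illustration, derived from the IP and revision constants later in this
+ * header: a GSNPSID value of 0x5533330a gives DWC3_GSNPS_ID() == 0x5533
+ * (DWC_usb3) with revision 0x330a, i.e. release 3.30a; DWC_usb31 and
+ * DWC_usb32 cores report 0x3331 and 0x3332 in the upper half instead.
+ */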
+
+/* DWC3 registers memory space boundaries */
+#define DWC3_XHCI_REGS_START 0x0
+#define DWC3_XHCI_REGS_END 0x7fff
+#define DWC3_GLOBALS_REGS_START 0xc100
+#define DWC3_GLOBALS_REGS_END 0xc6ff
+#define DWC3_DEVICE_REGS_START 0xc700
+#define DWC3_DEVICE_REGS_END 0xcbff
+#define DWC3_OTG_REGS_START 0xcc00
+#define DWC3_OTG_REGS_END 0xccff
+
+/* Global Registers */
+#define DWC3_GSBUSCFG0 0xc100
+#define DWC3_GSBUSCFG1 0xc104
+#define DWC3_GTXTHRCFG 0xc108
+#define DWC3_GRXTHRCFG 0xc10c
+#define DWC3_GCTL 0xc110
+#define DWC3_GEVTEN 0xc114
+#define DWC3_GSTS 0xc118
+#define DWC3_GUCTL1 0xc11c
+#define DWC3_GSNPSID 0xc120
+#define DWC3_GGPIO 0xc124
+#define DWC3_GUID 0xc128
+#define DWC3_GUCTL 0xc12c
+#define DWC3_GBUSERRADDR0 0xc130
+#define DWC3_GBUSERRADDR1 0xc134
+#define DWC3_GPRTBIMAP0 0xc138
+#define DWC3_GPRTBIMAP1 0xc13c
+#define DWC3_GHWPARAMS0 0xc140
+#define DWC3_GHWPARAMS1 0xc144
+#define DWC3_GHWPARAMS2 0xc148
+#define DWC3_GHWPARAMS3 0xc14c
+#define DWC3_GHWPARAMS4 0xc150
+#define DWC3_GHWPARAMS5 0xc154
+#define DWC3_GHWPARAMS6 0xc158
+#define DWC3_GHWPARAMS7 0xc15c
+#define DWC3_GDBGFIFOSPACE 0xc160
+#define DWC3_GDBGLTSSM 0xc164
+#define DWC3_GDBGBMU 0xc16c
+#define DWC3_GDBGLSPMUX 0xc170
+#define DWC3_GDBGLSP 0xc174
+#define DWC3_GDBGEPINFO0 0xc178
+#define DWC3_GDBGEPINFO1 0xc17c
+#define DWC3_GPRTBIMAP_HS0 0xc180
+#define DWC3_GPRTBIMAP_HS1 0xc184
+#define DWC3_GPRTBIMAP_FS0 0xc188
+#define DWC3_GPRTBIMAP_FS1 0xc18c
+#define DWC3_GUCTL2 0xc19c
+
+#define DWC3_VER_NUMBER 0xc1a0
+#define DWC3_VER_TYPE 0xc1a4
+
+#define DWC3_GUSB2PHYCFG(n) (0xc200 + ((n) * 0x04))
+#define DWC3_GUSB2I2CCTL(n) (0xc240 + ((n) * 0x04))
+
+#define DWC3_GUSB2PHYACC(n) (0xc280 + ((n) * 0x04))
+
+#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + ((n) * 0x04))
+
+#define DWC3_GTXFIFOSIZ(n) (0xc300 + ((n) * 0x04))
+#define DWC3_GRXFIFOSIZ(n) (0xc380 + ((n) * 0x04))
+
+#define DWC3_GEVNTADRLO(n) (0xc400 + ((n) * 0x10))
+#define DWC3_GEVNTADRHI(n) (0xc404 + ((n) * 0x10))
+#define DWC3_GEVNTSIZ(n) (0xc408 + ((n) * 0x10))
+#define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
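+
+/*
+ * Rough programming model for the event buffer registers above, as used by
+ * the core driver (a sketch, not a complete description): GEVNTADRLO/HI take
+ * the DMA address of the event buffer, GEVNTSIZ its length (with
+ * DWC3_GEVNTSIZ_INTMASK to mask the interrupt), and GEVNTCOUNT reports how
+ * many event bytes are pending; writing the number of handled bytes back to
+ * GEVNTCOUNT acknowledges them.
+ */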
+
+#define DWC3_GHWPARAMS8 0xc600
+#define DWC3_GUCTL3 0xc60c
+#define DWC3_GFLADJ 0xc630
+#define DWC3_GHWPARAMS9 0xc6e0
+
+/* Device Registers */
+#define DWC3_DCFG 0xc700
+#define DWC3_DCTL 0xc704
+#define DWC3_DEVTEN 0xc708
+#define DWC3_DSTS 0xc70c
+#define DWC3_DGCMDPAR 0xc710
+#define DWC3_DGCMD 0xc714
+#define DWC3_DALEPENA 0xc720
+#define DWC3_DCFG1 0xc740 /* DWC_usb32 only */
+
+#define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10))
+#define DWC3_DEPCMDPAR2 0x00
+#define DWC3_DEPCMDPAR1 0x04
+#define DWC3_DEPCMDPAR0 0x08
+#define DWC3_DEPCMD 0x0c
+
+#define DWC3_DEV_IMOD(n) (0xca00 + ((n) * 0x4))
+
+/* OTG Registers */
+#define DWC3_OCFG 0xcc00
+#define DWC3_OCTL 0xcc04
+#define DWC3_OEVT 0xcc08
+#define DWC3_OEVTEN 0xcc0C
+#define DWC3_OSTS 0xcc10
+
+/* Bit fields */
+
+/* Global SoC Bus Configuration INCRx Register 0 */
+#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
+#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
+#define DWC3_GSBUSCFG0_INCR64BRSTENA (1 << 5) /* INCR64 burst */
+#define DWC3_GSBUSCFG0_INCR32BRSTENA (1 << 4) /* INCR32 burst */
+#define DWC3_GSBUSCFG0_INCR16BRSTENA (1 << 3) /* INCR16 burst */
+#define DWC3_GSBUSCFG0_INCR8BRSTENA (1 << 2) /* INCR8 burst */
+#define DWC3_GSBUSCFG0_INCR4BRSTENA (1 << 1) /* INCR4 burst */
+#define DWC3_GSBUSCFG0_INCRBRSTENA (1 << 0) /* undefined length enable */
+#define DWC3_GSBUSCFG0_INCRBRST_MASK 0xff
+
+/* Global Debug LSP MUX Select */
+#define DWC3_GDBGLSPMUX_ENDBC BIT(15) /* Host only */
+#define DWC3_GDBGLSPMUX_HOSTSELECT(n) ((n) & 0x3fff)
+#define DWC3_GDBGLSPMUX_DEVSELECT(n) (((n) & 0xf) << 4)
+#define DWC3_GDBGLSPMUX_EPSELECT(n) ((n) & 0xf)
+
+/* Global Debug Queue/FIFO Space Available Register */
+#define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f)
+#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
+#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
+
+#define DWC3_TXFIFO 0
+#define DWC3_RXFIFO 1
+#define DWC3_TXREQQ 2
+#define DWC3_RXREQQ 3
+#define DWC3_RXINFOQ 4
+#define DWC3_PSTATQ 5
+#define DWC3_DESCFETCHQ 6
+#define DWC3_EVENTQ 7
+#define DWC3_AUXEVENTQ 8
+
+/* Global RX Threshold Configuration Register */
+#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
+#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+#define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+
+/* Global TX Threshold Configuration Register */
+#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
+#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
+#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
+
+/* Global RX Threshold Configuration Register for DWC_usb31 only */
+#define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16)
+#define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21)
+#define DWC31_GRXTHRCFG_PKTCNTSEL BIT(26)
+#define DWC31_RXTHRNUMPKTSEL_HS_PRD BIT(15)
+#define DWC31_RXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13)
+#define DWC31_RXTHRNUMPKTSEL_PRD BIT(10)
+#define DWC31_RXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5)
+#define DWC31_MAXRXBURSTSIZE_PRD(n) ((n) & 0x1f)
+
+/* Global TX Threshold Configuration Register for DWC_usb31 only */
+#define DWC31_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0x1f) << 16)
+#define DWC31_GTXTHRCFG_TXPKTCNT(n) (((n) & 0x1f) << 21)
+#define DWC31_GTXTHRCFG_PKTCNTSEL BIT(26)
+#define DWC31_TXTHRNUMPKTSEL_HS_PRD BIT(15)
+#define DWC31_TXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13)
+#define DWC31_TXTHRNUMPKTSEL_PRD BIT(10)
+#define DWC31_TXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5)
+#define DWC31_MAXTXBURSTSIZE_PRD(n) ((n) & 0x1f)
+
+/* Global Configuration Register */
+#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19)
+#define DWC3_GCTL_PWRDNSCALE_MASK GENMASK(31, 19)
+#define DWC3_GCTL_U2RSTECN BIT(16)
+#define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6)
+#define DWC3_GCTL_CLK_BUS (0)
+#define DWC3_GCTL_CLK_PIPE (1)
+#define DWC3_GCTL_CLK_PIPEHALF (2)
+#define DWC3_GCTL_CLK_MASK (3)
+
+#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12)
+#define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12)
+#define DWC3_GCTL_PRTCAP_HOST 1
+#define DWC3_GCTL_PRTCAP_DEVICE 2
+#define DWC3_GCTL_PRTCAP_OTG 3
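+
+/*
+ * Minimal sketch of a PRTCAPDIR update (the driver wraps this in
+ * dwc3_set_prtcap()): clear the direction field using the widest value as a
+ * mask, then program the desired role, e.g. host:
+ *
+ *	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ *	reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
+ *	reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_HOST);
+ *	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ */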
+
+#define DWC3_GCTL_CORESOFTRESET BIT(11)
+#define DWC3_GCTL_SOFITPSYNC BIT(10)
+#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4)
+#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3)
+#define DWC3_GCTL_DISSCRAMBLE BIT(3)
+#define DWC3_GCTL_U2EXIT_LFPS BIT(2)
+#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
+#define DWC3_GCTL_DSBLCLKGTNG BIT(0)
+
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT BIT(31)
+#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
+#define DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK BIT(26)
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
+#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
+#define DWC3_GUCTL1_RESUME_OPMODE_HS_HOST BIT(10)
+
+/* Global Status Register */
+#define DWC3_GSTS_OTG_IP BIT(10)
+#define DWC3_GSTS_BC_IP BIT(9)
+#define DWC3_GSTS_ADP_IP BIT(8)
+#define DWC3_GSTS_HOST_IP BIT(7)
+#define DWC3_GSTS_DEVICE_IP BIT(6)
+#define DWC3_GSTS_CSR_TIMEOUT BIT(5)
+#define DWC3_GSTS_BUS_ERR_ADDR_VLD BIT(4)
+#define DWC3_GSTS_CURMOD(n) ((n) & 0x3)
+#define DWC3_GSTS_CURMOD_DEVICE 0
+#define DWC3_GSTS_CURMOD_HOST 1
+
+/* Global USB2 PHY Configuration Register */
+#define DWC3_GUSB2PHYCFG_PHYSOFTRST BIT(31)
+#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS BIT(30)
+#define DWC3_GUSB2PHYCFG_SUSPHY BIT(6)
+#define DWC3_GUSB2PHYCFG_ULPI_UTMI BIT(4)
+#define DWC3_GUSB2PHYCFG_ENBLSLPM BIT(8)
+#define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3)
+#define DWC3_GUSB2PHYCFG_PHYIF_MASK DWC3_GUSB2PHYCFG_PHYIF(1)
+#define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10)
+#define DWC3_GUSB2PHYCFG_USBTRDTIM_MASK DWC3_GUSB2PHYCFG_USBTRDTIM(0xf)
+#define USBTRDTIM_UTMI_8_BIT 9
+#define USBTRDTIM_UTMI_16_BIT 5
+#define UTMI_PHYIF_16_BIT 1
+#define UTMI_PHYIF_8_BIT 0
+
+/* Global USB2 PHY Vendor Control Register */
+#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
+#define DWC3_GUSB2PHYACC_BUSY BIT(23)
+#define DWC3_GUSB2PHYACC_WRITE BIT(22)
+#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
+#define DWC3_GUSB2PHYACC_EXTEND_ADDR(n) (n << 8)
+#define DWC3_GUSB2PHYACC_DATA(n) (n & 0xff)
+
+/* Global USB3 PIPE Control Register */
+#define DWC3_GUSB3PIPECTL_PHYSOFTRST BIT(31)
+#define DWC3_GUSB3PIPECTL_U2SSINP3OK BIT(29)
+#define DWC3_GUSB3PIPECTL_DISRXDETINP3 BIT(28)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_PX BIT(27)
+#define DWC3_GUSB3PIPECTL_REQP1P2P3 BIT(24)
+#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
+#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
+#define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1)
+#define DWC3_GUSB3PIPECTL_DEPOCHANGE BIT(18)
+#define DWC3_GUSB3PIPECTL_SUSPHY BIT(17)
+#define DWC3_GUSB3PIPECTL_LFPSFILT BIT(9)
+#define DWC3_GUSB3PIPECTL_RX_DETOPOLL BIT(8)
+#define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3)
+#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
+
+/* Global TX Fifo Size Register */
+#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
+#define DWC31_GTXFIFOSIZ_TXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */
+#define DWC3_GTXFIFOSIZ_TXFDEP(n) ((n) & 0xffff)
+#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
+
+/* Global RX Fifo Size Register */
+#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */
+#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff)
+
+/* Global Event Size Registers */
+#define DWC3_GEVNTSIZ_INTMASK BIT(31)
+#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
+
+/* Global HWPARAMS0 Register */
+#define DWC3_GHWPARAMS0_MODE(n) ((n) & 0x3)
+#define DWC3_GHWPARAMS0_MODE_GADGET 0
+#define DWC3_GHWPARAMS0_MODE_HOST 1
+#define DWC3_GHWPARAMS0_MODE_DRD 2
+#define DWC3_GHWPARAMS0_MBUS_TYPE(n) (((n) >> 3) & 0x7)
+#define DWC3_GHWPARAMS0_SBUS_TYPE(n) (((n) >> 6) & 0x3)
+#define DWC3_GHWPARAMS0_MDWIDTH(n) (((n) >> 8) & 0xff)
+#define DWC3_GHWPARAMS0_SDWIDTH(n) (((n) >> 16) & 0xff)
+#define DWC3_GHWPARAMS0_AWIDTH(n) (((n) >> 24) & 0xff)
+
+/* Global HWPARAMS1 Register */
+#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24)
+#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0
+#define DWC3_GHWPARAMS1_EN_PWROPT_CLK 1
+#define DWC3_GHWPARAMS1_EN_PWROPT_HIB 2
+#define DWC3_GHWPARAMS1_PWROPT(n) ((n) << 24)
+#define DWC3_GHWPARAMS1_PWROPT_MASK DWC3_GHWPARAMS1_PWROPT(3)
+#define DWC3_GHWPARAMS1_ENDBC BIT(31)
+
+/* Global HWPARAMS3 Register */
+#define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3)
+#define DWC3_GHWPARAMS3_SSPHY_IFC_DIS 0
+#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN1 1
+#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN2 2 /* DWC_usb31 only */
+#define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2)
+#define DWC3_GHWPARAMS3_HSPHY_IFC_DIS 0
+#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI 1
+#define DWC3_GHWPARAMS3_HSPHY_IFC_ULPI 2
+#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI 3
+#define DWC3_GHWPARAMS3_FSPHY_IFC(n) (((n) & (3 << 4)) >> 4)
+#define DWC3_GHWPARAMS3_FSPHY_IFC_DIS 0
+#define DWC3_GHWPARAMS3_FSPHY_IFC_ENA 1
+
+/* Global HWPARAMS4 Register */
+#define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13)
+#define DWC3_MAX_HIBER_SCRATCHBUFS 15
+
+/* Global HWPARAMS6 Register */
+#define DWC3_GHWPARAMS6_BCSUPPORT BIT(14)
+#define DWC3_GHWPARAMS6_OTG3SUPPORT BIT(13)
+#define DWC3_GHWPARAMS6_ADPSUPPORT BIT(12)
+#define DWC3_GHWPARAMS6_HNPSUPPORT BIT(11)
+#define DWC3_GHWPARAMS6_SRPSUPPORT BIT(10)
+#define DWC3_GHWPARAMS6_EN_FPGA BIT(7)
+
+/* DWC_usb32 only */
+#define DWC3_GHWPARAMS6_MDWIDTH(n) ((n) & (0x3 << 8))
+
+/* Global HWPARAMS7 Register */
+#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
+#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
+
+/* Global HWPARAMS9 Register */
+#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS BIT(0)
+#define DWC3_GHWPARAMS9_DEV_MST BIT(1)
+
+/* Global Frame Length Adjustment Register */
+#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
+#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+#define DWC3_GFLADJ_REFCLK_FLADJ_MASK GENMASK(21, 8)
+#define DWC3_GFLADJ_REFCLK_LPM_SEL BIT(23)
+#define DWC3_GFLADJ_240MHZDECR GENMASK(30, 24)
+#define DWC3_GFLADJ_240MHZDECR_PLS1 BIT(31)
+
+/* Global User Control Register*/
+#define DWC3_GUCTL_REFCLKPER_MASK 0xffc00000
+#define DWC3_GUCTL_REFCLKPER_SEL 22
+
+/* Global User Control Register 2 */
+#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
+
+/* Global User Control Register 3 */
+#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
+
+/* Device Configuration Register */
+#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */
+
+#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
+#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
+
+#define DWC3_DCFG_SPEED_MASK (7 << 0)
+#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
+#define DWC3_DCFG_SUPERSPEED (4 << 0)
+#define DWC3_DCFG_HIGHSPEED (0 << 0)
+#define DWC3_DCFG_FULLSPEED BIT(0)
+
+#define DWC3_DCFG_NUMP_SHIFT 17
+#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
+#define DWC3_DCFG_NUMP_MASK (0x1f << DWC3_DCFG_NUMP_SHIFT)
+#define DWC3_DCFG_LPM_CAP BIT(22)
+#define DWC3_DCFG_IGNSTRMPP BIT(23)
+
+/* Device Control Register */
+#define DWC3_DCTL_RUN_STOP BIT(31)
+#define DWC3_DCTL_CSFTRST BIT(30)
+#define DWC3_DCTL_LSFTRST BIT(29)
+
+#define DWC3_DCTL_HIRD_THRES_MASK (0x1f << 24)
+#define DWC3_DCTL_HIRD_THRES(n) ((n) << 24)
+
+#define DWC3_DCTL_APPL1RES BIT(23)
+
+/* These apply for core versions 1.87a and earlier */
+#define DWC3_DCTL_TRGTULST_MASK (0x0f << 17)
+#define DWC3_DCTL_TRGTULST(n) ((n) << 17)
+#define DWC3_DCTL_TRGTULST_U2 (DWC3_DCTL_TRGTULST(2))
+#define DWC3_DCTL_TRGTULST_U3 (DWC3_DCTL_TRGTULST(3))
+#define DWC3_DCTL_TRGTULST_SS_DIS (DWC3_DCTL_TRGTULST(4))
+#define DWC3_DCTL_TRGTULST_RX_DET (DWC3_DCTL_TRGTULST(5))
+#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6))
+
+/* These apply for core versions 1.94a and later */
+#define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20)
+
+#define DWC3_DCTL_KEEP_CONNECT BIT(19)
+#define DWC3_DCTL_L1_HIBER_EN BIT(18)
+#define DWC3_DCTL_CRS BIT(17)
+#define DWC3_DCTL_CSS BIT(16)
+
+#define DWC3_DCTL_INITU2ENA BIT(12)
+#define DWC3_DCTL_ACCEPTU2ENA BIT(11)
+#define DWC3_DCTL_INITU1ENA BIT(10)
+#define DWC3_DCTL_ACCEPTU1ENA BIT(9)
+#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1)
+
+#define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5)
+#define DWC3_DCTL_ULSTCHNGREQ(n) (((n) << 5) & DWC3_DCTL_ULSTCHNGREQ_MASK)
+
+#define DWC3_DCTL_ULSTCHNG_NO_ACTION (DWC3_DCTL_ULSTCHNGREQ(0))
+#define DWC3_DCTL_ULSTCHNG_SS_DISABLED (DWC3_DCTL_ULSTCHNGREQ(4))
+#define DWC3_DCTL_ULSTCHNG_RX_DETECT (DWC3_DCTL_ULSTCHNGREQ(5))
+#define DWC3_DCTL_ULSTCHNG_SS_INACTIVE (DWC3_DCTL_ULSTCHNGREQ(6))
+#define DWC3_DCTL_ULSTCHNG_RECOVERY (DWC3_DCTL_ULSTCHNGREQ(8))
+#define DWC3_DCTL_ULSTCHNG_COMPLIANCE (DWC3_DCTL_ULSTCHNGREQ(10))
+#define DWC3_DCTL_ULSTCHNG_LOOPBACK (DWC3_DCTL_ULSTCHNGREQ(11))
+
+/* Device Event Enable Register */
+#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN BIT(12)
+#define DWC3_DEVTEN_EVNTOVERFLOWEN BIT(11)
+#define DWC3_DEVTEN_CMDCMPLTEN BIT(10)
+#define DWC3_DEVTEN_ERRTICERREN BIT(9)
+#define DWC3_DEVTEN_SOFEN BIT(7)
+#define DWC3_DEVTEN_U3L2L1SUSPEN BIT(6)
+#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5)
+#define DWC3_DEVTEN_WKUPEVTEN BIT(4)
+#define DWC3_DEVTEN_ULSTCNGEN BIT(3)
+#define DWC3_DEVTEN_CONNECTDONEEN BIT(2)
+#define DWC3_DEVTEN_USBRSTEN BIT(1)
+#define DWC3_DEVTEN_DISCONNEVTEN BIT(0)
+
+#define DWC3_DSTS_CONNLANES(n) (((n) >> 30) & 0x3) /* DWC_usb32 only */
+
+/* Device Status Register */
+#define DWC3_DSTS_DCNRD BIT(29)
+
+/* This applies for core versions 1.87a and earlier */
+#define DWC3_DSTS_PWRUPREQ BIT(24)
+
+/* These apply for core versions 1.94a and later */
+#define DWC3_DSTS_RSS BIT(25)
+#define DWC3_DSTS_SSS BIT(24)
+
+#define DWC3_DSTS_COREIDLE BIT(23)
+#define DWC3_DSTS_DEVCTRLHLT BIT(22)
+
+#define DWC3_DSTS_USBLNKST_MASK (0x0f << 18)
+#define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18)
+
+#define DWC3_DSTS_RXFIFOEMPTY BIT(17)
+
+#define DWC3_DSTS_SOFFN_MASK (0x3fff << 3)
+#define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3)
+
+#define DWC3_DSTS_CONNECTSPD (7 << 0)
+
+#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
+#define DWC3_DSTS_SUPERSPEED (4 << 0)
+#define DWC3_DSTS_HIGHSPEED (0 << 0)
+#define DWC3_DSTS_FULLSPEED BIT(0)
+
+/* Device Generic Command Register */
+#define DWC3_DGCMD_SET_LMP 0x01
+#define DWC3_DGCMD_SET_PERIODIC_PAR 0x02
+#define DWC3_DGCMD_XMIT_FUNCTION 0x03
+
+/* These apply for core versions 1.94a and later */
+#define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO 0x04
+#define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI 0x05
+
+#define DWC3_DGCMD_SELECTED_FIFO_FLUSH 0x09
+#define DWC3_DGCMD_ALL_FIFO_FLUSH 0x0a
+#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
+#define DWC3_DGCMD_SET_ENDPOINT_PRIME 0x0d
+#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
+
+#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
+#define DWC3_DGCMD_CMDACT BIT(10)
+#define DWC3_DGCMD_CMDIOC BIT(8)
+
+/* Device Generic Command Parameter Register */
+#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT BIT(0)
+#define DWC3_DGCMDPAR_FIFO_NUM(n) ((n) << 0)
+#define DWC3_DGCMDPAR_RX_FIFO (0 << 5)
+#define DWC3_DGCMDPAR_TX_FIFO BIT(5)
+#define DWC3_DGCMDPAR_LOOPBACK_DIS (0 << 0)
+#define DWC3_DGCMDPAR_LOOPBACK_ENA BIT(0)
+
+/* Device Endpoint Command Register */
+#define DWC3_DEPCMD_PARAM_SHIFT 16
+#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
+#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
+#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
+#define DWC3_DEPCMD_HIPRI_FORCERM BIT(11)
+#define DWC3_DEPCMD_CLEARPENDIN BIT(11)
+#define DWC3_DEPCMD_CMDACT BIT(10)
+#define DWC3_DEPCMD_CMDIOC BIT(8)
+
+#define DWC3_DEPCMD_DEPSTARTCFG (0x09 << 0)
+#define DWC3_DEPCMD_ENDTRANSFER (0x08 << 0)
+#define DWC3_DEPCMD_UPDATETRANSFER (0x07 << 0)
+#define DWC3_DEPCMD_STARTTRANSFER (0x06 << 0)
+#define DWC3_DEPCMD_CLEARSTALL (0x05 << 0)
+#define DWC3_DEPCMD_SETSTALL (0x04 << 0)
+/* This applies for core versions 1.90a and earlier */
+#define DWC3_DEPCMD_GETSEQNUMBER (0x03 << 0)
+/* This applies for core versions 1.94a and later */
+#define DWC3_DEPCMD_GETEPSTATE (0x03 << 0)
+#define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0)
+#define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0)
+
+#define DWC3_DEPCMD_CMD(x) ((x) & 0xf)
+
+/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
+#define DWC3_DALEPENA_EP(n) BIT(n)
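+
+/*
+ * Example, assuming the usual physical numbering of
+ * (usb endpoint number << 1) | direction: ep1 IN is physical endpoint 3, so
+ * enabling it sets DWC3_DALEPENA_EP(3), i.e. BIT(3); ep0 OUT and ep0 IN
+ * always occupy bits 0 and 1.
+ */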
+
+/* DWC_usb32 DCFG1 config */
+#define DWC3_DCFG1_DIS_MST_ENH BIT(1)
+
+#define DWC3_DEPCMD_TYPE_CONTROL 0
+#define DWC3_DEPCMD_TYPE_ISOC 1
+#define DWC3_DEPCMD_TYPE_BULK 2
+#define DWC3_DEPCMD_TYPE_INTR 3
+
+#define DWC3_DEV_IMOD_COUNT_SHIFT 16
+#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
+/* OTG Configuration Register */
+#define DWC3_OCFG_DISPWRCUTTOFF BIT(5)
+#define DWC3_OCFG_HIBDISMASK BIT(4)
+#define DWC3_OCFG_SFTRSTMASK BIT(3)
+#define DWC3_OCFG_OTGVERSION BIT(2)
+#define DWC3_OCFG_HNPCAP BIT(1)
+#define DWC3_OCFG_SRPCAP BIT(0)
+
+/* OTG CTL Register */
+#define DWC3_OCTL_OTG3GOERR BIT(7)
+#define DWC3_OCTL_PERIMODE BIT(6)
+#define DWC3_OCTL_PRTPWRCTL BIT(5)
+#define DWC3_OCTL_HNPREQ BIT(4)
+#define DWC3_OCTL_SESREQ BIT(3)
+#define DWC3_OCTL_TERMSELIDPULSE BIT(2)
+#define DWC3_OCTL_DEVSETHNPEN BIT(1)
+#define DWC3_OCTL_HSTSETHNPEN BIT(0)
+
+/* OTG Event Register */
+#define DWC3_OEVT_DEVICEMODE BIT(31)
+#define DWC3_OEVT_XHCIRUNSTPSET BIT(27)
+#define DWC3_OEVT_DEVRUNSTPSET BIT(26)
+#define DWC3_OEVT_HIBENTRY BIT(25)
+#define DWC3_OEVT_CONIDSTSCHNG BIT(24)
+#define DWC3_OEVT_HRRCONFNOTIF BIT(23)
+#define DWC3_OEVT_HRRINITNOTIF BIT(22)
+#define DWC3_OEVT_ADEVIDLE BIT(21)
+#define DWC3_OEVT_ADEVBHOSTEND BIT(20)
+#define DWC3_OEVT_ADEVHOST BIT(19)
+#define DWC3_OEVT_ADEVHNPCHNG BIT(18)
+#define DWC3_OEVT_ADEVSRPDET BIT(17)
+#define DWC3_OEVT_ADEVSESSENDDET BIT(16)
+#define DWC3_OEVT_BDEVBHOSTEND BIT(11)
+#define DWC3_OEVT_BDEVHNPCHNG BIT(10)
+#define DWC3_OEVT_BDEVSESSVLDDET BIT(9)
+#define DWC3_OEVT_BDEVVBUSCHNG BIT(8)
+#define DWC3_OEVT_BSESSVLD BIT(3)
+#define DWC3_OEVT_HSTNEGSTS BIT(2)
+#define DWC3_OEVT_SESREQSTS BIT(1)
+#define DWC3_OEVT_ERROR BIT(0)
+
+/* OTG Event Enable Register */
+#define DWC3_OEVTEN_XHCIRUNSTPSETEN BIT(27)
+#define DWC3_OEVTEN_DEVRUNSTPSETEN BIT(26)
+#define DWC3_OEVTEN_HIBENTRYEN BIT(25)
+#define DWC3_OEVTEN_CONIDSTSCHNGEN BIT(24)
+#define DWC3_OEVTEN_HRRCONFNOTIFEN BIT(23)
+#define DWC3_OEVTEN_HRRINITNOTIFEN BIT(22)
+#define DWC3_OEVTEN_ADEVIDLEEN BIT(21)
+#define DWC3_OEVTEN_ADEVBHOSTENDEN BIT(20)
+#define DWC3_OEVTEN_ADEVHOSTEN BIT(19)
+#define DWC3_OEVTEN_ADEVHNPCHNGEN BIT(18)
+#define DWC3_OEVTEN_ADEVSRPDETEN BIT(17)
+#define DWC3_OEVTEN_ADEVSESSENDDETEN BIT(16)
+#define DWC3_OEVTEN_BDEVBHOSTENDEN BIT(11)
+#define DWC3_OEVTEN_BDEVHNPCHNGEN BIT(10)
+#define DWC3_OEVTEN_BDEVSESSVLDDETEN BIT(9)
+#define DWC3_OEVTEN_BDEVVBUSCHNGEN BIT(8)
+
+/* OTG Status Register */
+#define DWC3_OSTS_DEVRUNSTP BIT(13)
+#define DWC3_OSTS_XHCIRUNSTP BIT(12)
+#define DWC3_OSTS_PERIPHERALSTATE BIT(4)
+#define DWC3_OSTS_XHCIPRTPOWER BIT(3)
+#define DWC3_OSTS_BSESVLD BIT(2)
+#define DWC3_OSTS_VBUSVLD BIT(1)
+#define DWC3_OSTS_CONIDSTS BIT(0)
+
+/* Structures */
+
+struct dwc3_trb;
+
+/**
+ * struct dwc3_event_buffer - Software event buffer representation
+ * @buf: _THE_ buffer
+ * @cache: The buffer cache used in the threaded interrupt
+ * @length: size of this buffer
+ * @lpos: event offset
+ * @count: cache of last read event count register
+ * @flags: flags related to this event buffer
+ * @dma: DMA address of @buf
+ * @dwc: pointer to DWC controller
+ */
+struct dwc3_event_buffer {
+ void *buf;
+ void *cache;
+ unsigned int length;
+ unsigned int lpos;
+ unsigned int count;
+ unsigned int flags;
+
+#define DWC3_EVENT_PENDING BIT(0)
+
+ dma_addr_t dma;
+
+ struct dwc3 *dwc;
+};
+
+#define DWC3_EP_FLAG_STALLED BIT(0)
+#define DWC3_EP_FLAG_WEDGED BIT(1)
+
+#define DWC3_EP_DIRECTION_TX true
+#define DWC3_EP_DIRECTION_RX false
+
+#define DWC3_TRB_NUM 256
+
+/**
+ * struct dwc3_ep - device side endpoint representation
+ * @endpoint: usb endpoint
+ * @cancelled_list: list of cancelled requests for this endpoint
+ * @pending_list: list of pending requests for this endpoint
+ * @started_list: list of started requests on this endpoint
+ * @regs: pointer to first endpoint register
+ * @trb_pool: array of transfer request blocks (TRBs)
+ * @trb_pool_dma: dma address of @trb_pool
+ * @trb_enqueue: enqueue 'pointer' into TRB array
+ * @trb_dequeue: dequeue 'pointer' into TRB array
+ * @dwc: pointer to DWC controller
+ * @saved_state: ep state saved during hibernation
+ * @flags: endpoint flags (wedged, stalled, ...)
+ * @number: endpoint number (1 - 15)
+ * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
+ * @resource_index: Resource transfer index
+ * @frame_number: set to the frame number we want this transfer to start (ISOC)
+ * @interval: the interval on which the ISOC transfer is started
+ * @name: a human readable name e.g. ep1out-bulk
+ * @direction: true for TX, false for RX
+ * @stream_capable: true when streams are enabled
+ * @combo_num: the test combination BIT[15:14] of the frame number to test
+ * isochronous START TRANSFER command failure workaround
+ * @start_cmd_status: the status of testing START TRANSFER command with
+ * combo_num = 'b00
+ */
+struct dwc3_ep {
+ struct usb_ep endpoint;
+ struct list_head cancelled_list;
+ struct list_head pending_list;
+ struct list_head started_list;
+
+ void __iomem *regs;
+
+ struct dwc3_trb *trb_pool;
+ dma_addr_t trb_pool_dma;
+ struct dwc3 *dwc;
+
+ u32 saved_state;
+ unsigned int flags;
+#define DWC3_EP_ENABLED BIT(0)
+#define DWC3_EP_STALL BIT(1)
+#define DWC3_EP_WEDGE BIT(2)
+#define DWC3_EP_TRANSFER_STARTED BIT(3)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
+#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_DELAY_START BIT(6)
+#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7)
+#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
+#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
+#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
+#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
+#define DWC3_EP_TXFIFO_RESIZED BIT(12)
+#define DWC3_EP_DELAY_STOP BIT(13)
+
+ /* This last one is specific to EP0 */
+#define DWC3_EP0_DIR_IN BIT(31)
+
+ /*
+ * IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will
+ * use a u8 type here. If anybody decides to increase the number of TRBs
+ * to anything larger than 256 - I can't see why people would want to do
+ * this though - then this type needs to be changed.
+ *
+ * By using u8 types we ensure that our % operator when incrementing
+ * enqueue and dequeue get optimized away by the compiler.
+ */
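+ /*
+ * For illustration: with DWC3_TRB_NUM == 256, "dep->trb_enqueue++"
+ * wraps from 255 back to 0 through plain u8 arithmetic, which is
+ * equivalent to "(i + 1) % DWC3_TRB_NUM" without the modulo.
+ */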
+ u8 trb_enqueue;
+ u8 trb_dequeue;
+
+ u8 number;
+ u8 type;
+ u8 resource_index;
+ u32 frame_number;
+ u32 interval;
+
+ char name[20];
+
+ unsigned direction:1;
+ unsigned stream_capable:1;
+
+ /* For isochronous START TRANSFER workaround only */
+ u8 combo_num;
+ int start_cmd_status;
+};
+
+enum dwc3_phy {
+ DWC3_PHY_UNKNOWN = 0,
+ DWC3_PHY_USB3,
+ DWC3_PHY_USB2,
+};
+
+enum dwc3_ep0_next {
+ DWC3_EP0_UNKNOWN = 0,
+ DWC3_EP0_COMPLETE,
+ DWC3_EP0_NRDY_DATA,
+ DWC3_EP0_NRDY_STATUS,
+};
+
+enum dwc3_ep0_state {
+ EP0_UNCONNECTED = 0,
+ EP0_SETUP_PHASE,
+ EP0_DATA_PHASE,
+ EP0_STATUS_PHASE,
+};
+
+enum dwc3_link_state {
+ /* In SuperSpeed */
+ DWC3_LINK_STATE_U0 = 0x00, /* in HS, means ON */
+ DWC3_LINK_STATE_U1 = 0x01,
+ DWC3_LINK_STATE_U2 = 0x02, /* in HS, means SLEEP */
+ DWC3_LINK_STATE_U3 = 0x03, /* in HS, means SUSPEND */
+ DWC3_LINK_STATE_SS_DIS = 0x04,
+ DWC3_LINK_STATE_RX_DET = 0x05, /* in HS, means Early Suspend */
+ DWC3_LINK_STATE_SS_INACT = 0x06,
+ DWC3_LINK_STATE_POLL = 0x07,
+ DWC3_LINK_STATE_RECOV = 0x08,
+ DWC3_LINK_STATE_HRESET = 0x09,
+ DWC3_LINK_STATE_CMPLY = 0x0a,
+ DWC3_LINK_STATE_LPBK = 0x0b,
+ DWC3_LINK_STATE_RESET = 0x0e,
+ DWC3_LINK_STATE_RESUME = 0x0f,
+ DWC3_LINK_STATE_MASK = 0x0f,
+};
+
+/* TRB Length, PCM and Status */
+#define DWC3_TRB_SIZE_MASK (0x00ffffff)
+#define DWC3_TRB_SIZE_LENGTH(n) ((n) & DWC3_TRB_SIZE_MASK)
+#define DWC3_TRB_SIZE_PCM1(n) (((n) & 0x03) << 24)
+#define DWC3_TRB_SIZE_TRBSTS(n) (((n) & (0x0f << 28)) >> 28)
+
+#define DWC3_TRBSTS_OK 0
+#define DWC3_TRBSTS_MISSED_ISOC 1
+#define DWC3_TRBSTS_SETUP_PENDING 2
+#define DWC3_TRB_STS_XFER_IN_PROG 4
+
+/* TRB Control */
+#define DWC3_TRB_CTRL_HWO BIT(0)
+#define DWC3_TRB_CTRL_LST BIT(1)
+#define DWC3_TRB_CTRL_CHN BIT(2)
+#define DWC3_TRB_CTRL_CSP BIT(3)
+#define DWC3_TRB_CTRL_TRBCTL(n) (((n) & 0x3f) << 4)
+#define DWC3_TRB_CTRL_ISP_IMI BIT(10)
+#define DWC3_TRB_CTRL_IOC BIT(11)
+#define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14)
+#define DWC3_TRB_CTRL_GET_SID_SOFN(n) (((n) & (0xffff << 14)) >> 14)
+
+#define DWC3_TRBCTL_TYPE(n) ((n) & (0x3f << 4))
+#define DWC3_TRBCTL_NORMAL DWC3_TRB_CTRL_TRBCTL(1)
+#define DWC3_TRBCTL_CONTROL_SETUP DWC3_TRB_CTRL_TRBCTL(2)
+#define DWC3_TRBCTL_CONTROL_STATUS2 DWC3_TRB_CTRL_TRBCTL(3)
+#define DWC3_TRBCTL_CONTROL_STATUS3 DWC3_TRB_CTRL_TRBCTL(4)
+#define DWC3_TRBCTL_CONTROL_DATA DWC3_TRB_CTRL_TRBCTL(5)
+#define DWC3_TRBCTL_ISOCHRONOUS_FIRST DWC3_TRB_CTRL_TRBCTL(6)
+#define DWC3_TRBCTL_ISOCHRONOUS DWC3_TRB_CTRL_TRBCTL(7)
+#define DWC3_TRBCTL_LINK_TRB DWC3_TRB_CTRL_TRBCTL(8)
+
+/**
+ * struct dwc3_trb - transfer request block (hw format)
+ * @bpl: DW0-3
+ * @bph: DW4-7
+ * @size: DW8-B
+ * @ctrl: DWC-F
+ */
+struct dwc3_trb {
+ u32 bpl;
+ u32 bph;
+ u32 size;
+ u32 ctrl;
+} __packed;
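+
+/*
+ * A TRB's buffer pointer is a 64-bit DMA address split across @bpl and @bph.
+ * Minimal sketch of filling one, assuming the usual lower/upper split used by
+ * the gadget code:
+ *
+ *	trb->bpl = lower_32_bits(dma);
+ *	trb->bph = upper_32_bits(dma);
+ *	trb->size = DWC3_TRB_SIZE_LENGTH(length);
+ *	trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO;
+ */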
+
+/**
+ * struct dwc3_hwparams - copy of HWPARAMS registers
+ * @hwparams0: GHWPARAMS0
+ * @hwparams1: GHWPARAMS1
+ * @hwparams2: GHWPARAMS2
+ * @hwparams3: GHWPARAMS3
+ * @hwparams4: GHWPARAMS4
+ * @hwparams5: GHWPARAMS5
+ * @hwparams6: GHWPARAMS6
+ * @hwparams7: GHWPARAMS7
+ * @hwparams8: GHWPARAMS8
+ * @hwparams9: GHWPARAMS9
+ */
+struct dwc3_hwparams {
+ u32 hwparams0;
+ u32 hwparams1;
+ u32 hwparams2;
+ u32 hwparams3;
+ u32 hwparams4;
+ u32 hwparams5;
+ u32 hwparams6;
+ u32 hwparams7;
+ u32 hwparams8;
+ u32 hwparams9;
+};
+
+/* HWPARAMS0 */
+#define DWC3_MODE(n) ((n) & 0x7)
+
+/* HWPARAMS1 */
+#define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15)
+
+/* HWPARAMS3 */
+#define DWC3_NUM_IN_EPS_MASK (0x1f << 18)
+#define DWC3_NUM_EPS_MASK (0x3f << 12)
+#define DWC3_NUM_EPS(p) (((p)->hwparams3 & \
+ (DWC3_NUM_EPS_MASK)) >> 12)
+#define DWC3_NUM_IN_EPS(p) (((p)->hwparams3 & \
+ (DWC3_NUM_IN_EPS_MASK)) >> 18)
+
+/* HWPARAMS7 */
+#define DWC3_RAM1_DEPTH(n) ((n) & 0xffff)
+
+/* HWPARAMS9 */
+#define DWC3_MST_CAPABLE(p) (!!((p)->hwparams9 & \
+ DWC3_GHWPARAMS9_DEV_MST))
+
+/**
+ * struct dwc3_request - representation of a transfer request
+ * @request: struct usb_request to be transferred
+ * @list: a list_head used for request queueing
+ * @dep: struct dwc3_ep owning this request
+ * @sg: pointer to first incomplete sg
+ * @start_sg: pointer to the sg which should be queued next
+ * @num_pending_sgs: counter of pending sgs
+ * @num_queued_sgs: counter of sgs which have already been queued
+ * @remaining: amount of data remaining
+ * @status: internal dwc3 request status tracking
+ * @epnum: endpoint number to which this request refers
+ * @trb: pointer to struct dwc3_trb
+ * @trb_dma: DMA address of @trb
+ * @num_trbs: number of TRBs used by this request
+ * @needs_extra_trb: true when request needs one extra TRB (either due to ZLP
+ * or unaligned OUT)
+ * @direction: IN or OUT direction flag
+ * @mapped: true when request has been dma-mapped
+ */
+struct dwc3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct dwc3_ep *dep;
+ struct scatterlist *sg;
+ struct scatterlist *start_sg;
+
+ unsigned int num_pending_sgs;
+ unsigned int num_queued_sgs;
+ unsigned int remaining;
+
+ unsigned int status;
+#define DWC3_REQUEST_STATUS_QUEUED 0
+#define DWC3_REQUEST_STATUS_STARTED 1
+#define DWC3_REQUEST_STATUS_DISCONNECTED 2
+#define DWC3_REQUEST_STATUS_DEQUEUED 3
+#define DWC3_REQUEST_STATUS_STALLED 4
+#define DWC3_REQUEST_STATUS_COMPLETED 5
+#define DWC3_REQUEST_STATUS_UNKNOWN -1
+
+ u8 epnum;
+ struct dwc3_trb *trb;
+ dma_addr_t trb_dma;
+
+ unsigned int num_trbs;
+
+ unsigned int needs_extra_trb:1;
+ unsigned int direction:1;
+ unsigned int mapped:1;
+};
+
+/*
+ * struct dwc3_scratchpad_array - hibernation scratchpad array
+ * (format defined by hw)
+ */
+struct dwc3_scratchpad_array {
+ __le64 dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS];
+};
+
+/**
+ * struct dwc3 - representation of our controller
+ * @drd_work: workqueue used for role swapping
+ * @ep0_trb: trb which is used for the ctrl_req
+ * @bounce: address of bounce buffer
+ * @scratchbuf: address of scratch buffer
+ * @setup_buf: used while processing STD USB requests
+ * @ep0_trb_addr: dma address of @ep0_trb
+ * @bounce_addr: dma address of @bounce
+ * @ep0_usb_req: dummy req used while handling STD USB requests
+ * @scratch_addr: dma address of scratchbuf
+ * @ep0_in_setup: completion signalled once a control transfer completes and
+ * EP0 enters the setup phase
+ * @lock: for synchronizing
+ * @mutex: for mode switching
+ * @dev: pointer to our struct device
+ * @sysdev: pointer to the DMA-capable device
+ * @xhci: pointer to our xHCI child
+ * @xhci_resources: struct resources for our @xhci child
+ * @ev_buf: struct dwc3_event_buffer pointer
+ * @eps: endpoint array
+ * @gadget: device side representation of the peripheral controller
+ * @gadget_driver: pointer to the gadget driver
+ * @bus_clk: clock for accessing the registers
+ * @ref_clk: reference clock
+ * @susp_clk: clock used when the SS phy is in low power (P3) state
+ * @reset: reset control
+ * @regs: base address for our registers
+ * @regs_size: address space size
+ * @fladj: frame length adjustment
+ * @ref_clk_per: reference clock period configuration
+ * @irq_gadget: peripheral controller's IRQ number
+ * @otg_irq: IRQ number for OTG IRQs
+ * @current_otg_role: current role of operation while using the OTG block
+ * @desired_otg_role: desired role of operation while using the OTG block
+ * @otg_restart_host: flag that OTG controller needs to restart host
+ * @nr_scratch: number of scratch buffers
+ * @u1u2: only used on revisions <1.83a for workaround
+ * @maximum_speed: maximum speed requested (mainly for testing purposes)
+ * @max_ssp_rate: SuperSpeed Plus maximum signaling rate and lane count
+ * @gadget_max_speed: maximum gadget speed requested
+ * @gadget_ssp_rate: Gadget driver's maximum supported SuperSpeed Plus signaling
+ * rate and lane count.
+ * @ip: controller's ID
+ * @revision: controller's version of an IP
+ * @version_type: VERSIONTYPE register contents, a sub release of a revision
+ * @dr_mode: requested mode of operation
+ * @current_dr_role: current role of operation when in dual-role mode
+ * @desired_dr_role: desired role of operation when in dual-role mode
+ * @edev: extcon handle
+ * @edev_nb: extcon notifier
+ * @hsphy_mode: UTMI phy mode, one of following:
+ * - USBPHY_INTERFACE_MODE_UTMI
+ * - USBPHY_INTERFACE_MODE_UTMIW
+ * @role_sw: usb_role_switch handle
+ * @role_switch_default_mode: default operation mode of controller while
+ * usb role is USB_ROLE_NONE.
+ * @usb_psy: pointer to power supply interface.
+ * @usb2_phy: pointer to USB2 PHY
+ * @usb3_phy: pointer to USB3 PHY
+ * @usb2_generic_phy: pointer to USB2 PHY
+ * @usb3_generic_phy: pointer to USB3 PHY
+ * @phys_ready: flag to indicate that PHYs are ready
+ * @ulpi: pointer to ulpi interface
+ * @ulpi_ready: flag to indicate that ULPI is initialized
+ * @u2sel: parameter from Set SEL request.
+ * @u2pel: parameter from Set SEL request.
+ * @u1sel: parameter from Set SEL request.
+ * @u1pel: parameter from Set SEL request.
+ * @num_eps: number of endpoints
+ * @ep0_next_event: hold the next expected event
+ * @ep0state: state of endpoint zero
+ * @link_state: link state
+ * @speed: device speed (super, high, full, low)
+ * @hwparams: copy of hwparams registers
+ * @regset: debugfs pointer to regdump file
+ * @dbg_lsp_select: current debug lsp mux register selection
+ * @test_mode: true when we're entering a USB test mode
+ * @test_mode_nr: test feature selector
+ * @lpm_nyet_threshold: LPM NYET response threshold
+ * @hird_threshold: HIRD threshold
+ * @rx_thr_num_pkt: USB receive packet count
+ * @rx_max_burst: max USB receive burst size
+ * @tx_thr_num_pkt: USB transmit packet count
+ * @tx_max_burst: max USB transmit burst size
+ * @rx_thr_num_pkt_prd: periodic ESS receive packet count
+ * @rx_max_burst_prd: max periodic ESS receive burst size
+ * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
+ * @tx_max_burst_prd: max periodic ESS transmit burst size
+ * @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
+ * @clear_stall_protocol: endpoint number that requires a delayed status phase
+ * @hsphy_interface: "utmi" or "ulpi"
+ * @connected: true when we're connected to a host, false otherwise
+ * @softconnect: true when gadget connect is called, false when disconnect runs
+ * @delayed_status: true when gadget driver asks for delayed status
+ * @ep0_bounced: true when we used bounce buffer
+ * @ep0_expect_in: true when we expect a DATA IN transfer
+ * @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when dwc3 device has a parent driver
+ * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
+ * there's no way for software to detect this at runtime.
+ * @is_utmi_l1_suspend: the core asserts output signal
+ * 0 - utmi_sleep_n
+ * 1 - utmi_l1_suspend_n
+ * @is_fpga: true when we are using the FPGA board
+ * @pending_events: true when we have pending IRQs to be handled
+ * @do_fifo_resize: true when txfifo resizing is enabled for dwc3 endpoints
+ * @pullups_connected: true when Run/Stop bit is set
+ * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
+ * @three_stage_setup: set if we perform a three phase setup
+ * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
+ * not needed for DWC_usb31 version 1.70a-ea06 and below
+ * @usb3_lpm_capable: set if hardware supports Link Power Management
+ * @usb2_lpm_disable: set to disable usb2 lpm for host
+ * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
+ * @disable_scramble_quirk: set if we enable the disable scramble quirk
+ * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
+ * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
+ * @req_p1p2p3_quirk: set if we enable request p1p2p3 quirk
+ * @del_p1p2p3_quirk: set if we enable delay p1p2p3 quirk
+ * @del_phy_power_chg_quirk: set if we enable delay phy power change quirk
+ * @lfps_filter_quirk: set if we enable LFPS filter quirk
+ * @rx_detect_poll_quirk: set if we enable rx_detect to polling lfps quirk
+ * @dis_u3_susphy_quirk: set if we disable usb3 suspend phy
+ * @dis_u2_susphy_quirk: set if we disable usb2 suspend phy
+ * @dis_enblslpm_quirk: set if we clear enblslpm in GUSB2PHYCFG,
+ * disabling the suspend signal to the PHY.
+ * @dis_u1_entry_quirk: set if link entering into U1 state needs to be disabled.
+ * @dis_u2_entry_quirk: set if link entering into U2 state needs to be disabled.
+ * @dis_rxdet_inp3_quirk: set if we disable Rx.Detect in P3
+ * @async_callbacks: if set, indicate that async callbacks will be used.
+ *
+ * @dis_u2_freeclk_exists_quirk: set if we clear u2_freeclk_exists
+ * in GUSB2PHYCFG, specifying that the USB2 PHY doesn't
+ * provide a free-running PHY clock.
+ * @dis_del_phy_power_chg_quirk: set if we disable delay phy power
+ * change quirk.
+ * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
+ * check during HS transmit.
+ * @resume_hs_terminations: Set if we enable quirk for fixing improper CRC
+ * generation after resume from suspend.
+ * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
+ * instances in park mode.
+ * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
+ * @tx_de_emphasis: Tx de-emphasis value
+ * 0 - -6dB de-emphasis
+ * 1 - -3.5dB de-emphasis
+ * 2 - No de-emphasis
+ * 3 - Reserved
+ * @dis_metastability_quirk: set to disable metastability quirk.
+ * @dis_split_quirk: set to disable split boundary.
+ * @suspended: set to track suspend event due to U3/L2.
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
+ * @max_cfg_eps: current max number of IN eps used across all USB configs.
+ * @last_fifo_depth: last fifo depth used to determine next fifo ram start
+ * address.
+ * @num_ep_resized: carries the current number of endpoints which have had
+ * their TX FIFO resized.
+ * @debug_root: root debugfs directory for this device to put its files in.
+ */
+struct dwc3 {
+ struct work_struct drd_work;
+ struct dwc3_trb *ep0_trb;
+ void *bounce;
+ void *scratchbuf;
+ u8 *setup_buf;
+ dma_addr_t ep0_trb_addr;
+ dma_addr_t bounce_addr;
+ dma_addr_t scratch_addr;
+ struct dwc3_request ep0_usb_req;
+ struct completion ep0_in_setup;
+
+ /* device lock */
+ spinlock_t lock;
+
+ /* mode switching lock */
+ struct mutex mutex;
+
+ struct device *dev;
+ struct device *sysdev;
+
+ struct platform_device *xhci;
+ struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
+
+ struct dwc3_event_buffer *ev_buf;
+ struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
+
+ struct usb_gadget *gadget;
+ struct usb_gadget_driver *gadget_driver;
+
+ struct clk *bus_clk;
+ struct clk *ref_clk;
+ struct clk *susp_clk;
+
+ struct reset_control *reset;
+
+ struct usb_phy *usb2_phy;
+ struct usb_phy *usb3_phy;
+
+ struct phy *usb2_generic_phy;
+ struct phy *usb3_generic_phy;
+
+ bool phys_ready;
+
+ struct ulpi *ulpi;
+ bool ulpi_ready;
+
+ void __iomem *regs;
+ size_t regs_size;
+
+ enum usb_dr_mode dr_mode;
+ u32 current_dr_role;
+ u32 desired_dr_role;
+ struct extcon_dev *edev;
+ struct notifier_block edev_nb;
+ enum usb_phy_interface hsphy_mode;
+ struct usb_role_switch *role_sw;
+ enum usb_dr_mode role_switch_default_mode;
+
+ struct power_supply *usb_psy;
+
+ u32 fladj;
+ u32 ref_clk_per;
+ u32 irq_gadget;
+ u32 otg_irq;
+ u32 current_otg_role;
+ u32 desired_otg_role;
+ bool otg_restart_host;
+ u32 nr_scratch;
+ u32 u1u2;
+ u32 maximum_speed;
+ u32 gadget_max_speed;
+ enum usb_ssp_rate max_ssp_rate;
+ enum usb_ssp_rate gadget_ssp_rate;
+
+ u32 ip;
+
+#define DWC3_IP 0x5533
+#define DWC31_IP 0x3331
+#define DWC32_IP 0x3332
+
+ u32 revision;
+
+#define DWC3_REVISION_ANY 0x0
+#define DWC3_REVISION_173A 0x5533173a
+#define DWC3_REVISION_175A 0x5533175a
+#define DWC3_REVISION_180A 0x5533180a
+#define DWC3_REVISION_183A 0x5533183a
+#define DWC3_REVISION_185A 0x5533185a
+#define DWC3_REVISION_187A 0x5533187a
+#define DWC3_REVISION_188A 0x5533188a
+#define DWC3_REVISION_190A 0x5533190a
+#define DWC3_REVISION_194A 0x5533194a
+#define DWC3_REVISION_200A 0x5533200a
+#define DWC3_REVISION_202A 0x5533202a
+#define DWC3_REVISION_210A 0x5533210a
+#define DWC3_REVISION_220A 0x5533220a
+#define DWC3_REVISION_230A 0x5533230a
+#define DWC3_REVISION_240A 0x5533240a
+#define DWC3_REVISION_250A 0x5533250a
+#define DWC3_REVISION_260A 0x5533260a
+#define DWC3_REVISION_270A 0x5533270a
+#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_290A 0x5533290a
+#define DWC3_REVISION_300A 0x5533300a
+#define DWC3_REVISION_310A 0x5533310a
+#define DWC3_REVISION_330A 0x5533330a
+
+#define DWC31_REVISION_ANY 0x0
+#define DWC31_REVISION_110A 0x3131302a
+#define DWC31_REVISION_120A 0x3132302a
+#define DWC31_REVISION_160A 0x3136302a
+#define DWC31_REVISION_170A 0x3137302a
+#define DWC31_REVISION_180A 0x3138302a
+#define DWC31_REVISION_190A 0x3139302a
+
+#define DWC32_REVISION_ANY 0x0
+#define DWC32_REVISION_100A 0x3130302a
+
+ u32 version_type;
+
+#define DWC31_VERSIONTYPE_ANY 0x0
+#define DWC31_VERSIONTYPE_EA01 0x65613031
+#define DWC31_VERSIONTYPE_EA02 0x65613032
+#define DWC31_VERSIONTYPE_EA03 0x65613033
+#define DWC31_VERSIONTYPE_EA04 0x65613034
+#define DWC31_VERSIONTYPE_EA05 0x65613035
+#define DWC31_VERSIONTYPE_EA06 0x65613036
+
+ enum dwc3_ep0_next ep0_next_event;
+ enum dwc3_ep0_state ep0state;
+ enum dwc3_link_state link_state;
+
+ u16 u2sel;
+ u16 u2pel;
+ u8 u1sel;
+ u8 u1pel;
+
+ u8 speed;
+
+ u8 num_eps;
+
+ struct dwc3_hwparams hwparams;
+ struct debugfs_regset32 *regset;
+
+ u32 dbg_lsp_select;
+
+ u8 test_mode;
+ u8 test_mode_nr;
+ u8 lpm_nyet_threshold;
+ u8 hird_threshold;
+ u8 rx_thr_num_pkt;
+ u8 rx_max_burst;
+ u8 tx_thr_num_pkt;
+ u8 tx_max_burst;
+ u8 rx_thr_num_pkt_prd;
+ u8 rx_max_burst_prd;
+ u8 tx_thr_num_pkt_prd;
+ u8 tx_max_burst_prd;
+ u8 tx_fifo_resize_max_num;
+ u8 clear_stall_protocol;
+
+ const char *hsphy_interface;
+
+ unsigned connected:1;
+ unsigned softconnect:1;
+ unsigned delayed_status:1;
+ unsigned ep0_bounced:1;
+ unsigned ep0_expect_in:1;
+ unsigned has_hibernation:1;
+ unsigned sysdev_is_parent:1;
+ unsigned has_lpm_erratum:1;
+ unsigned is_utmi_l1_suspend:1;
+ unsigned is_fpga:1;
+ unsigned pending_events:1;
+ unsigned do_fifo_resize:1;
+ unsigned pullups_connected:1;
+ unsigned setup_packet_pending:1;
+ unsigned three_stage_setup:1;
+ unsigned dis_start_transfer_quirk:1;
+ unsigned usb3_lpm_capable:1;
+ unsigned usb2_lpm_disable:1;
+ unsigned usb2_gadget_lpm_disable:1;
+
+ unsigned disable_scramble_quirk:1;
+ unsigned u2exit_lfps_quirk:1;
+ unsigned u2ss_inp3_quirk:1;
+ unsigned req_p1p2p3_quirk:1;
+ unsigned del_p1p2p3_quirk:1;
+ unsigned del_phy_power_chg_quirk:1;
+ unsigned lfps_filter_quirk:1;
+ unsigned rx_detect_poll_quirk:1;
+ unsigned dis_u3_susphy_quirk:1;
+ unsigned dis_u2_susphy_quirk:1;
+ unsigned dis_enblslpm_quirk:1;
+ unsigned dis_u1_entry_quirk:1;
+ unsigned dis_u2_entry_quirk:1;
+ unsigned dis_rxdet_inp3_quirk:1;
+ unsigned dis_u2_freeclk_exists_quirk:1;
+ unsigned dis_del_phy_power_chg_quirk:1;
+ unsigned dis_tx_ipgap_linecheck_quirk:1;
+ unsigned resume_hs_terminations:1;
+ unsigned parkmode_disable_ss_quirk:1;
+ unsigned gfladj_refclk_lpm_sel:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
+
+ unsigned dis_metastability_quirk:1;
+
+ unsigned dis_split_quirk:1;
+ unsigned async_callbacks:1;
+ unsigned suspended:1;
+
+ u16 imod_interval;
+
+ int max_cfg_eps;
+ int last_fifo_depth;
+ int num_ep_resized;
+ struct dentry *debug_root;
+};
+
+#define INCRX_BURST_MODE 0
+#define INCRX_UNDEF_LENGTH_BURST_MODE 1
+
+#define work_to_dwc(w) (container_of((w), struct dwc3, drd_work))
+
+/* -------------------------------------------------------------------------- */
+
+struct dwc3_event_type {
+ u32 is_devspec:1;
+ u32 type:7;
+ u32 reserved8_31:24;
+} __packed;
+
+#define DWC3_DEPEVT_XFERCOMPLETE 0x01
+#define DWC3_DEPEVT_XFERINPROGRESS 0x02
+#define DWC3_DEPEVT_XFERNOTREADY 0x03
+#define DWC3_DEPEVT_RXTXFIFOEVT 0x04
+#define DWC3_DEPEVT_STREAMEVT 0x06
+#define DWC3_DEPEVT_EPCMDCMPLT 0x07
+
+/**
+ * struct dwc3_event_depevt - Device Endpoint Events
+ * @one_bit: indicates this is an endpoint event (not used)
+ * @endpoint_number: number of the endpoint
+ * @endpoint_event: The event we have:
+ * 0x00 - Reserved
+ * 0x01 - XferComplete
+ * 0x02 - XferInProgress
+ * 0x03 - XferNotReady
+ * 0x04 - RxTxFifoEvt (IN->Underrun, OUT->Overrun)
+ * 0x05 - Reserved
+ * 0x06 - StreamEvt
+ * 0x07 - EPCmdCmplt
+ * @reserved11_10: Reserved, don't use.
+ * @status: Indicates the status of the event. Refer to databook for
+ * more information.
+ * @parameters: Parameters of the current event. Refer to databook for
+ * more information.
+ */
+struct dwc3_event_depevt {
+ u32 one_bit:1;
+ u32 endpoint_number:5;
+ u32 endpoint_event:4;
+ u32 reserved11_10:2;
+ u32 status:4;
+
+/* Within XferNotReady */
+#define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3)
+
+/* Within XferComplete or XferInProgress */
+#define DEPEVT_STATUS_BUSERR BIT(0)
+#define DEPEVT_STATUS_SHORT BIT(1)
+#define DEPEVT_STATUS_IOC BIT(2)
+#define DEPEVT_STATUS_LST BIT(3) /* XferComplete */
+#define DEPEVT_STATUS_MISSED_ISOC BIT(3) /* XferInProgress */
+
+/* Stream event only */
+#define DEPEVT_STREAMEVT_FOUND 1
+#define DEPEVT_STREAMEVT_NOTFOUND 2
+
+/* Stream event parameter */
+#define DEPEVT_STREAM_PRIME 0xfffe
+#define DEPEVT_STREAM_NOSTREAM 0x0
+
+/* Control-only Status */
+#define DEPEVT_STATUS_CONTROL_DATA 1
+#define DEPEVT_STATUS_CONTROL_STATUS 2
+#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3)
+
+/* In response to Start Transfer */
+#define DEPEVT_TRANSFER_NO_RESOURCE 1
+#define DEPEVT_TRANSFER_BUS_EXPIRY 2
+
+ u32 parameters:16;
+
+/* For Command Complete Events */
+#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8)
+} __packed;
+
+/**
+ * struct dwc3_event_devt - Device Events
+ * @one_bit: indicates this is a non-endpoint event (not used)
+ * @device_event: indicates it's a device event. Should read as 0x00
+ * @type: indicates the type of device event.
+ * 0 - DisconnEvt
+ * 1 - USBRst
+ * 2 - ConnectDone
+ * 3 - ULStChng
+ * 4 - WkUpEvt
+ * 5 - Reserved
+ * 6 - Suspend (EOPF on revisions 2.10a and prior)
+ * 7 - SOF
+ * 8 - Reserved
+ * 9 - ErraticErr
+ * 10 - CmdCmplt
+ * 11 - EvntOverflow
+ * 12 - VndrDevTstRcved
+ * @reserved15_12: Reserved, not used
+ * @event_info: Information about this event
+ * @reserved31_25: Reserved, not used
+ */
+struct dwc3_event_devt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 type:4;
+ u32 reserved15_12:4;
+ u32 event_info:9;
+ u32 reserved31_25:7;
+} __packed;
+
+/**
+ * struct dwc3_event_gevt - Other Core Events
+ * @one_bit: indicates this is a non-endpoint event (not used)
+ * @device_event: indicates whether it's a (0x03) Carkit or (0x04) I2C event.
+ * @phy_port_number: self-explanatory
+ * @reserved31_12: Reserved, not used.
+ */
+struct dwc3_event_gevt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 phy_port_number:4;
+ u32 reserved31_12:20;
+} __packed;
+
+/**
+ * union dwc3_event - representation of Event Buffer contents
+ * @raw: raw 32-bit event
+ * @type: the type of the event
+ * @depevt: Device Endpoint Event
+ * @devt: Device Event
+ * @gevt: Global Event
+ */
+union dwc3_event {
+ u32 raw;
+ struct dwc3_event_type type;
+ struct dwc3_event_depevt depevt;
+ struct dwc3_event_devt devt;
+ struct dwc3_event_gevt gevt;
+};
+
+/**
+ * struct dwc3_gadget_ep_cmd_params - representation of endpoint command
+ * parameters
+ * @param2: third parameter
+ * @param1: second parameter
+ * @param0: first parameter
+ */
+struct dwc3_gadget_ep_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
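+
+/*
+ * Note (illustrative sketch, not a complete configuration): when an endpoint
+ * command is issued via dwc3_send_gadget_ep_cmd(), param0/param1/param2 are
+ * written to that endpoint's DEPCMDPAR0/DEPCMDPAR1/DEPCMDPAR2 registers
+ * before DEPCMD itself is kicked, e.g.:
+ *
+ *	struct dwc3_gadget_ep_cmd_params params = { 0 };
+ *
+ *	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc));
+ *	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
+ */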
+
+/*
+ * DWC3 Features to be used as Driver Data
+ */
+
+#define DWC3_HAS_PERIPHERAL BIT(0)
+#define DWC3_HAS_XHCI BIT(1)
+#define DWC3_HAS_OTG BIT(3)
+
+/* prototypes */
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
+u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
+
+#define DWC3_IP_IS(_ip) \
+ (dwc->ip == _ip##_IP)
+
+#define DWC3_VER_IS(_ip, _ver) \
+ (DWC3_IP_IS(_ip) && dwc->revision == _ip##_REVISION_##_ver)
+
+#define DWC3_VER_IS_PRIOR(_ip, _ver) \
+ (DWC3_IP_IS(_ip) && dwc->revision < _ip##_REVISION_##_ver)
+
+#define DWC3_VER_IS_WITHIN(_ip, _from, _to) \
+ (DWC3_IP_IS(_ip) && \
+ dwc->revision >= _ip##_REVISION_##_from && \
+ (!(_ip##_REVISION_##_to) || \
+ dwc->revision <= _ip##_REVISION_##_to))
+
+#define DWC3_VER_TYPE_IS_WITHIN(_ip, _ver, _from, _to) \
+ (DWC3_VER_IS(_ip, _ver) && \
+ dwc->version_type >= _ip##_VERSIONTYPE_##_from && \
+ (!(_ip##_VERSIONTYPE_##_to) || \
+ dwc->version_type <= _ip##_VERSIONTYPE_##_to))
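+
+/*
+ * Usage sketch for the version helpers above (illustrative only; they expect
+ * a local "dwc" pointer in scope):
+ *
+ *	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
+ *		; // workaround for DWC_usb3 cores older than 2.50a
+ *
+ *	if (DWC3_VER_IS_WITHIN(DWC31, 170A, ANY))
+ *		; // DWC_usb31 1.70a or newer
+ */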
+
+/**
+ * dwc3_mdwidth - get MDWIDTH value in bits
+ * @dwc: pointer to our context structure
+ *
+ * Return MDWIDTH configuration value in bits.
+ */
+static inline u32 dwc3_mdwidth(struct dwc3 *dwc)
+{
+ u32 mdwidth;
+
+ mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
+ return mdwidth;
+}
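+
+/*
+ * Example (illustrative): FIFO depths reported by the core are counted in
+ * MDWIDTH-sized words, so a depth is converted to bytes as
+ *
+ *	bytes = words * dwc3_mdwidth(dwc) / 8;
+ *
+ * which is what the FIFO size files in debugfs.c do ("val *= mdwidth;
+ * val >>= 3;").
+ */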
+
+bool dwc3_has_imod(struct dwc3 *dwc);
+
+int dwc3_event_buffers_setup(struct dwc3 *dwc);
+void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+
+int dwc3_core_soft_reset(struct dwc3 *dwc);
+
+#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+int dwc3_host_init(struct dwc3 *dwc);
+void dwc3_host_exit(struct dwc3 *dwc);
+#else
+static inline int dwc3_host_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_host_exit(struct dwc3 *dwc)
+{ }
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+int dwc3_gadget_init(struct dwc3 *dwc);
+void dwc3_gadget_exit(struct dwc3 *dwc);
+int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
+int dwc3_gadget_get_link_state(struct dwc3 *dwc);
+int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ struct dwc3_gadget_ep_cmd_params *params);
+int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
+ u32 param);
+void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc);
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status);
+#else
+static inline int dwc3_gadget_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_gadget_exit(struct dwc3 *dwc)
+{ }
+static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
+{ return 0; }
+static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
+{ return 0; }
+static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
+ enum dwc3_link_state state)
+{ return 0; }
+
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
+{ return 0; }
+static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
+ int cmd, u32 param)
+{ return 0; }
+static inline void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
+{ }
+#endif
+
+#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+int dwc3_drd_init(struct dwc3 *dwc);
+void dwc3_drd_exit(struct dwc3 *dwc);
+void dwc3_otg_init(struct dwc3 *dwc);
+void dwc3_otg_exit(struct dwc3 *dwc);
+void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus);
+void dwc3_otg_host_init(struct dwc3 *dwc);
+#else
+static inline int dwc3_drd_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_drd_exit(struct dwc3 *dwc)
+{ }
+static inline void dwc3_otg_init(struct dwc3 *dwc)
+{ }
+static inline void dwc3_otg_exit(struct dwc3 *dwc)
+{ }
+static inline void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus)
+{ }
+static inline void dwc3_otg_host_init(struct dwc3 *dwc)
+{ }
+#endif
+
+/* power management interface */
+#if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
+int dwc3_gadget_suspend(struct dwc3 *dwc);
+int dwc3_gadget_resume(struct dwc3 *dwc);
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc);
+#else
+static inline int dwc3_gadget_suspend(struct dwc3 *dwc)
+{
+ return 0;
+}
+
+static inline int dwc3_gadget_resume(struct dwc3 *dwc)
+{
+ return 0;
+}
+
+static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+}
+#endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
+
+#if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
+int dwc3_ulpi_init(struct dwc3 *dwc);
+void dwc3_ulpi_exit(struct dwc3 *dwc);
+#else
+static inline int dwc3_ulpi_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_ulpi_exit(struct dwc3 *dwc)
+{ }
+#endif
+
+#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
new file mode 100644
index 000000000..8bb2c9e3b
--- /dev/null
+++ b/drivers/usb/dwc3/debug.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * debug.h - DesignWare USB3 DRD Controller Debug Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#ifndef __DWC3_DEBUG_H
+#define __DWC3_DEBUG_H
+
+#include "core.h"
+
+/**
+ * dwc3_gadget_ep_cmd_string - returns endpoint command string
+ * @cmd: command code
+ */
+static inline const char *
+dwc3_gadget_ep_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ case DWC3_DEPCMD_DEPSTARTCFG:
+ return "Start New Configuration";
+ case DWC3_DEPCMD_ENDTRANSFER:
+ return "End Transfer";
+ case DWC3_DEPCMD_UPDATETRANSFER:
+ return "Update Transfer";
+ case DWC3_DEPCMD_STARTTRANSFER:
+ return "Start Transfer";
+ case DWC3_DEPCMD_CLEARSTALL:
+ return "Clear Stall";
+ case DWC3_DEPCMD_SETSTALL:
+ return "Set Stall";
+ case DWC3_DEPCMD_GETEPSTATE:
+ return "Get Endpoint State";
+ case DWC3_DEPCMD_SETTRANSFRESOURCE:
+ return "Set Endpoint Transfer Resource";
+ case DWC3_DEPCMD_SETEPCONFIG:
+ return "Set Endpoint Configuration";
+ default:
+ return "UNKNOWN command";
+ }
+}
+
+/**
+ * dwc3_gadget_generic_cmd_string - returns generic command string
+ * @cmd: command code
+ */
+static inline const char *
+dwc3_gadget_generic_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ case DWC3_DGCMD_SET_LMP:
+ return "Set LMP";
+ case DWC3_DGCMD_SET_PERIODIC_PAR:
+ return "Set Periodic Parameters";
+ case DWC3_DGCMD_XMIT_FUNCTION:
+ return "Transmit Function Wake Device Notification";
+ case DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO:
+ return "Set Scratchpad Buffer Array Address Lo";
+ case DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI:
+ return "Set Scratchpad Buffer Array Address Hi";
+ case DWC3_DGCMD_SELECTED_FIFO_FLUSH:
+ return "Selected FIFO Flush";
+ case DWC3_DGCMD_ALL_FIFO_FLUSH:
+ return "All FIFO Flush";
+ case DWC3_DGCMD_SET_ENDPOINT_NRDY:
+ return "Set Endpoint NRDY";
+ case DWC3_DGCMD_SET_ENDPOINT_PRIME:
+ return "Set Endpoint Prime";
+ case DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK:
+ return "Run SoC Bus Loopback Test";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * dwc3_gadget_link_string - returns link name
+ * @link_state: link state code
+ */
+static inline const char *
+dwc3_gadget_link_string(enum dwc3_link_state link_state)
+{
+ switch (link_state) {
+ case DWC3_LINK_STATE_U0:
+ return "U0";
+ case DWC3_LINK_STATE_U1:
+ return "U1";
+ case DWC3_LINK_STATE_U2:
+ return "U2";
+ case DWC3_LINK_STATE_U3:
+ return "U3";
+ case DWC3_LINK_STATE_SS_DIS:
+ return "SS.Disabled";
+ case DWC3_LINK_STATE_RX_DET:
+ return "RX.Detect";
+ case DWC3_LINK_STATE_SS_INACT:
+ return "SS.Inactive";
+ case DWC3_LINK_STATE_POLL:
+ return "Polling";
+ case DWC3_LINK_STATE_RECOV:
+ return "Recovery";
+ case DWC3_LINK_STATE_HRESET:
+ return "Hot Reset";
+ case DWC3_LINK_STATE_CMPLY:
+ return "Compliance";
+ case DWC3_LINK_STATE_LPBK:
+ return "Loopback";
+ case DWC3_LINK_STATE_RESET:
+ return "Reset";
+ case DWC3_LINK_STATE_RESUME:
+ return "Resume";
+ default:
+ return "UNKNOWN link state";
+ }
+}
+
+/**
+ * dwc3_gadget_hs_link_string - returns highspeed and below link name
+ * @link_state: link state code
+ */
+static inline const char *
+dwc3_gadget_hs_link_string(enum dwc3_link_state link_state)
+{
+ switch (link_state) {
+ case DWC3_LINK_STATE_U0:
+ return "On";
+ case DWC3_LINK_STATE_U2:
+ return "Sleep";
+ case DWC3_LINK_STATE_U3:
+ return "Suspend";
+ case DWC3_LINK_STATE_SS_DIS:
+ return "Disconnected";
+ case DWC3_LINK_STATE_RX_DET:
+ return "Early Suspend";
+ case DWC3_LINK_STATE_RECOV:
+ return "Recovery";
+ case DWC3_LINK_STATE_RESET:
+ return "Reset";
+ case DWC3_LINK_STATE_RESUME:
+ return "Resume";
+ default:
+ return "UNKNOWN link state";
+ }
+}
+
+/**
+ * dwc3_trb_type_string - returns TRB type as a string
+ * @type: the type of the TRB
+ */
+static inline const char *dwc3_trb_type_string(unsigned int type)
+{
+ switch (type) {
+ case DWC3_TRBCTL_NORMAL:
+ return "normal";
+ case DWC3_TRBCTL_CONTROL_SETUP:
+ return "setup";
+ case DWC3_TRBCTL_CONTROL_STATUS2:
+ return "status2";
+ case DWC3_TRBCTL_CONTROL_STATUS3:
+ return "status3";
+ case DWC3_TRBCTL_CONTROL_DATA:
+ return "data";
+ case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+ return "isoc-first";
+ case DWC3_TRBCTL_ISOCHRONOUS:
+ return "isoc";
+ case DWC3_TRBCTL_LINK_TRB:
+ return "link";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
+{
+ switch (state) {
+ case EP0_UNCONNECTED:
+ return "Unconnected";
+ case EP0_SETUP_PHASE:
+ return "Setup Phase";
+ case EP0_DATA_PHASE:
+ return "Data Phase";
+ case EP0_STATUS_PHASE:
+ return "Status Phase";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * dwc3_gadget_event_string - returns event name
+ * @str: output buffer to format the event name into
+ * @size: size of @str
+ * @event: the event code
+ */
+static inline const char *dwc3_gadget_event_string(char *str, size_t size,
+ const struct dwc3_event_devt *event)
+{
+ enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK;
+
+ switch (event->type) {
+ case DWC3_DEVICE_EVENT_DISCONNECT:
+ snprintf(str, size, "Disconnect: [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_RESET:
+ snprintf(str, size, "Reset [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_CONNECT_DONE:
+ snprintf(str, size, "Connection Done [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ snprintf(str, size, "Link Change [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_WAKEUP:
+ snprintf(str, size, "WakeUp [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ snprintf(str, size, "Suspend [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_SOF:
+ snprintf(str, size, "Start-Of-Frame [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+ snprintf(str, size, "Erratic Error [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_CMD_CMPL:
+ snprintf(str, size, "Command Complete [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ case DWC3_DEVICE_EVENT_OVERFLOW:
+ snprintf(str, size, "Overflow [%s]",
+ dwc3_gadget_link_string(state));
+ break;
+ default:
+ snprintf(str, size, "UNKNOWN");
+ }
+
+ return str;
+}
+
+/**
+ * dwc3_ep_event_string - returns event name
+ * @str: output buffer to format the event name into
+ * @size: size of @str
+ * @event: the event code
+ * @ep0state: the current ep0 state
+ */
+static inline const char *dwc3_ep_event_string(char *str, size_t size,
+ const struct dwc3_event_depevt *event, u32 ep0state)
+{
+ u8 epnum = event->endpoint_number;
+ size_t len;
+ int status;
+
+ len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
+ (epnum & 1) ? "in" : "out");
+
+ status = event->status;
+
+ switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ len += scnprintf(str + len, size - len,
+ "Transfer Complete (%c%c%c)",
+ status & DEPEVT_STATUS_SHORT ? 'S' : 's',
+ status & DEPEVT_STATUS_IOC ? 'I' : 'i',
+ status & DEPEVT_STATUS_LST ? 'L' : 'l');
+
+ if (epnum <= 1)
+ scnprintf(str + len, size - len, " [%s]",
+ dwc3_ep0_state_string(ep0state));
+ break;
+ case DWC3_DEPEVT_XFERINPROGRESS:
+ scnprintf(str + len, size - len,
+ "Transfer In Progress [%08x] (%c%c%c)",
+ event->parameters,
+ status & DEPEVT_STATUS_SHORT ? 'S' : 's',
+ status & DEPEVT_STATUS_IOC ? 'I' : 'i',
+ status & DEPEVT_STATUS_LST ? 'M' : 'm');
+ break;
+ case DWC3_DEPEVT_XFERNOTREADY:
+ len += scnprintf(str + len, size - len,
+ "Transfer Not Ready [%08x]%s",
+ event->parameters,
+ status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
+ " (Active)" : " (Not Active)");
+
+ /* Control Endpoints */
+ if (epnum <= 1) {
+ int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+
+ switch (phase) {
+ case DEPEVT_STATUS_CONTROL_DATA:
+ scnprintf(str + len, size - len,
+ " [Data Phase]");
+ break;
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ scnprintf(str + len, size - len,
+ " [Status Phase]");
+ }
+ }
+ break;
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+ scnprintf(str + len, size - len, "FIFO");
+ break;
+ case DWC3_DEPEVT_STREAMEVT:
+ status = event->status;
+
+ switch (status) {
+ case DEPEVT_STREAMEVT_FOUND:
+ scnprintf(str + len, size - len, " Stream %d Found",
+ event->parameters);
+ break;
+ case DEPEVT_STREAMEVT_NOTFOUND:
+ default:
+ scnprintf(str + len, size - len, " Stream Not Found");
+ break;
+ }
+
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ scnprintf(str + len, size - len, "Endpoint Command Complete");
+ break;
+ default:
+ scnprintf(str + len, size - len, "UNKNOWN");
+ }
+
+ return str;
+}
+
+/**
+ * dwc3_gadget_event_type_string - return event name
+ * @event: the event code
+ */
+static inline const char *dwc3_gadget_event_type_string(u8 event)
+{
+ switch (event) {
+ case DWC3_DEVICE_EVENT_DISCONNECT:
+ return "Disconnect";
+ case DWC3_DEVICE_EVENT_RESET:
+ return "Reset";
+ case DWC3_DEVICE_EVENT_CONNECT_DONE:
+ return "Connect Done";
+ case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ return "Link Status Change";
+ case DWC3_DEVICE_EVENT_WAKEUP:
+ return "Wake-Up";
+ case DWC3_DEVICE_EVENT_HIBER_REQ:
+ return "Hibernation";
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ return "Suspend";
+ case DWC3_DEVICE_EVENT_SOF:
+ return "Start of Frame";
+ case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+ return "Erratic Error";
+ case DWC3_DEVICE_EVENT_CMD_CMPL:
+ return "Command Complete";
+ case DWC3_DEVICE_EVENT_OVERFLOW:
+ return "Overflow";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *dwc3_decode_event(char *str, size_t size, u32 event,
+ u32 ep0state)
+{
+ union dwc3_event evt;
+
+ memcpy(&evt, &event, sizeof(event));
+
+ if (evt.type.is_devspec)
+ return dwc3_gadget_event_string(str, size, &evt.devt);
+ else
+ return dwc3_ep_event_string(str, size, &evt.depevt, ep0state);
+}
+
+static inline const char *dwc3_ep_cmd_status_string(int status)
+{
+ switch (status) {
+ case -ETIMEDOUT:
+ return "Timed Out";
+ case 0:
+ return "Successful";
+ case DEPEVT_TRANSFER_NO_RESOURCE:
+ return "No Resource";
+ case DEPEVT_TRANSFER_BUS_EXPIRY:
+ return "Bus Expiry";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+{
+ switch (status) {
+ case -ETIMEDOUT:
+ return "Timed Out";
+ case 0:
+ return "Successful";
+ case 1:
+ return "Error";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
+extern void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep);
+extern void dwc3_debugfs_init(struct dwc3 *d);
+extern void dwc3_debugfs_exit(struct dwc3 *d);
+#else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{ }
+static inline void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
+{ }
+static inline void dwc3_debugfs_init(struct dwc3 *d)
+{ }
+static inline void dwc3_debugfs_exit(struct dwc3 *d)
+{ }
+#endif
+#endif /* __DWC3_DEBUG_H */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
new file mode 100644
index 000000000..f0ffd2e5c
--- /dev/null
+++ b/drivers/usb/dwc3/debugfs.c
@@ -0,0 +1,1040 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debugfs.c - DesignWare USB3 DRD Controller DebugFS file
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/ptrace.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <linux/usb/ch9.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "debug.h"
+
+#define DWC3_LSP_MUX_UNSELECTED 0xfffff
+
+#define dump_register(nm) \
+{ \
+ .name = __stringify(nm), \
+ .offset = DWC3_ ##nm, \
+}
+
+#define dump_ep_register_set(n) \
+ { \
+ .name = "DEPCMDPAR2("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR2, \
+ }, \
+ { \
+ .name = "DEPCMDPAR1("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR1, \
+ }, \
+ { \
+ .name = "DEPCMDPAR0("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR0, \
+ }, \
+ { \
+ .name = "DEPCMD("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMD, \
+ }
+
+static const struct debugfs_reg32 dwc3_regs[] = {
+ dump_register(GSBUSCFG0),
+ dump_register(GSBUSCFG1),
+ dump_register(GTXTHRCFG),
+ dump_register(GRXTHRCFG),
+ dump_register(GCTL),
+ dump_register(GEVTEN),
+ dump_register(GSTS),
+ dump_register(GUCTL1),
+ dump_register(GSNPSID),
+ dump_register(GGPIO),
+ dump_register(GUID),
+ dump_register(GUCTL),
+ dump_register(GBUSERRADDR0),
+ dump_register(GBUSERRADDR1),
+ dump_register(GPRTBIMAP0),
+ dump_register(GPRTBIMAP1),
+ dump_register(GHWPARAMS0),
+ dump_register(GHWPARAMS1),
+ dump_register(GHWPARAMS2),
+ dump_register(GHWPARAMS3),
+ dump_register(GHWPARAMS4),
+ dump_register(GHWPARAMS5),
+ dump_register(GHWPARAMS6),
+ dump_register(GHWPARAMS7),
+ dump_register(GDBGFIFOSPACE),
+ dump_register(GDBGLTSSM),
+ dump_register(GDBGBMU),
+ dump_register(GPRTBIMAP_HS0),
+ dump_register(GPRTBIMAP_HS1),
+ dump_register(GPRTBIMAP_FS0),
+ dump_register(GPRTBIMAP_FS1),
+
+ dump_register(GUSB2PHYCFG(0)),
+ dump_register(GUSB2PHYCFG(1)),
+ dump_register(GUSB2PHYCFG(2)),
+ dump_register(GUSB2PHYCFG(3)),
+ dump_register(GUSB2PHYCFG(4)),
+ dump_register(GUSB2PHYCFG(5)),
+ dump_register(GUSB2PHYCFG(6)),
+ dump_register(GUSB2PHYCFG(7)),
+ dump_register(GUSB2PHYCFG(8)),
+ dump_register(GUSB2PHYCFG(9)),
+ dump_register(GUSB2PHYCFG(10)),
+ dump_register(GUSB2PHYCFG(11)),
+ dump_register(GUSB2PHYCFG(12)),
+ dump_register(GUSB2PHYCFG(13)),
+ dump_register(GUSB2PHYCFG(14)),
+ dump_register(GUSB2PHYCFG(15)),
+
+ dump_register(GUSB2I2CCTL(0)),
+ dump_register(GUSB2I2CCTL(1)),
+ dump_register(GUSB2I2CCTL(2)),
+ dump_register(GUSB2I2CCTL(3)),
+ dump_register(GUSB2I2CCTL(4)),
+ dump_register(GUSB2I2CCTL(5)),
+ dump_register(GUSB2I2CCTL(6)),
+ dump_register(GUSB2I2CCTL(7)),
+ dump_register(GUSB2I2CCTL(8)),
+ dump_register(GUSB2I2CCTL(9)),
+ dump_register(GUSB2I2CCTL(10)),
+ dump_register(GUSB2I2CCTL(11)),
+ dump_register(GUSB2I2CCTL(12)),
+ dump_register(GUSB2I2CCTL(13)),
+ dump_register(GUSB2I2CCTL(14)),
+ dump_register(GUSB2I2CCTL(15)),
+
+ dump_register(GUSB2PHYACC(0)),
+ dump_register(GUSB2PHYACC(1)),
+ dump_register(GUSB2PHYACC(2)),
+ dump_register(GUSB2PHYACC(3)),
+ dump_register(GUSB2PHYACC(4)),
+ dump_register(GUSB2PHYACC(5)),
+ dump_register(GUSB2PHYACC(6)),
+ dump_register(GUSB2PHYACC(7)),
+ dump_register(GUSB2PHYACC(8)),
+ dump_register(GUSB2PHYACC(9)),
+ dump_register(GUSB2PHYACC(10)),
+ dump_register(GUSB2PHYACC(11)),
+ dump_register(GUSB2PHYACC(12)),
+ dump_register(GUSB2PHYACC(13)),
+ dump_register(GUSB2PHYACC(14)),
+ dump_register(GUSB2PHYACC(15)),
+
+ dump_register(GUSB3PIPECTL(0)),
+ dump_register(GUSB3PIPECTL(1)),
+ dump_register(GUSB3PIPECTL(2)),
+ dump_register(GUSB3PIPECTL(3)),
+ dump_register(GUSB3PIPECTL(4)),
+ dump_register(GUSB3PIPECTL(5)),
+ dump_register(GUSB3PIPECTL(6)),
+ dump_register(GUSB3PIPECTL(7)),
+ dump_register(GUSB3PIPECTL(8)),
+ dump_register(GUSB3PIPECTL(9)),
+ dump_register(GUSB3PIPECTL(10)),
+ dump_register(GUSB3PIPECTL(11)),
+ dump_register(GUSB3PIPECTL(12)),
+ dump_register(GUSB3PIPECTL(13)),
+ dump_register(GUSB3PIPECTL(14)),
+ dump_register(GUSB3PIPECTL(15)),
+
+ dump_register(GTXFIFOSIZ(0)),
+ dump_register(GTXFIFOSIZ(1)),
+ dump_register(GTXFIFOSIZ(2)),
+ dump_register(GTXFIFOSIZ(3)),
+ dump_register(GTXFIFOSIZ(4)),
+ dump_register(GTXFIFOSIZ(5)),
+ dump_register(GTXFIFOSIZ(6)),
+ dump_register(GTXFIFOSIZ(7)),
+ dump_register(GTXFIFOSIZ(8)),
+ dump_register(GTXFIFOSIZ(9)),
+ dump_register(GTXFIFOSIZ(10)),
+ dump_register(GTXFIFOSIZ(11)),
+ dump_register(GTXFIFOSIZ(12)),
+ dump_register(GTXFIFOSIZ(13)),
+ dump_register(GTXFIFOSIZ(14)),
+ dump_register(GTXFIFOSIZ(15)),
+ dump_register(GTXFIFOSIZ(16)),
+ dump_register(GTXFIFOSIZ(17)),
+ dump_register(GTXFIFOSIZ(18)),
+ dump_register(GTXFIFOSIZ(19)),
+ dump_register(GTXFIFOSIZ(20)),
+ dump_register(GTXFIFOSIZ(21)),
+ dump_register(GTXFIFOSIZ(22)),
+ dump_register(GTXFIFOSIZ(23)),
+ dump_register(GTXFIFOSIZ(24)),
+ dump_register(GTXFIFOSIZ(25)),
+ dump_register(GTXFIFOSIZ(26)),
+ dump_register(GTXFIFOSIZ(27)),
+ dump_register(GTXFIFOSIZ(28)),
+ dump_register(GTXFIFOSIZ(29)),
+ dump_register(GTXFIFOSIZ(30)),
+ dump_register(GTXFIFOSIZ(31)),
+
+ dump_register(GRXFIFOSIZ(0)),
+ dump_register(GRXFIFOSIZ(1)),
+ dump_register(GRXFIFOSIZ(2)),
+ dump_register(GRXFIFOSIZ(3)),
+ dump_register(GRXFIFOSIZ(4)),
+ dump_register(GRXFIFOSIZ(5)),
+ dump_register(GRXFIFOSIZ(6)),
+ dump_register(GRXFIFOSIZ(7)),
+ dump_register(GRXFIFOSIZ(8)),
+ dump_register(GRXFIFOSIZ(9)),
+ dump_register(GRXFIFOSIZ(10)),
+ dump_register(GRXFIFOSIZ(11)),
+ dump_register(GRXFIFOSIZ(12)),
+ dump_register(GRXFIFOSIZ(13)),
+ dump_register(GRXFIFOSIZ(14)),
+ dump_register(GRXFIFOSIZ(15)),
+ dump_register(GRXFIFOSIZ(16)),
+ dump_register(GRXFIFOSIZ(17)),
+ dump_register(GRXFIFOSIZ(18)),
+ dump_register(GRXFIFOSIZ(19)),
+ dump_register(GRXFIFOSIZ(20)),
+ dump_register(GRXFIFOSIZ(21)),
+ dump_register(GRXFIFOSIZ(22)),
+ dump_register(GRXFIFOSIZ(23)),
+ dump_register(GRXFIFOSIZ(24)),
+ dump_register(GRXFIFOSIZ(25)),
+ dump_register(GRXFIFOSIZ(26)),
+ dump_register(GRXFIFOSIZ(27)),
+ dump_register(GRXFIFOSIZ(28)),
+ dump_register(GRXFIFOSIZ(29)),
+ dump_register(GRXFIFOSIZ(30)),
+ dump_register(GRXFIFOSIZ(31)),
+
+ dump_register(GEVNTADRLO(0)),
+ dump_register(GEVNTADRHI(0)),
+ dump_register(GEVNTSIZ(0)),
+ dump_register(GEVNTCOUNT(0)),
+
+ dump_register(GHWPARAMS8),
+ dump_register(DCFG),
+ dump_register(DCTL),
+ dump_register(DEVTEN),
+ dump_register(DSTS),
+ dump_register(DGCMDPAR),
+ dump_register(DGCMD),
+ dump_register(DALEPENA),
+
+ dump_ep_register_set(0),
+ dump_ep_register_set(1),
+ dump_ep_register_set(2),
+ dump_ep_register_set(3),
+ dump_ep_register_set(4),
+ dump_ep_register_set(5),
+ dump_ep_register_set(6),
+ dump_ep_register_set(7),
+ dump_ep_register_set(8),
+ dump_ep_register_set(9),
+ dump_ep_register_set(10),
+ dump_ep_register_set(11),
+ dump_ep_register_set(12),
+ dump_ep_register_set(13),
+ dump_ep_register_set(14),
+ dump_ep_register_set(15),
+ dump_ep_register_set(16),
+ dump_ep_register_set(17),
+ dump_ep_register_set(18),
+ dump_ep_register_set(19),
+ dump_ep_register_set(20),
+ dump_ep_register_set(21),
+ dump_ep_register_set(22),
+ dump_ep_register_set(23),
+ dump_ep_register_set(24),
+ dump_ep_register_set(25),
+ dump_ep_register_set(26),
+ dump_ep_register_set(27),
+ dump_ep_register_set(28),
+ dump_ep_register_set(29),
+ dump_ep_register_set(30),
+ dump_ep_register_set(31),
+
+ dump_register(OCFG),
+ dump_register(OCTL),
+ dump_register(OEVT),
+ dump_register(OEVTEN),
+ dump_register(OSTS),
+};
+
+static void dwc3_host_lsp(struct seq_file *s)
+{
+ struct dwc3 *dwc = s->private;
+ bool dbc_enabled;
+ u32 sel;
+ u32 reg;
+ u32 val;
+
+ dbc_enabled = !!(dwc->hwparams.hwparams1 & DWC3_GHWPARAMS1_ENDBC);
+
+ sel = dwc->dbg_lsp_select;
+ if (sel == DWC3_LSP_MUX_UNSELECTED) {
+ seq_puts(s, "Write LSP selection to print for host\n");
+ return;
+ }
+
+ reg = DWC3_GDBGLSPMUX_HOSTSELECT(sel);
+
+ dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, reg);
+ val = dwc3_readl(dwc->regs, DWC3_GDBGLSP);
+ seq_printf(s, "GDBGLSP[%d] = 0x%08x\n", sel, val);
+
+ if (dbc_enabled && sel < 256) {
+ reg |= DWC3_GDBGLSPMUX_ENDBC;
+ dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, reg);
+ val = dwc3_readl(dwc->regs, DWC3_GDBGLSP);
+ seq_printf(s, "GDBGLSP_DBC[%d] = 0x%08x\n", sel, val);
+ }
+}
+
+static void dwc3_gadget_lsp(struct seq_file *s)
+{
+ struct dwc3 *dwc = s->private;
+ int i;
+ u32 reg;
+
+ for (i = 0; i < 16; i++) {
+ reg = DWC3_GDBGLSPMUX_DEVSELECT(i);
+ dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, reg);
+ reg = dwc3_readl(dwc->regs, DWC3_GDBGLSP);
+ seq_printf(s, "GDBGLSP[%d] = 0x%08x\n", i, reg);
+ }
+}
+
+static int dwc3_lsp_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned int current_mode;
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ current_mode = DWC3_GSTS_CURMOD(reg);
+
+ switch (current_mode) {
+ case DWC3_GSTS_CURMOD_HOST:
+ dwc3_host_lsp(s);
+ break;
+ case DWC3_GSTS_CURMOD_DEVICE:
+ dwc3_gadget_lsp(s);
+ break;
+ default:
+ seq_puts(s, "Mode is unknown, no LSP register printed\n");
+ break;
+ }
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_lsp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_lsp_show, inode->i_private);
+}
+
+static ssize_t dwc3_lsp_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ char buf[32] = { 0 };
+ u32 sel;
+ int ret;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ ret = kstrtouint(buf, 0, &sel);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->dbg_lsp_select = sel;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return count;
+}
+
+static const struct file_operations dwc3_lsp_fops = {
+ .open = dwc3_lsp_open,
+ .write = dwc3_lsp_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
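+
+/*
+ * Typical use of the "lsp_dump" file created in dwc3_debugfs_init()
+ * (illustrative; the exact path depends on the controller's device name
+ * under usb_debug_root, e.g. /sys/kernel/debug/usb/<dev>/):
+ *
+ *	echo 5 > lsp_dump	# select LSP mux index 5
+ *	cat lsp_dump		# print the corresponding GDBGLSP value(s)
+ */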
+
+static int dwc3_mode_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (DWC3_GCTL_PRTCAP(reg)) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ seq_puts(s, "host\n");
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ seq_puts(s, "device\n");
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ seq_puts(s, "otg\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
+ }
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_mode_show, inode->i_private);
+}
+
+static ssize_t dwc3_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ u32 mode = 0;
+ char buf[32];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return count;
+
+ if (!strncmp(buf, "host", 4))
+ mode = DWC3_GCTL_PRTCAP_HOST;
+
+ if (!strncmp(buf, "device", 6))
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+
+ if (!strncmp(buf, "otg", 3))
+ mode = DWC3_GCTL_PRTCAP_OTG;
+
+ dwc3_set_mode(dwc, mode);
+
+ return count;
+}
+
+static const struct file_operations dwc3_mode_fops = {
+ .open = dwc3_mode_open,
+ .write = dwc3_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
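+
+/*
+ * The "mode" file only honours writes when dr_mode is OTG; the accepted
+ * strings mirror what dwc3_mode_show() prints, e.g. (illustrative):
+ *
+ *	echo host > mode	# switch PRTCAP to host
+ *	echo device > mode	# switch PRTCAP to device
+ *	echo otg > mode		# hand control back to the OTG/role logic
+ */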
+
+static int dwc3_testmode_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= DWC3_DCTL_TSTCTRL_MASK;
+ reg >>= 1;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (reg) {
+ case 0:
+ seq_puts(s, "no test\n");
+ break;
+ case USB_TEST_J:
+ seq_puts(s, "test_j\n");
+ break;
+ case USB_TEST_K:
+ seq_puts(s, "test_k\n");
+ break;
+ case USB_TEST_SE0_NAK:
+ seq_puts(s, "test_se0_nak\n");
+ break;
+ case USB_TEST_PACKET:
+ seq_puts(s, "test_packet\n");
+ break;
+ case USB_TEST_FORCE_ENABLE:
+ seq_puts(s, "test_force_enable\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %d\n", reg);
+ }
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_testmode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_testmode_show, inode->i_private);
+}
+
+static ssize_t dwc3_testmode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 testmode = 0;
+ char buf[32];
+ int ret;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "test_j", 6))
+ testmode = USB_TEST_J;
+ else if (!strncmp(buf, "test_k", 6))
+ testmode = USB_TEST_K;
+ else if (!strncmp(buf, "test_se0_nak", 12))
+ testmode = USB_TEST_SE0_NAK;
+ else if (!strncmp(buf, "test_packet", 11))
+ testmode = USB_TEST_PACKET;
+ else if (!strncmp(buf, "test_force_enable", 17))
+ testmode = USB_TEST_FORCE_ENABLE;
+ else
+ testmode = 0;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_set_test_mode(dwc, testmode);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return count;
+}
+
+static const struct file_operations dwc3_testmode_fops = {
+ .open = dwc3_testmode_open,
+ .write = dwc3_testmode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
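+
+/*
+ * The "testmode" file accepts the strings parsed in dwc3_testmode_write(),
+ * e.g. (illustrative):
+ *
+ *	echo test_packet > testmode	# enter USB2 Test_Packet mode
+ *	cat testmode			# read back the current test mode
+ */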
+
+static int dwc3_link_state_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ enum dwc3_link_state state;
+ u32 reg;
+ u8 speed;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
+ seq_puts(s, "Not available\n");
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+ return 0;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ state = DWC3_DSTS_USBLNKST(reg);
+ speed = reg & DWC3_DSTS_CONNECTSPD;
+
+ seq_printf(s, "%s\n", (speed >= DWC3_DSTS_SUPERSPEED) ?
+ dwc3_gadget_link_string(state) :
+ dwc3_gadget_hs_link_string(state));
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_link_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_link_state_show, inode->i_private);
+}
+
+static ssize_t dwc3_link_state_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ enum dwc3_link_state state = 0;
+ char buf[32];
+ u32 reg;
+ u8 speed;
+ int ret;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "SS.Disabled", 11))
+ state = DWC3_LINK_STATE_SS_DIS;
+ else if (!strncmp(buf, "Rx.Detect", 9))
+ state = DWC3_LINK_STATE_RX_DET;
+ else if (!strncmp(buf, "SS.Inactive", 11))
+ state = DWC3_LINK_STATE_SS_INACT;
+ else if (!strncmp(buf, "Recovery", 8))
+ state = DWC3_LINK_STATE_RECOV;
+ else if (!strncmp(buf, "Compliance", 10))
+ state = DWC3_LINK_STATE_CMPLY;
+ else if (!strncmp(buf, "Loopback", 8))
+ state = DWC3_LINK_STATE_LPBK;
+ else
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+ return -EINVAL;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ speed = reg & DWC3_DSTS_CONNECTSPD;
+
+ if (speed < DWC3_DSTS_SUPERSPEED &&
+ state != DWC3_LINK_STATE_RECOV) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+ return -EINVAL;
+ }
+
+ dwc3_gadget_set_link_state(dwc, state);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return count;
+}
+
+static const struct file_operations dwc3_link_state_fops = {
+ .open = dwc3_link_state_open,
+ .write = dwc3_link_state_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
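+
+/*
+ * The "link_state" file accepts the SuperSpeed link-state names parsed in
+ * dwc3_link_state_write() and only works while the core is in device mode,
+ * e.g. (illustrative):
+ *
+ *	echo Loopback > link_state
+ *	cat link_state
+ */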
+
+struct dwc3_ep_file_map {
+ const char name[25];
+ const struct file_operations *const fops;
+};
+
+static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 mdwidth;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
+
+ /* Convert to bytes */
+ mdwidth = dwc3_mdwidth(dwc);
+
+ val *= mdwidth;
+ val >>= 3;
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 mdwidth;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
+
+ /* Convert to bytes */
+ mdwidth = dwc3_mdwidth(dwc);
+
+ val *= mdwidth;
+ val >>= 3;
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_event_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_transfer_type_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (!(dep->flags & DWC3_EP_ENABLED) || !dep->endpoint.desc) {
+ seq_puts(s, "--\n");
+ goto out;
+ }
+
+ switch (usb_endpoint_type(dep->endpoint.desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ seq_puts(s, "control\n");
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ seq_puts(s, "isochronous\n");
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ seq_puts(s, "bulk\n");
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ seq_puts(s, "interrupt\n");
+ break;
+ default:
+ seq_puts(s, "--\n");
+ }
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ int i;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->number <= 1) {
+ seq_puts(s, "--\n");
+ goto out;
+ }
+
+ seq_puts(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n");
+
+ for (i = 0; i < DWC3_TRB_NUM; i++) {
+ struct dwc3_trb *trb = &dep->trb_pool[i];
+ unsigned int type = DWC3_TRBCTL_TYPE(trb->ctrl);
+
+ seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d %c%c\n",
+ trb->bph, trb->bpl, trb->size,
+ dwc3_trb_type_string(type),
+ !!(trb->ctrl & DWC3_TRB_CTRL_IOC),
+ !!(trb->ctrl & DWC3_TRB_CTRL_ISP_IMI),
+ !!(trb->ctrl & DWC3_TRB_CTRL_CSP),
+ !!(trb->ctrl & DWC3_TRB_CTRL_CHN),
+ !!(trb->ctrl & DWC3_TRB_CTRL_LST),
+ !!(trb->ctrl & DWC3_TRB_CTRL_HWO),
+ dep->trb_enqueue == i ? 'E' : ' ',
+ dep->trb_dequeue == i ? 'D' : ' ');
+ }
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u64 ep_info;
+ u32 lower_32_bits;
+ u32 upper_32_bits;
+ u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
+ dwc3_writel(dwc->regs, DWC3_GDBGLSPMUX, reg);
+
+ lower_32_bits = dwc3_readl(dwc->regs, DWC3_GDBGEPINFO0);
+ upper_32_bits = dwc3_readl(dwc->regs, DWC3_GDBGEPINFO1);
+
+ ep_info = ((u64)upper_32_bits << 32) | lower_32_bits;
+ seq_printf(s, "0x%016llx\n", ep_info);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ pm_runtime_put_sync(dwc->dev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dwc3_tx_fifo_size);
+DEFINE_SHOW_ATTRIBUTE(dwc3_rx_fifo_size);
+DEFINE_SHOW_ATTRIBUTE(dwc3_tx_request_queue);
+DEFINE_SHOW_ATTRIBUTE(dwc3_rx_request_queue);
+DEFINE_SHOW_ATTRIBUTE(dwc3_rx_info_queue);
+DEFINE_SHOW_ATTRIBUTE(dwc3_descriptor_fetch_queue);
+DEFINE_SHOW_ATTRIBUTE(dwc3_event_queue);
+DEFINE_SHOW_ATTRIBUTE(dwc3_transfer_type);
+DEFINE_SHOW_ATTRIBUTE(dwc3_trb_ring);
+DEFINE_SHOW_ATTRIBUTE(dwc3_ep_info_register);
+
+static const struct dwc3_ep_file_map dwc3_ep_file_map[] = {
+ { "tx_fifo_size", &dwc3_tx_fifo_size_fops, },
+ { "rx_fifo_size", &dwc3_rx_fifo_size_fops, },
+ { "tx_request_queue", &dwc3_tx_request_queue_fops, },
+ { "rx_request_queue", &dwc3_rx_request_queue_fops, },
+ { "rx_info_queue", &dwc3_rx_info_queue_fops, },
+ { "descriptor_fetch_queue", &dwc3_descriptor_fetch_queue_fops, },
+ { "event_queue", &dwc3_event_queue_fops, },
+ { "transfer_type", &dwc3_transfer_type_fops, },
+ { "trb_ring", &dwc3_trb_ring_fops, },
+ { "GDBGEPINFO", &dwc3_ep_info_register_fops, },
+};
+
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{
+ struct dentry *dir;
+ int i;
+
+ dir = debugfs_create_dir(dep->name, dep->dwc->debug_root);
+ for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) {
+ const struct file_operations *fops = dwc3_ep_file_map[i].fops;
+ const char *name = dwc3_ep_file_map[i].name;
+
+ debugfs_create_file(name, 0444, dir, dep, fops);
+ }
+}
+
+void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
+{
+ debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root);
+}
+
+void dwc3_debugfs_init(struct dwc3 *dwc)
+{
+ struct dentry *root;
+
+ dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL);
+ if (!dwc->regset)
+ return;
+
+ dwc->dbg_lsp_select = DWC3_LSP_MUX_UNSELECTED;
+
+ dwc->regset->regs = dwc3_regs;
+ dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
+ dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+ dwc->regset->dev = dwc->dev;
+
+ root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
+ dwc->debug_root = root;
+ debugfs_create_regset32("regdump", 0444, root, dwc->regset);
+ debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
+
+ if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE))
+ debugfs_create_file("mode", 0644, root, dwc,
+ &dwc3_mode_fops);
+
+ if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) ||
+ IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
+ debugfs_create_file("testmode", 0644, root, dwc,
+ &dwc3_testmode_fops);
+ debugfs_create_file("link_state", 0644, root, dwc,
+ &dwc3_link_state_fops);
+ }
+}
+
+void dwc3_debugfs_exit(struct dwc3 *dwc)
+{
+ debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root);
+ kfree(dwc->regset);
+}
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
new file mode 100644
index 000000000..57ddd2e43
--- /dev/null
+++ b/drivers/usb/dwc3/drd.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drd.c - DesignWare USB3 DRD Controller Dual-role support
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/extcon.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "debug.h"
+#include "core.h"
+#include "gadget.h"
+
+static void dwc3_otg_disable_events(struct dwc3 *dwc, u32 disable_mask)
+{
+ u32 reg = dwc3_readl(dwc->regs, DWC3_OEVTEN);
+
+ reg &= ~(disable_mask);
+ dwc3_writel(dwc->regs, DWC3_OEVTEN, reg);
+}
+
+static void dwc3_otg_enable_events(struct dwc3 *dwc, u32 enable_mask)
+{
+ u32 reg = dwc3_readl(dwc->regs, DWC3_OEVTEN);
+
+ reg |= (enable_mask);
+ dwc3_writel(dwc->regs, DWC3_OEVTEN, reg);
+}
+
+static void dwc3_otg_clear_events(struct dwc3 *dwc)
+{
+ u32 reg = dwc3_readl(dwc->regs, DWC3_OEVT);
+
+ dwc3_writel(dwc->regs, DWC3_OEVTEN, reg);
+}
+
+#define DWC3_OTG_ALL_EVENTS (DWC3_OEVTEN_XHCIRUNSTPSETEN | \
+ DWC3_OEVTEN_DEVRUNSTPSETEN | DWC3_OEVTEN_HIBENTRYEN | \
+ DWC3_OEVTEN_CONIDSTSCHNGEN | DWC3_OEVTEN_HRRCONFNOTIFEN | \
+ DWC3_OEVTEN_HRRINITNOTIFEN | DWC3_OEVTEN_ADEVIDLEEN | \
+ DWC3_OEVTEN_ADEVBHOSTENDEN | DWC3_OEVTEN_ADEVHOSTEN | \
+ DWC3_OEVTEN_ADEVHNPCHNGEN | DWC3_OEVTEN_ADEVSRPDETEN | \
+ DWC3_OEVTEN_ADEVSESSENDDETEN | DWC3_OEVTEN_BDEVBHOSTENDEN | \
+ DWC3_OEVTEN_BDEVHNPCHNGEN | DWC3_OEVTEN_BDEVSESSVLDDETEN | \
+ DWC3_OEVTEN_BDEVVBUSCHNGEN)
+
+static irqreturn_t dwc3_otg_thread_irq(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = _dwc;
+
+ spin_lock(&dwc->lock);
+ if (dwc->otg_restart_host) {
+ dwc3_otg_host_init(dwc);
+ dwc->otg_restart_host = false;
+ }
+
+ spin_unlock(&dwc->lock);
+
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dwc3_otg_irq(int irq, void *_dwc)
+{
+ u32 reg;
+ struct dwc3 *dwc = _dwc;
+ irqreturn_t ret = IRQ_NONE;
+
+ reg = dwc3_readl(dwc->regs, DWC3_OEVT);
+ if (reg) {
+ /* ignore non-OTG events; we can't disable them in OEVTEN */
+ if (!(reg & DWC3_OTG_ALL_EVENTS)) {
+ dwc3_writel(dwc->regs, DWC3_OEVT, reg);
+ return IRQ_NONE;
+ }
+
+ if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST &&
+ !(reg & DWC3_OEVT_DEVICEMODE))
+ dwc->otg_restart_host = true;
+ dwc3_writel(dwc->regs, DWC3_OEVT, reg);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static void dwc3_otgregs_init(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /*
+ * Prevent host/device reset from resetting OTG core.
+ * If we don't do this then xhci_reset (USBCMD.HCRST) will reset
+ * the signal outputs sent to the PHY, the OTG FSM logic of the
+ * core and also the resets to the VBUS filters inside the core.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_OCFG);
+ reg |= DWC3_OCFG_SFTRSTMASK;
+ dwc3_writel(dwc->regs, DWC3_OCFG, reg);
+
+ /* Disable hibernation for simplicity */
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_GBLHIBERNATIONEN;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ /*
+ * Initialize OTG registers as per
+ * Figure 11-4 OTG Driver Overall Programming Flow
+ */
+ /* OCFG.SRPCap = 0, OCFG.HNPCap = 0 */
+ reg = dwc3_readl(dwc->regs, DWC3_OCFG);
+ reg &= ~(DWC3_OCFG_SRPCAP | DWC3_OCFG_HNPCAP);
+ dwc3_writel(dwc->regs, DWC3_OCFG, reg);
+ /* OEVT = FFFF */
+ dwc3_otg_clear_events(dwc);
+ /* OEVTEN = 0 */
+ dwc3_otg_disable_events(dwc, DWC3_OTG_ALL_EVENTS);
+ /* OEVTEN.ConIDStsChngEn = 1. Instead we enable all events */
+ dwc3_otg_enable_events(dwc, DWC3_OTG_ALL_EVENTS);
+ /*
+ * OCTL.PeriMode = 1, OCTL.DevSetHNPEn = 0, OCTL.HstSetHNPEn = 0,
+ * OCTL.HNPReq = 0
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg |= DWC3_OCTL_PERIMODE;
+ reg &= ~(DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HSTSETHNPEN |
+ DWC3_OCTL_HNPREQ);
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+}
+
+static int dwc3_otg_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "otg");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
+void dwc3_otg_init(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /*
+ * As per Figure 11-4 OTG Driver Overall Programming Flow,
+ * block "Initialize GCTL for OTG operation".
+ */
+ /* GCTL.PrtCapDir=2'b11 */
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ /* GUSB2PHYCFG0.SusPHY=0 */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ /* Initialize OTG registers */
+ dwc3_otgregs_init(dwc);
+}
+
+void dwc3_otg_exit(struct dwc3 *dwc)
+{
+ /* disable all OTG IRQs */
+ dwc3_otg_disable_events(dwc, DWC3_OTG_ALL_EVENTS);
+ /* clear all events */
+ dwc3_otg_clear_events(dwc);
+}
+
+/* should be called before Host controller driver is started */
+void dwc3_otg_host_init(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /* As per Figure 11-10 A-Device Flow Diagram */
+ /* OCFG.HNPCap = 0, OCFG.SRPCap = 0. Already 0 */
+
+ /*
+ * OCTL.PeriMode=0, OCTL.TermSelDLPulse = 0,
+ * OCTL.DevSetHNPEn = 0, OCTL.HstSetHNPEn = 0
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg &= ~(DWC3_OCTL_PERIMODE | DWC3_OCTL_TERMSELIDPULSE |
+ DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HSTSETHNPEN);
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+
+ /*
+ * OCFG.DisPrtPwrCutoff = 0/1
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_OCFG);
+ reg &= ~DWC3_OCFG_DISPWRCUTTOFF;
+ dwc3_writel(dwc->regs, DWC3_OCFG, reg);
+
+ /*
+ * OCFG.SRPCap = 1, OCFG.HNPCap = GHWPARAMS6.HNP_CAP
+ * We don't want SRP/HNP for simple dual-role so leave
+ * these disabled.
+ */
+
+ /*
+ * OEVTEN.OTGADevHostEvntEn = 1
+ * OEVTEN.OTGADevSessEndDetEvntEn = 1
+ * We don't want HNP/role-swap so leave these disabled.
+ */
+
+ /* GUSB2PHYCFG.ULPIAutoRes = 1/0, GUSB2PHYCFG.SusPHY = 1 */
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
+ /* Set Port Power to enable VBUS: OCTL.PrtPwrCtl = 1 */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg |= DWC3_OCTL_PRTPWRCTL;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+}
+
+/* should be called after Host controller driver is stopped */
+static void dwc3_otg_host_exit(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /*
+ * Exit from A-device flow as per
+ * Figure 11-4 OTG Driver Overall Programming Flow
+ */
+
+ /*
+ * OEVTEN.OTGADevBHostEndEvntEn=0, OEVTEN.OTGADevHNPChngEvntEn=0
+ * OEVTEN.OTGADevSessEndDetEvntEn=0,
+ * OEVTEN.OTGADevHostEvntEn = 0
+ * But we don't disable any OTG events
+ */
+
+ /* OCTL.HstSetHNPEn = 0, OCTL.PrtPwrCtl=0 */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg &= ~(DWC3_OCTL_HSTSETHNPEN | DWC3_OCTL_PRTPWRCTL);
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+}
+
+/* should be called before the gadget controller driver is started */
+static void dwc3_otg_device_init(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /* As per Figure 11-20 B-Device Flow Diagram */
+
+ /*
+ * OCFG.HNPCap = GHWPARAMS6.HNP_CAP, OCFG.SRPCap = 1
+ * but we keep them 0 for simple dual-role operation.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_OCFG);
+ /* OCFG.OTGSftRstMsk = 0/1 */
+ reg |= DWC3_OCFG_SFTRSTMASK;
+ dwc3_writel(dwc->regs, DWC3_OCFG, reg);
+ /*
+ * OCTL.PeriMode = 1
+ * OCTL.TermSelDLPulse = 0/1, OCTL.HNPReq = 0
+ * OCTL.DevSetHNPEn = 0, OCTL.HstSetHNPEn = 0
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg |= DWC3_OCTL_PERIMODE;
+ reg &= ~(DWC3_OCTL_TERMSELIDPULSE | DWC3_OCTL_HNPREQ |
+ DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HSTSETHNPEN);
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+ /* OEVTEN.OTGBDevSesVldDetEvntEn = 1 */
+ dwc3_otg_enable_events(dwc, DWC3_OEVTEN_BDEVSESSVLDDETEN);
+ /* GUSB2PHYCFG.ULPIAutoRes = 0, GUSB2PHYCFG0.SusPHY = 1 */
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+ /* GCTL.GblHibernationEn = 0. Already 0. */
+}
+
+/* should be called after the gadget controller driver is stopped */
+static void dwc3_otg_device_exit(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /*
+ * Exit from B-device flow as per
+ * Figure 11-4 OTG Driver Overall Programming Flow
+ */
+
+ /*
+ * OEVTEN.OTGBDevHNPChngEvntEn = 0
+ * OEVTEN.OTGBDevVBusChngEvntEn = 0
+ * OEVTEN.OTGBDevBHostEndEvntEn = 0
+ */
+ dwc3_otg_disable_events(dwc, DWC3_OEVTEN_BDEVHNPCHNGEN |
+ DWC3_OEVTEN_BDEVVBUSCHNGEN |
+ DWC3_OEVTEN_BDEVBHOSTENDEN);
+
+ /* OCTL.DevSetHNPEn = 0, OCTL.HNPReq = 0, OCTL.PeriMode=1 */
+ reg = dwc3_readl(dwc->regs, DWC3_OCTL);
+ reg &= ~(DWC3_OCTL_DEVSETHNPEN | DWC3_OCTL_HNPREQ);
+ reg |= DWC3_OCTL_PERIMODE;
+ dwc3_writel(dwc->regs, DWC3_OCTL, reg);
+}
+
+void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus)
+{
+ int ret;
+ u32 reg;
+ int id;
+ unsigned long flags;
+
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return;
+
+	/* do nothing if the role was changed away from OTG via debugfs */
+ if (dwc->current_dr_role != DWC3_GCTL_PRTCAP_OTG)
+ return;
+
+ if (!ignore_idstatus) {
+ reg = dwc3_readl(dwc->regs, DWC3_OSTS);
+ id = !!(reg & DWC3_OSTS_CONIDSTS);
+
+ dwc->desired_otg_role = id ? DWC3_OTG_ROLE_DEVICE :
+ DWC3_OTG_ROLE_HOST;
+ }
+
+ if (dwc->desired_otg_role == dwc->current_otg_role)
+ return;
+
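+	/*
+	 * Tear down whatever is running for the current role first, then
+	 * reprogram the OTG registers and bring up the new role below.
+	 */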
+ switch (dwc->current_otg_role) {
+ case DWC3_OTG_ROLE_HOST:
+ dwc3_host_exit(dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_otg_host_exit(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ case DWC3_OTG_ROLE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_event_buffers_cleanup(dwc);
+ dwc3_otg_device_exit(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ dwc->current_otg_role = dwc->desired_otg_role;
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (dwc->desired_otg_role) {
+ case DWC3_OTG_ROLE_HOST:
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_otgregs_init(dwc);
+ dwc3_otg_host_init(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to initialize host\n");
+ } else {
+ if (dwc->usb2_phy)
+ otg_set_vbus(dwc->usb2_phy->otg, true);
+ if (dwc->usb2_generic_phy)
+ phy_set_mode(dwc->usb2_generic_phy,
+ PHY_MODE_USB_HOST);
+ }
+ break;
+ case DWC3_OTG_ROLE_DEVICE:
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_otgregs_init(dwc);
+ dwc3_otg_device_init(dwc);
+ dwc3_event_buffers_setup(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ if (dwc->usb2_phy)
+ otg_set_vbus(dwc->usb2_phy->otg, false);
+ if (dwc->usb2_generic_phy)
+ phy_set_mode(dwc->usb2_generic_phy,
+ PHY_MODE_USB_DEVICE);
+ ret = dwc3_gadget_init(dwc);
+ if (ret)
+ dev_err(dwc->dev, "failed to initialize peripheral\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void dwc3_drd_update(struct dwc3 *dwc)
+{
+ int id;
+
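+	/*
+	 * With an extcon device, an asserted EXTCON_USB_HOST cable maps to
+	 * host mode; anything else (including a read error) maps to device
+	 * mode.
+	 */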
+ if (dwc->edev) {
+ id = extcon_get_state(dwc->edev, EXTCON_USB_HOST);
+ if (id < 0)
+ id = 0;
+ dwc3_set_mode(dwc, id ?
+ DWC3_GCTL_PRTCAP_HOST :
+ DWC3_GCTL_PRTCAP_DEVICE);
+ }
+}
+
+static int dwc3_drd_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3 *dwc = container_of(nb, struct dwc3, edev_nb);
+
+ dwc3_set_mode(dwc, event ?
+ DWC3_GCTL_PRTCAP_HOST :
+ DWC3_GCTL_PRTCAP_DEVICE);
+
+ return NOTIFY_DONE;
+}
+
+#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
+#define ROLE_SWITCH 1
+static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
+ u32 mode;
+
+ switch (role) {
+ case USB_ROLE_HOST:
+ mode = DWC3_GCTL_PRTCAP_HOST;
+ break;
+ case USB_ROLE_DEVICE:
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ break;
+ default:
+ if (dwc->role_switch_default_mode == USB_DR_MODE_HOST)
+ mode = DWC3_GCTL_PRTCAP_HOST;
+ else
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ break;
+ }
+
+ dwc3_set_mode(dwc, mode);
+ return 0;
+}
+
+static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
+{
+ struct dwc3 *dwc = usb_role_switch_get_drvdata(sw);
+ unsigned long flags;
+ enum usb_role role;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ role = USB_ROLE_HOST;
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ role = USB_ROLE_DEVICE;
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ role = dwc->current_otg_role;
+ break;
+ default:
+ if (dwc->role_switch_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else
+ role = USB_ROLE_DEVICE;
+ break;
+ }
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return role;
+}
+
+static int dwc3_setup_role_switch(struct dwc3 *dwc)
+{
+ struct usb_role_switch_desc dwc3_role_switch = {NULL};
+ u32 mode;
+
+ dwc->role_switch_default_mode = usb_get_role_switch_default_mode(dwc->dev);
+ if (dwc->role_switch_default_mode == USB_DR_MODE_HOST) {
+ mode = DWC3_GCTL_PRTCAP_HOST;
+ } else {
+ dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ }
+ dwc3_set_mode(dwc, mode);
+
+ dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
+ dwc3_role_switch.set = dwc3_usb_role_switch_set;
+ dwc3_role_switch.get = dwc3_usb_role_switch_get;
+ dwc3_role_switch.driver_data = dwc;
+ dwc->role_sw = usb_role_switch_register(dwc->dev, &dwc3_role_switch);
+ if (IS_ERR(dwc->role_sw))
+ return PTR_ERR(dwc->role_sw);
+
+ if (dwc->dev->of_node) {
+ /* populate connector entry */
+ int ret = devm_of_platform_populate(dwc->dev);
+
+ if (ret) {
+ usb_role_switch_unregister(dwc->role_sw);
+ dwc->role_sw = NULL;
+ dev_err(dwc->dev, "DWC3 platform devices creation failed: %i\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#else
+#define ROLE_SWITCH 0
+#define dwc3_setup_role_switch(x) 0
+#endif
+
+int dwc3_drd_init(struct dwc3 *dwc)
+{
+ int ret, irq;
+
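+	/*
+	 * Role detection is tried in order: a "usb-role-switch" fwnode, an
+	 * extcon cable notifier, and finally the controller's own OTG block
+	 * (ID pin events via the OTG interrupt).
+	 */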
+ if (ROLE_SWITCH &&
+ device_property_read_bool(dwc->dev, "usb-role-switch"))
+ return dwc3_setup_role_switch(dwc);
+
+ if (dwc->edev) {
+ dwc->edev_nb.notifier_call = dwc3_drd_notifier;
+ ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
+ &dwc->edev_nb);
+ if (ret < 0) {
+ dev_err(dwc->dev, "couldn't register cable notifier\n");
+ return ret;
+ }
+
+ dwc3_drd_update(dwc);
+ } else {
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+
+ /* use OTG block to get ID event */
+ irq = dwc3_otg_get_irq(dwc);
+ if (irq < 0)
+ return irq;
+
+ dwc->otg_irq = irq;
+
+ /* disable all OTG IRQs */
+ dwc3_otg_disable_events(dwc, DWC3_OTG_ALL_EVENTS);
+ /* clear all events */
+ dwc3_otg_clear_events(dwc);
+
+ ret = request_threaded_irq(dwc->otg_irq, dwc3_otg_irq,
+ dwc3_otg_thread_irq,
+ IRQF_SHARED, "dwc3-otg", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ dwc->otg_irq, ret);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ dwc3_otg_init(dwc);
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+ }
+
+ return 0;
+}
+
+void dwc3_drd_exit(struct dwc3 *dwc)
+{
+ unsigned long flags;
+
+ if (dwc->role_sw)
+ usb_role_switch_unregister(dwc->role_sw);
+
+ if (dwc->edev)
+ extcon_unregister_notifier(dwc->edev, EXTCON_USB_HOST,
+ &dwc->edev_nb);
+
+ cancel_work_sync(&dwc->drd_work);
+
+	/* the role may have been changed via debugfs; clean up based on the current role */
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_gadget_exit(dwc);
+ dwc3_event_buffers_cleanup(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ dwc3_otg_exit(dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->desired_otg_role = DWC3_OTG_ROLE_IDLE;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_otg_update(dwc, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (dwc->otg_irq)
+ free_irq(dwc->otg_irq, dwc);
+}
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
new file mode 100644
index 000000000..173cf3579
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-am62.c - TI specific Glue layer for AM62 DWC3 USB Controller
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/consumer.h>
+
+/* USB WRAPPER register offsets */
+#define USBSS_PID 0x0
+#define USBSS_OVERCURRENT_CTRL 0x4
+#define USBSS_PHY_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_CORE_STAT 0x14
+#define USBSS_HOST_VBUS_CTRL 0x18
+#define USBSS_MODE_CONTROL 0x1c
+#define USBSS_WAKEUP_CONFIG 0x30
+#define USBSS_WAKEUP_STAT 0x34
+#define USBSS_OVERRIDE_CONFIG 0x38
+#define USBSS_IRQ_MISC_STATUS_RAW 0x430
+#define USBSS_IRQ_MISC_STATUS 0x434
+#define USBSS_IRQ_MISC_ENABLE_SET 0x438
+#define USBSS_IRQ_MISC_ENABLE_CLR 0x43c
+#define USBSS_IRQ_MISC_EOI 0x440
+#define USBSS_INTR_TEST 0x490
+#define USBSS_VBUS_FILTER 0x614
+#define USBSS_VBUS_STAT 0x618
+#define USBSS_DEBUG_CFG 0x708
+#define USBSS_DEBUG_DATA 0x70c
+#define USBSS_HOST_HUB_CTRL 0x714
+
+/* PHY CONFIG register bits */
+#define USBSS_PHY_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS_PHY_VBUS_SEL_SHIFT 1
+#define USBSS_PHY_LANE_REVERSE BIT(0)
+
+/* MODE CONTROL register bits */
+#define USBSS_MODE_VALID BIT(0)
+
+/* WAKEUP CONFIG register bits */
+#define USBSS_WAKEUP_CFG_OVERCURRENT_EN BIT(3)
+#define USBSS_WAKEUP_CFG_LINESTATE_EN BIT(2)
+#define USBSS_WAKEUP_CFG_SESSVALID_EN BIT(1)
+#define USBSS_WAKEUP_CFG_VBUSVALID_EN BIT(0)
+
+/* WAKEUP STAT register bits */
+#define USBSS_WAKEUP_STAT_OVERCURRENT BIT(4)
+#define USBSS_WAKEUP_STAT_LINESTATE BIT(3)
+#define USBSS_WAKEUP_STAT_SESSVALID BIT(2)
+#define USBSS_WAKEUP_STAT_VBUSVALID BIT(1)
+#define USBSS_WAKEUP_STAT_CLR BIT(0)
+
+/* IRQ_MISC_STATUS_RAW register bits */
+#define USBSS_IRQ_MISC_RAW_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_RAW_SESSVALID BIT(20)
+
+/* IRQ_MISC_STATUS register bits */
+#define USBSS_IRQ_MISC_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_SET register bits */
+#define USBSS_IRQ_MISC_ENABLE_SET_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_SET_SESSVALID BIT(20)
+
+/* IRQ_MISC_ENABLE_CLR register bits */
+#define USBSS_IRQ_MISC_ENABLE_CLR_VBUSVALID BIT(22)
+#define USBSS_IRQ_MISC_ENABLE_CLR_SESSVALID BIT(20)
+
+/* IRQ_MISC_EOI register bits */
+#define USBSS_IRQ_MISC_EOI_VECTOR BIT(0)
+
+/* VBUS_STAT register bits */
+#define USBSS_VBUS_STAT_SESSVALID BIT(2)
+#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
+
+/* Mask for PHY PLL REFCLK */
+#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+
+#define DWC3_AM62_AUTOSUSPEND_DELAY 100
+
+struct dwc3_data {
+ struct device *dev;
+ void __iomem *usbss;
+ struct clk *usb2_refclk;
+ int rate_code;
+ struct regmap *syscon;
+ unsigned int offset;
+ unsigned int vbus_divider;
+};
+
+static const int dwc3_ti_rate_table[] = {	/* in kHz */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
+{
+ return readl((data->usbss) + offset);
+}
+
+static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
+{
+ writel(value, (data->usbss) + offset);
+}
+
+static int phy_syscon_pll_refclk(struct dwc3_data *data)
+{
+ struct device *dev = data->dev;
+ struct device_node *node = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+ if (IS_ERR(syscon)) {
+ dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
+ return PTR_ERR(syscon);
+ }
+
+ data->syscon = syscon;
+
+ ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
+ 0, &args);
+ if (ret)
+ return ret;
+
+ data->offset = args.args[0];
+
+ ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
+ if (ret) {
+ dev_err(dev, "failed to set phy pll reference clock rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3_data *data;
+ int i, ret;
+ unsigned long rate;
+ u32 reg;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ /* Calculate the rate code */
+ rate = clk_get_rate(data->usb2_refclk);
+ rate /= 1000; // To KHz
+ for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
+ if (dwc3_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ return -EINVAL;
+ }
+
+ data->rate_code = i;
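+	/*
+	 * Illustrative example: a 24 MHz "ref" clock gives rate = 24000 kHz,
+	 * which matches index 5 of dwc3_ti_rate_table, so rate code 5 is
+	 * later written into the PHY_PLL_REFCLK_MASK field via the syscon.
+	 */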
+
+ /* Read the syscon property and set the rate code */
+ ret = phy_syscon_pll_refclk(data);
+ if (ret)
+ return ret;
+
+ /* VBUS divider select */
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
+ if (data->vbus_divider)
+ reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+
+ dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+	/* Don't ignore the runtime PM state of this device's children */
+ pm_suspend_ignore_children(dev, false);
+ clk_prepare_enable(data->usb2_refclk);
+ pm_runtime_get_noresume(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to create dwc3 core: %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Set mode valid bit to indicate role is valid */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg |= USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ /* Setting up autosuspend */
+ pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+err_pm_disable:
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ return ret;
+}
+
+static int dwc3_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ return 0;
+}
+
+static int dwc3_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dwc3_data *data = platform_get_drvdata(pdev);
+ u32 reg;
+
+ device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+
+ /* Clear mode valid bit */
+ reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg &= ~USBSS_MODE_VALID;
+ dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+
+ pm_runtime_put_sync(dev);
+ clk_disable_unprepare(data->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dwc3_ti_suspend_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->usb2_refclk);
+
+ return 0;
+}
+
+static int dwc3_ti_resume_common(struct device *dev)
+{
+ struct dwc3_data *data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(data->usb2_refclk);
+
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(dwc3_ti_pm_ops, dwc3_ti_suspend_common,
+ dwc3_ti_resume_common, NULL);
+
+#define DEV_PM_OPS (&dwc3_ti_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id dwc3_ti_of_match[] = {
+ { .compatible = "ti,am62-usb"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_ti_of_match);
+
+static struct platform_driver dwc3_ti_driver = {
+ .probe = dwc3_ti_probe,
+ .remove = dwc3_ti_remove,
+ .driver = {
+ .name = "dwc3-am62",
+ .pm = DEV_PM_OPS,
+ .of_match_table = dwc3_ti_of_match,
+ },
+};
+
+module_platform_driver(dwc3_ti_driver);
+
+MODULE_ALIAS("platform:dwc3-am62");
+MODULE_AUTHOR("Aswath Govindraju <a-govindraju@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DesignWare USB3 TI Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
new file mode 100644
index 000000000..4be6a873b
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-exynos.c - Samsung Exynos DWC3 Specific Glue layer
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
+
+#define DWC3_EXYNOS_MAX_CLOCKS 4
+
+struct dwc3_exynos_driverdata {
+ const char *clk_names[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
+};
+
+struct dwc3_exynos {
+ struct device *dev;
+
+ const char **clk_names;
+ struct clk *clks[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
+
+ struct regulator *vdd33;
+ struct regulator *vdd10;
+};
+
+static int dwc3_exynos_probe(struct platform_device *pdev)
+{
+ struct dwc3_exynos *exynos;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct dwc3_exynos_driverdata *driver_data;
+ int i, ret;
+
+ exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
+ if (!exynos)
+ return -ENOMEM;
+
+ driver_data = of_device_get_match_data(dev);
+ exynos->dev = dev;
+ exynos->num_clks = driver_data->num_clks;
+ exynos->clk_names = (const char **)driver_data->clk_names;
+ exynos->suspend_clk_idx = driver_data->suspend_clk_idx;
+
+ platform_set_drvdata(pdev, exynos);
+
+ for (i = 0; i < exynos->num_clks; i++) {
+ exynos->clks[i] = devm_clk_get(dev, exynos->clk_names[i]);
+ if (IS_ERR(exynos->clks[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ exynos->clk_names[i]);
+ return PTR_ERR(exynos->clks[i]);
+ }
+ }
+
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
+ }
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_prepare_enable(exynos->clks[exynos->suspend_clk_idx]);
+
+ exynos->vdd33 = devm_regulator_get(dev, "vdd33");
+ if (IS_ERR(exynos->vdd33)) {
+ ret = PTR_ERR(exynos->vdd33);
+ goto vdd33_err;
+ }
+ ret = regulator_enable(exynos->vdd33);
+ if (ret) {
+ dev_err(dev, "Failed to enable VDD33 supply\n");
+ goto vdd33_err;
+ }
+
+ exynos->vdd10 = devm_regulator_get(dev, "vdd10");
+ if (IS_ERR(exynos->vdd10)) {
+ ret = PTR_ERR(exynos->vdd10);
+ goto vdd10_err;
+ }
+ ret = regulator_enable(exynos->vdd10);
+ if (ret) {
+ dev_err(dev, "Failed to enable VDD10 supply\n");
+ goto vdd10_err;
+ }
+
+ if (node) {
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to add dwc3 core\n");
+ goto populate_err;
+ }
+ } else {
+ dev_err(dev, "no device node, failed to add dwc3 core\n");
+ ret = -ENODEV;
+ goto populate_err;
+ }
+
+ return 0;
+
+populate_err:
+ regulator_disable(exynos->vdd10);
+vdd10_err:
+ regulator_disable(exynos->vdd33);
+vdd33_err:
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
+
+ return ret;
+}
+
+static int dwc3_exynos_remove(struct platform_device *pdev)
+{
+ struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
+ int i;
+
+ of_platform_depopulate(&pdev->dev);
+
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
+
+ regulator_disable(exynos->vdd33);
+ regulator_disable(exynos->vdd10);
+
+ return 0;
+}
+
+static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
+ .clk_names = { "usbdrd30" },
+ .num_clks = 1,
+ .suspend_clk_idx = -1,
+};
+
+static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
+ .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
+ .num_clks = 4,
+ .suspend_clk_idx = 1,
+};
+
+	/* OEVTEN.ConIDStsChngEn = 1. Instead, we enable all events */
+ .clk_names = { "usbdrd30", "usbdrd30_susp_clk", "usbdrd30_axius_clk" },
+ .num_clks = 3,
+ .suspend_clk_idx = 1,
+};
+
+static const struct of_device_id exynos_dwc3_match[] = {
+ {
+ .compatible = "samsung,exynos5250-dwusb3",
+ .data = &exynos5250_drvdata,
+ }, {
+ .compatible = "samsung,exynos5433-dwusb3",
+ .data = &exynos5433_drvdata,
+ }, {
+ .compatible = "samsung,exynos7-dwusb3",
+ .data = &exynos7_drvdata,
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_exynos_suspend(struct device *dev)
+{
+ struct dwc3_exynos *exynos = dev_get_drvdata(dev);
+ int i;
+
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ regulator_disable(exynos->vdd33);
+ regulator_disable(exynos->vdd10);
+
+ return 0;
+}
+
+static int dwc3_exynos_resume(struct device *dev)
+{
+ struct dwc3_exynos *exynos = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = regulator_enable(exynos->vdd33);
+ if (ret) {
+ dev_err(dev, "Failed to enable VDD33 supply\n");
+ return ret;
+ }
+ ret = regulator_enable(exynos->vdd10);
+ if (ret) {
+ dev_err(dev, "Failed to enable VDD10 supply\n");
+ return ret;
+ }
+
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_exynos_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_exynos_suspend, dwc3_exynos_resume)
+};
+
+#define DEV_PM_OPS (&dwc3_exynos_dev_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver dwc3_exynos_driver = {
+ .probe = dwc3_exynos_probe,
+ .remove = dwc3_exynos_remove,
+ .driver = {
+ .name = "exynos-dwc3",
+ .of_match_table = exynos_dwc3_match,
+ .pm = DEV_PM_OPS,
+ },
+};
+
+module_platform_driver(dwc3_exynos_driver);
+
+MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 Exynos Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c
new file mode 100644
index 000000000..f6e3817fa
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-haps.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-haps.c - Synopsys HAPS PCI Specific glue layer
+ *
+ * Copyright (C) 2018 Synopsys, Inc.
+ *
+ * Authors: Thinh Nguyen <thinhn@synopsys.com>,
+ * John Youn <johnyoun@synopsys.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+/**
+ * struct dwc3_haps - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ */
+struct dwc3_haps {
+ struct platform_device *dwc3;
+ struct pci_dev *pci;
+};
+
+static const struct property_entry initial_properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ { },
+};
+
+static const struct software_node dwc3_haps_swnode = {
+ .properties = initial_properties,
+};
+
+static int dwc3_haps_probe(struct pci_dev *pci,
+ const struct pci_device_id *id)
+{
+ struct dwc3_haps *dwc;
+ struct device *dev = &pci->dev;
+ struct resource res[2];
+ int ret;
+
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(dev, "failed to enable pci device\n");
+ return -ENODEV;
+ }
+
+ pci_set_master(pci);
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!dwc->dwc3)
+ return -ENOMEM;
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
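+	/* Hand the PCI BAR 0 region and the legacy PCI IRQ to the dwc3 core */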
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = "dwc_usb3";
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = "dwc_usb3";
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(dev, "couldn't add resources to dwc3 device\n");
+ goto err;
+ }
+
+ dwc->pci = pci;
+ dwc->dwc3->dev.parent = dev;
+
+ ret = device_add_software_node(&dwc->dwc3->dev, &dwc3_haps_swnode);
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(dwc->dwc3);
+ if (ret) {
+ dev_err(dev, "failed to register dwc3 device\n");
+ goto err;
+ }
+
+ pci_set_drvdata(pci, dwc);
+
+ return 0;
+err:
+ device_remove_software_node(&dwc->dwc3->dev);
+ platform_device_put(dwc->dwc3);
+ return ret;
+}
+
+static void dwc3_haps_remove(struct pci_dev *pci)
+{
+ struct dwc3_haps *dwc = pci_get_drvdata(pci);
+
+ device_remove_software_node(&dwc->dwc3->dev);
+ platform_device_unregister(dwc->dwc3);
+}
+
+static const struct pci_device_id dwc3_haps_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ /*
+		 * i.MX6QP and i.MX7D platforms use a PCIe controller with the
+		 * same VID and PID as this USB controller. The system may
+		 * incorrectly match this driver to that PCIe controller. To
+		 * work around this, specifically use class type USB to prevent
+		 * incorrect driver matching.
+ */
+ .class = (PCI_CLASS_SERIAL_USB << 8),
+ .class_mask = 0xffff00,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI),
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31),
+ },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, dwc3_haps_id_table);
+
+static struct pci_driver dwc3_haps_driver = {
+ .name = "dwc3-haps",
+ .id_table = dwc3_haps_id_table,
+ .probe = dwc3_haps_probe,
+ .remove = dwc3_haps_remove,
+};
+
+MODULE_AUTHOR("Thinh Nguyen <thinhn@synopsys.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys HAPS PCI Glue Layer");
+
+module_pci_driver(dwc3_haps_driver);
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
new file mode 100644
index 000000000..174f07614
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-imx8mp.c - NXP imx8mp Specific Glue layer
+ *
+ * Copyright (c) 2020 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+
+/* USB wakeup registers */
+#define USB_WAKEUP_CTRL 0x00
+
+/* Global wakeup interrupt enable, also used to clear interrupt */
+#define USB_WAKEUP_EN BIT(31)
+/* Wakeup from connect or disconnect, only for superspeed */
+#define USB_WAKEUP_SS_CONN BIT(5)
+/* 0 selects vbus_valid, 1 selects sessvld */
+#define USB_WAKEUP_VBUS_SRC_SESS_VAL BIT(4)
+/* Enable signal for wake up from u3 state */
+#define USB_WAKEUP_U3_EN BIT(3)
+/* Enable signal for wake up from id change */
+#define USB_WAKEUP_ID_EN BIT(2)
+/* Enable signal for wake up from vbus change */
+#define USB_WAKEUP_VBUS_EN BIT(1)
+/* Enable signal for wake up from dp/dm change */
+#define USB_WAKEUP_DPDM_EN BIT(0)
+
+#define USB_WAKEUP_EN_MASK GENMASK(5, 0)
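+
+/*
+ * Typical usage (see dwc3_imx8mp_wakeup_enable()): host mode arms
+ * USB_WAKEUP_EN | USB_WAKEUP_SS_CONN | USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN,
+ * while device mode arms USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
+ * USB_WAKEUP_VBUS_SRC_SESS_VAL.
+ */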
+
+/* USB glue registers */
+#define USB_CTRL0 0x00
+#define USB_CTRL1 0x04
+
+#define USB_CTRL0_PORTPWR_EN BIT(12) /* 1 - PPC enabled (default) */
+#define USB_CTRL0_USB3_FIXED BIT(22) /* 1 - USB3 permanent attached */
+#define USB_CTRL0_USB2_FIXED BIT(23) /* 1 - USB2 permanent attached */
+
+#define USB_CTRL1_OC_POLARITY BIT(16) /* 0 - HIGH / 1 - LOW */
+#define USB_CTRL1_PWR_POLARITY BIT(17) /* 0 - HIGH / 1 - LOW */
+
+struct dwc3_imx8mp {
+ struct device *dev;
+ struct platform_device *dwc3;
+ void __iomem *hsio_blk_base;
+ void __iomem *glue_base;
+ struct clk *hsio_clk;
+ struct clk *suspend_clk;
+ int irq;
+ bool pm_suspended;
+ bool wakeup_pending;
+};
+
+static void imx8mp_configure_glue(struct dwc3_imx8mp *dwc3_imx)
+{
+ struct device *dev = dwc3_imx->dev;
+ u32 value;
+
+ if (!dwc3_imx->glue_base)
+ return;
+
+ value = readl(dwc3_imx->glue_base + USB_CTRL0);
+
+ if (device_property_read_bool(dev, "fsl,permanently-attached"))
+ value |= (USB_CTRL0_USB2_FIXED | USB_CTRL0_USB3_FIXED);
+ else
+ value &= ~(USB_CTRL0_USB2_FIXED | USB_CTRL0_USB3_FIXED);
+
+ if (device_property_read_bool(dev, "fsl,disable-port-power-control"))
+ value &= ~(USB_CTRL0_PORTPWR_EN);
+ else
+ value |= USB_CTRL0_PORTPWR_EN;
+
+ writel(value, dwc3_imx->glue_base + USB_CTRL0);
+
+ value = readl(dwc3_imx->glue_base + USB_CTRL1);
+ if (device_property_read_bool(dev, "fsl,over-current-active-low"))
+ value |= USB_CTRL1_OC_POLARITY;
+ else
+ value &= ~USB_CTRL1_OC_POLARITY;
+
+ if (device_property_read_bool(dev, "fsl,power-active-low"))
+ value |= USB_CTRL1_PWR_POLARITY;
+ else
+ value &= ~USB_CTRL1_PWR_POLARITY;
+
+ writel(value, dwc3_imx->glue_base + USB_CTRL1);
+}
+
+static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
+{
+ struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3);
+ u32 val;
+
+ if (!dwc3)
+ return;
+
+ val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
+
+ if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci)
+ val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN |
+ USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN;
+ else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
+ USB_WAKEUP_VBUS_SRC_SESS_VAL;
+
+ writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
+}
+
+static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx)
+{
+ u32 val;
+
+ val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
+ val &= ~(USB_WAKEUP_EN | USB_WAKEUP_EN_MASK);
+ writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
+}
+
+static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
+{
+ struct dwc3_imx8mp *dwc3_imx = _dwc3_imx;
+ struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
+
+ if (!dwc3_imx->pm_suspended)
+ return IRQ_HANDLED;
+
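+	/*
+	 * The wakeup IRQ fired while suspended: mask it here (it is
+	 * re-enabled on resume), note the pending wakeup, and kick the
+	 * relevant controller out of runtime suspend to handle the event.
+	 */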
+ disable_irq_nosync(dwc3_imx->irq);
+ dwc3_imx->wakeup_pending = true;
+
+ if ((dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc->xhci)
+ pm_runtime_resume(&dwc->xhci->dev);
+ else if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ pm_runtime_get(dwc->dev);
+
+ return IRQ_HANDLED;
+}
+
+static int dwc3_imx8mp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dwc3_np, *node = dev->of_node;
+ struct dwc3_imx8mp *dwc3_imx;
+ struct resource *res;
+ int err, irq;
+
+ if (!node) {
+ dev_err(dev, "device node not found\n");
+ return -EINVAL;
+ }
+
+ dwc3_imx = devm_kzalloc(dev, sizeof(*dwc3_imx), GFP_KERNEL);
+ if (!dwc3_imx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dwc3_imx);
+
+ dwc3_imx->dev = dev;
+
+ dwc3_imx->hsio_blk_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dwc3_imx->hsio_blk_base))
+ return PTR_ERR(dwc3_imx->hsio_blk_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+		dev_warn(dev, "Base address for glue layer missing. Continuing without it; some features will be unavailable.\n");
+ } else {
+ dwc3_imx->glue_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dwc3_imx->glue_base))
+ return PTR_ERR(dwc3_imx->glue_base);
+ }
+
+ dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio");
+ if (IS_ERR(dwc3_imx->hsio_clk)) {
+ err = PTR_ERR(dwc3_imx->hsio_clk);
+ dev_err(dev, "Failed to get hsio clk, err=%d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dwc3_imx->hsio_clk);
+ if (err) {
+ dev_err(dev, "Failed to enable hsio clk, err=%d\n", err);
+ return err;
+ }
+
+ dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend");
+ if (IS_ERR(dwc3_imx->suspend_clk)) {
+ err = PTR_ERR(dwc3_imx->suspend_clk);
+ dev_err(dev, "Failed to get suspend clk, err=%d\n", err);
+ goto disable_hsio_clk;
+ }
+
+ err = clk_prepare_enable(dwc3_imx->suspend_clk);
+ if (err) {
+ dev_err(dev, "Failed to enable suspend clk, err=%d\n", err);
+ goto disable_hsio_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto disable_clks;
+ }
+ dwc3_imx->irq = irq;
+
+ imx8mp_configure_glue(dwc3_imx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0)
+ goto disable_rpm;
+
+ dwc3_np = of_get_compatible_child(node, "snps,dwc3");
+ if (!dwc3_np) {
+ err = -ENODEV;
+ dev_err(dev, "failed to find dwc3 core child\n");
+ goto disable_rpm;
+ }
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create dwc3 core\n");
+ goto err_node_put;
+ }
+
+ dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
+ if (!dwc3_imx->dwc3) {
+ dev_err(dev, "failed to get dwc3 platform device\n");
+ err = -ENODEV;
+ goto depopulate;
+ }
+ of_node_put(dwc3_np);
+
+ err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
+ IRQF_ONESHOT, dev_name(dev), dwc3_imx);
+ if (err) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
+ goto depopulate;
+ }
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_put(dev);
+
+ return 0;
+
+depopulate:
+ of_platform_depopulate(dev);
+err_node_put:
+ of_node_put(dwc3_np);
+disable_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+disable_clks:
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+disable_hsio_clk:
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+
+ return err;
+}
+
+static int dwc3_imx8mp_remove(struct platform_device *pdev)
+{
+ struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
+{
+ if (dwc3_imx->pm_suspended)
+ return 0;
+
+ /* Wakeup enable */
+ if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev))
+ dwc3_imx8mp_wakeup_enable(dwc3_imx);
+
+ dwc3_imx->pm_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
+{
+ struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
+ int ret = 0;
+
+ if (!dwc3_imx->pm_suspended)
+ return 0;
+
+ /* Wakeup disable */
+ dwc3_imx8mp_wakeup_disable(dwc3_imx);
+ dwc3_imx->pm_suspended = false;
+
+ /* Upon power loss any previous configuration is lost, restore it */
+ imx8mp_configure_glue(dwc3_imx);
+
+ if (dwc3_imx->wakeup_pending) {
+ dwc3_imx->wakeup_pending = false;
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) {
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
+ } else {
+ /*
+			 * Wait for the xHCI to switch from the suspend clock
+			 * back to the normal clock so the connection can be
+			 * detected.
+ */
+ usleep_range(9000, 10000);
+ }
+ enable_irq(dwc3_imx->irq);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dwc3_imx8mp_suspend(dwc3_imx, PMSG_SUSPEND);
+
+ if (device_may_wakeup(dwc3_imx->dev))
+ enable_irq_wake(dwc3_imx->irq);
+ else
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+ dev_dbg(dev, "dwc3 imx8mp pm suspend.\n");
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+ int ret;
+
+ if (device_may_wakeup(dwc3_imx->dev)) {
+ disable_irq_wake(dwc3_imx->irq);
+ } else {
+ ret = clk_prepare_enable(dwc3_imx->suspend_clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwc3_imx->hsio_clk);
+ if (ret)
+ return ret;
+
+ ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp pm resume.\n");
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp runtime suspend.\n");
+
+ return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND);
+}
+
+static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp runtime resume.\n");
+
+ return dwc3_imx8mp_resume(dwc3_imx, PMSG_AUTO_RESUME);
+}
+
+static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend,
+ dwc3_imx8mp_runtime_resume, NULL)
+};
+
+static const struct of_device_id dwc3_imx8mp_of_match[] = {
+ { .compatible = "fsl,imx8mp-dwc3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_imx8mp_of_match);
+
+static struct platform_driver dwc3_imx8mp_driver = {
+ .probe = dwc3_imx8mp_probe,
+ .remove = dwc3_imx8mp_remove,
+ .driver = {
+ .name = "imx8mp-dwc3",
+ .pm = &dwc3_imx8mp_dev_pm_ops,
+ .of_match_table = dwc3_imx8mp_of_match,
+ },
+};
+
+module_platform_driver(dwc3_imx8mp_driver);
+
+MODULE_ALIAS("platform:imx8mp-dwc3");
+MODULE_AUTHOR("jun.li@nxp.com");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 imx8mp Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
new file mode 100644
index 000000000..131795929
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-keystone.c - Keystone Specific Glue layer
+ *
+ * Copyright (C) 2010-2013 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Author: WingMan Kwok <w-kwok2@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+
+/* USBSS register offsets */
+#define USBSS_REVISION 0x0000
+#define USBSS_SYSCONFIG 0x0010
+#define USBSS_IRQ_EOI 0x0018
+#define USBSS_IRQSTATUS_RAW_0 0x0020
+#define USBSS_IRQSTATUS_0 0x0024
+#define USBSS_IRQENABLE_SET_0 0x0028
+#define USBSS_IRQENABLE_CLR_0 0x002c
+
+/* IRQ register bits */
+#define USBSS_IRQ_EOI_LINE(n) BIT(n)
+#define USBSS_IRQ_EVENT_ST BIT(0)
+#define USBSS_IRQ_COREIRQ_EN BIT(0)
+#define USBSS_IRQ_COREIRQ_CLR BIT(0)
+
+struct dwc3_keystone {
+ struct device *dev;
+ void __iomem *usbss;
+ struct phy *usb3_phy;
+};
+
+static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void kdwc3_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+static void kdwc3_enable_irqs(struct dwc3_keystone *kdwc)
+{
+ u32 val;
+
+ val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
+ val |= USBSS_IRQ_COREIRQ_EN;
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
+}
+
+static void kdwc3_disable_irqs(struct dwc3_keystone *kdwc)
+{
+ u32 val;
+
+ val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
+ val &= ~USBSS_IRQ_COREIRQ_EN;
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
+}
+
+static irqreturn_t dwc3_keystone_interrupt(int irq, void *_kdwc)
+{
+ struct dwc3_keystone *kdwc = _kdwc;
+
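+	/*
+	 * Mask the core interrupt, ack the event status, unmask it again
+	 * and signal end-of-interrupt on line 0.
+	 */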
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_CLR_0, USBSS_IRQ_COREIRQ_CLR);
+ kdwc3_writel(kdwc->usbss, USBSS_IRQSTATUS_0, USBSS_IRQ_EVENT_ST);
+ kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, USBSS_IRQ_COREIRQ_EN);
+ kdwc3_writel(kdwc->usbss, USBSS_IRQ_EOI, USBSS_IRQ_EOI_LINE(0));
+
+ return IRQ_HANDLED;
+}
+
+static int kdwc3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct dwc3_keystone *kdwc;
+ int error, irq;
+
+ kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL);
+ if (!kdwc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, kdwc);
+
+ kdwc->dev = dev;
+
+ kdwc->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(kdwc->usbss))
+ return PTR_ERR(kdwc->usbss);
+
+ /* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */
+ kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(kdwc->usb3_phy))
+ return dev_err_probe(dev, PTR_ERR(kdwc->usb3_phy), "couldn't get usb3 phy\n");
+
+ phy_pm_runtime_get_sync(kdwc->usb3_phy);
+
+ error = phy_reset(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy reset failed: %d\n", error);
+ return error;
+ }
+
+ error = phy_init(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy init failed: %d\n", error);
+ return error;
+ }
+
+ error = phy_power_on(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy power on failed: %d\n", error);
+ phy_exit(kdwc->usb3_phy);
+ return error;
+ }
+
+ pm_runtime_enable(kdwc->dev);
+ error = pm_runtime_get_sync(kdwc->dev);
+ if (error < 0) {
+ dev_err(kdwc->dev, "pm_runtime_get_sync failed, error %d\n",
+ error);
+ goto err_irq;
+ }
+
+ /* IRQ processing not required currently for AM65 */
+ if (of_device_is_compatible(node, "ti,am654-dwc3"))
+ goto skip_irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = irq;
+ goto err_irq;
+ }
+
+ error = devm_request_irq(dev, irq, dwc3_keystone_interrupt, IRQF_SHARED,
+ dev_name(dev), kdwc);
+ if (error) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ irq, error);
+ goto err_irq;
+ }
+
+ kdwc3_enable_irqs(kdwc);
+
+skip_irq:
+ error = of_platform_populate(node, NULL, NULL, dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to create dwc3 core\n");
+ goto err_core;
+ }
+
+ return 0;
+
+err_core:
+ kdwc3_disable_irqs(kdwc);
+err_irq:
+ pm_runtime_put_sync(kdwc->dev);
+ pm_runtime_disable(kdwc->dev);
+ phy_power_off(kdwc->usb3_phy);
+ phy_exit(kdwc->usb3_phy);
+ phy_pm_runtime_put_sync(kdwc->usb3_phy);
+
+ return error;
+}
+
+static int kdwc3_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int kdwc3_remove(struct platform_device *pdev)
+{
+ struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!of_device_is_compatible(node, "ti,am654-dwc3"))
+ kdwc3_disable_irqs(kdwc);
+
+ device_for_each_child(&pdev->dev, NULL, kdwc3_remove_core);
+ pm_runtime_put_sync(kdwc->dev);
+ pm_runtime_disable(kdwc->dev);
+
+ phy_power_off(kdwc->usb3_phy);
+ phy_exit(kdwc->usb3_phy);
+ phy_pm_runtime_put_sync(kdwc->usb3_phy);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id kdwc3_of_match[] = {
+ { .compatible = "ti,keystone-dwc3", },
+ { .compatible = "ti,am654-dwc3" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, kdwc3_of_match);
+
+static struct platform_driver kdwc3_driver = {
+ .probe = kdwc3_probe,
+ .remove = kdwc3_remove,
+ .driver = {
+ .name = "keystone-dwc3",
+ .of_match_table = kdwc3_of_match,
+ },
+};
+
+module_platform_driver(kdwc3_driver);
+
+MODULE_ALIAS("platform:keystone-dwc3");
+MODULE_AUTHOR("WingMan Kwok <w-kwok2@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 KEYSTONE Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
new file mode 100644
index 000000000..10298b917
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Glue for Amlogic G12A SoCs
+ *
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+/*
+ * The USB complex is organized as a glue layer around the DWC3 controller IP:
+ * - Control registers for each USB2 port
+ * - Control registers for the USB PHY layer
+ * - The SuperSpeed PHY can be enabled only if the port is used
+ * - Dynamic OTG switching with an ID change interrupt
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+#include <linux/regulator/consumer.h>
+
+/* USB2 Ports Control Registers, offsets are per-port */
+
+#define U2P_REG_SIZE 0x20
+
+#define U2P_R0 0x0
+ #define U2P_R0_HOST_DEVICE BIT(0)
+ #define U2P_R0_POWER_OK BIT(1)
+ #define U2P_R0_HAST_MODE BIT(2)
+ #define U2P_R0_POWER_ON_RESET BIT(3)
+ #define U2P_R0_ID_PULLUP BIT(4)
+ #define U2P_R0_DRV_VBUS BIT(5)
+
+#define U2P_R1 0x4
+ #define U2P_R1_PHY_READY BIT(0)
+ #define U2P_R1_ID_DIG BIT(1)
+ #define U2P_R1_OTG_SESSION_VALID BIT(2)
+ #define U2P_R1_VBUS_VALID BIT(3)
+
+/* USB Glue Control Registers */
+
+#define G12A_GLUE_OFFSET 0x80
+
+#define USB_R0 0x00
+ #define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
+ #define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
+ #define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
+ #define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
+ #define USB_R0_U2D_ACT BIT(31)
+
+#define USB_R1 0x04
+ #define USB_R1_U3H_BIGENDIAN_GS BIT(0)
+ #define USB_R1_U3H_PME_ENABLE BIT(1)
+ #define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(4, 2)
+ #define USB_R1_U3H_HUB_PORT_PERM_ATTACH_MASK GENMASK(9, 7)
+ #define USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK GENMASK(13, 12)
+ #define USB_R1_U3H_HOST_U3_PORT_DISABLE BIT(16)
+ #define USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT BIT(17)
+ #define USB_R1_U3H_HOST_MSI_ENABLE BIT(18)
+ #define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
+ #define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
+
+#define USB_R2 0x08
+ #define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
+ #define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
+
+#define USB_R3 0x0c
+ #define USB_R3_P30_SSC_ENABLE BIT(0)
+ #define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
+ #define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
+ #define USB_R3_P30_REF_SSP_EN BIT(13)
+
+#define USB_R4 0x10
+ #define USB_R4_P21_PORT_RESET_0 BIT(0)
+ #define USB_R4_P21_SLEEP_M0 BIT(1)
+ #define USB_R4_MEM_PD_MASK GENMASK(3, 2)
+ #define USB_R4_P21_ONLY BIT(4)
+
+#define USB_R5 0x14
+ #define USB_R5_ID_DIG_SYNC BIT(0)
+ #define USB_R5_ID_DIG_REG BIT(1)
+ #define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
+ #define USB_R5_ID_DIG_EN_0 BIT(4)
+ #define USB_R5_ID_DIG_EN_1 BIT(5)
+ #define USB_R5_ID_DIG_CURR BIT(6)
+ #define USB_R5_ID_DIG_IRQ BIT(7)
+ #define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
+ #define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
+
+#define PHY_COUNT 3
+#define USB2_OTG_PHY 1
+
+static struct clk_bulk_data meson_gxl_clocks[] = {
+ { .id = "usb_ctrl" },
+ { .id = "ddr" },
+};
+
+static struct clk_bulk_data meson_g12a_clocks[] = {
+ { .id = NULL },
+};
+
+static struct clk_bulk_data meson_a1_clocks[] = {
+ { .id = "usb_ctrl" },
+ { .id = "usb_bus" },
+ { .id = "xtal_usb_ctrl" },
+};
+
+static const char * const meson_gxm_phy_names[] = {
+ "usb2-phy0", "usb2-phy1", "usb2-phy2",
+};
+
+static const char * const meson_g12a_phy_names[] = {
+ "usb2-phy0", "usb2-phy1", "usb3-phy0",
+};
+
+/*
+ * Amlogic A1 has a single physical PHY, in slot 1, but still has the
+ * two U2 PHY control register blocks like G12A.
+ * AXG uses a similar scheme and thus needs the same tweak.
+ * Handling the first PHY on slot 1 would require a large number of code
+ * changes, and the current management is generic enough to handle it
+ * correctly when only the "usb2-phy1" phy is specified, in line with the
+ * DT bindings.
+ */
+static const char * const meson_a1_phy_names[] = {
+ "usb2-phy0", "usb2-phy1"
+};
+
+struct dwc3_meson_g12a;
+
+struct dwc3_meson_g12a_drvdata {
+ bool otg_switch_supported;
+ bool otg_phy_host_port_disable;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ const char * const *phy_names;
+ int num_phys;
+ int (*setup_regmaps)(struct dwc3_meson_g12a *priv, void __iomem *base);
+ int (*usb2_init_phy)(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+ int (*set_phy_mode)(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+ int (*usb_init)(struct dwc3_meson_g12a *priv);
+ int (*usb_post_init)(struct dwc3_meson_g12a *priv);
+};
+
+static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base);
+static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base);
+
+static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+
+static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode);
+static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode);
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv);
+static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv);
+
+static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv);
+
+/*
+ * For GXL and GXM SoCs:
+ * USB PHY muxing between the DWC2 Device controller and the DWC3 Host
+ * controller is buggy when switching from Device to Host while the USB
+ * port is unpopulated; it causes the DWC3 to hard crash.
+ * When the port is populated (including OTG switching with the ID pin),
+ * the switch works like a charm, as on the G12A platforms.
+ * In order to still switch from Host to Device on a USB Type-A port,
+ * a U2_PORT_DISABLE bit has been added to disconnect the DWC3 Host
+ * controller from the port, but when it is used the DWC3 controller must
+ * be reset to recover use of the port.
+ */
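+
+/*
+ * The port isolation described above is applied in
+ * dwc3_meson_g12a_usb_otg_apply_mode() through
+ * USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK when otg_phy_host_port_disable
+ * is set in the drvdata.
+ */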
+
+static const struct dwc3_meson_g12a_drvdata gxl_drvdata = {
+ .otg_switch_supported = true,
+ .otg_phy_host_port_disable = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_g12a_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_gxl_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
+};
+
+static const struct dwc3_meson_g12a_drvdata gxm_drvdata = {
+ .otg_switch_supported = true,
+ .otg_phy_host_port_disable = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_g12a_clocks),
+ .phy_names = meson_gxm_phy_names,
+ .num_phys = ARRAY_SIZE(meson_gxm_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_gxl_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
+};
+
+static const struct dwc3_meson_g12a_drvdata axg_drvdata = {
+ .otg_switch_supported = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_gxl_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
+};
+
+static const struct dwc3_meson_g12a_drvdata g12a_drvdata = {
+ .otg_switch_supported = true,
+ .clks = meson_g12a_clocks,
+ .num_clks = ARRAY_SIZE(meson_g12a_clocks),
+ .phy_names = meson_g12a_phy_names,
+ .num_phys = ARRAY_SIZE(meson_g12a_phy_names),
+ .setup_regmaps = dwc3_meson_g12a_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_g12a_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
+};
+
+static const struct dwc3_meson_g12a_drvdata a1_drvdata = {
+ .otg_switch_supported = false,
+ .clks = meson_a1_clocks,
+ .num_clks = ARRAY_SIZE(meson_a1_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_g12a_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_g12a_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
+};
+
+struct dwc3_meson_g12a {
+ struct device *dev;
+ struct regmap *u2p_regmap[PHY_COUNT];
+ struct regmap *usb_glue_regmap;
+ struct reset_control *reset;
+ struct phy *phys[PHY_COUNT];
+ enum usb_dr_mode otg_mode;
+ enum phy_mode otg_phy_mode;
+ unsigned int usb2_ports;
+ unsigned int usb3_ports;
+ struct regulator *vbus;
+ struct usb_role_switch_desc switch_desc;
+ struct usb_role_switch *role_switch;
+ const struct dwc3_meson_g12a_drvdata *drvdata;
+};
+
+static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
+{
+ return phy_set_mode(priv->phys[i], mode);
+}
+
+static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode)
+{
+	/* On GXL, the PHY must be started in device mode for DWC2 init */
+ return priv->drvdata->set_phy_mode(priv, i,
+ (i == USB2_OTG_PHY) ? PHY_MODE_USB_DEVICE
+ : PHY_MODE_USB_HOST);
+}
+
+static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
+{
+ if (mode == PHY_MODE_USB_HOST)
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_HOST_DEVICE,
+ U2P_R0_HOST_DEVICE);
+ else
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_HOST_DEVICE, 0);
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode)
+{
+ int ret;
+
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_POWER_ON_RESET,
+ U2P_R0_POWER_ON_RESET);
+
+ if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
+
+ ret = priv->drvdata->set_phy_mode(priv, i, mode);
+ } else
+ ret = priv->drvdata->set_phy_mode(priv, i,
+ PHY_MODE_USB_HOST);
+
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_POWER_ON_RESET, 0);
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ int i, ret;
+
+ for (i = 0; i < priv->drvdata->num_phys; ++i) {
+ if (!priv->phys[i])
+ continue;
+
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
+ ret = priv->drvdata->usb2_init_phy(priv, i, mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
+{
+ regmap_update_bits(priv->usb_glue_regmap, USB_R3,
+ USB_R3_P30_SSC_RANGE_MASK |
+ USB_R3_P30_REF_SSP_EN,
+ USB_R3_P30_SSC_ENABLE |
+ FIELD_PREP(USB_R3_P30_SSC_RANGE_MASK, 2) |
+ USB_R3_P30_REF_SSP_EN);
+ udelay(2);
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R2,
+ USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK,
+ FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK, 0x15));
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R2,
+ USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK,
+ FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK, 0x20));
+
+ udelay(2);
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT,
+ USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT);
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_P30_PCS_TX_SWING_FULL_MASK,
+ FIELD_PREP(USB_R1_P30_PCS_TX_SWING_FULL_MASK, 127));
+}
+
+static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ if (mode == PHY_MODE_USB_DEVICE) {
+ if (priv->otg_mode != USB_DR_MODE_OTG &&
+ priv->drvdata->otg_phy_host_port_disable)
+ /* Isolate the OTG PHY port from the Host Controller */
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
+ FIELD_PREP(USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
+ BIT(USB2_OTG_PHY)));
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
+ USB_R0_U2D_ACT, USB_R0_U2D_ACT);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
+ USB_R0_U2D_SS_SCALEDOWN_MODE_MASK, 0);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R4,
+ USB_R4_P21_SLEEP_M0, USB_R4_P21_SLEEP_M0);
+ } else {
+ if (priv->otg_mode != USB_DR_MODE_OTG &&
+ priv->drvdata->otg_phy_host_port_disable) {
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK, 0);
+ msleep(500);
+ }
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
+ USB_R0_U2D_ACT, 0);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R4,
+ USB_R4_P21_SLEEP_M0, 0);
+ }
+}
+
+static int dwc3_meson_g12a_usb_init_glue(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ int ret;
+
+ ret = dwc3_meson_g12a_usb2_init(priv, mode);
+ if (ret)
+ return ret;
+
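+ /* Program the 30MHz host frame length adjustment (FLADJ) value */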
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
+ FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_EN_0,
+ USB_R5_ID_DIG_EN_0);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_EN_1,
+ USB_R5_ID_DIG_EN_1);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_TH_MASK,
+ FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
+
+ /* If we have an actual SuperSpeed port, initialize it */
+ if (priv->usb3_ports)
+ dwc3_meson_g12a_usb3_init(priv);
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
+
+ return 0;
+}
+
+static const struct regmap_config phy_meson_g12a_usb_glue_regmap_conf = {
+ .name = "usb-glue",
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = USB_R5,
+};
+
+static int dwc3_meson_g12a_get_phys(struct dwc3_meson_g12a *priv)
+{
+ const char *phy_name;
+ int i;
+
+ for (i = 0 ; i < priv->drvdata->num_phys ; ++i) {
+ phy_name = priv->drvdata->phy_names[i];
+ priv->phys[i] = devm_phy_optional_get(priv->dev, phy_name);
+ if (!priv->phys[i])
+ continue;
+
+ if (IS_ERR(priv->phys[i]))
+ return PTR_ERR(priv->phys[i]);
+
+ if (strstr(phy_name, "usb3"))
+ priv->usb3_ports++;
+ else
+ priv->usb2_ports++;
+ }
+
+ dev_info(priv->dev, "USB2 ports: %d\n", priv->usb2_ports);
+ dev_info(priv->dev, "USB3 ports: %d\n", priv->usb3_ports);
+
+ return 0;
+}
+
+static enum phy_mode dwc3_meson_g12a_get_id(struct dwc3_meson_g12a *priv)
+{
+ u32 reg;
+
+ regmap_read(priv->usb_glue_regmap, USB_R5, &reg);
+
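+ /* The glue reports device mode while either ID digital status bit is set */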
+ if (reg & (USB_R5_ID_DIG_SYNC | USB_R5_ID_DIG_REG))
+ return PHY_MODE_USB_DEVICE;
+
+ return PHY_MODE_USB_HOST;
+}
+
+static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ int ret;
+
+ if (!priv->drvdata->otg_switch_supported || !priv->phys[USB2_OTG_PHY])
+ return -EINVAL;
+
+ if (mode == PHY_MODE_USB_HOST)
+ dev_info(priv->dev, "switching to Host Mode\n");
+ else
+ dev_info(priv->dev, "switching to Device Mode\n");
+
+ if (priv->vbus) {
+ if (mode == PHY_MODE_USB_DEVICE)
+ ret = regulator_disable(priv->vbus);
+ else
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ return ret;
+ }
+
+ priv->otg_phy_mode = mode;
+
+ ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY, mode);
+ if (ret)
+ return ret;
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw);
+ enum phy_mode mode;
+
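+ /* Keep the current mode when no role is requested */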
+ if (role == USB_ROLE_NONE)
+ return 0;
+
+ mode = (role == USB_ROLE_HOST) ? PHY_MODE_USB_HOST
+ : PHY_MODE_USB_DEVICE;
+
+ if (mode == priv->otg_phy_mode)
+ return 0;
+
+ if (priv->drvdata->otg_phy_host_port_disable)
+ dev_warn_once(priv->dev, "Broken manual OTG switch\n");
+
+ return dwc3_meson_g12a_otg_mode_set(priv, mode);
+}
+
+static enum usb_role dwc3_meson_g12a_role_get(struct usb_role_switch *sw)
+{
+ struct dwc3_meson_g12a *priv = usb_role_switch_get_drvdata(sw);
+
+ return priv->otg_phy_mode == PHY_MODE_USB_HOST ?
+ USB_ROLE_HOST : USB_ROLE_DEVICE;
+}
+
+static irqreturn_t dwc3_meson_g12a_irq_thread(int irq, void *data)
+{
+ struct dwc3_meson_g12a *priv = data;
+ enum phy_mode otg_id;
+
+ otg_id = dwc3_meson_g12a_get_id(priv);
+ if (otg_id != priv->otg_phy_mode) {
+ if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
+ dev_warn(priv->dev, "Failed to switch OTG mode\n");
+ }
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_IRQ, 0);
+
+ return IRQ_HANDLED;
+}
+
+static struct device *dwc3_meson_g12_find_child(struct device *dev,
+ const char *compatible)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_get_compatible_child(dev->of_node, compatible);
+ if (!np)
+ return NULL;
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return NULL;
+
+ return &pdev->dev;
+}
+
+static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
+ struct dwc3_meson_g12a *priv)
+{
+ enum phy_mode otg_id;
+ int ret, irq;
+ struct device *dev = &pdev->dev;
+
+ if (!priv->drvdata->otg_switch_supported)
+ return 0;
+
+ if (priv->otg_mode == USB_DR_MODE_OTG) {
+ /* Ack irq before registering */
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_IRQ, 0);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ dwc3_meson_g12a_irq_thread,
+ IRQF_ONESHOT, pdev->name, priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup OTG mode corresponding to the ID pin */
+ if (priv->otg_mode == USB_DR_MODE_OTG) {
+ otg_id = dwc3_meson_g12a_get_id(priv);
+ if (otg_id != priv->otg_phy_mode) {
+ if (dwc3_meson_g12a_otg_mode_set(priv, otg_id))
+ dev_warn(dev, "Failed to switch OTG mode\n");
+ }
+ }
+
+ /* Setup role switcher */
+ priv->switch_desc.usb2_port = dwc3_meson_g12_find_child(dev,
+ "snps,dwc3");
+ priv->switch_desc.udc = dwc3_meson_g12_find_child(dev, "snps,dwc2");
+ priv->switch_desc.allow_userspace_control = true;
+ priv->switch_desc.set = dwc3_meson_g12a_role_set;
+ priv->switch_desc.get = dwc3_meson_g12a_role_get;
+ priv->switch_desc.driver_data = priv;
+
+ priv->role_switch = usb_role_switch_register(dev, &priv->switch_desc);
+ if (IS_ERR(priv->role_switch))
+ dev_warn(dev, "Unable to register Role Switch\n");
+
+ return 0;
+}
+
+static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base)
+{
+ /* Unlike G12A, GXL controls the PHY mode through the PHY registers themselves */
+ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev, base,
+ &phy_meson_g12a_usb_glue_regmap_conf);
+ return PTR_ERR_OR_ZERO(priv->usb_glue_regmap);
+}
+
+static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base)
+{
+ int i;
+
+ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev,
+ base + G12A_GLUE_OFFSET,
+ &phy_meson_g12a_usb_glue_regmap_conf);
+ if (IS_ERR(priv->usb_glue_regmap))
+ return PTR_ERR(priv->usb_glue_regmap);
+
+ /* Create a regmap for each USB2 PHY control register set */
+ for (i = 0; i < priv->drvdata->num_phys; i++) {
+ struct regmap_config u2p_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = U2P_R1,
+ };
+
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
+ u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
+ "u2p-%d", i);
+ if (!u2p_regmap_config.name)
+ return -ENOMEM;
+
+ priv->u2p_regmap[i] = devm_regmap_init_mmio(priv->dev,
+ base + (i * U2P_REG_SIZE),
+ &u2p_regmap_config);
+ if (IS_ERR(priv->u2p_regmap[i]))
+ return PTR_ERR(priv->u2p_regmap[i]);
+ }
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
+{
+ return dwc3_meson_g12a_usb_init_glue(priv, priv->otg_phy_mode);
+}
+
+static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv)
+{
+ return dwc3_meson_g12a_usb_init_glue(priv, PHY_MODE_USB_DEVICE);
+}
+
+static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv)
+{
+ int ret;
+
+ ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY,
+ priv->otg_phy_mode);
+ if (ret)
+ return ret;
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, priv->otg_phy_mode);
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_probe(struct platform_device *pdev)
+{
+ struct dwc3_meson_g12a *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+ int ret, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->drvdata = of_device_get_match_data(&pdev->dev);
+ priv->dev = dev;
+
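+ /* The VBUS supply is optional; only propagate a probe deferral */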
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ ret = devm_clk_bulk_get(dev,
+ priv->drvdata->num_clks,
+ priv->drvdata->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->reset = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ dev_err(dev, "failed to get device reset, err=%d\n", ret);
+ goto err_disable_clks;
+ }
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ goto err_disable_clks;
+
+ ret = dwc3_meson_g12a_get_phys(priv);
+ if (ret)
+ goto err_rearm;
+
+ ret = priv->drvdata->setup_regmaps(priv, base);
+ if (ret)
+ goto err_rearm;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto err_rearm;
+ }
+
+ /* Get dr_mode */
+ priv->otg_mode = usb_get_dr_mode(dev);
+
+ if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
+ priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
+ else
+ priv->otg_phy_mode = PHY_MODE_USB_HOST;
+
+ ret = priv->drvdata->usb_init(priv);
+ if (ret)
+ goto err_disable_regulator;
+
+ /* Init PHYs */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_init(priv->phys[i]);
+ if (ret)
+ goto err_disable_regulator;
+ }
+
+ /* Set PHY Power */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_power_on(priv->phys[i]);
+ if (ret)
+ goto err_phys_exit;
+ }
+
+ if (priv->drvdata->usb_post_init) {
+ ret = priv->drvdata->usb_post_init(priv);
+ if (ret)
+ goto err_phys_power;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret)
+ goto err_phys_power;
+
+ ret = dwc3_meson_g12a_otg_init(pdev, priv);
+ if (ret)
+ goto err_plat_depopulate;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+
+err_plat_depopulate:
+ of_platform_depopulate(dev);
+
+err_phys_power:
+ for (i = 0 ; i < PHY_COUNT ; ++i)
+ phy_power_off(priv->phys[i]);
+
+err_phys_exit:
+ for (i = 0 ; i < PHY_COUNT ; ++i)
+ phy_exit(priv->phys[i]);
+
+err_disable_regulator:
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+err_rearm:
+ reset_control_rearm(priv->reset);
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+
+ return ret;
+}
+
+static int dwc3_meson_g12a_remove(struct platform_device *pdev)
+{
+ struct dwc3_meson_g12a *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int i;
+
+ if (priv->drvdata->otg_switch_supported)
+ usb_role_switch_unregister(priv->role_switch);
+
+ of_platform_depopulate(dev);
+
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ phy_power_off(priv->phys[i]);
+ phy_exit(priv->phys[i]);
+ }
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
+ reset_control_rearm(priv->reset);
+
+ clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_meson_g12a_runtime_suspend(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(priv->drvdata->num_clks,
+ priv->drvdata->clks);
+}
+
+static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
+ ret = regulator_disable(priv->vbus);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ phy_power_off(priv->phys[i]);
+ phy_exit(priv->phys[i]);
+ }
+
+ reset_control_rearm(priv->reset);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
+{
+ struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
+ ret = priv->drvdata->usb_init(priv);
+ if (ret)
+ return ret;
+
+ /* Init PHYs */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_init(priv->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Set PHY Power */
+ for (i = 0 ; i < PHY_COUNT ; ++i) {
+ ret = phy_power_on(priv->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->drvdata->usb_post_init) {
+ ret = priv->drvdata->usb_post_init(priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_meson_g12a_suspend, dwc3_meson_g12a_resume)
+ SET_RUNTIME_PM_OPS(dwc3_meson_g12a_runtime_suspend,
+ dwc3_meson_g12a_runtime_resume, NULL)
+};
+
+static const struct of_device_id dwc3_meson_g12a_match[] = {
+ {
+ .compatible = "amlogic,meson-gxl-usb-ctrl",
+ .data = &gxl_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-gxm-usb-ctrl",
+ .data = &gxm_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-axg-usb-ctrl",
+ .data = &axg_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-usb-ctrl",
+ .data = &g12a_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-a1-usb-ctrl",
+ .data = &a1_drvdata,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dwc3_meson_g12a_match);
+
+static struct platform_driver dwc3_meson_g12a_driver = {
+ .probe = dwc3_meson_g12a_probe,
+ .remove = dwc3_meson_g12a_remove,
+ .driver = {
+ .name = "dwc3-meson-g12a",
+ .of_match_table = dwc3_meson_g12a_match,
+ .pm = &dwc3_meson_g12a_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dwc3_meson_g12a_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Amlogic Meson G12A USB Glue Layer");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
new file mode 100644
index 000000000..71fd620c5
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-of-simple.c - OF glue layer for simple integrations
+ *
+ * Copyright (c) 2015 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This is a combination of the old dwc3-qcom.c by Ivan T. Ivanov
+ * <iivanov@mm-sol.com> and the original patch adding support for Xilinx' SoC
+ * by Subbaraya Sundeep Bhatta <subbaraya.sundeep.bhatta@xilinx.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+struct dwc3_of_simple {
+ struct device *dev;
+ struct clk_bulk_data *clks;
+ int num_clocks;
+ struct reset_control *resets;
+ bool need_reset;
+};
+
+static int dwc3_of_simple_probe(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ int ret;
+
+ simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
+ if (!simple)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, simple);
+ simple->dev = dev;
+
+ /*
+ * Some controllers need to toggle the usb3-otg reset before trying to
+ * initialize the PHY; otherwise the PHY times out.
+ */
+ if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
+ simple->need_reset = true;
+
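+ /* Grab all of the (optional) resets described in the device tree node */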
+ simple->resets = of_reset_control_array_get(np, false, true,
+ true);
+ if (IS_ERR(simple->resets)) {
+ ret = PTR_ERR(simple->resets);
+ dev_err(dev, "failed to get device resets, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(simple->resets);
+ if (ret)
+ goto err_resetc_put;
+
+ ret = clk_bulk_get_all(simple->dev, &simple->clks);
+ if (ret < 0)
+ goto err_resetc_assert;
+
+ simple->num_clocks = ret;
+ ret = clk_bulk_prepare_enable(simple->num_clocks, simple->clks);
+ if (ret)
+ goto err_resetc_assert;
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret)
+ goto err_clk_put;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+
+err_clk_put:
+ clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
+ clk_bulk_put_all(simple->num_clocks, simple->clks);
+
+err_resetc_assert:
+ reset_control_assert(simple->resets);
+
+err_resetc_put:
+ reset_control_put(simple->resets);
+ return ret;
+}
+
+static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
+{
+ of_platform_depopulate(simple->dev);
+
+ clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
+ clk_bulk_put_all(simple->num_clocks, simple->clks);
+ simple->num_clocks = 0;
+
+ reset_control_assert(simple->resets);
+
+ reset_control_put(simple->resets);
+
+ pm_runtime_disable(simple->dev);
+ pm_runtime_put_noidle(simple->dev);
+ pm_runtime_set_suspended(simple->dev);
+}
+
+static int dwc3_of_simple_remove(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
+
+ return 0;
+}
+
+static void dwc3_of_simple_shutdown(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
+}
+
+static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
+{
+ struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+
+ clk_bulk_disable(simple->num_clocks, simple->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
+{
+ struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+
+ return clk_bulk_enable(simple->num_clocks, simple->clks);
+}
+
+static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
+{
+ struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+
+ if (simple->need_reset)
+ reset_control_assert(simple->resets);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
+{
+ struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+
+ if (simple->need_reset)
+ reset_control_deassert(simple->resets);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
+ SET_RUNTIME_PM_OPS(dwc3_of_simple_runtime_suspend,
+ dwc3_of_simple_runtime_resume, NULL)
+};
+
+static const struct of_device_id of_dwc3_simple_match[] = {
+ { .compatible = "rockchip,rk3399-dwc3" },
+ { .compatible = "cavium,octeon-7130-usb-uctl" },
+ { .compatible = "sprd,sc9860-dwc3" },
+ { .compatible = "allwinner,sun50i-h6-dwc3" },
+ { .compatible = "hisilicon,hi3670-dwc3" },
+ { .compatible = "intel,keembay-dwc3" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
+
+static struct platform_driver dwc3_of_simple_driver = {
+ .probe = dwc3_of_simple_probe,
+ .remove = dwc3_of_simple_remove,
+ .shutdown = dwc3_of_simple_shutdown,
+ .driver = {
+ .name = "dwc3-of-simple",
+ .of_match_table = of_dwc3_simple_match,
+ .pm = &dwc3_of_simple_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dwc3_of_simple_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 OF Simple Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
new file mode 100644
index 000000000..efaf0db59
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-omap.c - OMAP Specific Glue layer
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/extcon.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/usb/otg.h>
+
+/*
+ * All these registers belong to OMAP's Wrapper around the
+ * DesignWare USB3 Core.
+ */
+
+#define USBOTGSS_REVISION 0x0000
+#define USBOTGSS_SYSCONFIG 0x0010
+#define USBOTGSS_IRQ_EOI 0x0020
+#define USBOTGSS_EOI_OFFSET 0x0008
+#define USBOTGSS_IRQSTATUS_RAW_0 0x0024
+#define USBOTGSS_IRQSTATUS_0 0x0028
+#define USBOTGSS_IRQENABLE_SET_0 0x002c
+#define USBOTGSS_IRQENABLE_CLR_0 0x0030
+#define USBOTGSS_IRQ0_OFFSET 0x0004
+#define USBOTGSS_IRQSTATUS_RAW_1 0x0030
+#define USBOTGSS_IRQSTATUS_1 0x0034
+#define USBOTGSS_IRQENABLE_SET_1 0x0038
+#define USBOTGSS_IRQENABLE_CLR_1 0x003c
+#define USBOTGSS_IRQSTATUS_RAW_2 0x0040
+#define USBOTGSS_IRQSTATUS_2 0x0044
+#define USBOTGSS_IRQENABLE_SET_2 0x0048
+#define USBOTGSS_IRQENABLE_CLR_2 0x004c
+#define USBOTGSS_IRQSTATUS_RAW_3 0x0050
+#define USBOTGSS_IRQSTATUS_3 0x0054
+#define USBOTGSS_IRQENABLE_SET_3 0x0058
+#define USBOTGSS_IRQENABLE_CLR_3 0x005c
+#define USBOTGSS_IRQSTATUS_EOI_MISC 0x0030
+#define USBOTGSS_IRQSTATUS_RAW_MISC 0x0034
+#define USBOTGSS_IRQSTATUS_MISC 0x0038
+#define USBOTGSS_IRQENABLE_SET_MISC 0x003c
+#define USBOTGSS_IRQENABLE_CLR_MISC 0x0040
+#define USBOTGSS_IRQMISC_OFFSET 0x03fc
+#define USBOTGSS_UTMI_OTG_STATUS 0x0080
+#define USBOTGSS_UTMI_OTG_CTRL 0x0084
+#define USBOTGSS_UTMI_OTG_OFFSET 0x0480
+#define USBOTGSS_TXFIFO_DEPTH 0x0508
+#define USBOTGSS_RXFIFO_DEPTH 0x050c
+#define USBOTGSS_MMRAM_OFFSET 0x0100
+#define USBOTGSS_FLADJ 0x0104
+#define USBOTGSS_DEBUG_CFG 0x0108
+#define USBOTGSS_DEBUG_DATA 0x010c
+#define USBOTGSS_DEV_EBC_EN 0x0110
+#define USBOTGSS_DEBUG_OFFSET 0x0600
+
+/* SYSCONFIG REGISTER */
+#define USBOTGSS_SYSCONFIG_DMADISABLE BIT(16)
+
+/* IRQ_EOI REGISTER */
+#define USBOTGSS_IRQ_EOI_LINE_NUMBER BIT(0)
+
+/* IRQS0 BITS */
+#define USBOTGSS_IRQO_COREIRQ_ST BIT(0)
+
+/* IRQMISC BITS */
+#define USBOTGSS_IRQMISC_DMADISABLECLR BIT(17)
+#define USBOTGSS_IRQMISC_OEVT BIT(16)
+#define USBOTGSS_IRQMISC_DRVVBUS_RISE BIT(13)
+#define USBOTGSS_IRQMISC_CHRGVBUS_RISE BIT(12)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE BIT(11)
+#define USBOTGSS_IRQMISC_IDPULLUP_RISE BIT(8)
+#define USBOTGSS_IRQMISC_DRVVBUS_FALL BIT(5)
+#define USBOTGSS_IRQMISC_CHRGVBUS_FALL BIT(4)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL BIT(3)
+#define USBOTGSS_IRQMISC_IDPULLUP_FALL BIT(0)
+
+/* UTMI_OTG_STATUS REGISTER */
+#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS BIT(5)
+#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS BIT(4)
+#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS BIT(3)
+#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP BIT(0)
+
+/* UTMI_OTG_CTRL REGISTER */
+#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE BIT(31)
+#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT BIT(9)
+#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE BIT(8)
+#define USBOTGSS_UTMI_OTG_CTRL_IDDIG BIT(4)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSEND BIT(3)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID BIT(2)
+#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID BIT(1)
+
+enum dwc3_omap_utmi_mode {
+ DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
+ DWC3_OMAP_UTMI_MODE_HW,
+ DWC3_OMAP_UTMI_MODE_SW,
+};
+
+struct dwc3_omap {
+ struct device *dev;
+
+ int irq;
+ void __iomem *base;
+
+ u32 utmi_otg_ctrl;
+ u32 utmi_otg_offset;
+ u32 irqmisc_offset;
+ u32 irq_eoi_offset;
+ u32 debug_offset;
+ u32 irq0_offset;
+
+ struct extcon_dev *edev;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+
+ struct regulator *vbus_reg;
+};
+
+enum omap_dwc3_vbus_id_status {
+ OMAP_DWC3_ID_FLOAT,
+ OMAP_DWC3_ID_GROUND,
+ OMAP_DWC3_VBUS_OFF,
+ OMAP_DWC3_VBUS_VALID,
+};
+
+static inline u32 dwc3_omap_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+static u32 dwc3_omap_read_utmi_ctrl(struct dwc3_omap *omap)
+{
+ return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL +
+ omap->utmi_otg_offset);
+}
+
+static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL +
+ omap->utmi_otg_offset, value);
+}
+
+static u32 dwc3_omap_read_irq0_status(struct dwc3_omap *omap)
+{
+ return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_0 -
+ omap->irq0_offset);
+}
+
+static void dwc3_omap_write_irq0_status(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_0 -
+ omap->irq0_offset, value);
+}
+
+static u32 dwc3_omap_read_irqmisc_status(struct dwc3_omap *omap)
+{
+ return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_MISC +
+ omap->irqmisc_offset);
+}
+
+static void dwc3_omap_write_irqmisc_status(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_MISC +
+ omap->irqmisc_offset, value);
+}
+
+static void dwc3_omap_write_irqmisc_set(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_MISC +
+ omap->irqmisc_offset, value);
+}
+
+static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0 -
+ omap->irq0_offset, value);
+}
+
+static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
+ omap->irqmisc_offset, value);
+}
+
+static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
+{
+ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
+ omap->irq0_offset, value);
+}
+
+static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
+ enum omap_dwc3_vbus_id_status status)
+{
+ int ret;
+ u32 val;
+
+ switch (status) {
+ case OMAP_DWC3_ID_GROUND:
+ if (omap->vbus_reg) {
+ ret = regulator_enable(omap->vbus_reg);
+ if (ret) {
+ dev_err(omap->dev, "regulator enable failed\n");
+ return;
+ }
+ }
+
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val &= ~USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ dwc3_omap_write_utmi_ctrl(omap, val);
+ break;
+
+ case OMAP_DWC3_VBUS_VALID:
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
+ val |= USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_SESSVALID;
+ dwc3_omap_write_utmi_ctrl(omap, val);
+ break;
+
+ case OMAP_DWC3_ID_FLOAT:
+ if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg))
+ regulator_disable(omap->vbus_reg);
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ dwc3_omap_write_utmi_ctrl(omap, val);
+ break;
+
+ case OMAP_DWC3_VBUS_OFF:
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID);
+ val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND;
+ dwc3_omap_write_utmi_ctrl(omap, val);
+ break;
+
+ default:
+ dev_WARN(omap->dev, "invalid state\n");
+ }
+}
+
+static void dwc3_omap_enable_irqs(struct dwc3_omap *omap);
+static void dwc3_omap_disable_irqs(struct dwc3_omap *omap);
+
+static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
+{
+ struct dwc3_omap *omap = _omap;
+
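+ /* Mask the wrapper interrupts and defer the real work to the threaded handler */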
+ if (dwc3_omap_read_irqmisc_status(omap) ||
+ dwc3_omap_read_irq0_status(omap)) {
+ /* mask irqs */
+ dwc3_omap_disable_irqs(omap);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t dwc3_omap_interrupt_thread(int irq, void *_omap)
+{
+ struct dwc3_omap *omap = _omap;
+ u32 reg;
+
+ /* clear irq status flags */
+ reg = dwc3_omap_read_irqmisc_status(omap);
+ dwc3_omap_write_irqmisc_status(omap, reg);
+
+ reg = dwc3_omap_read_irq0_status(omap);
+ dwc3_omap_write_irq0_status(omap, reg);
+
+ /* unmask irqs */
+ dwc3_omap_enable_irqs(omap);
+
+ return IRQ_HANDLED;
+}
+
+static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
+{
+ u32 reg;
+
+ /* enable all IRQs */
+ reg = USBOTGSS_IRQO_COREIRQ_ST;
+ dwc3_omap_write_irq0_set(omap, reg);
+
+ reg = (USBOTGSS_IRQMISC_OEVT |
+ USBOTGSS_IRQMISC_DRVVBUS_RISE |
+ USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+ USBOTGSS_IRQMISC_IDPULLUP_RISE |
+ USBOTGSS_IRQMISC_DRVVBUS_FALL |
+ USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+ USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+ dwc3_omap_write_irqmisc_set(omap, reg);
+}
+
+static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
+{
+ u32 reg;
+
+ /* disable all IRQs */
+ reg = USBOTGSS_IRQO_COREIRQ_ST;
+ dwc3_omap_write_irq0_clr(omap, reg);
+
+ reg = (USBOTGSS_IRQMISC_OEVT |
+ USBOTGSS_IRQMISC_DRVVBUS_RISE |
+ USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+ USBOTGSS_IRQMISC_IDPULLUP_RISE |
+ USBOTGSS_IRQMISC_DRVVBUS_FALL |
+ USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+ USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+ dwc3_omap_write_irqmisc_clr(omap, reg);
+}
+
+static int dwc3_omap_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, id_nb);
+
+ if (event)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+
+ return NOTIFY_DONE;
+}
+
+static int dwc3_omap_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, vbus_nb);
+
+ if (event)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+ return NOTIFY_DONE;
+}
+
+static void dwc3_omap_map_offset(struct dwc3_omap *omap)
+{
+ struct device_node *node = omap->dev->of_node;
+
+ /*
+ * Differentiate between OMAP5 and AM437x.
+ *
+ * The wrapper revision is the same on OMAP5 (ES2.0) and AM437x, even
+ * though the wrapper register offsets differ, so the DT compatible
+ * string is used to identify AM437x.
+ */
+ if (of_device_is_compatible(node, "ti,am437x-dwc3")) {
+ omap->irq_eoi_offset = USBOTGSS_EOI_OFFSET;
+ omap->irq0_offset = USBOTGSS_IRQ0_OFFSET;
+ omap->irqmisc_offset = USBOTGSS_IRQMISC_OFFSET;
+ omap->utmi_otg_offset = USBOTGSS_UTMI_OTG_OFFSET;
+ omap->debug_offset = USBOTGSS_DEBUG_OFFSET;
+ }
+}
+
+static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
+{
+ u32 reg;
+ struct device_node *node = omap->dev->of_node;
+ u32 utmi_mode = 0;
+
+ reg = dwc3_omap_read_utmi_ctrl(omap);
+
+ of_property_read_u32(node, "utmi-mode", &utmi_mode);
+
+ switch (utmi_mode) {
+ case DWC3_OMAP_UTMI_MODE_SW:
+ reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
+ break;
+ case DWC3_OMAP_UTMI_MODE_HW:
+ reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
+ break;
+ default:
+ dev_WARN(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
+ }
+
+ dwc3_omap_write_utmi_ctrl(omap, reg);
+}
+
+static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
+{
+ int ret;
+ struct device_node *node = omap->dev->of_node;
+ struct extcon_dev *edev;
+
+ if (of_property_read_bool(node, "extcon")) {
+ edev = extcon_get_edev_by_phandle(omap->dev, 0);
+ if (IS_ERR(edev)) {
+ dev_vdbg(omap->dev, "couldn't get extcon device\n");
+ return -EPROBE_DEFER;
+ }
+
+ omap->vbus_nb.notifier_call = dwc3_omap_vbus_notifier;
+ ret = devm_extcon_register_notifier(omap->dev, edev,
+ EXTCON_USB, &omap->vbus_nb);
+ if (ret < 0)
+ dev_vdbg(omap->dev, "failed to register notifier for USB\n");
+
+ omap->id_nb.notifier_call = dwc3_omap_id_notifier;
+ ret = devm_extcon_register_notifier(omap->dev, edev,
+ EXTCON_USB_HOST, &omap->id_nb);
+ if (ret < 0)
+ dev_vdbg(omap->dev, "failed to register notifier for USB-HOST\n");
+
+ if (extcon_get_state(edev, EXTCON_USB) == true)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+ if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+
+ omap->edev = edev;
+ }
+
+ return 0;
+}
+
+static int dwc3_omap_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+
+ struct dwc3_omap *omap;
+ struct device *dev = &pdev->dev;
+ struct regulator *vbus_reg = NULL;
+
+ int ret;
+ int irq;
+
+ void __iomem *base;
+
+ if (!node) {
+ dev_err(dev, "device node not found\n");
+ return -EINVAL;
+ }
+
+ omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
+ if (!omap)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, omap);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_property_read_bool(node, "vbus-supply")) {
+ vbus_reg = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(vbus_reg)) {
+ dev_err(dev, "vbus init failed\n");
+ return PTR_ERR(vbus_reg);
+ }
+ }
+
+ omap->dev = dev;
+ omap->irq = irq;
+ omap->base = base;
+ omap->vbus_reg = vbus_reg;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "get_sync failed with err %d\n", ret);
+ goto err1;
+ }
+
+ dwc3_omap_map_offset(omap);
+ dwc3_omap_set_utmi_mode(omap);
+
+ ret = dwc3_omap_extcon_register(omap);
+ if (ret < 0)
+ goto err1;
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create dwc3 core\n");
+ goto err1;
+ }
+
+ ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+ dwc3_omap_interrupt_thread, IRQF_SHARED,
+ "dwc3-omap", omap);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ omap->irq, ret);
+ goto err1;
+ }
+ dwc3_omap_enable_irqs(omap);
+ return 0;
+
+err1:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int dwc3_omap_remove(struct platform_device *pdev)
+{
+ struct dwc3_omap *omap = platform_get_drvdata(pdev);
+
+ dwc3_omap_disable_irqs(omap);
+ disable_irq(omap->irq);
+ of_platform_depopulate(omap->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_dwc3_match[] = {
+ {
+ .compatible = "ti,dwc3"
+ },
+ {
+ .compatible = "ti,am437x-dwc3"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_omap_suspend(struct device *dev)
+{
+ struct dwc3_omap *omap = dev_get_drvdata(dev);
+
+ omap->utmi_otg_ctrl = dwc3_omap_read_utmi_ctrl(omap);
+ dwc3_omap_disable_irqs(omap);
+
+ return 0;
+}
+
+static int dwc3_omap_resume(struct device *dev)
+{
+ struct dwc3_omap *omap = dev_get_drvdata(dev);
+
+ dwc3_omap_write_utmi_ctrl(omap, omap->utmi_otg_ctrl);
+ dwc3_omap_enable_irqs(omap);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void dwc3_omap_complete(struct device *dev)
+{
+ struct dwc3_omap *omap = dev_get_drvdata(dev);
+
+ if (extcon_get_state(omap->edev, EXTCON_USB))
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+ if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+}
+
+static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
+ .complete = dwc3_omap_complete,
+};
+
+#define DEV_PM_OPS (&dwc3_omap_dev_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver dwc3_omap_driver = {
+ .probe = dwc3_omap_probe,
+ .remove = dwc3_omap_remove,
+ .driver = {
+ .name = "omap-dwc3",
+ .of_match_table = of_dwc3_match,
+ .pm = DEV_PM_OPS,
+ },
+};
+
+module_platform_driver(dwc3_omap_driver);
+
+MODULE_ALIAS("platform:omap-dwc3");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
new file mode 100644
index 000000000..ae25ee832
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-pci.c - PCI Specific glue layer
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+
+#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
+#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
+#define PCI_DEVICE_ID_INTEL_BSW 0x22b7
+#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
+#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
+#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
+#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
+#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLLP 0x02ee
+#define PCI_DEVICE_ID_INTEL_CMLH 0x06ee
+#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
+#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
+#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
+#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
+#define PCI_DEVICE_ID_INTEL_EHL 0x4b7e
+#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
+#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
+#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
+#define PCI_DEVICE_ID_INTEL_ADL 0x460e
+#define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee
+#define PCI_DEVICE_ID_INTEL_ADLN 0x465e
+#define PCI_DEVICE_ID_INTEL_ADLN_PCH 0x54ee
+#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
+#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
+#define PCI_DEVICE_ID_INTEL_MTLM 0x7eb1
+#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
+#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
+#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
+#define PCI_DEVICE_ID_AMD_MR 0x163a
+
+#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
+#define PCI_INTEL_BXT_STATE_D0 0
+#define PCI_INTEL_BXT_STATE_D3 3
+
+#define GP_RWBAR 1
+#define GP_RWREG1 0xa0
+#define GP_RWREG1_ULPI_REFCLK_DISABLE (1 << 17)
+
+/**
+ * struct dwc3_pci - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ * @guid: _DSM GUID
+ * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
+ * @wakeup_work: work for asynchronous resume
+ */
+struct dwc3_pci {
+ struct platform_device *dwc3;
+ struct pci_dev *pci;
+
+ guid_t guid;
+
+ unsigned int has_dsm_for_pm:1;
+ struct work_struct wakeup_work;
+};
+
+static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
+static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
+ { "reset-gpios", &reset_gpios, 1 },
+ { "cs-gpios", &cs_gpios, 1 },
+ { },
+};
+
+static struct gpiod_lookup_table platform_bytcr_gpios = {
+ .dev_id = "0000:00:16.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
+ {}
+ },
+};
+
+static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
+{
+ void __iomem *reg;
+ u32 value;
+
+ reg = pcim_iomap(pci, GP_RWBAR, 0);
+ if (!reg)
+ return -ENOMEM;
+
+ value = readl(reg + GP_RWREG1);
+ if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
+ goto unmap; /* ULPI refclk already enabled */
+
+ value &= ~GP_RWREG1_ULPI_REFCLK_DISABLE;
+ writel(value, reg + GP_RWREG1);
+ /* This comes from the Intel Android x86 tree without any explanation */
+ msleep(100);
+unmap:
+ pcim_iounmap(pci, reg);
+ return 0;
+}
+
+static const struct property_entry dwc3_pci_intel_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
+static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
+static const struct property_entry dwc3_pci_intel_byt_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
+static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
+static const struct property_entry dwc3_pci_amd_properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
+ PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
+ PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
+ /* FIXME these quirks should be removed when AMD NL tapes out */
+ PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
+static const struct property_entry dwc3_pci_mr_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_BOOL("usb-role-switch"),
+ PROPERTY_ENTRY_STRING("role-switch-default-mode", "host"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
+static const struct software_node dwc3_pci_intel_swnode = {
+ .properties = dwc3_pci_intel_properties,
+};
+
+static const struct software_node dwc3_pci_intel_phy_charger_detect_swnode = {
+ .properties = dwc3_pci_intel_phy_charger_detect_properties,
+};
+
+static const struct software_node dwc3_pci_intel_byt_swnode = {
+ .properties = dwc3_pci_intel_byt_properties,
+};
+
+static const struct software_node dwc3_pci_intel_mrfld_swnode = {
+ .properties = dwc3_pci_mrfld_properties,
+};
+
+static const struct software_node dwc3_pci_amd_swnode = {
+ .properties = dwc3_pci_amd_properties,
+};
+
+static const struct software_node dwc3_pci_amd_mr_swnode = {
+ .properties = dwc3_pci_mr_properties,
+};
+
+static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ const struct software_node *swnode)
+{
+ struct pci_dev *pdev = dwc->pci;
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BXT_M ||
+ pdev->device == PCI_DEVICE_ID_INTEL_EHL) {
+ guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
+ dwc->has_dsm_for_pm = true;
+ }
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+ struct gpio_desc *gpio;
+ int ret;
+
+ /* On BYT the FW does not always enable the refclock */
+ ret = dwc3_byt_enable_ulpi_refclock(pdev);
+ if (ret)
+ return ret;
+
+ ret = devm_acpi_dev_add_driver_gpios(&pdev->dev,
+ acpi_dwc3_byt_gpios);
+ if (ret)
+ dev_dbg(&pdev->dev, "failed to add mapping table\n");
+
+ /*
+ * A lot of BYT devices lack ACPI resource entries for
+ * the GPIOs. If the ACPI entry for the GPIO controller
+ * is present, add a fallback mapping to the reference
+ * design GPIOs, which all boards seem to use.
+ */
+ if (acpi_dev_present("INT33FC", NULL, -1))
+ gpiod_add_lookup_table(&platform_bytcr_gpios);
+
+ /*
+ * These GPIOs will turn on the USB2 PHY. Note that the gpio
+ * descriptors are put (released) again here because the phy driver
+ * might want to grab them, too.
+ */
+ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
+
+ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
+ usleep_range(10000, 11000);
+ }
+
+ /*
+ * Make the pdev name predictable (only 1 DWC3 on BYT)
+ * and patch the phy dev-name into the lookup table so
+ * that the phy-driver can get the GPIOs.
+ */
+ dwc->dwc3->id = PLATFORM_DEVID_NONE;
+ platform_bytcr_gpios.dev_id = "dwc3.ulpi";
+
+ /*
+ * Some Android tablets with a Crystal Cove PMIC
+ * (INT33FD) rely on the TUSB1211 phy for charger
+ * detection. These can be identified by the fact that they
+ * do _not_ use the standard ACPI battery and AC drivers.
+ */
+ if (acpi_dev_present("INT33FD", "1", 2) &&
+ acpi_quirk_skip_acpi_ac_and_battery()) {
+ dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n");
+ swnode = &dwc3_pci_intel_phy_charger_detect_swnode;
+ }
+ }
+ }
+
+ return device_add_software_node(&dwc->dwc3->dev, swnode);
+}
+
+#ifdef CONFIG_PM
+static void dwc3_pci_resume_work(struct work_struct *work)
+{
+ struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
+ struct platform_device *dwc3 = dwc->dwc3;
+ int ret;
+
+ ret = pm_runtime_get_sync(&dwc3->dev);
+ if (ret < 0) {
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
+ return;
+ }
+
+ pm_runtime_mark_last_busy(&dwc3->dev);
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
+}
+#endif
+
+static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct dwc3_pci *dwc;
+ struct resource res[2];
+ int ret;
+ struct device *dev = &pci->dev;
+
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(dev, "failed to enable pci device\n");
+ return -ENODEV;
+ }
+
+ pci_set_master(pci);
+
+ dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!dwc->dwc3)
+ return -ENOMEM;
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
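+ /* Hand PCI BAR 0 and the legacy interrupt down to the child dwc3 device */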
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = "dwc_usb3";
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = "dwc_usb3";
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(dev, "couldn't add resources to dwc3 device\n");
+ goto err;
+ }
+
+ dwc->pci = pci;
+ dwc->dwc3->dev.parent = dev;
+ ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
+
+ ret = dwc3_pci_quirks(dwc, (void *)id->driver_data);
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(dwc->dwc3);
+ if (ret) {
+ dev_err(dev, "failed to register dwc3 device\n");
+ goto err;
+ }
+
+ device_init_wakeup(dev, true);
+ pci_set_drvdata(pci, dwc);
+ pm_runtime_put(dev);
+#ifdef CONFIG_PM
+ INIT_WORK(&dwc->wakeup_work, dwc3_pci_resume_work);
+#endif
+
+ return 0;
+err:
+ device_remove_software_node(&dwc->dwc3->dev);
+ platform_device_put(dwc->dwc3);
+ return ret;
+}
+
+static void dwc3_pci_remove(struct pci_dev *pci)
+{
+ struct dwc3_pci *dwc = pci_get_drvdata(pci);
+ struct pci_dev *pdev = dwc->pci;
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
+ gpiod_remove_lookup_table(&platform_bytcr_gpios);
+#ifdef CONFIG_PM
+ cancel_work_sync(&dwc->wakeup_work);
+#endif
+ device_init_wakeup(&pci->dev, false);
+ pm_runtime_get(&pci->dev);
+ device_remove_software_node(&dwc->dwc3->dev);
+ platform_device_unregister(dwc->dwc3);
+}
+
+static const struct pci_device_id dwc3_pci_id_table[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BSW),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
+ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
+ (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTH),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT_M),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_APL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_KBP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_GLK),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_PCH),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLN),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLN_PCH),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLM),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
+ (kernel_ulong_t) &dwc3_pci_amd_swnode, },
+
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MR),
+ (kernel_ulong_t)&dwc3_pci_amd_mr_swnode, },
+
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
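+/* Ask the PMU, through the Intel BXT ACPI _DSM, to enter the requested D-state */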
+static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
+{
+ union acpi_object *obj;
+ union acpi_object tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ if (!dwc->has_dsm_for_pm)
+ return 0;
+
+ tmp.type = ACPI_TYPE_INTEGER;
+ tmp.integer.value = param;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), &dwc->guid,
+ 1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
+ if (!obj) {
+ dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+#endif /* CONFIG_PM || CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int dwc3_pci_runtime_suspend(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ if (device_can_wakeup(dev))
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
+
+ return -EBUSY;
+}
+
+static int dwc3_pci_runtime_resume(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
+ if (ret)
+ return ret;
+
+ queue_work(pm_wq, &dwc->wakeup_work);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_pci_suspend(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
+}
+
+static int dwc3_pci_resume(struct device *dev)
+{
+ struct dwc3_pci *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dwc3_pci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
+ SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
+ NULL)
+};
+
+static struct pci_driver dwc3_pci_driver = {
+ .name = "dwc3-pci",
+ .id_table = dwc3_pci_id_table,
+ .probe = dwc3_pci_probe,
+ .remove = dwc3_pci_remove,
+ .driver = {
+ .pm = &dwc3_pci_dev_pm_ops,
+ }
+};
+
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 PCI Glue Layer");
+
+module_pci_driver(dwc3_pci_driver);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
new file mode 100644
index 000000000..93747ab2c
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Inspired by dwc3-of-simple.c
+ */
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/of_clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/extcon.h>
+#include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/of.h>
+#include <linux/reset.h>
+#include <linux/iopoll.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb.h>
+#include "core.h"
+
+/* USB QSCRATCH Hardware registers */
+#define QSCRATCH_HS_PHY_CTRL 0x10
+#define UTMI_OTG_VBUS_VALID BIT(20)
+#define SW_SESSVLD_SEL BIT(28)
+
+#define QSCRATCH_SS_PHY_CTRL 0x30
+#define LANE0_PWR_PRESENT BIT(24)
+
+#define QSCRATCH_GENERAL_CFG 0x08
+#define PIPE_UTMI_CLK_SEL BIT(0)
+#define PIPE3_PHYSTATUS_SW BIT(3)
+#define PIPE_UTMI_CLK_DIS BIT(8)
+
+#define PWR_EVNT_IRQ_STAT_REG 0x58
+#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
+#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
+
+#define SDM845_QSCRATCH_BASE_OFFSET 0xf8800
+#define SDM845_QSCRATCH_SIZE 0x400
+#define SDM845_DWC3_CORE_SIZE 0xcd00
+
+/* Interconnect path bandwidths in MBps */
+#define USB_MEMORY_AVG_HS_BW MBps_to_icc(240)
+#define USB_MEMORY_PEAK_HS_BW MBps_to_icc(700)
+#define USB_MEMORY_AVG_SS_BW MBps_to_icc(1000)
+#define USB_MEMORY_PEAK_SS_BW MBps_to_icc(2500)
+#define APPS_USB_AVG_BW 0
+#define APPS_USB_PEAK_BW MBps_to_icc(40)
+
+struct dwc3_acpi_pdata {
+ u32 qscratch_base_offset;
+ u32 qscratch_base_size;
+ u32 dwc3_core_base_size;
+ int hs_phy_irq_index;
+ int dp_hs_phy_irq_index;
+ int dm_hs_phy_irq_index;
+ int ss_phy_irq_index;
+ bool is_urs;
+};
+
+struct dwc3_qcom {
+ struct device *dev;
+ void __iomem *qscratch_base;
+ struct platform_device *dwc3;
+ struct platform_device *urs_usb;
+ struct clk **clks;
+ int num_clocks;
+ struct reset_control *resets;
+
+ int hs_phy_irq;
+ int dp_hs_phy_irq;
+ int dm_hs_phy_irq;
+ int ss_phy_irq;
+ enum usb_device_speed usb2_speed;
+
+ struct extcon_dev *edev;
+ struct extcon_dev *host_edev;
+ struct notifier_block vbus_nb;
+ struct notifier_block host_nb;
+
+ const struct dwc3_acpi_pdata *acpi_pdata;
+
+ enum usb_dr_mode mode;
+ bool is_suspended;
+ bool pm_suspended;
+ struct icc_path *icc_path_ddr;
+ struct icc_path *icc_path_apps;
+};
+
+static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
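+
+/*
+ * Minimal usage sketch (editorial illustration, not part of this driver):
+ * the helpers above do a read-modify-write and then read the register back
+ * so the posted write is flushed to the QSCRATCH block before the caller
+ * continues. A hypothetical caller would look like this:
+ */
+static void __maybe_unused dwc3_qcom_setbits_example(struct dwc3_qcom *qcom)
+{
+ /* hypothetical example: force the SS lane power-present indication */
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
+ LANE0_PWR_PRESENT);
+}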
+
+static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable)
+{
+ if (enable) {
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
+ LANE0_PWR_PRESENT);
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
+ UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
+ } else {
+ dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
+ LANE0_PWR_PRESENT);
+ dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
+ UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
+ }
+}
+
+static int dwc3_qcom_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb);
+
+ /* enable vbus override for device mode */
+ dwc3_qcom_vbus_override_enable(qcom, event);
+ qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST;
+
+ return NOTIFY_DONE;
+}
+
+static int dwc3_qcom_host_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb);
+
+ /* disable vbus override in host mode */
+ dwc3_qcom_vbus_override_enable(qcom, !event);
+ qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL;
+
+ return NOTIFY_DONE;
+}
+
+static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
+{
+ struct device *dev = qcom->dev;
+ struct extcon_dev *host_edev;
+ int ret;
+
+ if (!of_property_read_bool(dev->of_node, "extcon"))
+ return 0;
+
+ qcom->edev = extcon_get_edev_by_phandle(dev, 0);
+ if (IS_ERR(qcom->edev))
+ return PTR_ERR(qcom->edev);
+
+ qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier;
+
+ qcom->host_edev = extcon_get_edev_by_phandle(dev, 1);
+ if (IS_ERR(qcom->host_edev))
+ qcom->host_edev = NULL;
+
+ ret = devm_extcon_register_notifier(dev, qcom->edev, EXTCON_USB,
+ &qcom->vbus_nb);
+ if (ret < 0) {
+ dev_err(dev, "VBUS notifier register failed\n");
+ return ret;
+ }
+
+ if (qcom->host_edev)
+ host_edev = qcom->host_edev;
+ else
+ host_edev = qcom->edev;
+
+ qcom->host_nb.notifier_call = dwc3_qcom_host_notifier;
+ ret = devm_extcon_register_notifier(dev, host_edev, EXTCON_USB_HOST,
+ &qcom->host_nb);
+ if (ret < 0) {
+ dev_err(dev, "Host notifier register failed\n");
+ return ret;
+ }
+
+ /* Update initial VBUS override based on extcon state */
+ if (extcon_get_state(qcom->edev, EXTCON_USB) ||
+ !extcon_get_state(host_edev, EXTCON_USB_HOST))
+ dwc3_qcom_vbus_notifier(&qcom->vbus_nb, true, qcom->edev);
+ else
+ dwc3_qcom_vbus_notifier(&qcom->vbus_nb, false, qcom->edev);
+
+ return 0;
+}
+
+static int dwc3_qcom_interconnect_enable(struct dwc3_qcom *qcom)
+{
+ int ret;
+
+ ret = icc_enable(qcom->icc_path_ddr);
+ if (ret)
+ return ret;
+
+ ret = icc_enable(qcom->icc_path_apps);
+ if (ret)
+ icc_disable(qcom->icc_path_ddr);
+
+ return ret;
+}
+
+static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom)
+{
+ int ret;
+
+ ret = icc_disable(qcom->icc_path_ddr);
+ if (ret)
+ return ret;
+
+ ret = icc_disable(qcom->icc_path_apps);
+ if (ret)
+ icc_enable(qcom->icc_path_ddr);
+
+ return ret;
+}
+
+/**
+ * dwc3_qcom_interconnect_init() - Get interconnect path handles and set
+ * bandwidth.
+ * @qcom: Pointer to the concerned USB core.
+ */
+static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+{
+ enum usb_device_speed max_speed;
+ struct device *dev = qcom->dev;
+ int ret;
+
+ if (has_acpi_companion(dev))
+ return 0;
+
+ qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
+ if (IS_ERR(qcom->icc_path_ddr)) {
+ dev_err(dev, "failed to get usb-ddr path: %ld\n",
+ PTR_ERR(qcom->icc_path_ddr));
+ return PTR_ERR(qcom->icc_path_ddr);
+ }
+
+ qcom->icc_path_apps = of_icc_get(dev, "apps-usb");
+ if (IS_ERR(qcom->icc_path_apps)) {
+ dev_err(dev, "failed to get apps-usb path: %ld\n",
+ PTR_ERR(qcom->icc_path_apps));
+ ret = PTR_ERR(qcom->icc_path_apps);
+ goto put_path_ddr;
+ }
+
+ max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+ if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) {
+ ret = icc_set_bw(qcom->icc_path_ddr,
+ USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
+ } else {
+ ret = icc_set_bw(qcom->icc_path_ddr,
+ USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
+ }
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
+ goto put_path_apps;
+ }
+
+ ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
+ goto put_path_apps;
+ }
+
+ return 0;
+
+put_path_apps:
+ icc_put(qcom->icc_path_apps);
+put_path_ddr:
+ icc_put(qcom->icc_path_ddr);
+ return ret;
+}
+
+/**
+ * dwc3_qcom_interconnect_exit() - Release interconnect path handles
+ * @qcom: Pointer to the concerned USB core.
+ *
+ * Release the interconnect path handles acquired during init.
+ */
+static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
+{
+ icc_put(qcom->icc_path_ddr);
+ icc_put(qcom->icc_path_apps);
+}
+
+/* Only usable in contexts where the role cannot change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ dwc = platform_get_drvdata(qcom->dwc3);
+
+ /* Core driver may not have probed yet. */
+ if (!dwc)
+ return false;
+
+ return dwc->xhci;
+}
+
+static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+ struct usb_device *udev;
+ struct usb_hcd __maybe_unused *hcd;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ hcd = platform_get_drvdata(dwc->xhci);
+
+ /*
+ * It is possible to query the speed of all children of the
+ * USB 2.0 root hub via usb_hub_for_each_child(), but the DWC3
+ * core currently supports only one port per controller, so
+ * checking port 1 is sufficient.
+ */
+#ifdef CONFIG_USB
+ udev = usb_hub_find_child(hcd->self.root_hub, 1);
+#else
+ udev = NULL;
+#endif
+ if (!udev)
+ return USB_SPEED_UNKNOWN;
+
+ return udev->speed;
+}
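+
+/*
+ * Hypothetical sketch (editorial illustration, not part of this driver):
+ * if more than one root-hub port were ever exposed, the per-port speed
+ * could be walked with usb_hub_for_each_child() instead of checking only
+ * port 1 as dwc3_qcom_read_usb2_speed() does above.
+ */
+#ifdef CONFIG_USB
+static enum usb_device_speed __maybe_unused
+dwc3_qcom_read_max_usb2_speed(struct usb_hcd *hcd)
+{
+ enum usb_device_speed speed = USB_SPEED_UNKNOWN;
+ struct usb_device *child;
+ int port1;
+
+ usb_hub_for_each_child(hcd->self.root_hub, port1, child)
+ if (child->speed > speed)
+ speed = child->speed;
+
+ return speed;
+}
+#endif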
+
+static void dwc3_qcom_enable_wakeup_irq(int irq, unsigned int polarity)
+{
+ if (!irq)
+ return;
+
+ if (polarity)
+ irq_set_irq_type(irq, polarity);
+
+ enable_irq(irq);
+ enable_irq_wake(irq);
+}
+
+static void dwc3_qcom_disable_wakeup_irq(int irq)
+{
+ if (!irq)
+ return;
+
+ disable_irq_wake(irq);
+ disable_irq_nosync(irq);
+}
+
+static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
+{
+ dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
+
+ if (qcom->usb2_speed == USB_SPEED_LOW) {
+ dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
+ } else if ((qcom->usb2_speed == USB_SPEED_HIGH) ||
+ (qcom->usb2_speed == USB_SPEED_FULL)) {
+ dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
+ } else {
+ dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
+ dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
+ }
+
+ dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq);
+}
+
+static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
+{
+ dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq, 0);
+
+ /*
+ * Configure the DP/DM line interrupts based on the USB2 device attached to
+ * the root hub port. When a HS/FS device is connected, configure the DP line
+ * as falling edge to detect both disconnect and remote wakeup scenarios. When
+ * an LS device is connected, configure the DM line as falling edge for the
+ * same purpose. When no device is connected, configure both DP and DM lines
+ * as rising edge to detect a HS/FS/LS device connect scenario.
+ */
+
+ if (qcom->usb2_speed == USB_SPEED_LOW) {
+ dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ } else if ((qcom->usb2_speed == USB_SPEED_HIGH) ||
+ (qcom->usb2_speed == USB_SPEED_FULL)) {
+ dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ } else {
+ dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq,
+ IRQ_TYPE_EDGE_RISING);
+ dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq,
+ IRQ_TYPE_EDGE_RISING);
+ }
+
+ dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
+}
+
+static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
+{
+ u32 val;
+ int i, ret;
+
+ if (qcom->is_suspended)
+ return 0;
+
+ val = readl(qcom->qscratch_base + PWR_EVNT_IRQ_STAT_REG);
+ if (!(val & PWR_EVNT_LPM_IN_L2_MASK))
+ dev_err(qcom->dev, "HS-PHY not in L2\n");
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(qcom->clks[i]);
+
+ ret = dwc3_qcom_interconnect_disable(qcom);
+ if (ret)
+ dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
+
+ /*
+ * The role is stable during suspend as role switching is done from a
+ * freezable workqueue.
+ */
+ if (dwc3_qcom_is_host(qcom) && wakeup) {
+ qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
+ dwc3_qcom_enable_interrupts(qcom);
+ }
+
+ qcom->is_suspended = true;
+
+ return 0;
+}
+
+static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
+{
+ int ret;
+ int i;
+
+ if (!qcom->is_suspended)
+ return 0;
+
+ if (dwc3_qcom_is_host(qcom) && wakeup)
+ dwc3_qcom_disable_interrupts(qcom);
+
+ for (i = 0; i < qcom->num_clocks; i++) {
+ ret = clk_prepare_enable(qcom->clks[i]);
+ if (ret < 0) {
+ while (--i >= 0)
+ clk_disable_unprepare(qcom->clks[i]);
+ return ret;
+ }
+ }
+
+ ret = dwc3_qcom_interconnect_enable(qcom);
+ if (ret)
+ dev_warn(qcom->dev, "failed to enable interconnect: %d\n", ret);
+
+ /* Clear existing events from PHY related to L2 in/out */
+ dwc3_qcom_setbits(qcom->qscratch_base, PWR_EVNT_IRQ_STAT_REG,
+ PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
+
+ qcom->is_suspended = false;
+
+ return 0;
+}
+
+static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
+{
+ struct dwc3_qcom *qcom = data;
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ /* If pm_suspended then let pm_resume take care of resuming h/w */
+ if (qcom->pm_suspended)
+ return IRQ_HANDLED;
+
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
+ pm_runtime_resume(&dwc->xhci->dev);
+
+ return IRQ_HANDLED;
+}
+
+static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
+{
+ /* Configure dwc3 to use the UTMI clock, as the PIPE clock is not present */
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
+ PIPE_UTMI_CLK_DIS);
+
+ usleep_range(100, 1000);
+
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
+ PIPE_UTMI_CLK_SEL | PIPE3_PHYSTATUS_SW);
+
+ usleep_range(100, 1000);
+
+ dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
+ PIPE_UTMI_CLK_DIS);
+}
+
+static int dwc3_qcom_get_irq(struct platform_device *pdev,
+ const char *name, int num)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (np)
+ ret = platform_get_irq_byname_optional(pdev_irq, name);
+ else
+ ret = platform_get_irq_optional(pdev_irq, num);
+
+ return ret;
+}
+
+static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata;
+ int irq;
+ int ret;
+
+ irq = dwc3_qcom_get_irq(pdev, "hs_phy_irq",
+ pdata ? pdata->hs_phy_irq_index : -1);
+ if (irq > 0) {
+ /* Keep wakeup interrupts disabled until suspend */
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+ IRQF_ONESHOT,
+ "qcom_dwc3 HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
+ return ret;
+ }
+ qcom->hs_phy_irq = irq;
+ }
+
+ irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
+ pdata ? pdata->dp_hs_phy_irq_index : -1);
+ if (irq > 0) {
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+ IRQF_ONESHOT,
+ "qcom_dwc3 DP_HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
+ return ret;
+ }
+ qcom->dp_hs_phy_irq = irq;
+ }
+
+ irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
+ pdata ? pdata->dm_hs_phy_irq_index : -1);
+ if (irq > 0) {
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+ IRQF_ONESHOT,
+ "qcom_dwc3 DM_HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
+ return ret;
+ }
+ qcom->dm_hs_phy_irq = irq;
+ }
+
+ irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
+ pdata ? pdata->ss_phy_irq_index : -1);
+ if (irq > 0) {
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+ IRQF_ONESHOT,
+ "qcom_dwc3 SS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
+ return ret;
+ }
+ qcom->ss_phy_irq = irq;
+ }
+
+ return 0;
+}
+
+static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
+{
+ struct device *dev = qcom->dev;
+ struct device_node *np = dev->of_node;
+ int i;
+
+ if (!np || !count)
+ return 0;
+
+ if (count < 0)
+ return count;
+
+ qcom->num_clocks = count;
+
+ qcom->clks = devm_kcalloc(dev, qcom->num_clocks,
+ sizeof(struct clk *), GFP_KERNEL);
+ if (!qcom->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < qcom->num_clocks; i++) {
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, i);
+ if (IS_ERR(clk)) {
+ while (--i >= 0)
+ clk_put(qcom->clks[i]);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret < 0) {
+ while (--i >= 0) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+ clk_put(clk);
+
+ return ret;
+ }
+
+ qcom->clks[i] = clk;
+ }
+
+ return 0;
+}
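+
+/*
+ * Editorial sketch, not the driver's code: the same acquire-and-enable
+ * pattern with error unwinding can also be expressed with the clk_bulk
+ * helpers (as dwc3-xilinx.c later in this series does); shown here purely
+ * for illustration, with a hypothetical function name.
+ */
+static int __maybe_unused dwc3_qcom_clk_bulk_sketch(struct device *dev)
+{
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ num_clks = devm_clk_bulk_get_all(dev, &clks);
+ if (num_clks < 0)
+ return num_clks;
+
+ /* devm unwinds the get; the caller must disable/unprepare on teardown */
+ return clk_bulk_prepare_enable(num_clks, clks);
+}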
+
+static const struct property_entry dwc3_qcom_acpi_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "host"),
+ {}
+};
+
+static const struct software_node dwc3_qcom_swnode = {
+ .properties = dwc3_qcom_acpi_properties,
+};
+
+static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct resource *res, *child_res = NULL;
+ struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
+ pdev;
+ int irq;
+ int ret;
+
+ qcom->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+ if (!qcom->dwc3)
+ return -ENOMEM;
+
+ qcom->dwc3->dev.parent = dev;
+ qcom->dwc3->dev.type = dev->type;
+ qcom->dwc3->dev.dma_mask = dev->dma_mask;
+ qcom->dwc3->dev.dma_parms = dev->dma_parms;
+ qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;
+
+ child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
+ if (!child_res) {
+ platform_device_put(qcom->dwc3);
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ child_res[0].flags = res->flags;
+ child_res[0].start = res->start;
+ child_res[0].end = child_res[0].start +
+ qcom->acpi_pdata->dwc3_core_base_size;
+
+ irq = platform_get_irq(pdev_irq, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ child_res[1].flags = IORESOURCE_IRQ;
+ child_res[1].start = child_res[1].end = irq;
+
+ ret = platform_device_add_resources(qcom->dwc3, child_res, 2);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add resources\n");
+ goto out;
+ }
+
+ ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add properties\n");
+ goto out;
+ }
+
+ ret = platform_device_add(qcom->dwc3);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add device\n");
+ device_remove_software_node(&qcom->dwc3->dev);
+ goto out;
+ }
+ kfree(child_res);
+ return 0;
+
+out:
+ platform_device_put(qcom->dwc3);
+ kfree(child_res);
+ return ret;
+}
+
+static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node, *dwc3_np;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+ if (!dwc3_np) {
+ dev_err(dev, "failed to find dwc3 core child\n");
+ return -ENODEV;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to register dwc3 core - %d\n", ret);
+ goto node_put;
+ }
+
+ qcom->dwc3 = of_find_device_by_node(dwc3_np);
+ if (!qcom->dwc3) {
+ ret = -ENODEV;
+ dev_err(dev, "failed to get dwc3 platform device\n");
+ of_platform_depopulate(dev);
+ }
+
+node_put:
+ of_node_put(dwc3_np);
+
+ return ret;
+}
+
+static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+{
+ struct platform_device *urs_usb = NULL;
+ struct fwnode_handle *fwh;
+ struct acpi_device *adev;
+ char name[8];
+ int ret;
+ int id;
+
+ /* Figure out device id */
+ ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
+ if (!ret)
+ return NULL;
+
+ /* Find the child using name */
+ snprintf(name, sizeof(name), "USB%d", id);
+ fwh = fwnode_get_named_child_node(dev->fwnode, name);
+ if (!fwh)
+ return NULL;
+
+ adev = to_acpi_device_node(fwh);
+ if (!adev)
+ goto err_put_handle;
+
+ urs_usb = acpi_create_platform_device(adev, NULL);
+ if (IS_ERR_OR_NULL(urs_usb))
+ goto err_put_handle;
+
+ return urs_usb;
+
+err_put_handle:
+ fwnode_handle_put(fwh);
+
+ return urs_usb;
+}
+
+static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
+{
+ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
+
+ platform_device_unregister(urs_usb);
+ fwnode_handle_put(fwh);
+}
+
+static int dwc3_qcom_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ struct resource *res, *parent_res = NULL;
+ struct resource local_res;
+ int ret, i;
+ bool ignore_pipe_clk;
+ bool wakeup_source;
+
+ qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
+ if (!qcom)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qcom);
+ qcom->dev = &pdev->dev;
+
+ if (has_acpi_companion(dev)) {
+ qcom->acpi_pdata = acpi_device_get_match_data(dev);
+ if (!qcom->acpi_pdata) {
+ dev_err(&pdev->dev, "no supporting ACPI device data\n");
+ return -EINVAL;
+ }
+ }
+
+ qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
+ if (IS_ERR(qcom->resets)) {
+ ret = PTR_ERR(qcom->resets);
+ dev_err(&pdev->dev, "failed to get resets, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_assert(qcom->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret);
+ return ret;
+ }
+
+ usleep_range(10, 1000);
+
+ ret = reset_control_deassert(qcom->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
+ goto reset_assert;
+ }
+
+ ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
+ if (ret) {
+ dev_err(dev, "failed to get clocks\n");
+ goto reset_assert;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (np) {
+ parent_res = res;
+ } else {
+ memcpy(&local_res, res, sizeof(struct resource));
+ parent_res = &local_res;
+
+ parent_res->start = res->start +
+ qcom->acpi_pdata->qscratch_base_offset;
+ parent_res->end = parent_res->start +
+ qcom->acpi_pdata->qscratch_base_size;
+
+ if (qcom->acpi_pdata->is_urs) {
+ qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
+ if (IS_ERR_OR_NULL(qcom->urs_usb)) {
+ dev_err(dev, "failed to create URS USB platdev\n");
+ if (!qcom->urs_usb)
+ ret = -ENODEV;
+ else
+ ret = PTR_ERR(qcom->urs_usb);
+ goto clk_disable;
+ }
+ }
+ }
+
+ qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ if (IS_ERR(qcom->qscratch_base)) {
+ ret = PTR_ERR(qcom->qscratch_base);
+ goto free_urs;
+ }
+
+ ret = dwc3_qcom_setup_irq(pdev);
+ if (ret) {
+ dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
+ goto free_urs;
+ }
+
+ /*
+ * Disable pipe_clk requirement if specified. Used when dwc3
+ * operates without SSPHY and only HS/FS/LS modes are supported.
+ */
+ ignore_pipe_clk = device_property_read_bool(dev,
+ "qcom,select-utmi-as-pipe-clk");
+ if (ignore_pipe_clk)
+ dwc3_qcom_select_utmi_clk(qcom);
+
+ if (np)
+ ret = dwc3_qcom_of_register_core(pdev);
+ else
+ ret = dwc3_qcom_acpi_register_core(pdev);
+
+ if (ret) {
+ dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+ goto free_urs;
+ }
+
+ ret = dwc3_qcom_interconnect_init(qcom);
+ if (ret)
+ goto depopulate;
+
+ qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
+
+ /* enable vbus override for device mode */
+ if (qcom->mode != USB_DR_MODE_HOST)
+ dwc3_qcom_vbus_override_enable(qcom, true);
+
+ /* register extcon to override sw_vbus on Vbus change later */
+ ret = dwc3_qcom_register_extcon(qcom);
+ if (ret)
+ goto interconnect_exit;
+
+ wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
+ device_init_wakeup(&pdev->dev, wakeup_source);
+ device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
+
+ qcom->is_suspended = false;
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_forbid(dev);
+
+ return 0;
+
+interconnect_exit:
+ dwc3_qcom_interconnect_exit(qcom);
+depopulate:
+ if (np) {
+ of_platform_depopulate(&pdev->dev);
+ } else {
+ device_remove_software_node(&qcom->dwc3->dev);
+ platform_device_del(qcom->dwc3);
+ }
+ platform_device_put(qcom->dwc3);
+free_urs:
+ if (qcom->urs_usb)
+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+clk_disable:
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+reset_assert:
+ reset_control_assert(qcom->resets);
+
+ return ret;
+}
+
+static int dwc3_qcom_remove(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int i;
+
+ if (np) {
+ of_platform_depopulate(&pdev->dev);
+ } else {
+ device_remove_software_node(&qcom->dwc3->dev);
+ platform_device_del(qcom->dwc3);
+ }
+ platform_device_put(qcom->dwc3);
+
+ if (qcom->urs_usb)
+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+ qcom->num_clocks = 0;
+
+ dwc3_qcom_interconnect_exit(qcom);
+ reset_control_assert(qcom->resets);
+
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = dwc3_qcom_suspend(qcom, wakeup);
+ if (ret)
+ return ret;
+
+ qcom->pm_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = dwc3_qcom_resume(qcom, wakeup);
+ if (ret)
+ return ret;
+
+ qcom->pm_suspended = false;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+
+ return dwc3_qcom_suspend(qcom, true);
+}
+
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+
+ return dwc3_qcom_resume(qcom, true);
+}
+
+static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id dwc3_qcom_of_match[] = {
+ { .compatible = "qcom,dwc3" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
+ .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
+ .qscratch_base_size = SDM845_QSCRATCH_SIZE,
+ .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
+ .hs_phy_irq_index = 1,
+ .dp_hs_phy_irq_index = 4,
+ .dm_hs_phy_irq_index = 3,
+ .ss_phy_irq_index = 2
+};
+
+static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
+ .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
+ .qscratch_base_size = SDM845_QSCRATCH_SIZE,
+ .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
+ .hs_phy_irq_index = 1,
+ .dp_hs_phy_irq_index = 4,
+ .dm_hs_phy_irq_index = 3,
+ .ss_phy_irq_index = 2,
+ .is_urs = true,
+};
+
+static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
+ { "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
+ { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
+ { "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata },
+ { "QCOM04A6", (unsigned long)&sdm845_acpi_pdata },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
+#endif
+
+static struct platform_driver dwc3_qcom_driver = {
+ .probe = dwc3_qcom_probe,
+ .remove = dwc3_qcom_remove,
+ .driver = {
+ .name = "dwc3-qcom",
+ .pm = &dwc3_qcom_dev_pm_ops,
+ .of_match_table = dwc3_qcom_of_match,
+ .acpi_match_table = ACPI_PTR(dwc3_qcom_acpi_match),
+ },
+};
+
+module_platform_driver(dwc3_qcom_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare DWC3 QCOM Glue Driver");
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
new file mode 100644
index 000000000..fea5290de
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dwc3-st.c - Support for dwc3 platform devices on ST Microelectronics platforms
+ *
+ * This is a small driver for the dwc3 to provide the glue logic
+ * to configure the controller. Tested on STi platforms.
+ *
+ * Copyright (C) 2014 Stmicroelectronics
+ *
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ * Contributors: Aymen Bouattay <aymen.bouattay@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Inspired by dwc3-omap.c and dwc3-exynos.c.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/usb/of.h>
+
+#include "core.h"
+#include "io.h"
+
+/* glue registers */
+#define CLKRST_CTRL 0x00
+#define AUX_CLK_EN BIT(0)
+#define SW_PIPEW_RESET_N BIT(4)
+#define EXT_CFG_RESET_N BIT(8)
+/*
+ * 1'b0 : The host controller complies with the xHCI revision 0.96
+ * 1'b1 : The host controller complies with the xHCI revision 1.0
+ */
+#define XHCI_REVISION BIT(12)
+
+#define USB2_VBUS_MNGMNT_SEL1 0x2C
+/*
+ * For all fields in USB2_VBUS_MNGMNT_SEL1
+ * 2'b00 : Override value from Reg 0x30 is selected
+ * 2'b01 : utmiotg_<signal_name> from usb3_top is selected
+ * 2'b10 : pipew_<signal_name> from PIPEW instance is selected
+ * 2'b11 : value is 1'b0
+ */
+#define USB2_VBUS_REG30 0x0
+#define USB2_VBUS_UTMIOTG 0x1
+#define USB2_VBUS_PIPEW 0x2
+#define USB2_VBUS_ZERO 0x3
+
+#define SEL_OVERRIDE_VBUSVALID(n) (n << 0)
+#define SEL_OVERRIDE_POWERPRESENT(n) (n << 4)
+#define SEL_OVERRIDE_BVALID(n) (n << 8)
+
+/* Static DRD configuration */
+#define USB3_CONTROL_MASK 0xf77
+
+#define USB3_DEVICE_NOT_HOST BIT(0)
+#define USB3_FORCE_VBUSVALID BIT(1)
+#define USB3_DELAY_VBUSVALID BIT(2)
+#define USB3_SEL_FORCE_OPMODE BIT(4)
+#define USB3_FORCE_OPMODE(n) (n << 5)
+#define USB3_SEL_FORCE_DPPULLDOWN2 BIT(8)
+#define USB3_FORCE_DPPULLDOWN2 BIT(9)
+#define USB3_SEL_FORCE_DMPULLDOWN2 BIT(10)
+#define USB3_FORCE_DMPULLDOWN2 BIT(11)
+
+/**
+ * struct st_dwc3 - dwc3-st driver private structure
+ * @dev: device pointer
+ * @glue_base: ioaddr for the glue registers
+ * @regmap: regmap pointer for getting syscfg
+ * @syscfg_reg_off: usb syscfg control offset
+ * @dr_mode: drd static host/device config
+ * @rstc_pwrdn: reset controller for powerdown signal
+ * @rstc_rst: reset controller for softreset signal
+ */
+
+struct st_dwc3 {
+ struct device *dev;
+ void __iomem *glue_base;
+ struct regmap *regmap;
+ int syscfg_reg_off;
+ enum usb_dr_mode dr_mode;
+ struct reset_control *rstc_pwrdn;
+ struct reset_control *rstc_rst;
+};
+
+static inline u32 st_dwc3_readl(void __iomem *base, u32 offset)
+{
+ return readl_relaxed(base + offset);
+}
+
+static inline void st_dwc3_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel_relaxed(value, base + offset);
+}
+
+/**
+ * st_dwc3_drd_init() - program the port
+ * @dwc3_data: driver private structure
+ *
+ * Program the port as either host or device according to the static
+ * configuration passed from the devicetree. OTG and dual role are not
+ * yet supported.
+ */
+static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
+{
+ u32 val;
+ int err;
+
+ err = regmap_read(dwc3_data->regmap, dwc3_data->syscfg_reg_off, &val);
+ if (err)
+ return err;
+
+ val &= USB3_CONTROL_MASK;
+
+ switch (dwc3_data->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+
+ val &= ~(USB3_DELAY_VBUSVALID
+ | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
+ | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
+ | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
+
+ /*
+ * USB3_PORT2_FORCE_VBUSVALID When '1' and when
+ * USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input
+ * of the pico PHY to 1.
+ */
+
+ val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
+ break;
+
+ case USB_DR_MODE_HOST:
+
+ val &= ~(USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID
+ | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
+ | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
+ | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
+
+ /*
+ * USB3_DELAY_VBUSVALID is ANDed with USB_C_VBUSVALID. Thus,
+ * when set to '0', it can delay the arrival of VBUSVALID
+ * information to VBUSVLDEXT2 input of the pico PHY.
+ * We don't want to do that so we set the bit to '1'.
+ */
+
+ val |= USB3_DELAY_VBUSVALID;
+ break;
+
+ default:
+ dev_err(dwc3_data->dev, "Unsupported mode of operation %d\n",
+ dwc3_data->dr_mode);
+ return -EINVAL;
+ }
+
+ return regmap_write(dwc3_data->regmap, dwc3_data->syscfg_reg_off, val);
+}
+
+/**
+ * st_dwc3_init() - initialize the controller via the glue logic
+ * @dwc3_data: driver private structure
+ */
+static void st_dwc3_init(struct st_dwc3 *dwc3_data)
+{
+ u32 reg = st_dwc3_readl(dwc3_data->glue_base, CLKRST_CTRL);
+
+ reg |= AUX_CLK_EN | EXT_CFG_RESET_N | XHCI_REVISION;
+ reg &= ~SW_PIPEW_RESET_N;
+ st_dwc3_writel(dwc3_data->glue_base, CLKRST_CTRL, reg);
+
+ /* configure mux for vbus, powerpresent and bvalid signals */
+ reg = st_dwc3_readl(dwc3_data->glue_base, USB2_VBUS_MNGMNT_SEL1);
+
+ reg |= SEL_OVERRIDE_VBUSVALID(USB2_VBUS_UTMIOTG) |
+ SEL_OVERRIDE_POWERPRESENT(USB2_VBUS_UTMIOTG) |
+ SEL_OVERRIDE_BVALID(USB2_VBUS_UTMIOTG);
+
+ st_dwc3_writel(dwc3_data->glue_base, USB2_VBUS_MNGMNT_SEL1, reg);
+
+ reg = st_dwc3_readl(dwc3_data->glue_base, CLKRST_CTRL);
+ reg |= SW_PIPEW_RESET_N;
+ st_dwc3_writel(dwc3_data->glue_base, CLKRST_CTRL, reg);
+}
+
+static int st_dwc3_probe(struct platform_device *pdev)
+{
+ struct st_dwc3 *dwc3_data;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node, *child;
+ struct platform_device *child_pdev;
+ struct regmap *regmap;
+ int ret;
+
+ dwc3_data = devm_kzalloc(dev, sizeof(*dwc3_data), GFP_KERNEL);
+ if (!dwc3_data)
+ return -ENOMEM;
+
+ dwc3_data->glue_base =
+ devm_platform_ioremap_resource_byname(pdev, "reg-glue");
+ if (IS_ERR(dwc3_data->glue_base))
+ return PTR_ERR(dwc3_data->glue_base);
+
+ regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ dwc3_data->dev = dev;
+ dwc3_data->regmap = regmap;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg");
+ if (!res) {
+ ret = -ENXIO;
+ goto undo_platform_dev_alloc;
+ }
+
+ dwc3_data->syscfg_reg_off = res->start;
+
+ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
+ dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
+
+ dwc3_data->rstc_pwrdn =
+ devm_reset_control_get_exclusive(dev, "powerdown");
+ if (IS_ERR(dwc3_data->rstc_pwrdn)) {
+ dev_err(&pdev->dev, "could not get power controller\n");
+ ret = PTR_ERR(dwc3_data->rstc_pwrdn);
+ goto undo_platform_dev_alloc;
+ }
+
+ /* Manage PowerDown */
+ reset_control_deassert(dwc3_data->rstc_pwrdn);
+
+ dwc3_data->rstc_rst =
+ devm_reset_control_get_shared(dev, "softreset");
+ if (IS_ERR(dwc3_data->rstc_rst)) {
+ dev_err(&pdev->dev, "could not get reset controller\n");
+ ret = PTR_ERR(dwc3_data->rstc_rst);
+ goto undo_powerdown;
+ }
+
+ /* Manage SoftReset */
+ reset_control_deassert(dwc3_data->rstc_rst);
+
+ child = of_get_compatible_child(node, "snps,dwc3");
+ if (!child) {
+ dev_err(&pdev->dev, "failed to find dwc3 core node\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ /* Allocate and initialize the core */
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to add dwc3 core\n");
+ goto err_node_put;
+ }
+
+ child_pdev = of_find_device_by_node(child);
+ if (!child_pdev) {
+ dev_err(dev, "failed to find dwc3 core device\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
+ of_node_put(child);
+ platform_device_put(child_pdev);
+
+ /*
+ * Configure the USB port as device or host according to the static
+ * configuration passed from DT.
+ * DRD is the only mode currently supported, so this will be enhanced
+ * as soon as OTG support is available.
+ */
+ ret = st_dwc3_drd_init(dwc3_data);
+ if (ret) {
+ dev_err(dev, "drd initialisation failed\n");
+ goto undo_softreset;
+ }
+
+ /* ST glue logic init */
+ st_dwc3_init(dwc3_data);
+
+ platform_set_drvdata(pdev, dwc3_data);
+ return 0;
+
+err_node_put:
+ of_node_put(child);
+undo_softreset:
+ reset_control_assert(dwc3_data->rstc_rst);
+undo_powerdown:
+ reset_control_assert(dwc3_data->rstc_pwrdn);
+undo_platform_dev_alloc:
+ return ret;
+}
+
+static int st_dwc3_remove(struct platform_device *pdev)
+{
+ struct st_dwc3 *dwc3_data = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+
+ reset_control_assert(dwc3_data->rstc_pwrdn);
+ reset_control_assert(dwc3_data->rstc_rst);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int st_dwc3_suspend(struct device *dev)
+{
+ struct st_dwc3 *dwc3_data = dev_get_drvdata(dev);
+
+ reset_control_assert(dwc3_data->rstc_pwrdn);
+ reset_control_assert(dwc3_data->rstc_rst);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int st_dwc3_resume(struct device *dev)
+{
+ struct st_dwc3 *dwc3_data = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ reset_control_deassert(dwc3_data->rstc_pwrdn);
+ reset_control_deassert(dwc3_data->rstc_rst);
+
+ ret = st_dwc3_drd_init(dwc3_data);
+ if (ret) {
+ dev_err(dev, "drd initialisation failed\n");
+ return ret;
+ }
+
+ /* ST glue logic init */
+ st_dwc3_init(dwc3_data);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume);
+
+static const struct of_device_id st_dwc3_match[] = {
+ { .compatible = "st,stih407-dwc3" },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, st_dwc3_match);
+
+static struct platform_driver st_dwc3_driver = {
+ .probe = st_dwc3_probe,
+ .remove = st_dwc3_remove,
+ .driver = {
+ .name = "usb-st-dwc3",
+ .of_match_table = st_dwc3_match,
+ .pm = &st_dwc3_dev_pm_ops,
+ },
+};
+
+module_platform_driver(st_dwc3_driver);
+
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_DESCRIPTION("DesignWare USB3 STi Glue Layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
new file mode 100644
index 000000000..0745e9f11
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwc3-xilinx.c - Xilinx DWC3 controller specific glue driver
+ *
+ * Authors: Manish Narani <manish.narani@xilinx.com>
+ * Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/io.h>
+
+#include <linux/phy/phy.h>
+
+/* USB phy reset mask register */
+#define XLNX_USB_PHY_RST_EN 0x001C
+#define XLNX_PHY_RST_MASK 0x1
+
+/* Xilinx USB 3.0 IP Register */
+#define XLNX_USB_TRAFFIC_ROUTE_CONFIG 0x005C
+#define XLNX_USB_TRAFFIC_ROUTE_FPD 0x1
+
+/* Versal USB Reset ID */
+#define VERSAL_USB_RESET_ID 0xC104036
+
+#define XLNX_USB_FPD_PIPE_CLK 0x7c
+#define PIPE_CLK_DESELECT 1
+#define PIPE_CLK_SELECT 0
+#define XLNX_USB_FPD_POWER_PRSNT 0x80
+#define FPD_POWER_PRSNT_OPTION BIT(0)
+
+struct dwc3_xlnx {
+ int num_clocks;
+ struct clk_bulk_data *clks;
+ struct device *dev;
+ void __iomem *regs;
+ int (*pltfm_init)(struct dwc3_xlnx *data);
+ struct phy *usb3_phy;
+};
+
+static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
+{
+ u32 reg;
+
+ /*
+ * Enable or disable the ULPI PHY reset from the USB controller.
+ * This does not actually reset the PHY, but only controls whether
+ * the USB controller is allowed to reset the ULPI PHY.
+ */
+ reg = readl(priv_data->regs + XLNX_USB_PHY_RST_EN);
+
+ if (mask)
+ reg &= ~XLNX_PHY_RST_MASK;
+ else
+ reg |= XLNX_PHY_RST_MASK;
+
+ writel(reg, priv_data->regs + XLNX_USB_PHY_RST_EN);
+}
+
+static int dwc3_xlnx_init_versal(struct dwc3_xlnx *priv_data)
+{
+ struct device *dev = priv_data->dev;
+ int ret;
+
+ dwc3_xlnx_mask_phy_rst(priv_data, false);
+
+ /* Assert and De-assert reset */
+ ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
+ PM_RESET_ACTION_ASSERT);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to assert Reset\n");
+ return ret;
+ }
+
+ ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
+ PM_RESET_ACTION_RELEASE);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to De-assert Reset\n");
+ return ret;
+ }
+
+ dwc3_xlnx_mask_phy_rst(priv_data, true);
+
+ return 0;
+}
+
+static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
+{
+ struct device *dev = priv_data->dev;
+ struct reset_control *crst, *hibrst, *apbrst;
+ struct gpio_desc *reset_gpio;
+ int ret = 0;
+ u32 reg;
+
+ priv_data->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(priv_data->usb3_phy)) {
+ ret = PTR_ERR(priv_data->usb3_phy);
+ dev_err_probe(dev, ret,
+ "failed to get USB3 PHY\n");
+ goto err;
+ }
+
+ /*
+ * The following core resets are not required unless a USB3 PHY
+ * is used, and the subsequent register settings are not required
+ * unless a core reset is performed (they should be set properly
+ * by the first-stage boot loader, but may be reverted by a core
+ * reset). They may also break the configuration if USB3 is actually
+ * in use but the usb3-phy entry is missing from the device tree.
+ * Therefore, skip these operations in this case.
+ */
+ if (!priv_data->usb3_phy)
+ goto skip_usb3_phy;
+
+ crst = devm_reset_control_get_exclusive(dev, "usb_crst");
+ if (IS_ERR(crst)) {
+ ret = PTR_ERR(crst);
+ dev_err_probe(dev, ret,
+ "failed to get core reset signal\n");
+ goto err;
+ }
+
+ hibrst = devm_reset_control_get_exclusive(dev, "usb_hibrst");
+ if (IS_ERR(hibrst)) {
+ ret = PTR_ERR(hibrst);
+ dev_err_probe(dev, ret,
+ "failed to get hibernation reset signal\n");
+ goto err;
+ }
+
+ apbrst = devm_reset_control_get_exclusive(dev, "usb_apbrst");
+ if (IS_ERR(apbrst)) {
+ ret = PTR_ERR(apbrst);
+ dev_err_probe(dev, ret,
+ "failed to get APB reset signal\n");
+ goto err;
+ }
+
+ ret = reset_control_assert(crst);
+ if (ret < 0) {
+ dev_err(dev, "Failed to assert core reset\n");
+ goto err;
+ }
+
+ ret = reset_control_assert(hibrst);
+ if (ret < 0) {
+ dev_err(dev, "Failed to assert hibernation reset\n");
+ goto err;
+ }
+
+ ret = reset_control_assert(apbrst);
+ if (ret < 0) {
+ dev_err(dev, "Failed to assert APB reset\n");
+ goto err;
+ }
+
+ ret = phy_init(priv_data->usb3_phy);
+ if (ret < 0) {
+ phy_exit(priv_data->usb3_phy);
+ goto err;
+ }
+
+ ret = reset_control_deassert(apbrst);
+ if (ret < 0) {
+ dev_err(dev, "Failed to release APB reset\n");
+ goto err;
+ }
+
+ /* Set PIPE Power Present signal in FPD Power Present Register */
+ writel(FPD_POWER_PRSNT_OPTION, priv_data->regs + XLNX_USB_FPD_POWER_PRSNT);
+
+ /* Set the PIPE Clock Select bit in FPD PIPE Clock register */
+ writel(PIPE_CLK_SELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);
+
+ ret = reset_control_deassert(crst);
+ if (ret < 0) {
+ dev_err(dev, "Failed to release core reset\n");
+ goto err;
+ }
+
+ ret = reset_control_deassert(hibrst);
+ if (ret < 0) {
+ dev_err(dev, "Failed to release hibernation reset\n");
+ goto err;
+ }
+
+ ret = phy_power_on(priv_data->usb3_phy);
+ if (ret < 0) {
+ phy_exit(priv_data->usb3_phy);
+ goto err;
+ }
+
+skip_usb3_phy:
+ /* ulpi reset via gpio-modepin or gpio-framework driver */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
+ if (reset_gpio) {
+ /* Toggle ulpi to reset the phy. */
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ usleep_range(5000, 10000);
+ }
+
+ /*
+ * This routes USB DMA traffic through the FPD path instead
+ * of reaching DDR directly. This traffic routing is needed to
+ * make SMMU and CCI work with USB DMA.
+ */
+ if (of_dma_is_coherent(dev->of_node) || device_iommu_mapped(dev)) {
+ reg = readl(priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
+ reg |= XLNX_USB_TRAFFIC_ROUTE_FPD;
+ writel(reg, priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
+ }
+
+err:
+ return ret;
+}
+
+static const struct of_device_id dwc3_xlnx_of_match[] = {
+ {
+ .compatible = "xlnx,zynqmp-dwc3",
+ .data = &dwc3_xlnx_init_zynqmp,
+ },
+ {
+ .compatible = "xlnx,versal-dwc3",
+ .data = &dwc3_xlnx_init_versal,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dwc3_xlnx_of_match);
+
+static int dwc3_xlnx_probe(struct platform_device *pdev)
+{
+ struct dwc3_xlnx *priv_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *match;
+ void __iomem *regs;
+ int ret;
+
+ priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ dev_err_probe(dev, ret, "failed to map registers\n");
+ return ret;
+ }
+
+ match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node);
+
+ priv_data->pltfm_init = match->data;
+ priv_data->regs = regs;
+ priv_data->dev = dev;
+
+ platform_set_drvdata(pdev, priv_data);
+
+ ret = devm_clk_bulk_get_all(priv_data->dev, &priv_data->clks);
+ if (ret < 0)
+ return ret;
+
+ priv_data->num_clocks = ret;
+
+ ret = clk_bulk_prepare_enable(priv_data->num_clocks, priv_data->clks);
+ if (ret)
+ return ret;
+
+ ret = priv_data->pltfm_init(priv_data);
+ if (ret)
+ goto err_clk_put;
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret)
+ goto err_clk_put;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_suspend_ignore_children(dev, false);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+
+err_clk_put:
+ clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
+
+ return ret;
+}
+
+static int dwc3_xlnx_remove(struct platform_device *pdev)
+{
+ struct dwc3_xlnx *priv_data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ of_platform_depopulate(dev);
+
+ clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
+ priv_data->num_clocks = 0;
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_xlnx_runtime_suspend(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+
+ clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_xlnx_runtime_resume(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+
+ return clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
+}
+
+static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_xlnx_suspend(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+
+ phy_exit(priv_data->usb3_phy);
+
+ /* Disable the clocks */
+ clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_xlnx_resume(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
+ if (ret)
+ return ret;
+
+ ret = phy_init(priv_data->usb3_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(priv_data->usb3_phy);
+ if (ret < 0) {
+ phy_exit(priv_data->usb3_phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_xlnx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_xlnx_suspend, dwc3_xlnx_resume)
+ SET_RUNTIME_PM_OPS(dwc3_xlnx_runtime_suspend,
+ dwc3_xlnx_runtime_resume, dwc3_xlnx_runtime_idle)
+};
+
+static struct platform_driver dwc3_xlnx_driver = {
+ .probe = dwc3_xlnx_probe,
+ .remove = dwc3_xlnx_remove,
+ .driver = {
+ .name = "dwc3-xilinx",
+ .of_match_table = dwc3_xlnx_of_match,
+ .pm = &dwc3_xlnx_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dwc3_xlnx_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx DWC3 controller specific glue driver");
+MODULE_AUTHOR("Manish Narani <manish.narani@xilinx.com>");
+MODULE_AUTHOR("Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>");
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
new file mode 100644
index 000000000..ec3c33266
--- /dev/null
+++ b/drivers/usb/dwc3/ep0.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
+
+#include "core.h"
+#include "debug.h"
+#include "gadget.h"
+#include "io.h"
+
+static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
+static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ struct dwc3_ep *dep, struct dwc3_request *req);
+
+static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
+ dma_addr_t buf_dma, u32 len, u32 type, bool chain)
+{
+ struct dwc3_trb *trb;
+ struct dwc3 *dwc;
+
+ dwc = dep->dwc;
+ trb = &dwc->ep0_trb[dep->trb_enqueue];
+
+ if (chain)
+ dep->trb_enqueue++;
+
+ trb->bpl = lower_32_bits(buf_dma);
+ trb->bph = upper_32_bits(buf_dma);
+ trb->size = len;
+ trb->ctrl = type;
+
+ trb->ctrl |= (DWC3_TRB_CTRL_HWO
+ | DWC3_TRB_CTRL_ISP_IMI);
+
+ if (chain)
+ trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ else
+ trb->ctrl |= (DWC3_TRB_CTRL_IOC
+ | DWC3_TRB_CTRL_LST);
+
+ trace_dwc3_prepare_trb(dep, trb);
+}
+
+static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc;
+ int ret;
+
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED)
+ return 0;
+
+ dwc = dep->dwc;
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+ params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
+ if (ret < 0)
+ return ret;
+
+ dwc->ep0_next_event = DWC3_EP0_COMPLETE;
+
+ return 0;
+}
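+
+/*
+ * Usage sketch (editorial note, with hypothetical locals 'rem' and
+ * 'maxpacket'): a control-OUT data phase whose length is not packet
+ * aligned is typically programmed as a chained TRB for the request buffer
+ * plus a final TRB that pads the transfer from the bounce buffer, then
+ * started with a single Start Transfer command:
+ *
+ *	dwc3_ep0_prepare_one_trb(dep, req->request.dma, req->request.length,
+ *				 DWC3_TRBCTL_CONTROL_DATA, true);
+ *	dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, maxpacket - rem,
+ *				 DWC3_TRBCTL_CONTROL_DATA, false);
+ *	dwc3_ep0_start_trans(dep);
+ */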
+
+static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
+ struct dwc3_request *req)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->epnum = dep->number;
+
+ list_add_tail(&req->list, &dep->pending_list);
+
+ /*
+ * Gadget driver might not be quick enough to queue a request
+ * before we get a Transfer Not Ready event on this endpoint.
+ *
+ * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
+ * flag is set, it's telling us that as soon as Gadget queues the
+ * required request, we should kick the transfer here because the
+ * IRQ we were waiting for is long gone.
+ */
+ if (dep->flags & DWC3_EP_PENDING_REQUEST) {
+ unsigned int direction;
+
+ direction = !!(dep->flags & DWC3_EP0_DIR_IN);
+
+ if (dwc->ep0state != EP0_DATA_PHASE) {
+ dev_WARN(dwc->dev, "Unexpected pending request\n");
+ return 0;
+ }
+
+ __dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);
+
+ dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
+ DWC3_EP0_DIR_IN);
+
+ return 0;
+ }
+
+ /*
+ * In case gadget driver asked us to delay the STATUS phase,
+ * handle it here.
+ */
+ if (dwc->delayed_status) {
+ unsigned int direction;
+
+ direction = !dwc->ep0_expect_in;
+ dwc->delayed_status = false;
+ usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED);
+
+ if (dwc->ep0state == EP0_STATUS_PHASE)
+ __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
+
+ return 0;
+ }
+
+ /*
+ * Unfortunately we have uncovered a limitation wrt the Data Phase.
+ *
+ * Section 9.4 says we can wait for the XferNotReady(DATA) event to
+ * come before issuing the Start Transfer command, but if we do, we will
+ * miss situations where the host starts another SETUP phase instead of
+ * the DATA phase. Such cases happen at least on TD.7.6 of the Link
+ * Layer Compliance Suite.
+ *
+ * The problem surfaces due to the fact that in case of back-to-back
+ * SETUP packets there will be no XferNotReady(DATA) generated and we
+ * will be stuck waiting for XferNotReady(DATA) forever.
+ *
+ * By looking at tables 9-13 and 9-14 of the Databook, we can see that
+ * it tells us to start Data Phase right away. It also mentions that if
+ * we receive a SETUP phase instead of the DATA phase, core will issue
+ * XferComplete for the DATA phase, before actually initiating it on
+ * the wire, with the TRB's status set to "SETUP_PENDING". Such status
+ * can only be used to print some debugging logs, as the core expects
+ * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
+ * just so it completes right away, without transferring anything and,
+ * only then, we can go back to the SETUP phase.
+ *
+ * Because of this scenario, SNPS decided to change the programming
+ * model of control transfers and support on-demand transfers only for
+ * the STATUS phase. To fix the issue we have now, we will always wait
+ * for gadget driver to queue the DATA phase's struct usb_request, then
+ * start it right away.
+ *
+ * If we're actually in a 2-stage transfer, we will wait for
+ * XferNotReady(STATUS).
+ */
+ if (dwc->three_stage_setup) {
+ unsigned int direction;
+
+ direction = dwc->ep0_expect_in;
+ dwc->ep0state = EP0_DATA_PHASE;
+
+ __dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);
+
+ dep->flags &= ~DWC3_EP0_DIR_IN;
+ }
+
+ return 0;
+}
+
+int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
+ dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ /* we share one TRB for ep0/1 */
+ if (!list_empty(&dep->pending_list)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = __dwc3_gadget_ep0_queue(dep, req);
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+
+ /* reinitialize physical ep1 */
+ dep = dwc->eps[1];
+ dep->flags = DWC3_EP_ENABLED;
+
+ /* stall is always issued on EP0 */
+ dep = dwc->eps[0];
+ __dwc3_gadget_ep_set_halt(dep, 1, false);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
+
+ if (!list_empty(&dep->pending_list)) {
+ struct dwc3_request *req;
+
+ req = next_request(&dep->pending_list);
+ if (!dwc->connected)
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ else
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ }
+
+ dwc->eps[0]->trb_enqueue = 0;
+ dwc->eps[1]->trb_enqueue = 0;
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+}
+
+int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ dwc3_ep0_stall_and_restart(dwc);
+
+ return 0;
+}
+
+int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep0_set_halt(ep, value);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+void dwc3_ep0_out_start(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int ret;
+ int i;
+
+ complete(&dwc->ep0_in_setup);
+
+ dep = dwc->eps[0];
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
+ DWC3_TRBCTL_CONTROL_SETUP, false);
+ ret = dwc3_ep0_start_trans(dep);
+ WARN_ON(ret < 0);
+ for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dwc3_ep;
+
+ dwc3_ep = dwc->eps[i];
+ if (!dwc3_ep)
+ continue;
+
+ if (!(dwc3_ep->flags & DWC3_EP_DELAY_STOP))
+ continue;
+
+ dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
+ if (dwc->connected)
+ dwc3_stop_active_transfer(dwc3_ep, true, true);
+ else
+ dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
+ }
+}
+
+static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
+{
+ struct dwc3_ep *dep;
+ u32 windex = le16_to_cpu(wIndex_le);
+ u32 epnum;
+
+ epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
+ if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum |= 1;
+
+ dep = dwc->eps[epnum];
+ if (dep == NULL)
+ return NULL;
+
+ if (dep->flags & DWC3_EP_ENABLED)
+ return dep;
+
+ return NULL;
+}
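+
+/*
+ * Worked example (editorial note): wIndex 0x81 (EP1 IN) maps to physical
+ * endpoint (1 << 1) | 1 = 3, i.e. dwc->eps[3], while wIndex 0x02 (EP2 OUT)
+ * maps to 2 << 1 = 4, i.e. dwc->eps[4]. The lookup only succeeds when that
+ * endpoint is currently enabled.
+ */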
+
+static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+}
+/*
+ * ch 9.4.5
+ */
+static int dwc3_ep0_handle_status(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct dwc3_ep *dep;
+ u32 recip;
+ u32 value;
+ u32 reg;
+ u16 usb_status = 0;
+ __le16 *response_pkt;
+
+ /* We don't support PTM_STATUS */
+ value = le16_to_cpu(ctrl->wValue);
+ if (value != 0)
+ return -EINVAL;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ /*
+ * LTM will be set once we know how to set this in HW.
+ */
+ usb_status |= dwc->gadget->is_selfpowered;
+
+ if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
+ (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (reg & DWC3_DCTL_INITU1ENA)
+ usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
+ if (reg & DWC3_DCTL_INITU2ENA)
+ usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
+ }
+
+ break;
+
+ case USB_RECIP_INTERFACE:
+ /*
+ * Function Remote Wake Capable D0
+ * Function Remote Wakeup D1
+ */
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
+ return -EINVAL;
+
+ if (dep->flags & DWC3_EP_STALL)
+ usb_status = 1 << USB_ENDPOINT_HALT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ response_pkt = (__le16 *) dwc->setup_buf;
+ *response_pkt = cpu_to_le16(usb_status);
+
+ dep = dwc->eps[0];
+ dwc->ep0_usb_req.dep = dep;
+ dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
+ dwc->ep0_usb_req.request.buf = dwc->setup_buf;
+ dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;
+
+ return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
+}
+
+static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+ if (set && dwc->dis_u1_entry_quirk)
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU1ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU1ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
+ int set)
+{
+ u32 reg;
+
+ if (state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+ return -EINVAL;
+ if (set && dwc->dis_u2_entry_quirk)
+ return -EINVAL;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (set)
+ reg |= DWC3_DCTL_INITU2ENA;
+ else
+ reg &= ~DWC3_DCTL_INITU2ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
+ u32 wIndex, int set)
+{
+ if ((wIndex & 0xff) != 0)
+ return -EINVAL;
+ if (!set)
+ return -EINVAL;
+
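+ /* The test selector sits in the high byte of wIndex (USB 2.0, 9.4.9). */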
+ switch (wIndex >> 8) {
+ case USB_TEST_J:
+ case USB_TEST_K:
+ case USB_TEST_SE0_NAK:
+ case USB_TEST_PACKET:
+ case USB_TEST_FORCE_ENABLE:
+ dwc->test_mode_nr = wIndex >> 8;
+ dwc->test_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_device(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ enum usb_device_state state;
+ u32 wValue;
+ u32 wIndex;
+ int ret = 0;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+ state = dwc->gadget->state;
+
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ /*
+ * 9.4.1 says only for SS, in AddressState only for
+ * default control pipe
+ */
+ case USB_DEVICE_U1_ENABLE:
+ ret = dwc3_ep0_handle_u1(dwc, state, set);
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ ret = dwc3_ep0_handle_u2(dwc, state, set);
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ ret = -EINVAL;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ u32 wValue;
+ int ret = 0;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
+ /*
+ * REVISIT: Ideally we would enable some low power mode here,
+ * however it's unclear what we should be doing here.
+ *
+ * For now, we're not doing anything, just making sure we return
+ * 0 so USB Command Verifier tests pass without any errors.
+ */
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ struct dwc3_ep *dep;
+ u32 wValue;
+ int ret;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
+ return -EINVAL;
+
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+ break;
+
+ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
+ return -EINVAL;
+
+ /* ClearFeature(Halt) may need delayed status */
+ if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ return USB_GADGET_DELAYED_STATUS;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl, int set)
+{
+ u32 recip;
+ int ret;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ ret = dwc3_ep0_handle_device(dwc, ctrl, set);
+ break;
+ case USB_RECIP_INTERFACE:
+ ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
+ break;
+ case USB_RECIP_ENDPOINT:
+ ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = dwc->gadget->state;
+ u32 addr;
+ u32 reg;
+
+ addr = le16_to_cpu(ctrl->wValue);
+ if (addr > 127) {
+ dev_err(dwc->dev, "invalid device address %d\n", addr);
+ return -EINVAL;
+ }
+
+ if (state == USB_STATE_CONFIGURED) {
+ dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
+ return -EINVAL;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ reg |= DWC3_DCFG_DEVADDR(addr);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ if (addr)
+ usb_gadget_set_state(dwc->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(dwc->gadget, USB_STATE_DEFAULT);
+
+ return 0;
+}
+
+static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ int ret = -EINVAL;
+
+ if (dwc->async_callbacks) {
+ spin_unlock(&dwc->lock);
+ ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
+ spin_lock(&dwc->lock);
+ }
+ return ret;
+}
+
+static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = dwc->gadget->state;
+ u32 cfg;
+ int ret;
+ u32 reg;
+
+ cfg = le16_to_cpu(ctrl->wValue);
+
+ switch (state) {
+ case USB_STATE_DEFAULT:
+ return -EINVAL;
+
+ case USB_STATE_ADDRESS:
+ dwc3_gadget_clear_tx_fifos(dwc);
+
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ /* if the cfg matches and the cfg is non zero */
+ if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
+
+ /*
+ * only change state if set_config has already
+ * been processed. If gadget driver returns
+ * USB_GADGET_DELAYED_STATUS, we will wait
+ * to change the state on the next usb_ep_queue()
+ */
+ if (ret == 0)
+ usb_gadget_set_state(dwc->gadget,
+ USB_STATE_CONFIGURED);
+
+ /*
+ * Enable transition to U1/U2 state when
+ * nothing is pending from application.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!dwc->dis_u1_entry_quirk)
+ reg |= DWC3_DCTL_ACCEPTU1ENA;
+ if (!dwc->dis_u2_entry_quirk)
+ reg |= DWC3_DCTL_ACCEPTU2ENA;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+ break;
+
+ case USB_STATE_CONFIGURED:
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ if (!cfg && !ret)
+ usb_gadget_set_state(dwc->gadget,
+ USB_STATE_ADDRESS);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ u32 param = 0;
+ u32 reg;
+
+ struct timing {
+ u8 u1sel;
+ u8 u1pel;
+ __le16 u2sel;
+ __le16 u2pel;
+ } __packed timing;
+
+ int ret;
+
+ memcpy(&timing, req->buf, sizeof(timing));
+
+ dwc->u1sel = timing.u1sel;
+ dwc->u1pel = timing.u1pel;
+ dwc->u2sel = le16_to_cpu(timing.u2sel);
+ dwc->u2pel = le16_to_cpu(timing.u2pel);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (reg & DWC3_DCTL_INITU2ENA)
+ param = dwc->u2pel;
+ if (reg & DWC3_DCTL_INITU1ENA)
+ param = dwc->u1pel;
+
+ /*
+ * According to Synopsys Databook, if parameter is
+ * greater than 125, a value of zero should be
+ * programmed in the register.
+ */
+ if (param > 125)
+ param = 0;
+
+ /* now that we have the time, issue DGCMD Set Sel */
+ ret = dwc3_send_gadget_generic_command(dwc,
+ DWC3_DGCMD_SET_PERIODIC_PAR, param);
+ WARN_ON(ret < 0);
+}
+
+static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ struct dwc3_ep *dep;
+ enum usb_device_state state = dwc->gadget->state;
+ u16 wLength;
+
+ if (state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ wLength = le16_to_cpu(ctrl->wLength);
+
+ if (wLength != 6) {
+ dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
+ wLength);
+ return -EINVAL;
+ }
+
+ /*
+ * To handle Set SEL we need to receive 6 bytes from Host. So let's
+ * queue a usb_request for 6 bytes.
+ *
+ * Remember, though, this controller can't handle non-wMaxPacketSize
+ * aligned transfers on the OUT direction, so we queue a request for
+ * wMaxPacketSize instead.
+ */
+ dep = dwc->eps[0];
+ dwc->ep0_usb_req.dep = dep;
+ dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
+ dwc->ep0_usb_req.request.buf = dwc->setup_buf;
+ dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;
+
+ return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
+}
+
+static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wLength = le16_to_cpu(ctrl->wLength);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+
+ if (wIndex || wLength)
+ return -EINVAL;
+
+ dwc->gadget->isoch_delay = wValue;
+
+ return 0;
+}
+
+static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ ret = dwc3_ep0_handle_status(dwc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ ret = dwc3_ep0_set_address(dwc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ ret = dwc3_ep0_set_config(dwc, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ ret = dwc3_ep0_set_sel(dwc, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ break;
+ default:
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
+ int ret = -EINVAL;
+ u32 len;
+
+ if (!dwc->gadget_driver || !dwc->softconnect || !dwc->connected)
+ goto out;
+
+ trace_dwc3_ctrl_req(ctrl);
+
+ len = le16_to_cpu(ctrl->wLength);
+ if (!len) {
+ dwc->three_stage_setup = false;
+ dwc->ep0_expect_in = false;
+ dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
+ } else {
+ dwc->three_stage_setup = true;
+ dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
+ dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
+ }
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = dwc3_ep0_std_request(dwc, ctrl);
+ else
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+
+ if (ret == USB_GADGET_DELAYED_STATUS)
+ dwc->delayed_status = true;
+
+out:
+ if (ret < 0)
+ dwc3_ep0_stall_and_restart(dwc);
+}
+
+static void dwc3_ep0_complete_data(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_request *r;
+ struct usb_request *ur;
+ struct dwc3_trb *trb;
+ struct dwc3_ep *ep0;
+ u32 transferred = 0;
+ u32 status;
+ u32 length;
+ u8 epnum;
+
+ epnum = event->endpoint_number;
+ ep0 = dwc->eps[0];
+
+ dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
+ trb = dwc->ep0_trb;
+ trace_dwc3_complete_trb(ep0, trb);
+
+ r = next_request(&ep0->pending_list);
+ if (!r)
+ return;
+
+ status = DWC3_TRB_SIZE_TRBSTS(trb->size);
+ if (status == DWC3_TRBSTS_SETUP_PENDING) {
+ dwc->setup_packet_pending = true;
+ if (r)
+ dwc3_gadget_giveback(ep0, r, -ECONNRESET);
+
+ return;
+ }
+
+ ur = &r->request;
+
+ length = trb->size & DWC3_TRB_SIZE_MASK;
+ transferred = ur->length - length;
+ ur->actual += transferred;
+
+ if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
+ ur->length && ur->zero) || dwc->ep0_bounced) {
+ trb++;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ trace_dwc3_complete_trb(ep0, trb);
+
+ if (r->direction)
+ dwc->eps[1]->trb_enqueue = 0;
+ else
+ dwc->eps[0]->trb_enqueue = 0;
+
+ dwc->ep0_bounced = false;
+ }
+
+ if ((epnum & 1) && ur->actual < ur->length)
+ dwc3_ep0_stall_and_restart(dwc);
+ else
+ dwc3_gadget_giveback(ep0, r, 0);
+}
+
+static void dwc3_ep0_complete_status(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_request *r;
+ struct dwc3_ep *dep;
+ struct dwc3_trb *trb;
+ u32 status;
+
+ dep = dwc->eps[0];
+ trb = dwc->ep0_trb;
+
+ trace_dwc3_complete_trb(dep, trb);
+
+ if (!list_empty(&dep->pending_list)) {
+ r = next_request(&dep->pending_list);
+
+ dwc3_gadget_giveback(dep, r, 0);
+ }
+
+ if (dwc->test_mode) {
+ int ret;
+
+ ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
+ if (ret < 0) {
+ dev_err(dwc->dev, "invalid test #%d\n",
+ dwc->test_mode_nr);
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+ }
+
+ status = DWC3_TRB_SIZE_TRBSTS(trb->size);
+ if (status == DWC3_TRBSTS_SETUP_PENDING)
+ dwc->setup_packet_pending = true;
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+}
+
+static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ dep->resource_index = 0;
+ dwc->setup_packet_pending = false;
+
+ switch (dwc->ep0state) {
+ case EP0_SETUP_PHASE:
+ dwc3_ep0_inspect_setup(dwc, event);
+ break;
+
+ case EP0_DATA_PHASE:
+ dwc3_ep0_complete_data(dwc, event);
+ break;
+
+ case EP0_STATUS_PHASE:
+ dwc3_ep0_complete_status(dwc, event);
+ break;
+ default:
+ WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
+ }
+}
+
+static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ unsigned int trb_length = 0;
+ int ret;
+
+ req->direction = !!dep->number;
+
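+ /*
+ * Four cases follow: a zero-length request, an OUT request that is
+ * not wMaxPacketSize aligned (padded via the bounce buffer), an
+ * aligned request that asked for a trailing ZLP, and the plain
+ * aligned case.
+ */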
+ if (req->request.length == 0) {
+ if (!req->direction)
+ trb_length = dep->endpoint.maxpacket;
+
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
+ DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dep);
+ } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
+ && (dep->number == 0)) {
+ u32 maxpacket;
+ u32 rem;
+
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
+ return;
+
+ maxpacket = dep->endpoint.maxpacket;
+ rem = req->request.length % maxpacket;
+ dwc->ep0_bounced = true;
+
+ /* prepare normal TRB */
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+
+ req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+
+ /* Now prepare one extra TRB to align transfer size */
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ maxpacket - rem,
+ DWC3_TRBCTL_CONTROL_DATA,
+ false);
+ ret = dwc3_ep0_start_trans(dep);
+ } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
+ req->request.length && req->request.zero) {
+
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
+ return;
+
+ /* prepare normal TRB */
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+
+ req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+
+ if (!req->direction)
+ trb_length = dep->endpoint.maxpacket;
+
+ /* Now prepare one extra TRB to align transfer size */
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ trb_length, DWC3_TRBCTL_CONTROL_DATA,
+ false);
+ ret = dwc3_ep0_start_trans(dep);
+ } else {
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
+ return;
+
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length, DWC3_TRBCTL_CONTROL_DATA,
+ false);
+
+ req->trb = &dwc->ep0_trb[dep->trb_enqueue];
+
+ ret = dwc3_ep0_start_trans(dep);
+ }
+
+ WARN_ON(ret < 0);
+}
+
+static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 type;
+
+ type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
+ : DWC3_TRBCTL_CONTROL_STATUS2;
+
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
+ return dwc3_ep0_start_trans(dep);
+}
+
+static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ WARN_ON(dwc3_ep0_start_control_status(dep));
+}
+
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+
+ __dwc3_ep0_do_control_status(dwc, dep);
+}
+
+void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
+{
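+ /* The STATUS stage runs in the direction opposite to the DATA stage. */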
+ unsigned int direction = !dwc->ep0_expect_in;
+
+ dwc->delayed_status = false;
+ dwc->clear_stall_protocol = 0;
+
+ if (dwc->ep0state != EP0_STATUS_PHASE)
+ return;
+
+ __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
+}
+
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+ int ret;
+
+ /*
+ * For status/DATA OUT stage, TRB will be queued on ep0 out
+ * endpoint for which resource index is zero. Hence allow
+ * queuing ENDXFER command for ep0 out endpoint.
+ */
+ if (!dep->resource_index && dep->number)
+ return;
+
+ cmd = DWC3_DEPCMD_ENDTRANSFER;
+ cmd |= DWC3_DEPCMD_CMDIOC;
+ cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ WARN_ON_ONCE(ret);
+ dep->resource_index = 0;
+}
+
+static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ switch (event->status) {
+ case DEPEVT_STATUS_CONTROL_DATA:
+ if (!dwc->softconnect || !dwc->connected)
+ return;
+ /*
+ * We already have a DATA transfer in the controller's cache,
+ * if we receive a XferNotReady(DATA) we will ignore it, unless
+ * it's for the wrong direction.
+ *
+ * In that case, we must issue END_TRANSFER command to the Data
+ * Phase we already have started and issue SetStall on the
+ * control endpoint.
+ */
+ if (dwc->ep0_expect_in != event->endpoint_number) {
+ struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in];
+
+ dev_err(dwc->dev, "unexpected direction for Data Phase\n");
+ dwc3_ep0_end_control_data(dwc, dep);
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+
+ break;
+
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
+ return;
+
+ if (dwc->setup_packet_pending) {
+ dwc3_ep0_stall_and_restart(dwc);
+ return;
+ }
+
+ dwc->ep0state = EP0_STATUS_PHASE;
+
+ if (dwc->delayed_status) {
+ struct dwc3_ep *dep = dwc->eps[0];
+
+ WARN_ON_ONCE(event->endpoint_number != 1);
+ /*
+ * We should handle the delayed STATUS phase here if the
+ * request for handling the delayed STATUS has been queued
+ * into the list.
+ */
+ if (!list_empty(&dep->pending_list)) {
+ dwc->delayed_status = false;
+ usb_gadget_set_state(dwc->gadget,
+ USB_STATE_CONFIGURED);
+ dwc3_ep0_do_control_status(dwc, event);
+ }
+
+ return;
+ }
+
+ dwc3_ep0_do_control_status(dwc, event);
+ }
+}
+
+void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+ u8 cmd;
+
+ switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ dwc3_ep0_xfer_complete(dwc, event);
+ break;
+
+ case DWC3_DEPEVT_XFERNOTREADY:
+ dwc3_ep0_xfernotready(dwc, event);
+ break;
+
+ case DWC3_DEPEVT_XFERINPROGRESS:
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+ case DWC3_DEPEVT_STREAMEVT:
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ cmd = DEPEVT_PARAMETER_CMD(event->parameters);
+
+ if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ }
+ break;
+ }
+}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
new file mode 100644
index 000000000..c4703f6b2
--- /dev/null
+++ b/drivers/usb/dwc3/gadget.c
@@ -0,0 +1,4628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "debug.h"
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
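+/*
+ * Advance the frame number by n service intervals and align it down to the
+ * interval boundary, e.g. with interval = 8 and frame_number = 0x123d,
+ * DWC3_ALIGN_FRAME(dep, 1) = (0x123d + 8) & ~7 = 0x1240.
+ */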
+#define DWC3_ALIGN_FRAME(d, n) (((d)->frame_number + ((d)->interval * (n))) \
+ & ~((d)->interval - 1))
+
+/**
+ * dwc3_gadget_set_test_mode - enables usb2 test modes
+ * @dwc: pointer to our context structure
+ * @mode: the mode to set (J, K SE0 NAK, Force Enable)
+ *
+ * Caller should take care of locking. This function will return 0 on
+ * success or -EINVAL if wrong Test Selector is passed.
+ */
+int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+
+ switch (mode) {
+ case USB_TEST_J:
+ case USB_TEST_K:
+ case USB_TEST_SE0_NAK:
+ case USB_TEST_PACKET:
+ case USB_TEST_FORCE_ENABLE:
+ reg |= mode << 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+
+ return 0;
+}
+
+/**
+ * dwc3_gadget_get_link_state - gets current state of usb link
+ * @dwc: pointer to our context structure
+ *
+ * Caller should take care of locking. This function returns the
+ * current link state read from DSTS.
+ */
+int dwc3_gadget_get_link_state(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ return DWC3_DSTS_USBLNKST(reg);
+}
+
+/**
+ * dwc3_gadget_set_link_state - sets usb link to a particular state
+ * @dwc: pointer to our context structure
+ * @state: the state to put link into
+ *
+ * Caller should take care of locking. This function will
+ * return 0 on success or -ETIMEDOUT.
+ */
+int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
+{
+ int retries = 10000;
+ u32 reg;
+
+ /*
+ * Wait until device controller is ready. Only applies to 1.94a and
+ * later RTL.
+ */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
+ while (--retries) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (reg & DWC3_DSTS_DCNRD)
+ udelay(5);
+ else
+ break;
+ }
+
+ if (retries <= 0)
+ return -ETIMEDOUT;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+
+ /* set no action before sending new link state change */
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* set requested state */
+ reg |= DWC3_DCTL_ULSTCHNGREQ(state);
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /*
+ * The following code is racy when called from dwc3_gadget_wakeup,
+ * and is not needed, at least on newer versions
+ */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
+ return 0;
+
+ /* wait for a change in DSTS */
+ retries = 10000;
+ while (--retries) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ if (DWC3_DSTS_USBLNKST(reg) == state)
+ return 0;
+
+ udelay(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void dwc3_ep0_reset_state(struct dwc3 *dwc)
+{
+ unsigned int dir;
+
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+ dwc->eps[0]->trb_enqueue = 0;
+ dwc->eps[1]->trb_enqueue = 0;
+
+ dwc3_ep0_stall_and_restart(dwc);
+ }
+}
+
+/**
+ * dwc3_ep_inc_trb - increment a trb index.
+ * @index: Pointer to the TRB index to increment.
+ *
+ * The index should never point to the link TRB. After incrementing,
+ * if it points to the link TRB, wrap around to the beginning. The
+ * link TRB is always at the last TRB entry.
+ */
+static void dwc3_ep_inc_trb(u8 *index)
+{
+ (*index)++;
+ if (*index == (DWC3_TRB_NUM - 1))
+ *index = 0;
+}
+
+/**
+ * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
+ * @dep: The endpoint whose enqueue pointer we're incrementing
+ */
+static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
+{
+ dwc3_ep_inc_trb(&dep->trb_enqueue);
+}
+
+/**
+ * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
+ * @dep: The endpoint whose dequeue pointer we're incrementing
+ */
+static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+{
+ dwc3_ep_inc_trb(&dep->trb_dequeue);
+}
+
+static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
+ struct dwc3_request *req, int status)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ list_del(&req->list);
+ req->remaining = 0;
+ req->needs_extra_trb = false;
+ req->num_trbs = 0;
+
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+
+ if (req->trb)
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+
+ req->trb = NULL;
+ trace_dwc3_gadget_giveback(req);
+
+ if (dep->number > 1)
+ pm_runtime_put(dwc->dev);
+}
+
+/**
+ * dwc3_gadget_giveback - call struct usb_request's ->complete callback
+ * @dep: The endpoint to whom the request belongs to
+ * @req: The request we're giving back
+ * @status: completion code for the request
+ *
+ * Must be called with controller's lock held and interrupts disabled. This
+ * function will unmap @req and call its ->complete() callback to notify upper
+ * layers that it has completed.
+ */
+void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ dwc3_gadget_del_and_unmap_request(dep, req, status);
+ req->status = DWC3_REQUEST_STATUS_COMPLETED;
+
+ spin_unlock(&dwc->lock);
+ usb_gadget_giveback_request(&dep->endpoint, &req->request);
+ spin_lock(&dwc->lock);
+}
+
+/**
+ * dwc3_send_gadget_generic_command - issue a generic command for the controller
+ * @dwc: pointer to the controller context
+ * @cmd: the command to be issued
+ * @param: command parameter
+ *
+ * Caller should take care of locking. Issue @cmd with a given @param to @dwc
+ * and wait for its completion.
+ */
+int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
+ u32 param)
+{
+ u32 timeout = 500;
+ int status = 0;
+ int ret = 0;
+ u32 reg;
+
+ dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
+ dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
+
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
+ if (!(reg & DWC3_DGCMD_CMDACT)) {
+ status = DWC3_DGCMD_STATUS(reg);
+ if (status)
+ ret = -EINVAL;
+ break;
+ }
+ } while (--timeout);
+
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ status = -ETIMEDOUT;
+ }
+
+ trace_dwc3_gadget_generic_cmd(cmd, param, status);
+
+ return ret;
+}
+
+static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
+
+/**
+ * dwc3_send_gadget_ep_cmd - issue an endpoint command
+ * @dep: the endpoint to which the command is going to be issued
+ * @cmd: the command to be issued
+ * @params: parameters to the command
+ *
+ * Caller should handle locking. This function will issue @cmd with given
+ * @params to @dep and wait for its completion.
+ */
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
+{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
+ struct dwc3 *dwc = dep->dwc;
+ u32 timeout = 5000;
+ u32 saved_config = 0;
+ u32 reg;
+
+ int cmd_status = 0;
+ int ret = -EINVAL;
+
+ /*
+ * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
+ * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
+ * endpoint command.
+ *
+ * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
+ * settings. Restore them after the command is completed.
+ *
+ * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
+ */
+ if (dwc->gadget->speed <= USB_SPEED_HIGH ||
+ DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ }
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+ int link_state;
+
+ /*
+ * Initiate remote wakeup if the link state is in U3 when
+ * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
+ * link state is in U1/U2, no remote wakeup is needed. The Start
+ * Transfer command will initiate the link recovery.
+ */
+ link_state = dwc3_gadget_get_link_state(dwc);
+ switch (link_state) {
+ case DWC3_LINK_STATE_U2:
+ if (dwc->gadget->speed >= USB_SPEED_SUPER)
+ break;
+
+ fallthrough;
+ case DWC3_LINK_STATE_U3:
+ ret = __dwc3_gadget_wakeup(dwc);
+ dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
+ ret);
+ break;
+ }
+ }
+
+ /*
+ * For some commands such as Update Transfer command, DEPCMDPARn
+ * registers are reserved. Since the driver often sends Update Transfer
+ * command, don't write to DEPCMDPARn to avoid register write delays and
+ * improve performance.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) {
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
+ }
+
+ /*
+ * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
+ * not relying on XferNotReady, we can make use of a special "No
+ * Response Update Transfer" command where we should clear both CmdAct
+ * and CmdIOC bits.
+ *
+ * With this, we don't need to wait for command completion and can
+ * straight away issue further commands to the endpoint.
+ *
+ * NOTICE: We're making an assumption that control endpoints will never
+ * make use of Update Transfer command. This is a safe assumption
+ * because we can never have more than one request at a time with
+ * Control Endpoints. If anybody changes that assumption, this chunk
+ * needs to be updated accordingly.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
+ !usb_endpoint_xfer_isoc(desc))
+ cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
+ else
+ cmd |= DWC3_DEPCMD_CMDACT;
+
+ dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
+
+ if (!(cmd & DWC3_DEPCMD_CMDACT) ||
+ (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
+ !(cmd & DWC3_DEPCMD_CMDIOC))) {
+ ret = 0;
+ goto skip_status;
+ }
+
+ do {
+ reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
+ if (!(reg & DWC3_DEPCMD_CMDACT)) {
+ cmd_status = DWC3_DEPCMD_STATUS(reg);
+
+ switch (cmd_status) {
+ case 0:
+ ret = 0;
+ break;
+ case DEPEVT_TRANSFER_NO_RESOURCE:
+ dev_WARN(dwc->dev, "No resource for %s\n",
+ dep->name);
+ ret = -EINVAL;
+ break;
+ case DEPEVT_TRANSFER_BUS_EXPIRY:
+ /*
+ * SW issues START TRANSFER command to
+ * isochronous ep with future frame interval. If
+ * future interval time has already passed when
+ * core receives the command, it will respond
+ * with an error status of 'Bus Expiry'.
+ *
+ * Instead of always returning -EINVAL, let's
+ * give a hint to the gadget driver that this is
+ * the case by returning -EAGAIN.
+ */
+ ret = -EAGAIN;
+ break;
+ default:
+ dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
+ }
+
+ break;
+ }
+ } while (--timeout);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ cmd_status = -ETIMEDOUT;
+ }
+
+skip_status:
+ trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
+
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+ if (ret == 0)
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+
+ if (ret != -ETIMEDOUT)
+ dwc3_gadget_ep_get_transfer_index(dep);
+ }
+
+ if (saved_config) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= saved_config;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
+ return ret;
+}
+
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+ /*
+ * As of core revision 2.60a the recommended programming model
+ * is to set the ClearPendIN bit when issuing a Clear Stall EP
+ * command for IN endpoints. This is to prevent an issue where
+ * some (non-compliant) hosts may not send ACK TPs for pending
+ * IN transfers due to a mishandled error condition. Synopsys
+ * STAR 9000614252.
+ */
+ if (dep->direction &&
+ !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
+ (dwc->gadget->speed >= USB_SPEED_SUPER))
+ cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+ memset(&params, 0, sizeof(params));
+
+ return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+}
+
+static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+ struct dwc3_trb *trb)
+{
+ u32 offset = (char *) trb - (char *) dep->trb_pool;
+
+ return dep->trb_pool_dma + offset;
+}
+
+static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (dep->trb_pool)
+ return 0;
+
+ dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ &dep->trb_pool_dma, GFP_KERNEL);
+ if (!dep->trb_pool) {
+ dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
+ dep->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dwc3_free_trb_pool(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+ dep->trb_pool, dep->trb_pool_dma);
+
+ dep->trb_pool = NULL;
+ dep->trb_pool_dma = 0;
+}
+
+static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
+
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+ &params);
+}
+
+/**
+ * dwc3_gadget_start_config - configure ep resources
+ * @dep: endpoint that is being enabled
+ *
+ * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
+ * completion, it will set Transfer Resource for all available endpoints.
+ *
+ * The assignment of transfer resources cannot perfectly follow the data book
+ * due to the fact that the controller driver does not have all knowledge of the
+ * configuration in advance. It is given this information piecemeal by the
+ * composite gadget framework after every SET_CONFIGURATION and
+ * SET_INTERFACE. Trying to follow the databook programming model in this
+ * scenario can cause errors. For two reasons:
+ *
+ * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
+ * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
+ * incorrect in the scenario of multiple interfaces.
+ *
+ * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
+ * endpoint on alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this setting
+ * will stay persistent until either a core reset or hibernation. So whenever we
+ * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
+ * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled but is
+ * triggered only when called for EP0-out, which always happens first, and which
+ * should only happen in one of the above conditions.
+ */
+static int dwc3_gadget_start_config(struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc;
+ u32 cmd;
+ int i;
+ int ret;
+
+ if (dep->number)
+ return 0;
+
+ memset(&params, 0x00, sizeof(params));
+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
+ dwc = dep->dwc;
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!dep)
+ continue;
+
+ ret = dwc3_gadget_set_xfer_resource(dep);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
+{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc = dep->dwc;
+
+ comp_desc = dep->endpoint.comp_desc;
+ desc = dep->endpoint.desc;
+
+ memset(&params, 0x00, sizeof(params));
+
+ params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+ | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+
+ /* Burst size is only needed in SuperSpeed mode */
+ if (dwc->gadget->speed >= USB_SPEED_SUPER) {
+ u32 burst = dep->endpoint.maxburst;
+
+ params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
+ }
+
+ params.param0 |= action;
+ if (action == DWC3_DEPCFG_ACTION_RESTORE)
+ params.param2 |= dep->saved_state;
+
+ if (usb_endpoint_xfer_control(desc))
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
+
+ if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
+ params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+ params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+ | DWC3_DEPCFG_XFER_COMPLETE_EN
+ | DWC3_DEPCFG_STREAM_EVENT_EN;
+ dep->stream_capable = true;
+ }
+
+ if (!usb_endpoint_xfer_control(desc))
+ params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
+
+ /*
+ * We are doing 1:1 mapping for endpoints, meaning
+ * Physical Endpoint 2 maps to Logical Endpoint 2 and
+ * so on. We consider the direction bit as part of the physical
+ * endpoint number. So USB endpoint 0x81 is 0x03.
+ */
+ params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+ /*
+ * We must use the lower 16 TX FIFOs even though
+ * HW might have more
+ */
+ if (dep->direction)
+ params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+ if (desc->bInterval) {
+ u8 bInterval_m1;
+
+ /*
+ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
+ *
+ * NOTE: The programming guide incorrectly stated bInterval_m1
+ * must be set to 0 when operating in fullspeed. Internally the
+ * controller does not have this limitation. See DWC_usb3x
+ * programming guide section 3.2.2.1.
+ */
+ bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
+
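+ /*
+ * e.g. a high-speed interrupt endpoint with bInterval = 4 ends up
+ * with dep->interval = 1 << 3 = 8 microframes between services.
+ */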
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ dwc->gadget->speed == USB_SPEED_FULL)
+ dep->interval = desc->bInterval;
+ else
+ dep->interval = 1 << (desc->bInterval - 1);
+
+ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
+ }
+
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
+}
+
+/**
+ * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
+ * @dwc: pointer to the DWC3 context
+ * @mult: multiplier to be used when calculating the fifo_size
+ *
+ * Calculates the size value based on the equation below:
+ *
+ * DWC3 revision 280A and prior:
+ * fifo_size = mult * (max_packet / mdwidth) + 1;
+ *
+ * DWC3 revision 290A and onwards:
+ * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
+ *
+ * The max packet size is set to 1024, as the txfifo requirements mainly apply
+ * to super speed USB use cases. However, it is safe to overestimate the fifo
+ * allocations for other scenarios, i.e. high speed USB.
+ */
+static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
+{
+ int max_packet = 1024;
+ int fifo_size;
+ int mdwidth;
+
+ mdwidth = dwc3_mdwidth(dwc);
+
+ /* MDWIDTH is represented in bits, we need it in bytes */
+ mdwidth >>= 3;
+
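+ /*
+ * e.g. with a 64-bit (8-byte) MDWIDTH and mult = 3, the second
+ * equation gives 3 * ((1024 + 8) / 8) + 1 = 388 FIFO locations.
+ */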
+ if (DWC3_VER_IS_PRIOR(DWC3, 290A))
+ fifo_size = mult * (max_packet / mdwidth) + 1;
+ else
+ fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
+ return fifo_size;
+}
+
+/**
+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
+ * @dwc: pointer to the DWC3 context
+ *
+ * Iterates through all the endpoint registers and clears the previous txfifo
+ * allocations.
+ */
+void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int fifo_depth;
+ int size;
+ int num;
+
+ if (!dwc->do_fifo_resize)
+ return;
+
+ /* Read ep0IN related TXFIFO size */
+ dep = dwc->eps[1];
+ size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
+ if (DWC3_IP_IS(DWC3))
+ fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
+ else
+ fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);
+
+ dwc->last_fifo_depth = fifo_depth;
+ /* Clear existing TXFIFO for all IN eps except ep0 */
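+ /* IN endpoints occupy the odd indices; index 1 (ep0 IN) keeps its FIFO. */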
+ for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
+ num += 2) {
+ dep = dwc->eps[num];
+ /* Don't change TXFRAMNUM on usb31 version */
+ size = DWC3_IP_IS(DWC3) ? 0 :
+ dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
+ DWC31_GTXFIFOSIZ_TXFRAMNUM;
+
+ dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
+ dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
+ }
+ dwc->num_ep_resized = 0;
+}
+
+/*
+ * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
+ * @dwc: pointer to our context structure
+ *
+ * This function makes a best-effort FIFO allocation in order
+ * to improve FIFO usage and throughput, while still allowing
+ * us to enable as many endpoints as possible.
+ *
+ * Keep in mind that this operation is highly dependent on the
+ * configured RAM1 size (which holds the TxFIFOs), the number of
+ * endpoints enabled in the coreConsultant tool, and the width
+ * of the master bus.
+ *
+ * In general, FIFO depths are represented with the following equation:
+ *
+ * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
+ *
+ * In conjunction with dwc3_gadget_check_config(), this resizing logic will
+ * ensure that all endpoints will have enough internal memory for one max
+ * packet per endpoint.
+ */
+static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ int fifo_0_start;
+ int ram1_depth;
+ int fifo_size;
+ int min_depth;
+ int num_in_ep;
+ int remaining;
+ int num_fifos = 1;
+ int fifo;
+ int tmp;
+
+ if (!dwc->do_fifo_resize)
+ return 0;
+
+ /* resize IN endpoints except ep0 */
+ if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
+ return 0;
+
+ /* bail if already resized */
+ if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
+ return 0;
+
+ ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+
+ if ((dep->endpoint.maxburst > 1 &&
+ usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
+ usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ num_fifos = 3;
+
+ if (dep->endpoint.maxburst > 6 &&
+ (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
+ usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
+ num_fifos = dwc->tx_fifo_resize_max_num;
+
+ /* FIFO size for a single buffer */
+ fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
+
+ /* Calculate the number of remaining EPs w/o any FIFO */
+ num_in_ep = dwc->max_cfg_eps;
+ num_in_ep -= dwc->num_ep_resized;
+
+ /* Reserve at least one FIFO for the number of IN EPs */
+ min_depth = num_in_ep * (fifo + 1);
+ remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
+ remaining = max_t(int, 0, remaining);
+ /*
+ * We've already reserved 1 FIFO per EP, so check what we can fit in
+ * addition to it. If there is not enough remaining space, allocate
+ * all the remaining space to the EP.
+ */
+ fifo_size = (num_fifos - 1) * fifo;
+ if (remaining < fifo_size)
+ fifo_size = remaining;
+
+ fifo_size += fifo;
+ /* Last increment according to the TX FIFO size equation */
+ fifo_size++;
+
+ /* Check if TXFIFOs start at non-zero addr */
+ tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
+ fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
+
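+ /*
+ * GTXFIFOSIZ packs the FIFO start address in its upper half and
+ * the FIFO depth in its lower half.
+ */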
+ fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
+ if (DWC3_IP_IS(DWC3))
+ dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
+ else
+ dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
+
+ /* Check fifo size allocation doesn't exceed available RAM size. */
+ if (dwc->last_fifo_depth >= ram1_depth) {
+ dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
+ dwc->last_fifo_depth, ram1_depth,
+ dep->endpoint.name, fifo_size);
+ if (DWC3_IP_IS(DWC3))
+ fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
+ else
+ fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
+
+ dwc->last_fifo_depth -= fifo_size;
+ return -ENOMEM;
+ }
+
+ dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
+ dep->flags |= DWC3_EP_TXFIFO_RESIZED;
+ dwc->num_ep_resized++;
+
+ return 0;
+}
+
+/**
+ * __dwc3_gadget_ep_enable - initializes a hw endpoint
+ * @dep: endpoint to be initialized
+ * @action: one of INIT, MODIFY or RESTORE
+ *
+ * Caller should take care of locking. Execute all necessary commands to
+ * initialize a HW endpoint so it can be used by a gadget driver.
+ */
+static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
+{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
+ struct dwc3 *dwc = dep->dwc;
+
+ u32 reg;
+ int ret;
+
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ ret = dwc3_gadget_resize_tx_fifos(dep);
+ if (ret)
+ return ret;
+
+ ret = dwc3_gadget_start_config(dep);
+ if (ret)
+ return ret;
+ }
+
+ ret = dwc3_gadget_set_ep_config(dep, action);
+ if (ret)
+ return ret;
+
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ struct dwc3_trb *trb_st_hw;
+ struct dwc3_trb *trb_link;
+
+ dep->type = usb_endpoint_type(desc);
+ dep->flags |= DWC3_EP_ENABLED;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg |= DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+
+ if (usb_endpoint_xfer_control(desc))
+ goto out;
+
+ /* Initialize the TRB ring */
+ memset(dep->trb_pool, 0,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+
+ /* Link TRB. The HWO bit is never reset */
+ trb_st_hw = &dep->trb_pool[0];
+
+ trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
+ trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
+ trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
+ }
+
+ /*
+ * Issue StartTransfer here with no-op TRB so we can always rely on No
+ * Response Update Transfer command.
+ */
+ if (usb_endpoint_xfer_bulk(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ dma_addr_t trb_dma;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ trb = &dep->trb_pool[0];
+ trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+ params.param0 = upper_32_bits(trb_dma);
+ params.param1 = lower_32_bits(trb_dma);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0)
+ return ret;
+
+ if (dep->stream_capable) {
+ /*
+ * For streams, at start, there may be a race where the
+ * host primes the endpoint before the function driver
+ * queues a request to initiate a stream. In that case,
+ * the controller will not see the prime to generate the
+ * ERDY and start stream. To workaround this, issue a
+ * no-op TRB as normal, but end it immediately. As a
+ * result, when the function driver queues the request,
+ * the next START_TRANSFER command will cause the
+ * controller to generate an ERDY to initiate the
+ * stream.
+ */
+ dwc3_stop_active_transfer(dep, true, true);
+
+ /*
+ * All stream eps will reinitiate stream on NoStream
+ * rejection until we can determine that the host can
+ * prime after the first transfer.
+ *
+ * However, if the controller is capable of
+ * TXF_FLUSH_BYPASS, then IN direction endpoints will
+ * automatically restart the stream without the driver
+ * initiation.
+ */
+ if (!dep->direction ||
+ !(dwc->hwparams.hwparams9 &
+ DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS))
+ dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
+ }
+ }
+
+out:
+ trace_dwc3_gadget_ep_enable(dep);
+
+ return 0;
+}
+
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
+{
+ struct dwc3_request *req;
+
+ dwc3_stop_active_transfer(dep, true, false);
+
+ /* If endxfer is delayed, avoid unmapping requests */
+ if (dep->flags & DWC3_EP_DELAY_STOP)
+ return;
+
+ /* Give back all requests to the gadget driver */
+ while (!list_empty(&dep->started_list)) {
+ req = next_request(&dep->started_list);
+
+ dwc3_gadget_giveback(dep, req, status);
+ }
+
+ while (!list_empty(&dep->pending_list)) {
+ req = next_request(&dep->pending_list);
+
+ dwc3_gadget_giveback(dep, req, status);
+ }
+
+ while (!list_empty(&dep->cancelled_list)) {
+ req = next_request(&dep->cancelled_list);
+
+ dwc3_gadget_giveback(dep, req, status);
+ }
+}
+
+/**
+ * __dwc3_gadget_ep_disable - disables a hw endpoint
+ * @dep: the endpoint to disable
+ *
+ * This function undoes what __dwc3_gadget_ep_enable did and also removes
+ * requests which are currently being processed by the hardware and those which
+ * are not yet scheduled.
+ *
+ * Caller should take care of locking.
+ */
+static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 reg;
+ u32 mask;
+
+ trace_dwc3_gadget_ep_disable(dep);
+
+ /* make sure HW endpoint isn't stalled */
+ if (dep->flags & DWC3_EP_STALL)
+ __dwc3_gadget_ep_set_halt(dep, 0, false);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
+
+ dep->stream_capable = false;
+ dep->type = 0;
+ mask = DWC3_EP_TXFIFO_RESIZED;
+ /*
+ * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
+ * set. Do not clear DEP flags, so that the end transfer command will
+ * be reattempted during the next SETUP stage.
+ */
+ if (dep->flags & DWC3_EP_DELAY_STOP)
+ mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
+ dep->flags &= mask;
+
+ /* Clear out the ep descriptors for non-ep0 */
+ if (dep->number > 1) {
+ dep->endpoint.comp_desc = NULL;
+ dep->endpoint.desc = NULL;
+ }
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EINVAL;
+}
+
+static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
+{
+ return -EINVAL;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct dwc3_ep *dep;
+ struct dwc3 *dwc;
+ unsigned long flags;
+ int ret;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("dwc3: invalid parameters\n");
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("dwc3: missing wMaxPacketSize\n");
+ return -EINVAL;
+ }
+
+ dep = to_dwc3_ep(ep);
+ dwc = dep->dwc;
+
+ if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
+ "%s is already enabled\n",
+ dep->name))
+ return 0;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep;
+ struct dwc3 *dwc;
+ unsigned long flags;
+ int ret;
+
+ if (!ep) {
+ pr_debug("dwc3: invalid parameters\n");
+ return -EINVAL;
+ }
+
+ dep = to_dwc3_ep(ep);
+ dwc = dep->dwc;
+
+ if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
+ "%s is already disabled\n",
+ dep->name))
+ return 0;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_disable(dep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct dwc3_request *req;
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->direction = dep->direction;
+ req->epnum = dep->number;
+ req->dep = dep;
+ req->status = DWC3_REQUEST_STATUS_UNKNOWN;
+
+ trace_dwc3_alloc_request(req);
+
+ return &req->request;
+}
+
+static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+
+ trace_dwc3_free_request(req);
+ kfree(req);
+}
+
+/**
+ * dwc3_ep_prev_trb - returns the previous TRB in the ring
+ * @dep: The endpoint with the TRB ring
+ * @index: The index of the current TRB in the ring
+ *
+ * Returns the TRB prior to the one pointed to by the index. If the
+ * index is 0, we will wrap backwards, skip the link TRB, and return
+ * the one just before that.
+ */
+static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
+{
+ u8 tmp = index;
+
+ if (!tmp)
+ tmp = DWC3_TRB_NUM - 1;
+
+ return &dep->trb_pool[tmp - 1];
+}
+
+static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+{
+ u8 trbs_left;
+
+ /*
+ * If the enqueue & dequeue are equal then the TRB ring is either full
+ * or empty. It's considered full when there are DWC3_TRB_NUM-1 TRBs
+ * pending to be processed by the driver.
+ */
+ if (dep->trb_enqueue == dep->trb_dequeue) {
+ /*
+ * If there is any request remaining in the started_list at
+ * this point, that means there is no TRB available.
+ */
+ if (!list_empty(&dep->started_list))
+ return 0;
+
+ return DWC3_TRB_NUM - 1;
+ }
+
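+ /*
+ * e.g. with DWC3_TRB_NUM = 256, enqueue = 250 and dequeue = 2:
+ * (2 - 250) & 255 = 8; the free span wraps across the link TRB
+ * slot, so one is subtracted, leaving 7 usable TRBs.
+ */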
+ trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+ trbs_left &= (DWC3_TRB_NUM - 1);
+
+ if (dep->trb_dequeue < dep->trb_enqueue)
+ trbs_left--;
+
+ return trbs_left;
+}
+
+/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
+ * @chain: should this TRB be chained to the next?
+ * @node: only for isochronous endpoints. First TRB needs different type.
+ * @use_bounce_buffer: set to use bounce buffer
+ * @must_interrupt: set to interrupt on TRB completion
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trb_length,
+ unsigned int chain, unsigned int node, bool use_bounce_buffer,
+ bool must_interrupt)
+{
+ struct dwc3_trb *trb;
+ dma_addr_t dma;
+ unsigned int stream_id = req->request.stream_id;
+ unsigned int short_not_ok = req->request.short_not_ok;
+ unsigned int no_interrupt = req->request.no_interrupt;
+ unsigned int is_last = req->request.is_last;
+ struct dwc3 *dwc = dep->dwc;
+ struct usb_gadget *gadget = dwc->gadget;
+ enum usb_device_speed speed = gadget->speed;
+
+ if (use_bounce_buffer)
+ dma = dep->dwc->bounce_addr;
+ else if (req->request.num_sgs > 0)
+ dma = sg_dma_address(req->start_sg);
+ else
+ dma = req->request.dma;
+
+ trb = &dep->trb_pool[dep->trb_enqueue];
+
+ if (!req->trb) {
+ dwc3_gadget_move_started_request(req);
+ req->trb = trb;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+ }
+
+ req->num_trbs++;
+
+ trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
+ trb->bpl = lower_32_bits(dma);
+ trb->bph = upper_32_bits(dma);
+
+ switch (usb_endpoint_type(dep->endpoint.desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!node) {
+ trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+
+ /*
+ * USB Specification 2.0 Section 5.9.2 states that: "If
+ * there is only a single transaction in the microframe,
+ * only a DATA0 data packet PID is used. If there are
+ * two transactions per microframe, DATA1 is used for
+ * the first transaction data packet and DATA0 is used
+ * for the second transaction data packet. If there are
+ * three transactions per microframe, DATA2 is used for
+ * the first transaction data packet, DATA1 is used for
+ * the second, and DATA0 is used for the third."
+ *
+ * IOW, we should satisfy the following cases:
+ *
+ * 1) length <= maxpacket
+ * - DATA0
+ *
+ * 2) maxpacket < length <= (2 * maxpacket)
+ * - DATA1, DATA0
+ *
+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+ * - DATA2, DATA1, DATA0
+ */
+ if (speed == USB_SPEED_HIGH) {
+ struct usb_ep *ep = &dep->endpoint;
+ unsigned int mult = 2;
+ unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+ if (req->request.length <= (2 * maxp))
+ mult--;
+
+ if (req->request.length <= maxp)
+ mult--;
+
+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);
+ }
+ } else {
+ trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+ }
+
+ if (!no_interrupt && !chain)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ trb->ctrl = DWC3_TRBCTL_NORMAL;
+ break;
+ default:
+ /*
+ * This is only possible with faulty memory because we
+ * checked it already :)
+ */
+ dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
+ usb_endpoint_type(dep->endpoint.desc));
+ }
+
+ /*
+ * Enable Continue on Short Packet
+ * when the endpoint is not stream capable
+ */
+ if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+ if (!dep->stream_capable)
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+ if (short_not_ok)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ }
+
+ /* All TRBs setup for MST must set CSP=1 when LST=0 */
+ if (dep->stream_capable && DWC3_MST_CAPABLE(&dwc->hwparams))
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+ if ((!no_interrupt && !chain) || must_interrupt)
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
+
+ if (chain)
+ trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ else if (dep->stream_capable && is_last &&
+ !DWC3_MST_CAPABLE(&dwc->hwparams))
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
+
+ if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
+ trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
+
+ /*
+ * As per the data book, section 4.2.3.2 "TRB Control Bit Rules":
+ *
+ * The controller autonomously checks the HWO field of a TRB to determine if the
+ * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+ * is valid before setting the HWO field to '1'. In most systems, this means that
+ * software must update the fourth DWORD of a TRB last.
+ *
+ * However there is a possibility of CPU re-ordering here which can cause
+ * controller to observe the HWO bit set prematurely.
+ * Add a write memory barrier to prevent CPU re-ordering.
+ */
+ wmb();
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+ dwc3_ep_inc_enq(dep);
+
+ trace_dwc3_prepare_trb(dep, trb);
+}
+
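+/*
+ * An extra TRB is needed either to append a ZLP (the length is a
+ * multiple of wMaxPacketSize and the request asked for a zero-length
+ * packet) or to pad an OUT transfer whose length is not
+ * wMaxPacketSize aligned.
+ */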
+static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = req->request.length % maxp;
+
+ if ((req->request.length && req->request.zero && !rem &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
+ (!req->direction && rem))
+ return true;
+
+ return false;
+}
+
+/**
+ * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
+ * @dep: The endpoint that the request belongs to
+ * @req: The request to prepare
+ * @entry_length: The last SG entry size
+ * @node: Indicates whether this is not the first entry (for isoc only)
+ *
+ * Return the number of TRBs prepared.
+ */
+static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int entry_length,
+ unsigned int node)
+{
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = req->request.length % maxp;
+ unsigned int num_trbs = 1;
+
+ if (dwc3_needs_extra_trb(dep, req))
+ num_trbs++;
+
+ if (dwc3_calc_trbs_left(dep) < num_trbs)
+ return 0;
+
+ req->needs_extra_trb = num_trbs > 1;
+
+ /* Prepare a normal TRB */
+ if (req->direction || req->request.length)
+ dwc3_prepare_one_trb(dep, req, entry_length,
+ req->needs_extra_trb, node, false, false);
+
+ /* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
+ if ((!req->direction && !req->request.length) || req->needs_extra_trb)
+ dwc3_prepare_one_trb(dep, req,
+ req->direction ? 0 : maxp - rem,
+ false, 1, true, false);
+
+ return num_trbs;
+}
+
+static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
+ struct dwc3_request *req)
+{
+ struct scatterlist *sg = req->start_sg;
+ struct scatterlist *s;
+ int i;
+ unsigned int length = req->request.length;
+ unsigned int remaining = req->request.num_mapped_sgs
+ - req->num_queued_sgs;
+ unsigned int num_trbs = req->num_trbs;
+ bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
+
+ /*
+ * If we resume preparing the request, then get the remaining length of
+ * the request and resume where we left off.
+ */
+ for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+ length -= sg_dma_len(s);
+
+ for_each_sg(sg, s, remaining, i) {
+ unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
+ unsigned int trb_length;
+ bool must_interrupt = false;
+ bool last_sg = false;
+
+ trb_length = min_t(unsigned int, length, sg_dma_len(s));
+
+ length -= trb_length;
+
+ /*
+ * The IOMMU driver may coalesce sgs that share a
+ * page boundary into one before handing them to the USB driver.
+ * With this, the number of sgs mapped is not equal to the number
+ * of sgs passed. So mark the chain bit to false if it is the last
+ * mapped sg.
+ */
+ if ((i == remaining - 1) || !length)
+ last_sg = true;
+
+ if (!num_trbs_left)
+ break;
+
+ if (last_sg) {
+ if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
+ break;
+ } else {
+ /*
+ * Look ahead to check if we have enough TRBs for the
+ * next SG entry. If not, set interrupt on this TRB to
+ * resume preparing the next SG entry when more TRBs are
+ * free.
+ */
+ if (num_trbs_left == 1 || (needs_extra_trb &&
+ num_trbs_left <= 2 &&
+ sg_dma_len(sg_next(s)) >= length))
+ must_interrupt = true;
+
+ dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
+ must_interrupt);
+ }
+
+ /*
+ * There can be a situation where not all sgs in the sglist are
+ * queued because of an insufficient number of TRBs. To handle this
+ * case, update start_sg to the next sg to be queued, so that
+ * once TRBs are freed we can continue queuing from where we
+ * previously stopped.
+ */
+ if (!last_sg)
+ req->start_sg = sg_next(s);
+
+ req->num_queued_sgs++;
+ req->num_pending_sgs--;
+
+ /*
+ * The number of pending SG entries may not correspond to the
+ * number of mapped SG entries. If all the data are queued, then
+ * don't include unused SG entries.
+ */
+ if (length == 0) {
+ req->num_pending_sgs = 0;
+ break;
+ }
+
+ if (must_interrupt)
+ break;
+ }
+
+ return req->num_trbs - num_trbs;
+}
+
+static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
+ struct dwc3_request *req)
+{
+ return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
+}
+
+/*
+ * dwc3_prepare_trbs - setup TRBs from requests
+ * @dep: endpoint for which requests are being prepared
+ *
+ * The function goes through the requests list and sets up TRBs for the
+ * transfers. The function returns once there are no more TRBs available or
+ * it runs out of requests.
+ *
+ * Returns the number of TRBs prepared or negative errno.
+ */
+static int dwc3_prepare_trbs(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req, *n;
+ int ret = 0;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+
+ /*
+ * We can get in a situation where there's a request in the started list
+ * but there weren't enough TRBs to fully kick it in the first time
+ * around, so it has been waiting for more TRBs to be freed up.
+ *
+ * In that case, we should check if we have a request with pending_sgs
+ * in the started list and prepare TRBs for that request first,
+ * otherwise we will prepare TRBs completely out of order and that will
+ * break things.
+ */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->num_pending_sgs > 0) {
+ ret = dwc3_prepare_trbs_sg(dep, req);
+ if (!ret || req->num_pending_sgs)
+ return ret;
+ }
+
+ if (!dwc3_calc_trbs_left(dep))
+ return ret;
+
+ /*
+ * Don't prepare beyond a transfer. In DWC_usb32, its transfer
+ * burst capability may try to read and use TRBs beyond the
+ * active transfer instead of stopping.
+ */
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
+ return ret;
+ }
+
+ list_for_each_entry_safe(req, n, &dep->pending_list, list) {
+ struct dwc3 *dwc = dep->dwc;
+
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+ dep->direction);
+ if (ret)
+ return ret;
+
+ req->sg = req->request.sg;
+ req->start_sg = req->sg;
+ req->num_queued_sgs = 0;
+ req->num_pending_sgs = req->request.num_mapped_sgs;
+
+ if (req->num_pending_sgs > 0) {
+ ret = dwc3_prepare_trbs_sg(dep, req);
+ if (req->num_pending_sgs)
+ return ret;
+ } else {
+ ret = dwc3_prepare_trbs_linear(dep, req);
+ }
+
+ if (!ret || !dwc3_calc_trbs_left(dep))
+ return ret;
+
+ /*
+ * Don't prepare beyond a transfer. In DWC_usb32, its transfer
+ * burst capability may try to read and use TRBs beyond the
+ * active transfer instead of stopping.
+ */
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dwc->hwparams))
+ return ret;
+ }
+
+ return ret;
+}
+
+static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
+
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_request *req;
+ int starting;
+ int ret;
+ u32 cmd;
+
+ /*
+ * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
+ * This happens when we need to stop and restart a transfer such as in
+ * the case of reinitiating a stream or retrying an isoc transfer.
+ */
+ ret = dwc3_prepare_trbs(dep);
+ if (ret < 0)
+ return ret;
+
+ starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);
+
+ /*
+ * If there's no new TRB prepared and we don't need to restart a
+ * transfer, there's no need to update the transfer.
+ */
+ if (!ret && !starting)
+ return ret;
+
+ req = next_request(&dep->started_list);
+ if (!req) {
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(params));
+
+ if (starting) {
+ params.param0 = upper_32_bits(req->trb_dma);
+ params.param1 = lower_32_bits(req->trb_dma);
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ if (dep->stream_capable)
+ cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
+ } else {
+ cmd = DWC3_DEPCMD_UPDATETRANSFER |
+ DWC3_DEPCMD_PARAM(dep->resource_index);
+ }
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0) {
+ struct dwc3_request *tmp;
+
+ if (ret == -EAGAIN)
+ return ret;
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED);
+
+ /* If ep isn't started, then there's no end transfer pending */
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
+ return ret;
+ }
+
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
+ dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
+ return 0;
+}
+
+static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_SOFFN(reg);
+}
+
+/**
+ * __dwc3_stop_active_transfer - stop the current active transfer
+ * @dep: isoc endpoint
+ * @force: set forcerm bit in the command
+ * @interrupt: command complete interrupt after End Transfer command
+ *
+ * When setting force, the ForceRM bit will be set. In that case
+ * the controller won't update the TRB progress on command
+ * completion. It also won't clear the HWO bit in the TRB.
+ * The command will also not complete immediately in that case.
+ */
+static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+ int ret;
+
+ cmd = DWC3_DEPCMD_ENDTRANSFER;
+ cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
+ cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
+ cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ /*
+ * If the End Transfer command timed out while the device is
+ * not in SETUP phase, it's possible that an incoming Setup packet
+ * may prevent the command's completion. Let's retry when the
+ * ep0state returns to EP0_SETUP_PHASE.
+ */
+ if (ret == -ETIMEDOUT && dep->dwc->ep0state != EP0_SETUP_PHASE) {
+ dep->flags |= DWC3_EP_DELAY_STOP;
+ return 0;
+ }
+ WARN_ON_ONCE(ret);
+ dep->resource_index = 0;
+
+ if (!interrupt) {
+ if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+ mdelay(1);
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ } else if (!ret) {
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ }
+
+ dep->flags &= ~DWC3_EP_DELAY_STOP;
+ return ret;
+}
+
+/**
+ * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
+ * @dep: isoc endpoint
+ *
+ * This function tests for the correct combination of BIT[15:14] from the 16-bit
+ * microframe number reported by the XferNotReady event for the future frame
+ * number to start the isoc transfer.
+ *
+ * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
+ * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
+ * XferNotReady event are invalid. The driver uses this number to schedule the
+ * isochronous transfer and passes it to the START TRANSFER command. Because
+ * this number is invalid, the command may fail. If BIT[15:14] matches the
+ * internal 16-bit microframe, the START TRANSFER command will pass and the
+ * transfer will start at the scheduled time. If it is off by 1, the command
+ * will still pass, but the transfer will start 2 seconds in the future. For all
+ * other conditions, the START TRANSFER command will fail with bus-expiry.
+ *
+ * In order to work around this issue, we can test for the correct combination of
+ * BIT[15:14] by sending START TRANSFER commands with different values of
+ * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframes apart
+ * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
+ * As the result, within the 4 possible combinations for BIT[15:14], there will
+ * be 2 successful and 2 failed START TRANSFER command statuses. One of the 2
+ * successful command statuses will result in a 2-second delayed start. The smaller BIT[15:14]
+ * value is the correct combination.
+ *
+ * Since there are only 4 outcomes and the results are ordered, we can simply
+ * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
+ * deduce the smaller successful combination.
+ *
+ * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
+ * of BIT[15:14]. The correct combination is as follow:
+ *
+ * if test0 fails and test1 passes, BIT[15:14] is 'b01
+ * if test0 fails and test1 fails, BIT[15:14] is 'b10
+ * if test0 passes and test1 fails, BIT[15:14] is 'b11
+ * if test0 passes and test1 passes, BIT[15:14] is 'b00
+ *
+ * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
+ * endpoints.
+ */
+static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
+{
+ int cmd_status = 0;
+ bool test0;
+ bool test1;
+
+ while (dep->combo_num < 2) {
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 test_frame_number;
+ u32 cmd;
+
+ /*
+ * Check if we can start isoc transfer on the next interval or
+ * 4 uframes in the future with BIT[15:14] as dep->combo_num
+ */
+ test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK;
+ test_frame_number |= dep->combo_num << 14;
+ test_frame_number += max_t(u32, 4, dep->interval);
+
+ params.param0 = upper_32_bits(dep->dwc->bounce_addr);
+ params.param1 = lower_32_bits(dep->dwc->bounce_addr);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+ cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
+ cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+
+ /* Redo if some other failure beside bus-expiry is received */
+ if (cmd_status && cmd_status != -EAGAIN) {
+ dep->start_cmd_status = 0;
+ dep->combo_num = 0;
+ return 0;
+ }
+
+ /* Store the first test status */
+ if (dep->combo_num == 0)
+ dep->start_cmd_status = cmd_status;
+
+ dep->combo_num++;
+
+ /*
+ * If the START_TRANSFER command was successful, end the transfer
+ * and wait for the next XferNotReady to test the next combination
+ */
+ if (cmd_status == 0) {
+ dwc3_stop_active_transfer(dep, true, true);
+ return 0;
+ }
+ }
+
+ /* test0 and test1 are both completed at this point */
+ test0 = (dep->start_cmd_status == 0);
+ test1 = (cmd_status == 0);
+
+ if (!test0 && test1)
+ dep->combo_num = 1;
+ else if (!test0 && !test1)
+ dep->combo_num = 2;
+ else if (test0 && !test1)
+ dep->combo_num = 3;
+ else if (test0 && test1)
+ dep->combo_num = 0;
+
+ dep->frame_number &= DWC3_FRNUMBER_MASK;
+ dep->frame_number |= dep->combo_num << 14;
+ dep->frame_number += max_t(u32, 4, dep->interval);
+
+ /* Reinitialize test variables */
+ dep->start_cmd_status = 0;
+ dep->combo_num = 0;
+
+ return __dwc3_gadget_kick_transfer(dep);
+}
+
+static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
+{
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
+ struct dwc3 *dwc = dep->dwc;
+ int ret;
+ int i;
+
+ if (list_empty(&dep->pending_list) &&
+ list_empty(&dep->started_list)) {
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ return -EAGAIN;
+ }
+
+ if (!dwc->dis_start_transfer_quirk &&
+ (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
+ DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
+ if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
+ return dwc3_gadget_start_isoc_quirk(dep);
+ }
+
+ if (desc->bInterval <= 14 &&
+ dwc->gadget->speed >= USB_SPEED_HIGH) {
+ u32 frame = __dwc3_gadget_get_frame(dwc);
+ bool rollover = frame <
+ (dep->frame_number & DWC3_FRNUMBER_MASK);
+
+ /*
+ * frame_number is set from XferNotReady and may already be
+ * out of date. DSTS only provides the lower 14 bits of the
+ * current frame number. So add the upper two bits of
+ * frame_number and handle a possible rollover.
+ * This will provide the correct frame_number unless more than
+ * one rollover has happened since XferNotReady.
+ */
+
+ dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
+ frame;
+ if (rollover)
+ dep->frame_number += BIT(14);
+ }
+
+ for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
+ int future_interval = i + 1;
+
+ /* Give the controller at least 500us to schedule transfers */
+ if (desc->bInterval < 3)
+ future_interval += 3 - desc->bInterval;
+
+ dep->frame_number = DWC3_ALIGN_FRAME(dep, future_interval);
+
+ ret = __dwc3_gadget_kick_transfer(dep);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ /*
+ * After a number of unsuccessful start attempts due to bus-expiry
+ * status, issue END_TRANSFER command and retry on the next XferNotReady
+ * event.
+ */
+ if (ret == -EAGAIN)
+ ret = __dwc3_stop_active_transfer(dep, false, true);
+
+ return ret;
+}
+
+static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
+ dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dep->name);
+ return -ESHUTDOWN;
+ }
+
+ if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
+ &req->request, req->dep->name))
+ return -EINVAL;
+
+ if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
+ "%s: request %pK already in flight\n",
+ dep->name, &req->request))
+ return -EINVAL;
+
+ pm_runtime_get(dwc->dev);
+
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+
+ trace_dwc3_ep_queue(req);
+
+ list_add_tail(&req->list, &dep->pending_list);
+ req->status = DWC3_REQUEST_STATUS_QUEUED;
+
+ if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
+ return 0;
+
+ /*
+ * Start the transfer only after the END_TRANSFER is completed
+ * and endpoint STALL is cleared.
+ */
+ if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
+ (dep->flags & DWC3_EP_WEDGE) ||
+ (dep->flags & DWC3_EP_DELAY_STOP) ||
+ (dep->flags & DWC3_EP_STALL)) {
+ dep->flags |= DWC3_EP_DELAY_START;
+ return 0;
+ }
+
+ /*
+ * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
+ * wait for an XferNotReady event so we know the current
+ * (micro-)frame number.
+ *
+ * Without this trick, we are very likely to get Bus Expiry
+ * errors which will force us to issue an EndTransfer command.
+ */
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST))
+ return __dwc3_gadget_start_isoc(dep);
+
+ return 0;
+ }
+ }
+
+ __dwc3_gadget_kick_transfer(dep);
+
+ return 0;
+}
+
+static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_queue(dep, req);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ int i;
+
+ /* If req->trb is not set, then the request has not started */
+ if (!req->trb)
+ return;
+
+ /*
+ * If request was already started, this means we had to
+ * stop the transfer. With that we also need to ignore
+ * all TRBs used by the request, however TRBs can only
+ * be modified after completion of END_TRANSFER
+ * command. So what we do here is that we wait for
+ * END_TRANSFER completion and only after that, we jump
+ * over TRBs by clearing HWO and incrementing dequeue
+ * pointer.
+ */
+ for (i = 0; i < req->num_trbs; i++) {
+ struct dwc3_trb *trb;
+
+ trb = &dep->trb_pool[dep->trb_dequeue];
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+
+ req->num_trbs = 0;
+}
+
+static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+ struct dwc3 *dwc = dep->dwc;
+
+ while (!list_empty(&dep->cancelled_list)) {
+ req = next_request(&dep->cancelled_list);
+ dwc3_gadget_ep_skip_trbs(dep, req);
+ switch (req->status) {
+ case DWC3_REQUEST_STATUS_DISCONNECTED:
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ break;
+ case DWC3_REQUEST_STATUS_DEQUEUED:
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ break;
+ case DWC3_REQUEST_STATUS_STALLED:
+ dwc3_gadget_giveback(dep, req, -EPIPE);
+ break;
+ default:
+ dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ break;
+ }
+ /*
+ * The endpoint is disabled; let dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
+ }
+}
+
+static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_request *r = NULL;
+
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+ int ret = 0;
+
+ trace_dwc3_ep_dequeue(req);
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ list_for_each_entry(r, &dep->cancelled_list, list) {
+ if (r == req)
+ goto out;
+ }
+
+ list_for_each_entry(r, &dep->pending_list, list) {
+ if (r == req) {
+ /*
+ * Explicitly check for EP0/1 as dequeue for those
+ * EPs need to be handled differently. Control EP
+ * only deals with one USB req, and giveback will
+ * occur during dwc3_ep0_stall_and_restart(). EP0
+ * requests are never added to started_list.
+ */
+ if (dep->number > 1)
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ else
+ dwc3_ep0_reset_state(dwc);
+ goto out;
+ }
+ }
+
+ list_for_each_entry(r, &dep->started_list, list) {
+ if (r == req) {
+ struct dwc3_request *t;
+
+ /* wait until it is processed */
+ dwc3_stop_active_transfer(dep, true, true);
+
+ /*
+ * Remove any started request if the transfer is
+ * cancelled.
+ */
+ list_for_each_entry_safe(r, t, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(r,
+ DWC3_REQUEST_STATUS_DEQUEUED);
+
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
+ goto out;
+ }
+ }
+
+ dev_err(dwc->dev, "request %pK was not queued to %s\n",
+ request, ep->name);
+ ret = -EINVAL;
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_request *req;
+ struct dwc3_request *tmp;
+ int ret;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
+ return -EINVAL;
+ }
+
+ memset(&params, 0x00, sizeof(params));
+
+ if (value) {
+ struct dwc3_trb *trb;
+
+ unsigned int transfer_in_flight;
+ unsigned int started;
+
+ if (dep->number > 1)
+ trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+ else
+ trb = &dwc->ep0_trb[dep->trb_enqueue];
+
+ transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+ started = !list_empty(&dep->started_list);
+
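+ /*
+ * A functional (non-protocol) stall cannot be set while an IN
+ * transfer is still in flight or an OUT endpoint still has
+ * started requests; return -EAGAIN so the caller retries later.
+ */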
+ if (!protocol && ((dep->direction && transfer_in_flight) ||
+ (!dep->direction && started))) {
+ return -EAGAIN;
+ }
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret)
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ else
+ dep->flags |= DWC3_EP_STALL;
+ } else {
+ /*
+ * Don't issue CLEAR_STALL command to control endpoints. The
+ * controller automatically clears the STALL when it receives
+ * the SETUP token.
+ */
+ if (dep->number <= 1) {
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return 0;
+ }
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_STALLED);
+
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING ||
+ (dep->flags & DWC3_EP_DELAY_STOP)) {
+ dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
+ if (protocol)
+ dwc->clear_stall_protocol = dep->number;
+
+ return 0;
+ }
+
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
+ if (ret) {
+ dev_err(dwc->dev, "failed to clear STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
+ }
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+
+ unsigned long flags;
+
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_ep_set_halt(dep, value, false);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dep->flags |= DWC3_EP_WEDGE;
+
+ if (dep->number == 0 || dep->number == 1)
+ ret = __dwc3_gadget_ep0_set_halt(ep, 1);
+ else
+ ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+};
+
+static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
+ .enable = dwc3_gadget_ep0_enable,
+ .disable = dwc3_gadget_ep0_disable,
+ .alloc_request = dwc3_gadget_ep_alloc_request,
+ .free_request = dwc3_gadget_ep_free_request,
+ .queue = dwc3_gadget_ep0_queue,
+ .dequeue = dwc3_gadget_ep_dequeue,
+ .set_halt = dwc3_gadget_ep0_set_halt,
+ .set_wedge = dwc3_gadget_ep_set_wedge,
+};
+
+static const struct usb_ep_ops dwc3_gadget_ep_ops = {
+ .enable = dwc3_gadget_ep_enable,
+ .disable = dwc3_gadget_ep_disable,
+ .alloc_request = dwc3_gadget_ep_alloc_request,
+ .free_request = dwc3_gadget_ep_free_request,
+ .queue = dwc3_gadget_ep_queue,
+ .dequeue = dwc3_gadget_ep_dequeue,
+ .set_halt = dwc3_gadget_ep_set_halt,
+ .set_wedge = dwc3_gadget_ep_set_wedge,
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_get_frame(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ return __dwc3_gadget_get_frame(dwc);
+}
+
+static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
+{
+ int retries;
+
+ int ret;
+ u32 reg;
+
+ u8 link_state;
+
+ /*
+ * According to the Databook, a Remote Wakeup request should
+ * be issued only when the device is in the early suspend state.
+ *
+ * We can check that via the USB Link State bits in the DSTS register.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ link_state = DWC3_DSTS_USBLNKST(reg);
+
+ switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
+ case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
+ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
+ case DWC3_LINK_STATE_U2: /* in HS, means Sleep (L1) */
+ case DWC3_LINK_STATE_U1:
+ case DWC3_LINK_STATE_RESUME:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev, "failed to put link in Recovery\n");
+ return ret;
+ }
+
+ /* Recent versions do this automatically */
+ if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
+ /* write zeroes to Link Change Request */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /* poll until Link State changes to ON */
+ retries = 20000;
+
+ while (retries--) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+ /* in HS, means ON */
+ if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
+ break;
+ }
+
+ if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
+ dev_err(dwc->dev, "failed to send remote wakeup\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_gadget_wakeup(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_wakeup(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
+ int is_selfpowered)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ g->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static void dwc3_stop_active_transfers(struct dwc3 *dwc)
+{
+ u32 epnum;
+
+ for (epnum = 2; epnum < dwc->num_eps; epnum++) {
+ struct dwc3_ep *dep;
+
+ dep = dwc->eps[epnum];
+ if (!dep)
+ continue;
+
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
+ }
+}
+
+static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
+{
+ enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate;
+ u32 reg;
+
+ if (ssp_rate == USB_SSP_GEN_UNKNOWN)
+ ssp_rate = dwc->max_ssp_rate;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_SPEED_MASK;
+ reg &= ~DWC3_DCFG_NUMLANES(~0);
+
+ if (ssp_rate == USB_SSP_GEN_1x2)
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+
+ if (ssp_rate != USB_SSP_GEN_2x1 &&
+ dwc->max_ssp_rate != USB_SSP_GEN_2x1)
+ reg |= DWC3_DCFG_NUMLANES(1);
+
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
+{
+ enum usb_device_speed speed;
+ u32 reg;
+
+ speed = dwc->gadget_max_speed;
+ if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
+ speed = dwc->maximum_speed;
+
+ if (speed == USB_SPEED_SUPER_PLUS &&
+ DWC3_IP_IS(DWC32)) {
+ __dwc3_gadget_set_ssp_rate(dwc);
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_SPEED_MASK);
+
+ /*
+ * WORKAROUND: DWC3 revisions < 2.20a have an issue
+ * which would cause a metastability state on the Run/Stop
+ * bit if we try to force the IP to USB2-only mode.
+ *
+ * Because of that, we cannot configure the IP to any
+ * speed other than SuperSpeed.
+ *
+ * Refers to:
+ *
+ * STAR#9000525659: Clock Domain Crossing on DCTL in
+ * USB 2.0 Mode
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
+ !dwc->dis_metastability_quirk) {
+ reg |= DWC3_DCFG_SUPERSPEED;
+ } else {
+ switch (speed) {
+ case USB_SPEED_FULL:
+ reg |= DWC3_DCFG_FULLSPEED;
+ break;
+ case USB_SPEED_HIGH:
+ reg |= DWC3_DCFG_HIGHSPEED;
+ break;
+ case USB_SPEED_SUPER:
+ reg |= DWC3_DCFG_SUPERSPEED;
+ break;
+ case USB_SPEED_SUPER_PLUS:
+ if (DWC3_IP_IS(DWC3))
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+ break;
+ default:
+ dev_err(dwc->dev, "invalid speed (%d)\n", speed);
+
+ if (DWC3_IP_IS(DWC3))
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+ }
+ }
+
+ if (DWC3_IP_IS(DWC32) &&
+ speed > USB_SPEED_UNKNOWN &&
+ speed < USB_SPEED_SUPER_PLUS)
+ reg &= ~DWC3_DCFG_NUMLANES(~0);
+
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+{
+ u32 reg;
+ u32 timeout = 2000;
+
+ if (pm_runtime_suspended(dwc->dev))
+ return 0;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (is_on) {
+ if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
+ reg &= ~DWC3_DCTL_TRGTULST_MASK;
+ reg |= DWC3_DCTL_TRGTULST_RX_DET;
+ }
+
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ reg |= DWC3_DCTL_RUN_STOP;
+
+ __dwc3_gadget_set_speed(dwc);
+ dwc->pullups_connected = true;
+ } else {
+ reg &= ~DWC3_DCTL_RUN_STOP;
+
+ dwc->pullups_connected = false;
+ }
+
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+
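+ /*
+ * Poll until DSTS.DEVCTRLHLT reflects the requested state: it must
+ * be cleared when starting and set when stopping (giving up after
+ * roughly 2 to 4 seconds with the delays below).
+ */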
+ do {
+ usleep_range(1000, 2000);
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ reg &= DWC3_DSTS_DEVCTRLHLT;
+ } while (--timeout && !(!is_on ^ !reg));
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
+static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
+
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->connected = false;
+
+ /*
+ * Attempt to end the pending SETUP status phase, rather than waiting
+ * for the function driver to do so.
+ */
+ if (dwc->delayed_status)
+ dwc3_ep0_send_delayed_status(dwc);
+
+ /*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.8 Table 4-7, it states that for a device-initiated
+ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+ * command for any active transfers" before clearing the RunStop
+ * bit.
+ */
+ dwc3_stop_active_transfers(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /*
+ * Per databook, when we want to stop the gadget, if a control transfer
+ * is still in process, complete it and get the core into setup phase.
+ * In case the host is unresponsive to a SETUP transaction, forcefully
+ * stall the transfer, and move back to the SETUP phase, so that any
+ * pending endxfers can be executed.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ reinit_completion(&dwc->ep0_in_setup);
+
+ ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ if (ret == 0) {
+ dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_ep0_reset_state(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ }
+
+ /*
+ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ * driver needs to acknowledge them before the controller can halt.
+ * Simply let the interrupt handler acknowledge and handle the
+ * remaining events generated by the controller while polling for
+ * DSTS.DEVCTLHLT.
+ */
+ ret = dwc3_gadget_run_stop(dwc, false);
+
+ /*
+ * Stop the gadget after controller is halted, so that if needed, the
+ * events to update EP0 state can still occur while the run/stop
+ * routine polls for the halted state. DEVTEN is cleared as part of
+ * gadget stop.
+ */
+ spin_lock_irqsave(&dwc->lock, flags);
+ __dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+}
+
+static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
+{
+ /*
+ * In the Synopsys DWC_usb31 1.90a programming guide section
+ * 4.1.9, it specifies that for a reconnect after a
+ * device-initiated disconnect requires a core soft reset
+ * (DCTL.CSftRst) before enabling the run/stop bit.
+ */
+ dwc3_core_soft_reset(dwc);
+
+ dwc3_event_buffers_setup(dwc);
+ __dwc3_gadget_start(dwc);
+ return dwc3_gadget_run_stop(dwc, true);
+}
+
+static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ int ret;
+
+ is_on = !!is_on;
+
+ dwc->softconnect = is_on;
+
+ /*
+ * Avoid issuing a runtime resume if the device is already in the
+ * suspended state during gadget disconnect. DWC3 gadget was already
+ * halted/stopped during runtime suspend.
+ */
+ if (!is_on) {
+ pm_runtime_barrier(dwc->dev);
+ if (pm_runtime_suspended(dwc->dev))
+ return 0;
+ }
+
+ /*
+ * Check the return value for successful resume, or error. For a
+ * successful resume, the DWC3 runtime PM resume routine will handle
+ * the run stop sequence, so avoid duplicate operations here.
+ */
+ ret = pm_runtime_get_sync(dwc->dev);
+ if (!ret || ret < 0) {
+ pm_runtime_put(dwc->dev);
+ if (ret < 0)
+ pm_runtime_set_suspended(dwc->dev);
+ return ret;
+ }
+
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
+ synchronize_irq(dwc->irq_gadget);
+
+ if (!is_on)
+ ret = dwc3_gadget_soft_disconnect(dwc);
+ else
+ ret = dwc3_gadget_soft_connect(dwc);
+
+ pm_runtime_put(dwc->dev);
+
+ return ret;
+}
+
+static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /* Enable all but Start and End of Frame IRQs */
+ reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
+ DWC3_DEVTEN_CMDCMPLTEN |
+ DWC3_DEVTEN_ERRTICERREN |
+ DWC3_DEVTEN_WKUPEVTEN |
+ DWC3_DEVTEN_CONNECTDONEEN |
+ DWC3_DEVTEN_USBRSTEN |
+ DWC3_DEVTEN_DISCONNEVTEN);
+
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
+ reg |= DWC3_DEVTEN_ULSTCNGEN;
+
+ /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
+ reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
+
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+}
+
+static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+{
+ /* mask all interrupts */
+ dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
+}
+
+static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
+static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
+
+/**
+ * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
+ * @dwc: pointer to our context structure
+ *
+ * The following looks complex but is actually very simple. In order to
+ * calculate the number of packets we can burst at once on OUT transfers, we
+ * use the RxFIFO size.
+ *
+ * To calculate RxFIFO size we need two numbers:
+ * MDWIDTH = size, in bits, of the internal memory bus
+ * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
+ *
+ * Given these two numbers, the formula is simple:
+ *
+ * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
+ *
+ * 24 bytes is for 3x SETUP packets
+ * 16 bytes is a clock domain crossing tolerance
+ *
+ * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
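+ *
+ * For example (illustrative values): RAM2_DEPTH = 1024 and MDWIDTH = 64
+ * give a RxFIFO of 1024 * 64 / 8 - 24 - 16 = 8152 bytes, i.e. NUMP = 7.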
+ */
+static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
+{
+ u32 ram2_depth;
+ u32 mdwidth;
+ u32 nump;
+ u32 reg;
+
+ ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
+ mdwidth = dwc3_mdwidth(dwc);
+
+ nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
+ nump = min_t(u32, nump, 16);
+
+ /* update NumP */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_NUMP_MASK;
+ reg |= nump << DWC3_DCFG_NUMP_SHIFT;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static int __dwc3_gadget_start(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int ret = 0;
+ u32 reg;
+
+ /*
+ * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+ * the core supports IMOD, disable it.
+ */
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ } else if (dwc3_has_imod(dwc)) {
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+ }
+
+ /*
+ * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
+ * field instead of letting dwc3 itself calculate that automatically.
+ *
+ * This way, we maximize the chances that we'll be able to get several
+ * bursts of data without going through any sort of endpoint throttling.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ if (DWC3_IP_IS(DWC3))
+ reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
+ else
+ reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
+
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+
+ dwc3_gadget_setup_nump(dwc);
+
+ /*
+ * Currently the controller handles a single stream only. So, ignore the
+ * Packet Pending bit for stream selection and don't search for another
+ * stream if the host sends Data Packet with PP=0 (for OUT direction) or
+ * ACK with NumP=0 and PP=0 (for IN direction). This slightly improves
+ * the stream performance.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg |= DWC3_DCFG_IGNSTRMPP;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ /* Enable MST by default if the device is capable of MST */
+ if (DWC3_MST_CAPABLE(&dwc->hwparams)) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG1);
+ reg &= ~DWC3_DCFG1_DIS_MST_ENH;
+ dwc3_writel(dwc->regs, DWC3_DCFG1, reg);
+ }
+
+ /* Start with SuperSpeed Default */
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+ dep = dwc->eps[0];
+ dep->flags = 0;
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ goto err0;
+ }
+
+ dep = dwc->eps[1];
+ dep->flags = 0;
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ goto err1;
+ }
+
+ /* begin to receive SETUP packets */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->ep0_bounced = false;
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
+ dwc->delayed_status = false;
+ dwc3_ep0_out_start(dwc);
+
+ dwc3_gadget_enable_irq(dwc);
+
+ return 0;
+
+err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+ return ret;
+}
+
+static int dwc3_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int ret;
+ int irq;
+
+ irq = dwc->irq_gadget;
+ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+ IRQF_SHARED, "dwc3", dwc->ev_buf);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_driver = driver;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static void __dwc3_gadget_stop(struct dwc3 *dwc)
+{
+ dwc3_gadget_disable_irq(dwc);
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_ep_disable(dwc->eps[1]);
+}
+
+static int dwc3_gadget_stop(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_driver = NULL;
+ dwc->max_cfg_eps = 0;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ free_irq(dwc->irq_gadget, dwc->ev_buf);
+
+ return 0;
+}
+
+static void dwc3_gadget_config_params(struct usb_gadget *g,
+ struct usb_dcd_config_params *params)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
+ params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
+
+ /* Recommended BESL */
+ if (!dwc->dis_enblslpm_quirk) {
+ /*
+ * If the recommended BESL baseline is 0 or if the BESL deep is
+ * less than 2, Microsoft's Windows 10 host usb stack will issue
+ * a usb reset immediately after it receives the extended BOS
+ * descriptor and the enumeration will fail. To maintain
+ * compatibility with the Windows USB stack, let's set the
+ * recommended BESL baseline to 1 and clamp the BESL deep to be
+ * within 2 to 15.
+ */
+ params->besl_baseline = 1;
+ if (dwc->is_utmi_l1_suspend)
+ params->besl_deep =
+ clamp_t(u8, dwc->hird_threshold, 2, 15);
+ }
+
+ /* U1 Device exit Latency */
+ if (dwc->dis_u1_entry_quirk)
+ params->bU1devExitLat = 0;
+ else
+ params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
+
+ /* U2 Device exit Latency */
+ if (dwc->dis_u2_entry_quirk)
+ params->bU2DevExitLat = 0;
+ else
+ params->bU2DevExitLat =
+ cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
+}
+
+static void dwc3_gadget_set_speed(struct usb_gadget *g,
+ enum usb_device_speed speed)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_max_speed = speed;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
+static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
+ enum usb_ssp_rate rate)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
+ dwc->gadget_ssp_rate = rate;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
+static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ union power_supply_propval val = {0};
+ int ret;
+
+ if (dwc->usb2_phy)
+ return usb_phy_set_power(dwc->usb2_phy, mA);
+
+ if (!dwc->usb_psy)
+ return -EOPNOTSUPP;
+
+ val.intval = 1000 * mA;
+ ret = power_supply_set_property(dwc->usb_psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
+
+ return ret;
+}
+
+/**
+ * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
+ * @g: pointer to the USB gadget
+ *
+ * Used to record the maximum number of endpoints being used in a USB composite
+ * device (across all configurations). This is to be used in the calculation
+ * of the TXFIFO sizes when resizing internal memory for individual endpoints.
+ * It helps ensure that the resizing logic reserves enough space for at
+ * least one max packet.
+ */
+static int dwc3_gadget_check_config(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ struct usb_ep *ep;
+ int fifo_size = 0;
+ int ram1_depth;
+ int ep_num = 0;
+
+ if (!dwc->do_fifo_resize)
+ return 0;
+
+ list_for_each_entry(ep, &g->ep_list, ep_list) {
+ /* Only interested in the IN endpoints */
+ if (ep->claimed && (ep->address & USB_DIR_IN))
+ ep_num++;
+ }
+
+ if (ep_num <= dwc->max_cfg_eps)
+ return 0;
+
+ /* Update the max number of eps in the composition */
+ dwc->max_cfg_eps = ep_num;
+
+ fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
+ /* Based on the equation, increment by one for every ep */
+ fifo_size += dwc->max_cfg_eps;
+
+ /* Check if we can fit a single fifo per endpoint */
+ ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+ if (fifo_size > ram1_depth)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->async_callbacks = enable;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
+static const struct usb_gadget_ops dwc3_gadget_ops = {
+ .get_frame = dwc3_gadget_get_frame,
+ .wakeup = dwc3_gadget_wakeup,
+ .set_selfpowered = dwc3_gadget_set_selfpowered,
+ .pullup = dwc3_gadget_pullup,
+ .udc_start = dwc3_gadget_start,
+ .udc_stop = dwc3_gadget_stop,
+ .udc_set_speed = dwc3_gadget_set_speed,
+ .udc_set_ssp_rate = dwc3_gadget_set_ssp_rate,
+ .get_config_params = dwc3_gadget_config_params,
+ .vbus_draw = dwc3_gadget_vbus_draw,
+ .check_config = dwc3_gadget_check_config,
+ .udc_async_callbacks = dwc3_gadget_async_callbacks,
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
+ dep->endpoint.maxburst = 1;
+ dep->endpoint.ops = &dwc3_gadget_ep0_ops;
+ if (!dep->direction)
+ dwc->gadget->ep0 = &dep->endpoint;
+
+ dep->endpoint.caps.type_control = true;
+
+ return 0;
+}
+
+static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 mdwidth;
+ int size;
+ int maxpacket;
+
+ mdwidth = dwc3_mdwidth(dwc);
+
+ /* MDWIDTH is represented in bits, we need it in bytes */
+ mdwidth /= 8;
+
+ size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
+ if (DWC3_IP_IS(DWC3))
+ size = DWC3_GTXFIFOSIZ_TXFDEP(size);
+ else
+ size = DWC31_GTXFIFOSIZ_TXFDEP(size);
+
+ /*
+ * maxpacket size is determined as part of the following, after assuming
+ * a mult value of one maxpacket:
+ * DWC3 revision 280A and prior:
+ * fifo_size = mult * (max_packet / mdwidth) + 1;
+ * maxpacket = mdwidth * (fifo_size - 1);
+ *
+ * DWC3 revision 290A and onwards:
+ * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
+ * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth;
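+ *
+ * For example (illustrative values): with mdwidth = 8 bytes and a TXFDEP
+ * of 131 on a 290A or later core, maxpacket = 8 * ((131 - 1) - 1) - 8 = 1024.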
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 290A))
+ maxpacket = mdwidth * (size - 1);
+ else
+ maxpacket = mdwidth * ((size - 1) - 1) - mdwidth;
+
+ /* Functionally, space for one max packet is sufficient */
+ size = min_t(int, maxpacket, 1024);
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
+
+ dep->endpoint.max_streams = 16;
+ dep->endpoint.ops = &dwc3_gadget_ep_ops;
+ list_add_tail(&dep->endpoint.ep_list,
+ &dwc->gadget->ep_list);
+ dep->endpoint.caps.type_iso = true;
+ dep->endpoint.caps.type_bulk = true;
+ dep->endpoint.caps.type_int = true;
+
+ return dwc3_alloc_trb_pool(dep);
+}
+
+static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 mdwidth;
+ int size;
+
+ mdwidth = dwc3_mdwidth(dwc);
+
+ /* MDWIDTH is represented in bits, convert to bytes */
+ mdwidth /= 8;
+
+ /* All OUT endpoints share a single RxFIFO space */
+ size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
+ if (DWC3_IP_IS(DWC3))
+ size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
+
+ /* FIFO depth is in MDWIDTH bytes */
+ size *= mdwidth;
+
+ /*
+ * To meet the performance requirement, a minimum recommended RxFIFO size
+ * is defined as follows:
+ * RxFIFO size >= (3 x MaxPacketSize) +
+ * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
+ *
+ * Then calculate the max packet limit as below.
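+ *
+ * For example (illustrative values): a 4136-byte RxFIFO leaves
+ * (4136 - 24 - 16) / 3 = 1365 bytes as the max packet limit.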
+ */
+ size -= (3 * 8) + 16;
+ if (size < 0)
+ size = 0;
+ else
+ size /= 3;
+
+ usb_ep_set_maxpacket_limit(&dep->endpoint, size);
+ dep->endpoint.max_streams = 16;
+ dep->endpoint.ops = &dwc3_gadget_ep_ops;
+ list_add_tail(&dep->endpoint.ep_list,
+ &dwc->gadget->ep_list);
+ dep->endpoint.caps.type_iso = true;
+ dep->endpoint.caps.type_bulk = true;
+ dep->endpoint.caps.type_int = true;
+
+ return dwc3_alloc_trb_pool(dep);
+}
+
+static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
+{
+ struct dwc3_ep *dep;
+ bool direction = epnum & 1;
+ int ret;
+ u8 num = epnum >> 1;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep)
+ return -ENOMEM;
+
+ dep->dwc = dwc;
+ dep->number = epnum;
+ dep->direction = direction;
+ dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
+ dwc->eps[epnum] = dep;
+ dep->combo_num = 0;
+ dep->start_cmd_status = 0;
+
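+ /*
+ * Physical endpoint numbers map to logical endpoints as epnum / 2,
+ * with even numbers being OUT and odd numbers being IN; this is how
+ * the direction and the endpoint name (e.g. "ep1in") are derived.
+ */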
+ snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
+ direction ? "in" : "out");
+
+ dep->endpoint.name = dep->name;
+
+ if (!(dep->number > 1)) {
+ dep->endpoint.desc = &dwc3_gadget_ep0_desc;
+ dep->endpoint.comp_desc = NULL;
+ }
+
+ if (num == 0)
+ ret = dwc3_gadget_init_control_endpoint(dep);
+ else if (direction)
+ ret = dwc3_gadget_init_in_endpoint(dep);
+ else
+ ret = dwc3_gadget_init_out_endpoint(dep);
+
+ if (ret)
+ return ret;
+
+ dep->endpoint.caps.dir_in = direction;
+ dep->endpoint.caps.dir_out = !direction;
+
+ INIT_LIST_HEAD(&dep->pending_list);
+ INIT_LIST_HEAD(&dep->started_list);
+ INIT_LIST_HEAD(&dep->cancelled_list);
+
+ dwc3_debugfs_create_endpoint_dir(dep);
+
+ return 0;
+}
+
+static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
+{
+ u8 epnum;
+
+ INIT_LIST_HEAD(&dwc->gadget->ep_list);
+
+ for (epnum = 0; epnum < total; epnum++) {
+ int ret;
+
+ ret = dwc3_gadget_init_endpoint(dwc, epnum);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ u8 epnum;
+
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ dep = dwc->eps[epnum];
+ if (!dep)
+ continue;
+ /*
+ * Physical endpoints 0 and 1 are special; they form the
+ * bi-directional USB endpoint 0.
+ *
+ * For those two physical endpoints, we don't allocate a TRB
+ * pool nor do we add them to the endpoints list. Due to that, we
+ * shouldn't do these two operations otherwise we would end up
+ * with all sorts of bugs when removing dwc3.ko.
+ */
+ if (epnum != 0 && epnum != 1) {
+ dwc3_free_trb_pool(dep);
+ list_del(&dep->endpoint.ep_list);
+ }
+
+ dwc3_debugfs_remove_endpoint_dir(dep);
+ kfree(dep);
+ }
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, struct dwc3_trb *trb,
+ const struct dwc3_event_depevt *event, int status, int chain)
+{
+ unsigned int count;
+
+ dwc3_ep_inc_deq(dep);
+
+ trace_dwc3_complete_trb(dep, trb);
+ req->num_trbs--;
+
+ /*
+ * If we're in the middle of a series of chained TRBs and we
+ * receive a short transfer along the way, DWC3 will skip
+ * through all TRBs including the last TRB in the chain (the
+ * one where the CHN bit is zero). DWC3 will also avoid clearing
+ * the HWO bit, and SW has to do it manually.
+ *
+ * We're going to do that here to avoid problems of HW trying
+ * to use bogus TRBs for transfers.
+ */
+ if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+
+ /*
+ * For isochronous transfers, the first TRB in a service interval must
+ * have the Isoc-First type. Track and report its interval frame number.
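+ *
+ * For example (illustrative): with interval = 8 microframes and a
+ * reported SOFN of 0x123, the masking below yields frame 0x120.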
+ */
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
+ unsigned int frame_number;
+
+ frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
+ frame_number &= ~(dep->interval - 1);
+ req->request.frame_number = frame_number;
+ }
+
+ /*
+ * We use the bounce buffer for requests that need an extra TRB or an OUT ZLP. If
+ * this TRB points to the bounce buffer address, it's a MPS alignment
+ * TRB. Don't add it to req->remaining calculation.
+ */
+ if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
+ trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ return 1;
+ }
+
+ count = trb->size & DWC3_TRB_SIZE_MASK;
+ req->remaining += count;
+
+ if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ return 1;
+
+ if (event->status & DEPEVT_STATUS_SHORT && !chain)
+ return 1;
+
+ if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
+ DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
+ return 1;
+
+ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
+ (trb->ctrl & DWC3_TRB_CTRL_LST))
+ return 1;
+
+ return 0;
+}
+
+static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
+ struct dwc3_request *req, const struct dwc3_event_depevt *event,
+ int status)
+{
+ struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
+ struct scatterlist *sg = req->sg;
+ struct scatterlist *s;
+ unsigned int num_queued = req->num_queued_sgs;
+ unsigned int i;
+ int ret = 0;
+
+ for_each_sg(sg, s, num_queued, i) {
+ trb = &dep->trb_pool[dep->trb_dequeue];
+
+ req->sg = sg_next(s);
+ req->num_queued_sgs--;
+
+ ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
+ trb, event, status, true);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
+ struct dwc3_request *req, const struct dwc3_event_depevt *event,
+ int status)
+{
+ struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
+
+ return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
+ event, status, false);
+}
+
+static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
+{
+ return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
+}
+
+static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event,
+ struct dwc3_request *req, int status)
+{
+ int request_status;
+ int ret;
+
+ if (req->request.num_mapped_sgs)
+ ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
+ status);
+ else
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
+ status);
+
+ req->request.actual = req->request.length - req->remaining;
+
+ if (!dwc3_gadget_ep_request_completed(req))
+ goto out;
+
+ if (req->needs_extra_trb) {
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
+ status);
+ req->needs_extra_trb = false;
+ }
+
+ /*
+ * The event status only reflects the status of the TRB with IOC set.
+ * For the requests that don't set interrupt on completion, the driver
+ * needs to check and return the status of the completed TRBs associated
+ * with the request. Use the status of the last TRB of the request.
+ */
+ if (req->request.no_interrupt) {
+ struct dwc3_trb *trb;
+
+ trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
+ switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
+ case DWC3_TRBSTS_MISSED_ISOC:
+ /* Isoc endpoint only */
+ request_status = -EXDEV;
+ break;
+ case DWC3_TRB_STS_XFER_IN_PROG:
+ /* Applicable when End Transfer with ForceRM=0 */
+ case DWC3_TRBSTS_SETUP_PENDING:
+ /* Control endpoint only */
+ case DWC3_TRBSTS_OK:
+ default:
+ request_status = 0;
+ break;
+ }
+ } else {
+ request_status = status;
+ }
+
+ dwc3_gadget_giveback(dep, req, request_status);
+
+out:
+ return ret;
+}
+
+static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event, int status)
+{
+ struct dwc3_request *req;
+
+ while (!list_empty(&dep->started_list)) {
+ int ret;
+
+ req = next_request(&dep->started_list);
+ ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
+ req, status);
+ if (ret)
+ break;
+ /*
+ * The endpoint is disabled; let dwc3_remove_requests()
+ * handle the cleanup.
+ */
+ if (!dep->endpoint.desc)
+ break;
+ }
+}
+
+static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+ struct dwc3 *dwc = dep->dwc;
+
+ if (!dep->endpoint.desc || !dwc->pullups_connected ||
+ !dwc->connected)
+ return false;
+
+ if (!list_empty(&dep->pending_list))
+ return true;
+
+ /*
+ * We only need to check the first entry of the started list. We can
+ * assume the completed requests are removed from the started list.
+ */
+ req = next_request(&dep->started_list);
+ if (!req)
+ return false;
+
+ return !dwc3_gadget_ep_request_completed(req);
+}
+
+static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ dep->frame_number = event->parameters;
+}
+
+static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event, int status)
+{
+ struct dwc3 *dwc = dep->dwc;
+ bool no_started_trb = true;
+
+ dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
+
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
+ goto out;
+
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ list_empty(&dep->started_list) &&
+ (list_empty(&dep->pending_list) || status == -EXDEV))
+ dwc3_stop_active_transfer(dep, true, true);
+ else if (dwc3_gadget_ep_should_continue(dep))
+ if (__dwc3_gadget_kick_transfer(dep) == 0)
+ no_started_trb = false;
+
+out:
+ /*
+ * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
+ * See dwc3_gadget_linksts_change_interrupt() for 1st half.
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
+ u32 reg;
+ int i;
+
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ dep = dwc->eps[i];
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (!list_empty(&dep->started_list))
+ return no_started_trb;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= dwc->u1u2;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc->u1u2 = 0;
+ }
+
+ return no_started_trb;
+}
+
+static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ int status = 0;
+
+ if (!dep->endpoint.desc)
+ return;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ dwc3_gadget_endpoint_frame_from_event(dep, event);
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ if (event->status & DEPEVT_STATUS_MISSED_ISOC)
+ status = -EXDEV;
+
+ dwc3_gadget_endpoint_trbs_complete(dep, event, status);
+}
+
+static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ int status = 0;
+
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+}
+
+static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ dwc3_gadget_endpoint_frame_from_event(dep, event);
+
+ /*
+ * The XferNotReady event is generated only once before the endpoint
+ * starts. It will be generated again when END_TRANSFER command is
+ * issued. For some controller versions, the XferNotReady event may be
+ * generated while the END_TRANSFER command is still in process. Ignore
+ * it and wait for the next XferNotReady event after the command is
+ * completed.
+ */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
+ return;
+
+ (void) __dwc3_gadget_start_isoc(dep);
+}
+
+static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
+
+ if (cmd != DWC3_DEPCMD_ENDTRANSFER)
+ return;
+
+ /*
+ * The END_TRANSFER command will cause the controller to generate a
+ * NoStream Event, and it's not due to the host DP NoStream rejection.
+ * Ignore the next NoStream event.
+ */
+ if (dep->stream_capable)
+ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
+
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
+ if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
+ struct dwc3 *dwc = dep->dwc;
+
+ dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
+ if (dwc3_send_clear_stall_ep_cmd(dep)) {
+ struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
+
+ dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
+ if (dwc->delayed_status)
+ __dwc3_gadget_ep0_set_halt(ep0, 1);
+ return;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ if (dwc->clear_stall_protocol == dep->number)
+ dwc3_ep0_send_delayed_status(dwc);
+ }
+
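+	/*
+	 * A transfer that was queued while the End Transfer command was
+	 * pending may be kicked now; isochronous endpoints instead wait for
+	 * the next XferNotReady event to resynchronize.
+	 */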
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
+}
+
+static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (event->status == DEPEVT_STREAMEVT_FOUND) {
+ dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
+ goto out;
+ }
+
+ /* Note: NoStream rejection event param value is 0 and not 0xFFFF */
+ switch (event->parameters) {
+ case DEPEVT_STREAM_PRIME:
+ /*
+ * If the host can properly transition the endpoint state from
+ * idle to prime after a NoStream rejection, there's no need to
+ * force restarting the endpoint to reinitiate the stream. To
+ * simplify the check, assume the host follows the USB spec if
+ * it primed the endpoint more than once.
+ */
+ if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
+ if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
+ dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
+ else
+ dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
+ }
+
+ break;
+ case DEPEVT_STREAM_NOSTREAM:
+ if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
+ !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
+ (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
+ break;
+
+ /*
+ * If the host rejects a stream due to no active stream, by the
+ * USB and xHCI spec, the endpoint will be put back to idle
+ * state. When the host is ready (buffer added/updated), it will
+ * prime the endpoint to inform the usb device controller. This
+ * triggers the device controller to issue ERDY to restart the
+		 * stream. However, some hosts don't follow this and keep the
+		 * endpoint in the idle state. No prime will come even though the
+		 * host's streams are updated, and the device controller will not
+		 * be triggered to generate ERDY to move the next stream data. To
+		 * work around this and maintain compatibility with various hosts,
+		 * force reinitiation of the stream until the host is ready,
+		 * instead of waiting for the host to prime the endpoint.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
+ unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
+
+ dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
+ } else {
+ dep->flags |= DWC3_EP_DELAY_START;
+ dwc3_stop_active_transfer(dep, true, true);
+ return;
+ }
+ break;
+ }
+
+out:
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+}
+
+static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep;
+ u8 epnum = event->endpoint_number;
+
+ dep = dwc->eps[epnum];
+
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ return;
+
+ /* Handle only EPCMDCMPLT when EP disabled */
+ if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
+ !(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
+ return;
+ }
+
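+	/*
+	 * Physical endpoints 0 and 1 form the bidirectional control endpoint;
+	 * their events are handled by the dedicated ep0 state machine.
+	 */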
+ if (epnum == 0 || epnum == 1) {
+ dwc3_ep0_interrupt(dwc, event);
+ return;
+ }
+
+ switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERINPROGRESS:
+ dwc3_gadget_endpoint_transfer_in_progress(dep, event);
+ break;
+ case DWC3_DEPEVT_XFERNOTREADY:
+ dwc3_gadget_endpoint_transfer_not_ready(dep, event);
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+ dwc3_gadget_endpoint_command_complete(dep, event);
+ break;
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ dwc3_gadget_endpoint_transfer_complete(dep, event);
+ break;
+ case DWC3_DEPEVT_STREAMEVT:
+ dwc3_gadget_endpoint_stream_event(dep, event);
+ break;
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+ break;
+ }
+}
+
+static void dwc3_disconnect_gadget(struct dwc3 *dwc)
+{
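+	/*
+	 * Drop the lock around the callback: the gadget driver may call back
+	 * into dwc3 (e.g. to dequeue requests), which takes dwc->lock again.
+	 */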
+ if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->disconnect(dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_suspend_gadget(struct dwc3 *dwc)
+{
+ if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->suspend(dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_resume_gadget(struct dwc3 *dwc)
+{
+ if (dwc->async_callbacks && dwc->gadget_driver->resume) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->resume(dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_reset_gadget(struct dwc3 *dwc)
+{
+ if (!dwc->gadget_driver)
+ return;
+
+ if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
+ spin_unlock(&dwc->lock);
+ usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
+ spin_lock(&dwc->lock);
+ }
+}
+
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ /*
+ * Only issue End Transfer command to the control endpoint of a started
+ * Data Phase. Typically we should only do so in error cases such as
+ * invalid/unexpected direction as described in the control transfer
+ * flow of the programming guide.
+ */
+ if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
+ return;
+
+ if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
+ return;
+
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+ (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ return;
+
+ /*
+	 * If a Setup packet is received but has not yet been DMA'd out, the
+	 * controller will not process the End Transfer command of any endpoint.
+	 * Polling its DEPCMD.CmdAct may block setting up the TRB for the Setup
+	 * packet, causing a timeout. Delay issuing the End Transfer command
+	 * until the Setup TRB is prepared.
+ */
+ if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
+ dep->flags |= DWC3_EP_DELAY_STOP;
+ return;
+ }
+
+ /*
+ * NOTICE: We are violating what the Databook says about the
+ * EndTransfer command. Ideally we would _always_ wait for the
+ * EndTransfer Command Completion IRQ, but that's causing too
+ * much trouble synchronizing between us and gadget driver.
+ *
+ * We have discussed this with the IP Provider and it was
+ * suggested to giveback all requests here.
+ *
+ * Note also that a similar handling was tested by Synopsys
+ * (thanks a lot Paul) and nothing bad has come out of it.
+ * In short, what we're doing is issuing EndTransfer with
+ * CMDIOC bit set and delay kicking transfer until the
+ * EndTransfer command had completed.
+ *
+ * As of IP version 3.10a of the DWC_usb3 IP, the controller
+ * supports a mode to work around the above limitation. The
+ * software can poll the CMDACT bit in the DEPCMD register
+	 * after issuing an EndTransfer command. This mode is enabled
+ * by writing GUCTL2[14]. This polling is already done in the
+ * dwc3_send_gadget_ep_cmd() function so if the mode is
+ * enabled, the EndTransfer command will have completed upon
+ * returning from this function.
+ *
+ * This mode is NOT available on the DWC_usb31 IP. In this
+ * case, if the IOC bit is not set, then delay by 1ms
+ * after issuing the EndTransfer command. This allows for the
+	 * controller to handle the command completely before
+	 * dwc3_remove_requests() attempts to unmap the USB request buffers.
+ */
+
+ __dwc3_stop_active_transfer(dep, force, interrupt);
+}
+
+static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
+{
+ u32 epnum;
+
+ for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep;
+ int ret;
+
+ dep = dwc->eps[epnum];
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_STALL))
+ continue;
+
+ dep->flags &= ~DWC3_EP_STALL;
+
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
+ WARN_ON_ONCE(ret);
+ }
+}
+
+static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+{
+ int reg;
+
+ dwc->suspended = false;
+
+ dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_INITU1ENA;
+ reg &= ~DWC3_DCTL_INITU2ENA;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+
+ dwc->connected = false;
+
+ dwc3_disconnect_gadget(dwc);
+
+ dwc->gadget->speed = USB_SPEED_UNKNOWN;
+ dwc->setup_packet_pending = false;
+ usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
+
+ dwc3_ep0_reset_state(dwc);
+
+ /*
+ * Request PM idle to address condition where usage count is
+ * already decremented to zero, but waiting for the disconnect
+ * interrupt to set dwc->connected to FALSE.
+ */
+ pm_request_idle(dwc->dev);
+}
+
+static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ dwc->suspended = false;
+
+ /*
+ * Ideally, dwc3_reset_gadget() would trigger the function
+ * drivers to stop any active transfers through ep disable.
+ * However, for functions which defer ep disable, such as mass
+ * storage, we will need to rely on the call to stop active
+	 * transfers here, and avoid allowing request queuing.
+ */
+ dwc->connected = false;
+
+ /*
+ * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ * would cause a missing Disconnect Event if there's a
+ * pending Setup Packet in the FIFO.
+ *
+ * There's no suggested workaround on the official Bug
+ * report, which states that "unless the driver/application
+ * is doing any special handling of a disconnect event,
+ * there is no functional issue".
+ *
+ * Unfortunately, it turns out that we _do_ some special
+ * handling of a disconnect event, namely complete all
+ * pending transfers, notify gadget driver of the
+ * disconnection, and so on.
+ *
+ * Our suggested workaround is to follow the Disconnect
+ * Event steps here, instead, based on a setup_packet_pending
+ * flag. Such flag gets set whenever we have a SETUP_PENDING
+ * status for EP0 TRBs and gets cleared on XferComplete for the
+ * same endpoint.
+ *
+ * Refers to:
+ *
+ * STAR#9000466709: RTL: Device : Disconnect event not
+ * generated if setup packet pending in FIFO
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
+ if (dwc->setup_packet_pending)
+ dwc3_gadget_disconnect_interrupt(dwc);
+ }
+
+ dwc3_reset_gadget(dwc);
+
+ /*
+ * From SNPS databook section 8.1.2, the EP0 should be in setup
+ * phase. So ensure that EP0 is in setup phase by issuing a stall
+ * and restart if EP0 is not in setup phase.
+ */
+ dwc3_ep0_reset_state(dwc);
+
+ /*
+	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
+ * needs to ensure that it sends "a DEPENDXFER command for any active
+ * transfers."
+ */
+ dwc3_stop_active_transfers(dwc);
+ dwc->connected = true;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+ dwc->test_mode = false;
+ dwc3_clear_stall_all_ep(dwc);
+
+ /* Reset device address to zero */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int ret;
+ u32 reg;
+ u8 lanes = 1;
+ u8 speed;
+
+ if (!dwc->softconnect)
+ return;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ speed = reg & DWC3_DSTS_CONNECTSPD;
+ dwc->speed = speed;
+
+ if (DWC3_IP_IS(DWC32))
+ lanes = DWC3_DSTS_CONNLANES(reg) + 1;
+
+ dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
+
+ /*
+ * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
+ * each time on Connect Done.
+ *
+ * Currently we always use the reset value. If any platform
+ * wants to set this to a different value, we need to add a
+ * setting and update GCTL.RAMCLKSEL here.
+ */
+
+ switch (speed) {
+ case DWC3_DSTS_SUPERSPEED_PLUS:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ dwc->gadget->ep0->maxpacket = 512;
+ dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
+
+ if (lanes > 1)
+ dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
+ else
+ dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
+ break;
+ case DWC3_DSTS_SUPERSPEED:
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have an issue which
+ * would cause a missing USB3 Reset event.
+ *
+ * In such situations, we should force a USB3 Reset
+ * event by calling our dwc3_gadget_reset_interrupt()
+ * routine.
+ *
+ * Refers to:
+ *
+ * STAR#9000483510: RTL: SS : USB3 reset event may
+ * not be generated always when the link enters poll
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 190A))
+ dwc3_gadget_reset_interrupt(dwc);
+
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ dwc->gadget->ep0->maxpacket = 512;
+ dwc->gadget->speed = USB_SPEED_SUPER;
+
+ if (lanes > 1) {
+ dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
+ dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
+ }
+ break;
+ case DWC3_DSTS_HIGHSPEED:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ dwc->gadget->ep0->maxpacket = 64;
+ dwc->gadget->speed = USB_SPEED_HIGH;
+ break;
+ case DWC3_DSTS_FULLSPEED:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ dwc->gadget->ep0->maxpacket = 64;
+ dwc->gadget->speed = USB_SPEED_FULL;
+ break;
+ }
+
+ dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;
+
+ /* Enable USB2 LPM Capability */
+
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
+ !dwc->usb2_gadget_lpm_disable &&
+ (speed != DWC3_DSTS_SUPERSPEED) &&
+ (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg |= DWC3_DCFG_LPM_CAP;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
+
+ reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
+ (dwc->is_utmi_l1_suspend << 4));
+
+ /*
+		 * When the dwc3 revision is >= 2.40a, the LPM Erratum is
+		 * enabled, and DCFG.LPMCap is set, the core responds with an
+		 * ACK when the BESL value in the LPM token is less than or
+		 * equal to the LPM NYET threshold.
+ */
+ WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
+ "LPM Erratum not available on dwc3 revisions < 2.40a\n");
+
+ if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
+ reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
+
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+ } else {
+ if (dwc->usb2_gadget_lpm_disable) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_LPM_CAP;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+ }
+
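+	/*
+	 * Reconfigure both directions of the control endpoint so the updated
+	 * wMaxPacketSize takes effect; ACTION_MODIFY updates the existing
+	 * endpoint configuration rather than re-initializing it.
+	 */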
+ dep = dwc->eps[0];
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return;
+ }
+
+ dep = dwc->eps[1];
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return;
+ }
+
+ /*
+ * Configure PHY via GUSB3PIPECTLn if required.
+ *
+ * Update GTXFIFOSIZn
+ *
+ * In both cases reset values should be sufficient.
+ */
+}
+
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
+{
+ dwc->suspended = false;
+
+ /*
+ * TODO take core out of low power mode when that's
+ * implemented.
+ */
+
+ if (dwc->async_callbacks && dwc->gadget_driver->resume) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->resume(dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
+}
+
+static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+ unsigned int pwropt;
+
+ /*
+ * WORKAROUND: DWC3 < 2.50a have an issue when configured without
+	 * Hibernation mode enabled, which shows up when the device detects a
+	 * host-initiated U3 exit.
+	 *
+	 * In that case, the device will generate a Link State Change Interrupt
+ * from U3 to RESUME which is only necessary if Hibernation is
+ * configured in.
+ *
+	 * There are no functional changes due to such a spurious event and we
+ * just need to ignore it.
+ *
+ * Refers to:
+ *
+ * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
+ * operational mode
+ */
+ pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
+ (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
+ if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
+ (next == DWC3_LINK_STATE_RESUME)) {
+ return;
+ }
+ }
+
+ /*
+	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
+	 * on the link partner, the USB session might do multiple entries/exits
+	 * of low power states before a transfer takes place.
+ *
+ * Due to this problem, we might experience lower throughput. The
+ * suggested workaround is to disable DCTL[12:9] bits if we're
+ * transitioning from U1/U2 to U0 and enable those bits again
+ * after a transfer completes and there are no pending transfers
+ * on any of the enabled endpoints.
+ *
+ * This is the first half of that workaround.
+ *
+ * Refers to:
+ *
+ * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
+ * core send LGO_Ux entering U0
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
+ if (next == DWC3_LINK_STATE_U0) {
+ u32 u1u2;
+ u32 reg;
+
+ switch (dwc->link_state) {
+ case DWC3_LINK_STATE_U1:
+ case DWC3_LINK_STATE_U2:
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ u1u2 = reg & (DWC3_DCTL_INITU2ENA
+ | DWC3_DCTL_ACCEPTU2ENA
+ | DWC3_DCTL_INITU1ENA
+ | DWC3_DCTL_ACCEPTU1ENA);
+
+ if (!dwc->u1u2)
+ dwc->u1u2 = reg & u1u2;
+
+ reg &= ~u1u2;
+
+ dwc3_gadget_dctl_write_safe(dwc, reg);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ switch (next) {
+ case DWC3_LINK_STATE_U1:
+ if (dwc->speed == USB_SPEED_SUPER)
+ dwc3_suspend_gadget(dwc);
+ break;
+ case DWC3_LINK_STATE_U2:
+ case DWC3_LINK_STATE_U3:
+ dwc3_suspend_gadget(dwc);
+ break;
+ case DWC3_LINK_STATE_RESUME:
+ dwc3_resume_gadget(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ dwc->link_state = next;
+}
+
+static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
+ dwc->suspended = true;
+ dwc3_suspend_gadget(dwc);
+ }
+
+ dwc->link_state = next;
+}
+
+static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_devt *event)
+{
+ switch (event->type) {
+ case DWC3_DEVICE_EVENT_DISCONNECT:
+ dwc3_gadget_disconnect_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_RESET:
+ dwc3_gadget_reset_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_CONNECT_DONE:
+ dwc3_gadget_conndone_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_WAKEUP:
+ dwc3_gadget_wakeup_interrupt(dwc);
+ break;
+ case DWC3_DEVICE_EVENT_HIBER_REQ:
+ dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n");
+ break;
+ case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
+ break;
+ case DWC3_DEVICE_EVENT_SUSPEND:
+		/* This event became the Suspend event for version 2.30a and above */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
+ dwc3_gadget_suspend_interrupt(dwc, event->event_info);
+ break;
+ case DWC3_DEVICE_EVENT_SOF:
+ case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+ case DWC3_DEVICE_EVENT_CMD_CMPL:
+ case DWC3_DEVICE_EVENT_OVERFLOW:
+ break;
+ default:
+ dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
+ }
+}
+
+static void dwc3_process_event_entry(struct dwc3 *dwc,
+ const union dwc3_event *event)
+{
+ trace_dwc3_event(event->raw, dwc);
+
+ if (!event->type.is_devspec)
+ dwc3_endpoint_interrupt(dwc, &event->depevt);
+ else if (event->type.type == DWC3_EVENT_TYPE_DEV)
+ dwc3_gadget_interrupt(dwc, &event->devt);
+ else
+ dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
+}
+
+static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
+{
+ struct dwc3 *dwc = evt->dwc;
+ irqreturn_t ret = IRQ_NONE;
+ int left;
+
+ left = evt->count;
+
+ if (!(evt->flags & DWC3_EVENT_PENDING))
+ return IRQ_NONE;
+
+ while (left > 0) {
+ union dwc3_event event;
+
+ event.raw = *(u32 *) (evt->cache + evt->lpos);
+
+ dwc3_process_event_entry(dwc, &event);
+
+ /*
+		 * FIXME we wrap around correctly to the next entry as
+		 * almost all entries are 4 bytes in size. There is one
+		 * entry which is 12 bytes: a regular entry followed by
+		 * 8 bytes of data. At the moment it's not clear how such
+		 * an entry is laid out when it lands next to the buffer
+		 * boundary, so worry about that once we try to handle
+		 * it.
+ */
+ evt->lpos = (evt->lpos + 4) % evt->length;
+ left -= 4;
+ }
+
+ evt->count = 0;
+ ret = IRQ_HANDLED;
+
+ /* Unmask interrupt */
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
+
+ if (dwc->imod_interval) {
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+ dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+ }
+
+ /* Keep the clearing of DWC3_EVENT_PENDING at the end */
+ evt->flags &= ~DWC3_EVENT_PENDING;
+
+ return ret;
+}
+
+static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
+{
+ struct dwc3_event_buffer *evt = _evt;
+ struct dwc3 *dwc = evt->dwc;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ local_bh_disable();
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = dwc3_process_event_buf(evt);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ local_bh_enable();
+
+ return ret;
+}
+
+static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
+{
+ struct dwc3 *dwc = evt->dwc;
+ u32 amount;
+ u32 count;
+
+ if (pm_runtime_suspended(dwc->dev)) {
+ dwc->pending_events = true;
+ /*
+ * Trigger runtime resume. The get() function will be balanced
+		 * after processing the pending events in
+		 * dwc3_gadget_process_pending_events().
+ */
+ pm_runtime_get(dwc->dev);
+ disable_irq_nosync(dwc->irq_gadget);
+ return IRQ_HANDLED;
+ }
+
+ /*
+	 * With a PCIe legacy interrupt, tests show that the top-half irq
+	 * handler can be called again after HW interrupt deassertion. Check
+	 * that the bottom-half irq event handler has completed before caching
+	 * new events, to prevent losing events.
+ */
+ if (evt->flags & DWC3_EVENT_PENDING)
+ return IRQ_HANDLED;
+
+ count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+ count &= DWC3_GEVNTCOUNT_MASK;
+ if (!count)
+ return IRQ_NONE;
+
+ evt->count = count;
+ evt->flags |= DWC3_EVENT_PENDING;
+
+ /* Mask interrupt */
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length));
+
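+	/*
+	 * Copy the newly reported events from the hardware event buffer into
+	 * the software cache, handling wrap-around of the ring, so the
+	 * threaded handler can process them after the hardware count is
+	 * acknowledged below.
+	 */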
+ amount = min(count, evt->length - evt->lpos);
+ memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
+
+ if (amount < count)
+ memcpy(evt->cache, evt->buf, count - amount);
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t dwc3_interrupt(int irq, void *_evt)
+{
+ struct dwc3_event_buffer *evt = _evt;
+
+ return dwc3_check_event_buf(evt);
+}
+
+static int dwc3_gadget_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
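+	/*
+	 * Prefer a dedicated "peripheral" interrupt, then fall back to the
+	 * legacy "dwc_usb3" name, and finally to the first unnamed IRQ.
+	 */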
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
+static void dwc_gadget_release(struct device *dev)
+{
+ struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
+
+ kfree(gadget);
+}
+
+/**
+ * dwc3_gadget_init - initializes gadget related registers
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+int dwc3_gadget_init(struct dwc3 *dwc)
+{
+ int ret;
+ int irq;
+ struct device *dev;
+
+ irq = dwc3_gadget_get_irq(dwc);
+ if (irq < 0) {
+ ret = irq;
+ goto err0;
+ }
+
+ dwc->irq_gadget = irq;
+
+ dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+ sizeof(*dwc->ep0_trb) * 2,
+ &dwc->ep0_trb_addr, GFP_KERNEL);
+ if (!dwc->ep0_trb) {
+ dev_err(dwc->dev, "failed to allocate ep0 trb\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
+ if (!dwc->setup_buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
+ &dwc->bounce_addr, GFP_KERNEL);
+ if (!dwc->bounce) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ init_completion(&dwc->ep0_in_setup);
+ dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
+ if (!dwc->gadget) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
+ dev = &dwc->gadget->dev;
+ dev->platform_data = dwc;
+ dwc->gadget->ops = &dwc3_gadget_ops;
+ dwc->gadget->speed = USB_SPEED_UNKNOWN;
+ dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
+ dwc->gadget->sg_supported = true;
+ dwc->gadget->name = "dwc3-gadget";
+ dwc->gadget->lpm_capable = !dwc->usb2_gadget_lpm_disable;
+
+ /*
+ * FIXME We might be setting max_speed to <SUPER, however versions
+ * <2.20a of dwc3 have an issue with metastability (documented
+ * elsewhere in this driver) which tells us we can't set max speed to
+ * anything lower than SUPER.
+ *
+ * Because gadget.max_speed is only used by composite.c and function
+	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
+	 * to happen so we avoid sending a SuperSpeed Capability descriptor
+	 * together with our BOS descriptor, as that could confuse the host into
+	 * thinking we can handle super speed.
+	 *
+	 * Note that, in fact, we won't even support GetBOS requests when speed
+	 * is less than super speed because we don't have the means, yet, to
+	 * tell composite.c that we are USB 2.0 + LPM ECN.
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
+ !dwc->dis_metastability_quirk)
+ dev_info(dwc->dev, "changing max_speed on rev %08x\n",
+ dwc->revision);
+
+ dwc->gadget->max_speed = dwc->maximum_speed;
+ dwc->gadget->max_ssp_rate = dwc->max_ssp_rate;
+
+ /*
+ * REVISIT: Here we should clear all pending IRQs to be
+ * sure we're starting from a well known location.
+ */
+
+ ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
+ if (ret)
+ goto err4;
+
+ ret = usb_add_gadget(dwc->gadget);
+ if (ret) {
+ dev_err(dwc->dev, "failed to add gadget\n");
+ goto err5;
+ }
+
+ if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
+ dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
+ else
+ dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+
+ return 0;
+
+err5:
+ dwc3_gadget_free_endpoints(dwc);
+err4:
+ usb_put_gadget(dwc->gadget);
+ dwc->gadget = NULL;
+err3:
+ dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ dwc->bounce_addr);
+
+err2:
+ kfree(dwc->setup_buf);
+
+err1:
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
+
+err0:
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
+
+void dwc3_gadget_exit(struct dwc3 *dwc)
+{
+ if (!dwc->gadget)
+ return;
+
+ usb_del_gadget(dwc->gadget);
+ dwc3_gadget_free_endpoints(dwc);
+ usb_put_gadget(dwc->gadget);
+ dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ dwc->bounce_addr);
+ kfree(dwc->setup_buf);
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
+}
+
+int dwc3_gadget_suspend(struct dwc3 *dwc)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!dwc->gadget_driver)
+ return 0;
+
+ ret = dwc3_gadget_soft_disconnect(dwc);
+ if (ret)
+ goto err;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_disconnect_gadget(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+
+err:
+ /*
+ * Attempt to reset the controller's state. Likely no
+ * communication can be established until the host
+ * performs a port reset.
+ */
+ if (dwc->softconnect)
+ dwc3_gadget_soft_connect(dwc);
+
+ return ret;
+}
+
+int dwc3_gadget_resume(struct dwc3 *dwc)
+{
+ if (!dwc->gadget_driver || !dwc->softconnect)
+ return 0;
+
+ return dwc3_gadget_soft_connect(dwc);
+}
+
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+ if (dwc->pending_events) {
+ dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ pm_runtime_put(dwc->dev);
+ dwc->pending_events = false;
+ enable_irq(dwc->irq_gadget);
+ }
+}
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
new file mode 100644
index 000000000..55a56cf67
--- /dev/null
+++ b/drivers/usb/dwc3/gadget.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * gadget.h - DesignWare USB3 DRD Gadget Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#ifndef __DRIVERS_USB_DWC3_GADGET_H
+#define __DRIVERS_USB_DWC3_GADGET_H
+
+#include <linux/list.h>
+#include <linux/usb/gadget.h>
+#include "io.h"
+
+struct dwc3;
+#define to_dwc3_ep(ep) (container_of(ep, struct dwc3_ep, endpoint))
+#define gadget_to_dwc(g) (dev_get_platdata(&g->dev))
+
+/* DEPCFG parameter 1 */
+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
+#define DWC3_DEPCFG_XFER_COMPLETE_EN BIT(8)
+#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN BIT(9)
+#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
+#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
+#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
+#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
+#define DWC3_DEPCFG_BULK_BASED BIT(30)
+#define DWC3_DEPCFG_FIFO_BASED BIT(31)
+
+/* DEPCFG parameter 0 */
+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
+#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
+/* This applies for core versions earlier than 1.94a */
+#define DWC3_DEPCFG_IGN_SEQ_NUM BIT(31)
+/* These apply for core versions 1.94a and later */
+#define DWC3_DEPCFG_ACTION_INIT (0 << 30)
+#define DWC3_DEPCFG_ACTION_RESTORE BIT(30)
+#define DWC3_DEPCFG_ACTION_MODIFY (2 << 30)
+
+/* DEPXFERCFG parameter 0 */
+#define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
+
+/* U1 Device exit Latency */
+#define DWC3_DEFAULT_U1_DEV_EXIT_LAT	0x0A	/* Less than 10 microseconds */
+
+/* U2 Device exit Latency */
+#define DWC3_DEFAULT_U2_DEV_EXIT_LAT	0x1FF	/* Less than 511 microseconds */
+
+/* Frame/Microframe Number Mask */
+#define DWC3_FRNUMBER_MASK 0x3fff
+/* -------------------------------------------------------------------------- */
+
+#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
+
+/**
+ * next_request - gets the next request on the given list
+ * @list: the request list to operate on
+ *
+ * Caller should take care of locking. This function returns %NULL or the first
+ * request available on @list.
+ */
+static inline struct dwc3_request *next_request(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct dwc3_request, list);
+}
+
+/**
+ * dwc3_gadget_move_started_request - move @req to the started_list
+ * @req: the request to be moved
+ *
+ * Caller should take care of locking. This function will move @req from its
+ * current list to the endpoint's started_list.
+ */
+static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
+{
+ struct dwc3_ep *dep = req->dep;
+
+ req->status = DWC3_REQUEST_STATUS_STARTED;
+ list_move_tail(&req->list, &dep->started_list);
+}
+
+/**
+ * dwc3_gadget_move_cancelled_request - move @req to the cancelled_list
+ * @req: the request to be moved
+ * @reason: cancelled reason for the dwc3 request
+ *
+ * Caller should take care of locking. This function will move @req from its
+ * current list to the endpoint's cancelled_list.
+ */
+static inline void dwc3_gadget_move_cancelled_request(struct dwc3_request *req,
+ unsigned int reason)
+{
+ struct dwc3_ep *dep = req->dep;
+
+ req->status = reason;
+ list_move_tail(&req->list, &dep->cancelled_list);
+}
+
+void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ int status);
+
+void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event);
+void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep);
+void dwc3_ep0_stall_and_restart(struct dwc3 *dwc);
+int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags);
+int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
+
+/**
+ * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+ * @dep: dwc3 endpoint
+ *
+ * Caller should take care of locking. Reads the transfer resource index of
+ * @dep from the hardware and stores it in @dep->resource_index.
+ */
+static inline void dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
+{
+ u32 res_id;
+
+ res_id = dwc3_readl(dep->regs, DWC3_DEPCMD);
+ dep->resource_index = DWC3_DEPCMD_GET_RSC_IDX(res_id);
+}
+
+/**
+ * dwc3_gadget_dctl_write_safe - write to DCTL safe from link state change
+ * @dwc: pointer to our context structure
+ * @value: value to write to DCTL
+ *
+ * Use this function when doing read-modify-write to DCTL. It will not
+ * send a link state change request.
+ */
+static inline void dwc3_gadget_dctl_write_safe(struct dwc3 *dwc, u32 value)
+{
+ value &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+ dwc3_writel(dwc->regs, DWC3_DCTL, value);
+}
+
+#endif /* __DRIVERS_USB_DWC3_GADGET_H */
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
new file mode 100644
index 000000000..f6f13e7f1
--- /dev/null
+++ b/drivers/usb/dwc3/host.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * host.c - DesignWare USB3 DRD Controller Host Glue
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ */
+
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "core.h"
+
+static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
+ int irq, char *name)
+{
+ struct platform_device *pdev = to_platform_device(dwc->dev);
+ struct device_node *np = dev_of_node(&pdev->dev);
+
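+	/*
+	 * xhci_resources[1] is the interrupt resource handed to the xhci-hcd
+	 * platform device created in dwc3_host_init().
+	 */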
+ dwc->xhci_resources[1].start = irq;
+ dwc->xhci_resources[1].end = irq;
+ dwc->xhci_resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(irq);
+ if (!name && np)
+ dwc->xhci_resources[1].name = of_node_full_name(pdev->dev.of_node);
+ else
+ dwc->xhci_resources[1].name = name;
+}
+
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, "host");
+ goto out;
+ }
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, "dwc_usb3");
+ goto out;
+ }
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, NULL);
+ goto out;
+ }
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
+int dwc3_host_init(struct dwc3 *dwc)
+{
+ struct property_entry props[4];
+ struct platform_device *xhci;
+ int ret, irq;
+ int prop_idx = 0;
+
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
+ return irq;
+
+ xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
+ if (!xhci) {
+ dev_err(dwc->dev, "couldn't allocate xHCI device\n");
+ return -ENOMEM;
+ }
+
+ xhci->dev.parent = dwc->dev;
+
+ dwc->xhci = xhci;
+
+ ret = platform_device_add_resources(xhci, dwc->xhci_resources,
+ DWC3_XHCI_RESOURCES_NUM);
+ if (ret) {
+ dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
+ goto err;
+ }
+
+ memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+
+ if (dwc->usb3_lpm_capable)
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
+
+ if (dwc->usb2_lpm_disable)
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
+
+	/*
+	 * WORKAROUND: dwc3 revisions <=3.00a have a limitation
+	 * where the Port Disable command doesn't work.
+	 *
+	 * The suggested workaround is that we avoid Port Disable
+	 * completely.
+	 *
+	 * The following flag tells xHCI to do just that.
+	 */
+ if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A))
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
+
+ if (prop_idx) {
+ ret = device_create_managed_software_node(&xhci->dev, props, NULL);
+ if (ret) {
+ dev_err(dwc->dev, "failed to add properties to xHCI\n");
+ goto err;
+ }
+ }
+
+ ret = platform_device_add(xhci);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register xHCI device\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ platform_device_put(xhci);
+ return ret;
+}
+
+void dwc3_host_exit(struct dwc3 *dwc)
+{
+ platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
+}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
new file mode 100644
index 000000000..1e96ea339
--- /dev/null
+++ b/drivers/usb/dwc3/io.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * io.h - DesignWare USB3 DRD IO Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#ifndef __DRIVERS_USB_DWC3_IO_H
+#define __DRIVERS_USB_DWC3_IO_H
+
+#include <linux/io.h>
+#include "trace.h"
+#include "debug.h"
+#include "core.h"
+
+static inline u32 dwc3_readl(void __iomem *base, u32 offset)
+{
+ u32 value;
+
+ /*
+ * We requested the mem region starting from the Globals address
+ * space, see dwc3_probe in core.c.
+ * However, the offsets are given starting from xHCI address space.
+ */
+ value = readl(base + offset - DWC3_GLOBALS_REGS_START);
+
+ /*
+	 * When tracing we want to make it easy to find the correct address in
+	 * the documentation, so we revert back to the proper addresses, the
+	 * same way they are described in the SNPS documentation.
+ */
+ trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value);
+
+ return value;
+}
+
+static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
+{
+ /*
+ * We requested the mem region starting from the Globals address
+ * space, see dwc3_probe in core.c.
+ * However, the offsets are given starting from xHCI address space.
+ */
+ writel(value, base + offset - DWC3_GLOBALS_REGS_START);
+
+ /*
+	 * When tracing we want to make it easy to find the correct address in
+	 * the documentation, so we revert back to the proper addresses, the
+	 * same way they are described in the SNPS documentation.
+ */
+ trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value);
+}
+
+#endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.c b/drivers/usb/dwc3/trace.c
new file mode 100644
index 000000000..088995885
--- /dev/null
+++ b/drivers/usb/dwc3/trace.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace.c - DesignWare USB3 DRD Controller Trace Support
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
new file mode 100644
index 000000000..1975aec8d
--- /dev/null
+++ b/drivers/usb/dwc3/trace.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * trace.h - DesignWare USB3 DRD Controller Trace Support
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dwc3
+
+#if !defined(__DWC3_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __DWC3_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include "core.h"
+#include "debug.h"
+
+DECLARE_EVENT_CLASS(dwc3_log_io,
+ TP_PROTO(void *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value),
+ TP_STRUCT__entry(
+ __field(void *, base)
+ __field(u32, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->base = base;
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("addr %p offset %04x value %08x",
+ __entry->base + __entry->offset,
+ __entry->offset,
+ __entry->value)
+);
+
+DEFINE_EVENT(dwc3_log_io, dwc3_readl,
+ TP_PROTO(void __iomem *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
+);
+
+DEFINE_EVENT(dwc3_log_io, dwc3_writel,
+ TP_PROTO(void __iomem *base, u32 offset, u32 value),
+ TP_ARGS(base, offset, value)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_event,
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc),
+ TP_STRUCT__entry(
+ __field(u32, event)
+ __field(u32, ep0state)
+ __dynamic_array(char, str, DWC3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->event = event;
+ __entry->ep0state = dwc->ep0state;
+ ),
+ TP_printk("event (%08x): %s", __entry->event,
+ dwc3_decode_event(__get_str(str), DWC3_MSG_MAX,
+ __entry->event, __entry->ep0state))
+);
+
+DEFINE_EVENT(dwc3_log_event, dwc3_event,
+ TP_PROTO(u32 event, struct dwc3 *dwc),
+ TP_ARGS(event, dwc)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_ctrl,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl),
+ TP_STRUCT__entry(
+ __field(__u8, bRequestType)
+ __field(__u8, bRequest)
+ __field(__u16, wValue)
+ __field(__u16, wIndex)
+ __field(__u16, wLength)
+ __dynamic_array(char, str, DWC3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->bRequestType = ctrl->bRequestType;
+ __entry->bRequest = ctrl->bRequest;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
+ ),
+ TP_printk("%s", usb_decode_ctrl(__get_str(str), DWC3_MSG_MAX,
+ __entry->bRequestType,
+ __entry->bRequest, __entry->wValue,
+ __entry->wIndex, __entry->wLength)
+ )
+);
+
+DEFINE_EVENT(dwc3_log_ctrl, dwc3_ctrl_req,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_request,
+ TP_PROTO(struct dwc3_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->dep->name)
+ __field(struct dwc3_request *, req)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ __field(int, zero)
+ __field(int, short_not_ok)
+ __field(int, no_interrupt)
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->dep->name);
+ __entry->req = req;
+ __entry->actual = req->request.actual;
+ __entry->length = req->request.length;
+ __entry->status = req->request.status;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ ),
+ TP_printk("%s: req %p length %u/%u %s%s%s ==> %d",
+ __get_str(name), __entry->req, __entry->actual, __entry->length,
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "i" : "I",
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(dwc3_log_request, dwc3_alloc_request,
+ TP_PROTO(struct dwc3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(dwc3_log_request, dwc3_free_request,
+ TP_PROTO(struct dwc3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(dwc3_log_request, dwc3_ep_queue,
+ TP_PROTO(struct dwc3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(dwc3_log_request, dwc3_ep_dequeue,
+ TP_PROTO(struct dwc3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(dwc3_log_request, dwc3_gadget_giveback,
+ TP_PROTO(struct dwc3_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
+ TP_PROTO(unsigned int cmd, u32 param, int status),
+ TP_ARGS(cmd, param, status),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(u32, param)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->param = param;
+ __entry->status = status;
+ ),
+ TP_printk("cmd '%s' [%x] param %08x --> status: %s",
+ dwc3_gadget_generic_cmd_string(__entry->cmd),
+ __entry->cmd, __entry->param,
+ dwc3_gadget_generic_cmd_status_string(__entry->status)
+ )
+);
+
+DEFINE_EVENT(dwc3_log_generic_cmd, dwc3_gadget_generic_cmd,
+ TP_PROTO(unsigned int cmd, u32 param, int status),
+ TP_ARGS(cmd, param, status)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
+ TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
+ struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+ TP_ARGS(dep, cmd, params, cmd_status),
+ TP_STRUCT__entry(
+ __string(name, dep->name)
+ __field(unsigned int, cmd)
+ __field(u32, param0)
+ __field(u32, param1)
+ __field(u32, param2)
+ __field(int, cmd_status)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dep->name);
+ __entry->cmd = cmd;
+ __entry->param0 = params->param0;
+ __entry->param1 = params->param1;
+ __entry->param2 = params->param2;
+ __entry->cmd_status = cmd_status;
+ ),
+ TP_printk("%s: cmd '%s' [%x] params %08x %08x %08x --> status: %s",
+ __get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
+ __entry->cmd, __entry->param0,
+ __entry->param1, __entry->param2,
+ dwc3_ep_cmd_status_string(__entry->cmd_status)
+ )
+);
+
+DEFINE_EVENT(dwc3_log_gadget_ep_cmd, dwc3_gadget_ep_cmd,
+ TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
+ struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+ TP_ARGS(dep, cmd, params, cmd_status)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_trb,
+ TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb),
+ TP_ARGS(dep, trb),
+ TP_STRUCT__entry(
+ __string(name, dep->name)
+ __field(struct dwc3_trb *, trb)
+ __field(u32, bpl)
+ __field(u32, bph)
+ __field(u32, size)
+ __field(u32, ctrl)
+ __field(u32, type)
+ __field(u32, enqueue)
+ __field(u32, dequeue)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dep->name);
+ __entry->trb = trb;
+ __entry->bpl = trb->bpl;
+ __entry->bph = trb->bph;
+ __entry->size = trb->size;
+ __entry->ctrl = trb->ctrl;
+ __entry->type = usb_endpoint_type(dep->endpoint.desc);
+ __entry->enqueue = dep->trb_enqueue;
+ __entry->dequeue = dep->trb_dequeue;
+ ),
+ TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x sofn %08x (%c%c%c%c:%c%c:%s)",
+ __get_str(name), __entry->trb, __entry->enqueue,
+ __entry->dequeue, __entry->bph, __entry->bpl,
+ ({char *s;
+ int pcm = ((__entry->size >> 24) & 3) + 1;
+
+ switch (__entry->type) {
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_ISOC:
+ switch (pcm) {
+ case 1:
+ s = "1x ";
+ break;
+ case 2:
+ s = "2x ";
+ break;
+ case 3:
+ default:
+ s = "3x ";
+ break;
+ }
+ break;
+ default:
+ s = "";
+ } s; }),
+ DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
+ DWC3_TRB_CTRL_GET_SID_SOFN(__entry->ctrl),
+ __entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
+ __entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
+ __entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
+ __entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
+ __entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
+ __entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
+ dwc3_trb_type_string(DWC3_TRBCTL_TYPE(__entry->ctrl))
+ )
+);
+
+DEFINE_EVENT(dwc3_log_trb, dwc3_prepare_trb,
+ TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb),
+ TP_ARGS(dep, trb)
+);
+
+DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb,
+ TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb),
+ TP_ARGS(dep, trb)
+);
+
+DECLARE_EVENT_CLASS(dwc3_log_ep,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep),
+ TP_STRUCT__entry(
+ __string(name, dep->name)
+ __field(unsigned int, maxpacket)
+ __field(unsigned int, maxpacket_limit)
+ __field(unsigned int, max_streams)
+ __field(unsigned int, maxburst)
+ __field(unsigned int, flags)
+ __field(unsigned int, direction)
+ __field(u8, trb_enqueue)
+ __field(u8, trb_dequeue)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dep->name);
+ __entry->maxpacket = dep->endpoint.maxpacket;
+ __entry->maxpacket_limit = dep->endpoint.maxpacket_limit;
+ __entry->max_streams = dep->endpoint.max_streams;
+ __entry->maxburst = dep->endpoint.maxburst;
+ __entry->flags = dep->flags;
+ __entry->direction = dep->direction;
+ __entry->trb_enqueue = dep->trb_enqueue;
+ __entry->trb_dequeue = dep->trb_dequeue;
+ ),
+ TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c:%c",
+ __get_str(name), __entry->maxpacket,
+ __entry->maxpacket_limit, __entry->max_streams,
+ __entry->maxburst, __entry->trb_enqueue,
+ __entry->trb_dequeue,
+ __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
+ __entry->flags & DWC3_EP_STALL ? 'S' : 's',
+ __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
+ __entry->flags & DWC3_EP_TRANSFER_STARTED ? 'B' : 'b',
+ __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
+ __entry->direction ? '<' : '>'
+ )
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable,
+ TP_PROTO(struct dwc3_ep *dep),
+ TP_ARGS(dep)
+);
+
+#endif /* __DWC3_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
new file mode 100644
index 000000000..f23f4c9a5
--- /dev/null
+++ b/drivers/usb/dwc3/ulpi.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ulpi.c - DesignWare USB3 Controller's ULPI PHY interface
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/time64.h>
+#include <linux/ulpi/regs.h>
+
+#include "core.h"
+#include "io.h"
+
+#define DWC3_ULPI_ADDR(a) \
+ ((a >= ULPI_EXT_VENDOR_SPECIFIC) ? \
+ DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
+ DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
+
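+/* One ULPI clock cycle at 60 MHz, expressed in nanoseconds */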
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
+{
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
+ u32 reg;
+
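+	/*
+	 * The delays below assume a basic ULPI register access takes about
+	 * five 60 MHz clock cycles, with one extra cycle for extended
+	 * addresses and one more for reads.
+	 */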
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
+ while (count--) {
+ ndelay(ns);
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
+ if (reg & DWC3_GUSB2PHYACC_DONE)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dwc3_ulpi_read(struct device *dev, u8 addr)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ u32 reg;
+ int ret;
+
+ reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
+
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
+ if (ret)
+ return ret;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
+
+ return DWC3_GUSB2PHYACC_DATA(reg);
+}
+
+static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ u32 reg;
+
+ reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
+ reg |= DWC3_GUSB2PHYACC_WRITE | val;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
+
+ return dwc3_ulpi_busyloop(dwc, addr, false);
+}
+
+static const struct ulpi_ops dwc3_ulpi_ops = {
+ .read = dwc3_ulpi_read,
+ .write = dwc3_ulpi_write,
+};
+
+int dwc3_ulpi_init(struct dwc3 *dwc)
+{
+ /* Register the interface */
+ dwc->ulpi = ulpi_register_interface(dwc->dev, &dwc3_ulpi_ops);
+ if (IS_ERR(dwc->ulpi)) {
+		dev_err(dwc->dev, "failed to register ULPI interface\n");
+ return PTR_ERR(dwc->ulpi);
+ }
+
+ return 0;
+}
+
+void dwc3_ulpi_exit(struct dwc3 *dwc)
+{
+ if (dwc->ulpi) {
+ ulpi_unregister_interface(dwc->ulpi);
+ dwc->ulpi = NULL;
+ }
+}