Diffstat (limited to 'drivers/usb/gadget/udc')
-rw-r--r--  drivers/usb/gadget/udc/Kconfig  510
-rw-r--r--  drivers/usb/gadget/udc/Makefile  45
-rw-r--r--  drivers/usb/gadget/udc/amd5536udc.h  661
-rw-r--r--  drivers/usb/gadget/udc/amd5536udc_pci.c  216
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/Kconfig  8
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/Makefile  4
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/core.c  445
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/dev.c  610
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/ep0.c  522
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/epn.c  847
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/hub.c  1082
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/vhub.h  565
-rw-r--r--  drivers/usb/gadget/udc/aspeed_udc.c  1597
-rw-r--r--  drivers/usb/gadget/udc/at91_udc.c  2021
-rw-r--r--  drivers/usb/gadget/udc/at91_udc.h  177
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c  2465
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.h  381
-rw-r--r--  drivers/usb/gadget/udc/bcm63xx_udc.c  2387
-rw-r--r--  drivers/usb/gadget/udc/bdc/Kconfig  13
-rw-r--r--  drivers/usb/gadget/udc/bdc/Makefile  7
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc.h  489
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_cmd.c  367
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_cmd.h  23
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_core.c  657
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_dbg.c  118
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_dbg.h  32
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.c  2035
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.h  17
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_udc.c  590
-rw-r--r--  drivers/usb/gadget/udc/core.c  1877
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c  2909
-rw-r--r--  drivers/usb/gadget/udc/fotg210-udc.c  1239
-rw-r--r--  drivers/usb/gadget/udc/fotg210.h  249
-rw-r--r--  drivers/usb/gadget/udc/fsl_qe_udc.c  2726
-rw-r--r--  drivers/usb/gadget/udc/fsl_qe_udc.h  417
-rw-r--r--  drivers/usb/gadget/udc/fsl_udc_core.c  2688
-rw-r--r--  drivers/usb/gadget/udc/fsl_usb2_udc.h  591
-rw-r--r--  drivers/usb/gadget/udc/fusb300_udc.c  1517
-rw-r--r--  drivers/usb/gadget/udc/fusb300_udc.h  675
-rw-r--r--  drivers/usb/gadget/udc/goku_udc.c  1860
-rw-r--r--  drivers/usb/gadget/udc/goku_udc.h  289
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.c  2258
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.h  220
-rw-r--r--  drivers/usb/gadget/udc/lpc32xx_udc.c  3273
-rw-r--r--  drivers/usb/gadget/udc/m66592-udc.c  1697
-rw-r--r--  drivers/usb/gadget/udc/m66592-udc.h  603
-rw-r--r--  drivers/usb/gadget/udc/max3420_udc.c  1332
-rw-r--r--  drivers/usb/gadget/udc/mv_u3d.h  317
-rw-r--r--  drivers/usb/gadget/udc/mv_u3d_core.c  2064
-rw-r--r--  drivers/usb/gadget/udc/mv_udc.h  309
-rw-r--r--  drivers/usb/gadget/udc/mv_udc_core.c  2424
-rw-r--r--  drivers/usb/gadget/udc/net2272.c  2725
-rw-r--r--  drivers/usb/gadget/udc/net2272.h  584
-rw-r--r--  drivers/usb/gadget/udc/net2280.c  3884
-rw-r--r--  drivers/usb/gadget/udc/net2280.h  390
-rw-r--r--  drivers/usb/gadget/udc/omap_udc.c  3018
-rw-r--r--  drivers/usb/gadget/udc/omap_udc.h  207
-rw-r--r--  drivers/usb/gadget/udc/pch_udc.c  3163
-rw-r--r--  drivers/usb/gadget/udc/pxa25x_udc.c  2550
-rw-r--r--  drivers/usb/gadget/udc/pxa25x_udc.h  243
-rw-r--r--  drivers/usb/gadget/udc/pxa27x_udc.c  2562
-rw-r--r--  drivers/usb/gadget/udc/pxa27x_udc.h  498
-rw-r--r--  drivers/usb/gadget/udc/r8a66597-udc.c  1980
-rw-r--r--  drivers/usb/gadget/udc/r8a66597-udc.h  287
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c  2999
-rw-r--r--  drivers/usb/gadget/udc/s3c-hsudc.c  1319
-rw-r--r--  drivers/usb/gadget/udc/s3c2410_udc.c  1980
-rw-r--r--  drivers/usb/gadget/udc/s3c2410_udc.h  99
-rw-r--r--  drivers/usb/gadget/udc/s3c2410_udc_regs.h  146
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_core.c  3192
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_plat.c  331
-rw-r--r--  drivers/usb/gadget/udc/tegra-xudc.c  4048
-rw-r--r--  drivers/usb/gadget/udc/trace.c  10
-rw-r--r--  drivers/usb/gadget/udc/trace.h  289
-rw-r--r--  drivers/usb/gadget/udc/udc-xilinx.c  2271
75 files changed, 89200 insertions, 0 deletions
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
new file mode 100644
index 000000000..5756acb07
--- /dev/null
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -0,0 +1,510 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# USB Gadget support on a system involves
+# (a) a peripheral controller, and
+# (b) the gadget driver using it.
+#
+# NOTE: Gadget support ** DOES NOT ** depend on host-side CONFIG_USB !!
+#
+# - Host systems (like PCs) need CONFIG_USB (with "A" jacks).
+# - Peripherals (like PDAs) need CONFIG_USB_GADGET (with "B" jacks).
+# - Some systems have both kinds of controllers.
+#
+# With help from a special transceiver and a "Mini-AB" jack, systems with
+# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
+#
+
+#
+# USB Peripheral Controller Support
+#
+# The order here is alphabetical, except that integrated controllers go
+# before discrete ones so they will be the initial/default value:
+# - integrated/SOC controllers first
+# - licensed IP used in both SOC and discrete versions
+# - discrete ones (including all PCI-only controllers)
+# - debug/dummy gadget+hcd is last.
+#
+menu "USB Peripheral Controller"
+
+#
+# Integrated controllers
+#
+
+config USB_AT91
+ tristate "Atmel AT91 USB Device Port"
+ depends on ARCH_AT91
+ depends on OF || COMPILE_TEST
+ help
+ Many Atmel AT91 processors (such as the AT91RM9200) have a
+ full speed USB Device Port with support for five configurable
+ endpoints (plus endpoint zero).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "at91_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_LPC32XX
+ tristate "LPC32XX USB Peripheral Controller"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on I2C
+ select USB_ISP1301
+ help
+ This option selects the USB device controller in the LPC32xx SoC.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "lpc32xx_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_ATMEL_USBA
+ tristate "Atmel USBA"
+ depends on ARCH_AT91
+ help
+ USBA is the integrated high-speed USB Device controller on some
+ AT91SAM9 and AT91CAP9 processors from Atmel.
+
+ The fifo_mode parameter is used to select endpoint allocation mode.
+ fifo_mode = 0 is used to let the driver autoconfigure the endpoints.
+ In this case, 2 banks are allocated for ep1 if it operates in isochronous
+ mode and only 1 bank otherwise. For the rest of the endpoints
+ only 1 bank is allocated.
+
+ fifo_mode = 1 is a generic maximum fifo size (1024 bytes) configuration
+ allowing the usage of ep1 - ep6
+
+ fifo_mode = 2 is a generic performance maximum fifo size (1024 bytes)
+ configuration allowing the usage of ep1 - ep3
+
+ fifo_mode = 3 is a balanced performance configuration allowing
+ the usage of ep1 - ep8
+
+config USB_BCM63XX_UDC
+ tristate "Broadcom BCM63xx Peripheral Controller"
+ depends on BCM63XX
+ help
+ Many Broadcom BCM63xx chipsets (such as the BCM6328) have a
+ high speed USB Device Port with support for four fixed endpoints
+ (plus endpoint zero).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "bcm63xx_udc".
+
+config USB_FSL_USB2
+ tristate "Freescale Highspeed USB DR Peripheral Controller"
+ depends on FSL_SOC
+ help
+ Some Freescale PowerPC and i.MX processors have a High Speed
+ Dual-Role (DR) USB controller, which supports device mode.
+
+ The number of programmable endpoints differs across SoC
+ revisions.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "fsl_usb2_udc" and force
+ all gadget drivers to also be dynamically linked.
+
+config USB_FUSB300
+ tristate "Faraday FUSB300 USB Peripheral Controller"
+ depends on !PHYS_ADDR_T_64BIT && HAS_DMA
+ help
+ Driver for the Faraday FUSB300 USB device controller.
+
+config USB_FOTG210_UDC
+ depends on HAS_DMA
+ tristate "Faraday FOTG210 USB Peripheral Controller"
+ help
+ Faraday USB2.0 OTG controller which can be configured as a
+ high speed or full speed USB device. This driver supports
+ Bulk Transfer so far.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "fotg210_udc".
+
+config USB_GR_UDC
+ tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
+ depends on HAS_DMA
+ help
+ Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
+ VHDL IP core library.
+
+config USB_OMAP
+ tristate "OMAP USB Device Controller"
+ depends on ARCH_OMAP1
+ depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
+ help
+ Many Texas Instruments OMAP processors have flexible full
+ speed USB device controllers, with support for up to 30
+ endpoints (plus endpoint zero). This driver supports the
+ controller in the OMAP 1611, and should work with controllers
+ in other OMAP processors too, given minor tweaks.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "omap_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_PXA25X
+ tristate "PXA 25x or IXP 4xx"
+ depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
+ depends on HAS_IOMEM
+ help
+ Intel's PXA 25x series XScale ARM-5TE processors include
+ an integrated full speed USB 1.1 device controller. The
+ controller in the IXP 4xx series is register-compatible.
+
+ It has fifteen fixed-function endpoints, as well as endpoint
+ zero (for control transfers).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "pxa25x_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+# if there's only one gadget driver, using only two bulk endpoints,
+# don't waste memory for the other endpoints
+config USB_PXA25X_SMALL
+ depends on USB_PXA25X
+ bool
+ default n if USB_ETH_RNDIS
+ default y if USB_ZERO
+ default y if USB_ETH
+ default y if USB_G_SERIAL
+
+config USB_R8A66597
+ tristate "Renesas R8A66597 USB Peripheral Controller"
+ depends on HAS_DMA
+ help
+ R8A66597 is a discrete USB host and peripheral controller chip that
+ supports both full and high speed USB 2.0 data transfers.
+ It has nine configurable endpoints, and endpoint zero.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "r8a66597_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_RENESAS_USBHS_UDC
+ tristate 'Renesas USBHS controller'
+ depends on USB_RENESAS_USBHS
+ help
+ Renesas USBHS is a discrete USB host and peripheral controller chip
+ that supports both full and high speed USB 2.0 data transfers.
+ It has nine or more configurable endpoints, and endpoint zero.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "renesas_usbhs" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_RENESAS_USB3
+ tristate 'Renesas USB3.0 Peripheral controller'
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on EXTCON
+ select USB_ROLE_SWITCH
+ help
+ Renesas USB3.0 Peripheral controller is a USB peripheral controller
+ that supports super, high, and full speed USB 3.0 data transfers.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "renesas_usb3" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_PXA27X
+ tristate "PXA 27x"
+ depends on HAS_IOMEM
+ help
+ Intel's PXA 27x series XScale ARM v5TE processors include
+ an integrated full speed USB 1.1 device controller.
+
+ It has up to 23 endpoints, as well as endpoint zero (for
+ control transfers).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "pxa27x_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_S3C2410
+ tristate "S3C2410 USB Device Controller"
+ depends on ARCH_S3C24XX
+ help
+ Samsung's S3C2410 is an ARMv4 processor with an integrated
+ full speed USB 1.1 device controller. It has 4 configurable
+ endpoints, as well as endpoint zero (for control transfers).
+
+ This driver has been tested on the S3C2410, S3C2412, and
+ S3C2440 processors.
+
+config USB_S3C2410_DEBUG
+ bool "S3C2410 udc debug messages"
+ depends on USB_S3C2410
+
+config USB_S3C_HSUDC
+ tristate "S3C2416, S3C2443 and S3C2450 USB Device Controller"
+ depends on ARCH_S3C24XX
+ help
+ Samsung's S3C2416, S3C2443 and S3C2450 are ARM9-based SoCs
+ with an integrated dual speed USB 2.0 device controller. It has
+ 8 endpoints, as well as endpoint zero.
+
+ This driver has been tested on S3C2416 and S3C2450 processors.
+
+config USB_MV_UDC
+ tristate "Marvell USB2.0 Device Controller"
+ depends on HAS_DMA
+ help
+ Marvell SoCs (including PXA and MMP series) include a high speed
+ USB2.0 OTG controller, which can be configured as high speed or
+ full speed USB peripheral.
+
+config USB_MV_U3D
+ depends on HAS_DMA
+ tristate "MARVELL PXA2128 USB 3.0 controller"
+ help
+ Marvell's PXA2128 processor series includes a super speed USB3.0
+ device controller, which supports super speed USB peripheral mode.
+
+config USB_SNP_CORE
+ depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
+ depends on HAS_DMA
+ tristate
+ help
+ This enables core driver support for Synopsys USB 2.0 Device
+ controller.
+
+ This will be enabled when PCI or Platform driver for this UDC is
+ selected. Currently, this will be enabled by USB_SNP_UDC_PLAT or
+ USB_AMD5536UDC options.
+
+ This IP is different from the High Speed OTG IP that can be enabled
+ by selecting USB_DWC2 or USB_DWC3 options.
+
+config USB_SNP_UDC_PLAT
+ tristate "Synopsys USB 2.0 Device controller"
+ depends on USB_GADGET && OF && HAS_DMA
+ depends on EXTCON || EXTCON=n
+ select USB_SNP_CORE
+ default ARCH_BCM_IPROC
+ help
+ This adds Platform Device support for Synopsys Designware core
+ AHB subsystem USB2.0 Device Controller (UDC).
+
+ This driver works with UDCs integrated into Broadcom's Northstar2
+ and Cygnus SoCs.
+
+ If unsure, say N.
+#
+# Controllers available in both integrated and discrete versions
+#
+
+config USB_M66592
+ tristate "Renesas M66592 USB Peripheral Controller"
+ depends on HAS_IOMEM
+ help
+ M66592 is a discrete USB peripheral controller chip that
+ supports both full and high speed USB 2.0 data transfers.
+ It has seven configurable endpoints, and endpoint zero.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "m66592_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+source "drivers/usb/gadget/udc/bdc/Kconfig"
+
+#
+# Controllers available only in discrete form (and all PCI controllers)
+#
+
+config USB_AMD5536UDC
+ tristate "AMD5536 UDC"
+ depends on USB_PCI && HAS_DMA
+ select USB_SNP_CORE
+ help
+ The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
+ It is a high speed, DMA capable USB device controller. Besides ep0
+ it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+ The UDC port supports OTG operation, and may be used as a host port
+ if it's not being used to implement peripheral or OTG roles.
+
+ This UDC is based on Synopsys USB device controller IP and selects
+ CONFIG_USB_SNP_CORE option to build the core driver.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "amd5536udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_FSL_QE
+ tristate "Freescale QE/CPM USB Device Controller"
+ depends on FSL_SOC && (QUICC_ENGINE || CPM)
+ depends on !64BIT || BROKEN
+ help
+ Some of Freescale PowerPC processors have a Full Speed
+ QE/CPM2 USB controller, which supports device mode with 4
+ programmable endpoints. This driver supports the
+ controller in the MPC8360 and MPC8272, and should work with
+ controllers having QE or CPM2, given minor tweaks.
+
+ Set CONFIG_USB_GADGET to "m" to build this driver as a
+ dynamically linked module called "fsl_qe_udc".
+
+config USB_NET2272
+ depends on HAS_IOMEM
+ tristate "PLX NET2272"
+ help
+ PLX NET2272 is a USB peripheral controller which supports
+ both full and high speed USB 2.0 data transfers.
+
+ It has three configurable endpoints, as well as endpoint zero
+ (for control transfers).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "net2272" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_NET2272_DMA
+ bool "Support external DMA controller"
+ depends on USB_NET2272 && HAS_DMA
+ help
+ The NET2272 part can optionally support an external DMA
+ controller, but your board has to have support in the
+ driver itself.
+
+ If unsure, say "N" here. The driver works fine in PIO mode.
+
+config USB_NET2280
+ tristate "NetChip NET228x / PLX USB3x8x"
+ depends on USB_PCI
+ help
+ NetChip 2280 / 2282 is a PCI based USB peripheral controller which
+ supports both full and high speed USB 2.0 data transfers.
+
+ It has six configurable endpoints, as well as endpoint zero
+ (for control transfers) and several endpoints with dedicated
+ functions.
+
+ PLX 2380 is a PCIe version of the NET2280.
+
+ PLX 3380 / 3382 is a PCIe based USB peripheral controller which
+ supports full, high speed USB 2.0 and super speed USB 3.0
+ data transfers.
+
+ It has eight configurable endpoints, as well as endpoint zero
+ (for control transfers) and several endpoints with dedicated
+ functions.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "net2280" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_GOKU
+ tristate "Toshiba TC86C001 'Goku-S'"
+ depends on USB_PCI
+ help
+ The Toshiba TC86C001 is a PCI device which includes controllers
+ for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
+
+ The device controller has three configurable (bulk or interrupt)
+ endpoints, plus endpoint zero (for control transfers).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "goku_udc" and to force all
+ gadget drivers to also be dynamically linked.
+
+config USB_EG20T
+ tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
+ depends on USB_PCI
+ help
+ This is a USB device driver for the EG20T PCH.
+ The EG20T PCH is the platform controller hub used in Intel's
+ general embedded platforms. The EG20T PCH has a USB device
+ interface, through which the host system can access devices
+ connected over USB.
+ This driver enables the USB device function: a USB peripheral
+ controller which supports both full and high speed USB 2.0
+ data transfers.
+ This driver supports both control transfer and bulk transfer modes.
+ This driver does not support interrupt transfer or isochronous
+ transfer modes.
+
+ This driver can also be used for LAPIS Semiconductor's ML7213,
+ which is for IVI (In-Vehicle Infotainment) use, and the ML7831,
+ which is for general purpose use.
+ ML7213/ML7831 are companion chips for the Intel Atom E6xx series
+ and are fully compatible with the Intel EG20T PCH.
+
+ This driver can also be used with Intel's Quark X1000 SoC platform.
+
+config USB_GADGET_XILINX
+ tristate "Xilinx USB Driver"
+ depends on HAS_DMA
+ depends on OF || COMPILE_TEST
+ help
+ USB peripheral controller driver for the Xilinx USB2 device.
+ The Xilinx USB2 device is a soft IP which supports both full
+ and high speed USB 2.0 data transfers. It has seven configurable
+ endpoints (bulk, interrupt or isochronous), as well as
+ endpoint zero (for control transfers).
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "udc-xilinx" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_MAX3420_UDC
+ tristate "MAX3420 (USB-over-SPI) support"
+ depends on SPI
+ help
+ The Maxim MAX3420 chip supports USB2.0 full-speed peripheral mode.
+ The MAX3420 is driven over an SPI interface, hence the dependency.
+
+ To compile this driver as a module, choose M here: the module will
+ be called max3420_udc.
+
+config USB_TEGRA_XUDC
+ tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on PHY_TEGRA_XUSB
+ help
+ Enables NVIDIA Tegra USB 3.0 device mode controller driver.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "tegra_xudc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_ASPEED_UDC
+ tristate "Aspeed UDC driver support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on USB_LIBCOMPOSITE
+ help
+ Enables Aspeed USB2.0 Device Controller driver for AST260x
+ family SoCs. The controller supports 1 control endpoint and
+ 4 programmable endpoints.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "aspeed_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
+
+#
+# LAST -- dummy/emulated controller
+#
+
+config USB_DUMMY_HCD
+ tristate "Dummy HCD (DEVELOPMENT)"
+ depends on USB=y || (USB=m && USB_GADGET=m)
+ help
+ This host controller driver emulates USB, looping all data transfer
+ requests back to a USB "gadget driver" in the same host. The host
+ side is the controller; the gadget side is the device. Gadget drivers
+ can be high, full, or low speed; and they have access to endpoints
+ like those from NET2280, PXA2xx, or SA1100 hardware.
+
+ This may help in some stages of creating a driver to embed in a
+ Linux device, since it lets you debug several parts of the gadget
+ driver without its hardware or drivers being involved.
+
+ Since such a gadget side driver needs to interoperate with a host
+ side Linux-USB device driver, this may help to debug both sides
+ of a USB protocol stack.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "dummy_hcd" and force all
+ gadget drivers to also be dynamically linked.
+
+# NOTE: Please keep dummy_hcd LAST so that "real hardware" appears
+# first and will be selected by default.
+
+endmenu
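[Editor's note on the USB_ATMEL_USBA help text above: the fifo_mode = 0 rule can be restated in code. The sketch below is purely illustrative -- the function and parameter names are hypothetical, not taken from atmel_usba_udc.c -- and only encodes the allocation rule the help text describes: ep1 is double-banked when it runs isochronous transfers, every other endpoint gets a single bank.]

/*
 * Illustrative restatement of the fifo_mode = 0 rule from the
 * USB_ATMEL_USBA help text. Hypothetical names; not driver code.
 */
static unsigned int usba_auto_nr_banks(unsigned int ep_index, bool is_isoc)
{
	/* ep1 is double-buffered when used isochronously */
	if (ep_index == 1 && is_isoc)
		return 2;
	/* all other endpoints get a single bank */
	return 1;
}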
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
new file mode 100644
index 000000000..12f9e4c9e
--- /dev/null
+++ b/drivers/usb/gadget/udc/Makefile
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+
+udc-core-y := core.o trace.o
+
+#
+# USB peripheral controller drivers
+#
+obj-$(CONFIG_USB_GADGET) += udc-core.o
+obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
+obj-$(CONFIG_USB_NET2272) += net2272.o
+obj-$(CONFIG_USB_NET2280) += net2280.o
+obj-$(CONFIG_USB_SNP_CORE) += snps_udc_core.o
+obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc_pci.o
+obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
+obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
+obj-$(CONFIG_USB_GOKU) += goku_udc.o
+obj-$(CONFIG_USB_OMAP) += omap_udc.o
+obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
+obj-$(CONFIG_USB_AT91) += at91_udc.o
+obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
+obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
+obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
+fsl_usb2_udc-y := fsl_udc_core.o
+obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
+obj-$(CONFIG_USB_M66592) += m66592-udc.o
+obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
+obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
+obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
+obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
+obj-$(CONFIG_USB_LPC32XX) += lpc32xx_udc.o
+obj-$(CONFIG_USB_EG20T) += pch_udc.o
+obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
+mv_udc-y := mv_udc_core.o
+obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
+obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o
+obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
+obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
+obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
+obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o
+obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub/
+obj-$(CONFIG_USB_ASPEED_UDC) += aspeed_udc.o
+obj-$(CONFIG_USB_BDC_UDC) += bdc/
+obj-$(CONFIG_USB_MAX3420_UDC) += max3420_udc.o
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
new file mode 100644
index 000000000..055436016
--- /dev/null
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * amd5536udc.h -- header for AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2007 AMD (https://www.amd.com)
+ * Author: Thomas Dahlmann
+ */
+
+#ifndef AMD5536UDC_H
+#define AMD5536UDC_H
+
+/* debug control */
+/* #define UDC_VERBOSE */
+
+#include <linux/extcon.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* various constants */
+#define UDC_RDE_TIMER_SECONDS 1
+#define UDC_RDE_TIMER_DIV 10
+#define UDC_POLLSTALL_TIMER_USECONDS 500
+
+/* Hs AMD5536 chip rev. */
+#define UDC_HSA0_REV 1
+#define UDC_HSB1_REV 2
+
+/* Broadcom chip rev. */
+#define UDC_BCM_REV 10
+
+/*
+ * SETUP usb commands
+ * These are needed because some SETUP requests are handled in hw
+ * but must still be passed to the gadget driver above.
+ * SET_CONFIG
+ */
+#define UDC_SETCONFIG_DWORD0 0x00000900
+#define UDC_SETCONFIG_DWORD0_VALUE_MASK 0xffff0000
+#define UDC_SETCONFIG_DWORD0_VALUE_OFS 16
+
+#define UDC_SETCONFIG_DWORD1 0x00000000
+
+/* SET_INTERFACE */
+#define UDC_SETINTF_DWORD0 0x00000b00
+#define UDC_SETINTF_DWORD0_ALT_MASK 0xffff0000
+#define UDC_SETINTF_DWORD0_ALT_OFS 16
+
+#define UDC_SETINTF_DWORD1 0x00000000
+#define UDC_SETINTF_DWORD1_INTF_MASK 0x0000ffff
+#define UDC_SETINTF_DWORD1_INTF_OFS 0
+
+/* Mass storage reset */
+#define UDC_MSCRES_DWORD0 0x0000ff21
+#define UDC_MSCRES_DWORD1 0x00000000
+
+/* Global CSR's -------------------------------------------------------------*/
+#define UDC_CSR_ADDR 0x500
+
+/* EP NE bits */
+/* EP number */
+#define UDC_CSR_NE_NUM_MASK 0x0000000f
+#define UDC_CSR_NE_NUM_OFS 0
+/* EP direction */
+#define UDC_CSR_NE_DIR_MASK 0x00000010
+#define UDC_CSR_NE_DIR_OFS 4
+/* EP type */
+#define UDC_CSR_NE_TYPE_MASK 0x00000060
+#define UDC_CSR_NE_TYPE_OFS 5
+/* EP config number */
+#define UDC_CSR_NE_CFG_MASK 0x00000780
+#define UDC_CSR_NE_CFG_OFS 7
+/* EP interface number */
+#define UDC_CSR_NE_INTF_MASK 0x00007800
+#define UDC_CSR_NE_INTF_OFS 11
+/* EP alt setting */
+#define UDC_CSR_NE_ALT_MASK 0x00078000
+#define UDC_CSR_NE_ALT_OFS 15
+
+/* max pkt */
+#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
+#define UDC_CSR_NE_MAX_PKT_OFS 19
+
+/* Device Config Register ---------------------------------------------------*/
+#define UDC_DEVCFG_ADDR 0x400
+
+#define UDC_DEVCFG_SOFTRESET 31
+#define UDC_DEVCFG_HNPSFEN 30
+#define UDC_DEVCFG_DMARST 29
+#define UDC_DEVCFG_SET_DESC 18
+#define UDC_DEVCFG_CSR_PRG 17
+#define UDC_DEVCFG_STATUS 7
+#define UDC_DEVCFG_DIR 6
+#define UDC_DEVCFG_PI 5
+#define UDC_DEVCFG_SS 4
+#define UDC_DEVCFG_SP 3
+#define UDC_DEVCFG_RWKP 2
+
+#define UDC_DEVCFG_SPD_MASK 0x3
+#define UDC_DEVCFG_SPD_OFS 0
+#define UDC_DEVCFG_SPD_HS 0x0
+#define UDC_DEVCFG_SPD_FS 0x1
+#define UDC_DEVCFG_SPD_LS 0x2
+/*#define UDC_DEVCFG_SPD_FS 0x3*/
+
+
+/* Device Control Register --------------------------------------------------*/
+#define UDC_DEVCTL_ADDR 0x404
+
+#define UDC_DEVCTL_THLEN_MASK 0xff000000
+#define UDC_DEVCTL_THLEN_OFS 24
+
+#define UDC_DEVCTL_BRLEN_MASK 0x00ff0000
+#define UDC_DEVCTL_BRLEN_OFS 16
+
+#define UDC_DEVCTL_SRX_FLUSH 14
+#define UDC_DEVCTL_CSR_DONE 13
+#define UDC_DEVCTL_DEVNAK 12
+#define UDC_DEVCTL_SD 10
+#define UDC_DEVCTL_MODE 9
+#define UDC_DEVCTL_BREN 8
+#define UDC_DEVCTL_THE 7
+#define UDC_DEVCTL_BF 6
+#define UDC_DEVCTL_BE 5
+#define UDC_DEVCTL_DU 4
+#define UDC_DEVCTL_TDE 3
+#define UDC_DEVCTL_RDE 2
+#define UDC_DEVCTL_RES 0
+
+
+/* Device Status Register ---------------------------------------------------*/
+#define UDC_DEVSTS_ADDR 0x408
+
+#define UDC_DEVSTS_TS_MASK 0xfffc0000
+#define UDC_DEVSTS_TS_OFS 18
+
+#define UDC_DEVSTS_SESSVLD 17
+#define UDC_DEVSTS_PHY_ERROR 16
+#define UDC_DEVSTS_RXFIFO_EMPTY 15
+
+#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
+#define UDC_DEVSTS_ENUM_SPEED_OFS 13
+#define UDC_DEVSTS_ENUM_SPEED_FULL 1
+#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
+
+#define UDC_DEVSTS_SUSP 12
+
+#define UDC_DEVSTS_ALT_MASK 0x00000f00
+#define UDC_DEVSTS_ALT_OFS 8
+
+#define UDC_DEVSTS_INTF_MASK 0x000000f0
+#define UDC_DEVSTS_INTF_OFS 4
+
+#define UDC_DEVSTS_CFG_MASK 0x0000000f
+#define UDC_DEVSTS_CFG_OFS 0
+
+
+/* Device Interrupt Register ------------------------------------------------*/
+#define UDC_DEVINT_ADDR 0x40c
+
+#define UDC_DEVINT_SVC 7
+#define UDC_DEVINT_ENUM 6
+#define UDC_DEVINT_SOF 5
+#define UDC_DEVINT_US 4
+#define UDC_DEVINT_UR 3
+#define UDC_DEVINT_ES 2
+#define UDC_DEVINT_SI 1
+#define UDC_DEVINT_SC 0
+
+/* Device Interrupt Mask Register -------------------------------------------*/
+#define UDC_DEVINT_MSK_ADDR 0x410
+
+#define UDC_DEVINT_MSK 0x7f
+
+/* Endpoint Interrupt Register ----------------------------------------------*/
+#define UDC_EPINT_ADDR 0x414
+
+#define UDC_EPINT_OUT_MASK 0xffff0000
+#define UDC_EPINT_OUT_OFS 16
+#define UDC_EPINT_IN_MASK 0x0000ffff
+#define UDC_EPINT_IN_OFS 0
+
+#define UDC_EPINT_IN_EP0 0
+#define UDC_EPINT_IN_EP1 1
+#define UDC_EPINT_IN_EP2 2
+#define UDC_EPINT_IN_EP3 3
+#define UDC_EPINT_OUT_EP0 16
+#define UDC_EPINT_OUT_EP1 17
+#define UDC_EPINT_OUT_EP2 18
+#define UDC_EPINT_OUT_EP3 19
+
+#define UDC_EPINT_EP0_ENABLE_MSK 0x001e001e
+
+/* Endpoint Interrupt Mask Register -----------------------------------------*/
+#define UDC_EPINT_MSK_ADDR 0x418
+
+#define UDC_EPINT_OUT_MSK_MASK 0xffff0000
+#define UDC_EPINT_OUT_MSK_OFS 16
+#define UDC_EPINT_IN_MSK_MASK 0x0000ffff
+#define UDC_EPINT_IN_MSK_OFS 0
+
+#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
+/* mask non-EP0 endpoints */
+#define UDC_EPDATAINT_MSK_DISABLE 0xfffefffe
+/* mask all dev interrupts */
+#define UDC_DEV_MSK_DISABLE 0x7f
+
+/* Endpoint-specific CSR's --------------------------------------------------*/
+#define UDC_EPREGS_ADDR 0x0
+#define UDC_EPIN_REGS_ADDR 0x0
+#define UDC_EPOUT_REGS_ADDR 0x200
+
+#define UDC_EPCTL_ADDR 0x0
+
+#define UDC_EPCTL_RRDY 9
+#define UDC_EPCTL_CNAK 8
+#define UDC_EPCTL_SNAK 7
+#define UDC_EPCTL_NAK 6
+
+#define UDC_EPCTL_ET_MASK 0x00000030
+#define UDC_EPCTL_ET_OFS 4
+#define UDC_EPCTL_ET_CONTROL 0
+#define UDC_EPCTL_ET_ISO 1
+#define UDC_EPCTL_ET_BULK 2
+#define UDC_EPCTL_ET_INTERRUPT 3
+
+#define UDC_EPCTL_P 3
+#define UDC_EPCTL_SN 2
+#define UDC_EPCTL_F 1
+#define UDC_EPCTL_S 0
+
+/* Endpoint Status Registers ------------------------------------------------*/
+#define UDC_EPSTS_ADDR 0x4
+
+#define UDC_EPSTS_RX_PKT_SIZE_MASK 0x007ff800
+#define UDC_EPSTS_RX_PKT_SIZE_OFS 11
+
+#define UDC_EPSTS_TDC 10
+#define UDC_EPSTS_HE 9
+#define UDC_EPSTS_BNA 7
+#define UDC_EPSTS_IN 6
+
+#define UDC_EPSTS_OUT_MASK 0x00000030
+#define UDC_EPSTS_OUT_OFS 4
+#define UDC_EPSTS_OUT_DATA 1
+#define UDC_EPSTS_OUT_DATA_CLEAR 0x10
+#define UDC_EPSTS_OUT_SETUP 2
+#define UDC_EPSTS_OUT_SETUP_CLEAR 0x20
+#define UDC_EPSTS_OUT_CLEAR 0x30
+
+/* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers ------*/
+#define UDC_EPIN_BUFF_SIZE_ADDR 0x8
+#define UDC_EPOUT_FRAME_NUMBER_ADDR 0x8
+
+#define UDC_EPIN_BUFF_SIZE_MASK 0x0000ffff
+#define UDC_EPIN_BUFF_SIZE_OFS 0
+/* EP0in txfifo = 128 bytes */
+#define UDC_EPIN0_BUFF_SIZE 32
+/* EP0in fullspeed txfifo = 128 bytes */
+#define UDC_FS_EPIN0_BUFF_SIZE 32
+
+/* fifo size mult = fifo size / max packet */
+#define UDC_EPIN_BUFF_SIZE_MULT 2
+
+/* EPin data fifo size = 1024 bytes DOUBLE BUFFERING */
+#define UDC_EPIN_BUFF_SIZE 256
+/* EPin small INT data fifo size = 128 bytes */
+#define UDC_EPIN_SMALLINT_BUFF_SIZE 32
+
+/* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */
+#define UDC_FS_EPIN_BUFF_SIZE 32
+
+#define UDC_EPOUT_FRAME_NUMBER_MASK 0x0000ffff
+#define UDC_EPOUT_FRAME_NUMBER_OFS 0
+
+/* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/
+#define UDC_EPOUT_BUFF_SIZE_ADDR 0x0c
+#define UDC_EP_MAX_PKT_SIZE_ADDR 0x0c
+
+#define UDC_EPOUT_BUFF_SIZE_MASK 0xffff0000
+#define UDC_EPOUT_BUFF_SIZE_OFS 16
+#define UDC_EP_MAX_PKT_SIZE_MASK 0x0000ffff
+#define UDC_EP_MAX_PKT_SIZE_OFS 0
+/* EP0in max packet size = 64 bytes */
+#define UDC_EP0IN_MAX_PKT_SIZE 64
+/* EP0out max packet size = 64 bytes */
+#define UDC_EP0OUT_MAX_PKT_SIZE 64
+/* EP0in fullspeed max packet size = 64 bytes */
+#define UDC_FS_EP0IN_MAX_PKT_SIZE 64
+/* EP0out fullspeed max packet size = 64 bytes */
+#define UDC_FS_EP0OUT_MAX_PKT_SIZE 64
+
+/*
+ * Endpoint dma descriptors ------------------------------------------------
+ *
+ * Setup data, Status dword
+ */
+#define UDC_DMA_STP_STS_CFG_MASK 0x0fff0000
+#define UDC_DMA_STP_STS_CFG_OFS 16
+#define UDC_DMA_STP_STS_CFG_ALT_MASK 0x000f0000
+#define UDC_DMA_STP_STS_CFG_ALT_OFS 16
+#define UDC_DMA_STP_STS_CFG_INTF_MASK 0x00f00000
+#define UDC_DMA_STP_STS_CFG_INTF_OFS 20
+#define UDC_DMA_STP_STS_CFG_NUM_MASK 0x0f000000
+#define UDC_DMA_STP_STS_CFG_NUM_OFS 24
+#define UDC_DMA_STP_STS_RX_MASK 0x30000000
+#define UDC_DMA_STP_STS_RX_OFS 28
+#define UDC_DMA_STP_STS_BS_MASK 0xc0000000
+#define UDC_DMA_STP_STS_BS_OFS 30
+#define UDC_DMA_STP_STS_BS_HOST_READY 0
+#define UDC_DMA_STP_STS_BS_DMA_BUSY 1
+#define UDC_DMA_STP_STS_BS_DMA_DONE 2
+#define UDC_DMA_STP_STS_BS_HOST_BUSY 3
+/* IN data, Status dword */
+#define UDC_DMA_IN_STS_TXBYTES_MASK 0x0000ffff
+#define UDC_DMA_IN_STS_TXBYTES_OFS 0
+#define UDC_DMA_IN_STS_FRAMENUM_MASK 0x07ff0000
+#define UDC_DMA_IN_STS_FRAMENUM_OFS 0
+#define UDC_DMA_IN_STS_L 27
+#define UDC_DMA_IN_STS_TX_MASK 0x30000000
+#define UDC_DMA_IN_STS_TX_OFS 28
+#define UDC_DMA_IN_STS_BS_MASK 0xc0000000
+#define UDC_DMA_IN_STS_BS_OFS 30
+#define UDC_DMA_IN_STS_BS_HOST_READY 0
+#define UDC_DMA_IN_STS_BS_DMA_BUSY 1
+#define UDC_DMA_IN_STS_BS_DMA_DONE 2
+#define UDC_DMA_IN_STS_BS_HOST_BUSY 3
+/* OUT data, Status dword */
+#define UDC_DMA_OUT_STS_RXBYTES_MASK 0x0000ffff
+#define UDC_DMA_OUT_STS_RXBYTES_OFS 0
+#define UDC_DMA_OUT_STS_FRAMENUM_MASK 0x07ff0000
+#define UDC_DMA_OUT_STS_FRAMENUM_OFS 0
+#define UDC_DMA_OUT_STS_L 27
+#define UDC_DMA_OUT_STS_RX_MASK 0x30000000
+#define UDC_DMA_OUT_STS_RX_OFS 28
+#define UDC_DMA_OUT_STS_BS_MASK 0xc0000000
+#define UDC_DMA_OUT_STS_BS_OFS 30
+#define UDC_DMA_OUT_STS_BS_HOST_READY 0
+#define UDC_DMA_OUT_STS_BS_DMA_BUSY 1
+#define UDC_DMA_OUT_STS_BS_DMA_DONE 2
+#define UDC_DMA_OUT_STS_BS_HOST_BUSY 3
+/* max ep0in packet */
+#define UDC_EP0IN_MAXPACKET 1000
+/* max dma packet */
+#define UDC_DMA_MAXPACKET 65536
+
+/* un-usable DMA address */
+#define DMA_DONT_USE (~(dma_addr_t) 0)
+
+/* other Endpoint register addresses and values-----------------------------*/
+#define UDC_EP_SUBPTR_ADDR 0x10
+#define UDC_EP_DESPTR_ADDR 0x14
+#define UDC_EP_WRITE_CONFIRM_ADDR 0x1c
+
+/* EP numbering as laid out in AHB space */
+#define UDC_EP_NUM 32
+#define UDC_EPIN_NUM 16
+#define UDC_EPIN_NUM_USED 5
+#define UDC_EPOUT_NUM 16
+/* number of EPs actually used = EP0 + 8 data EPs */
+#define UDC_USED_EP_NUM 9
+/* UDC CSR regs are aligned but AHB regs not - offset for OUT EP's */
+#define UDC_CSR_EP_OUT_IX_OFS 12
+
+#define UDC_EP0OUT_IX 16
+#define UDC_EP0IN_IX 0
+
+/* Rx fifo address and size = 1k -------------------------------------------*/
+#define UDC_RXFIFO_ADDR 0x800
+#define UDC_RXFIFO_SIZE 0x400
+
+/* Tx fifo address and size = 1.5k -----------------------------------------*/
+#define UDC_TXFIFO_ADDR 0xc00
+#define UDC_TXFIFO_SIZE 0x600
+
+/* default data endpoints --------------------------------------------------*/
+#define UDC_EPIN_STATUS_IX 1
+#define UDC_EPIN_IX 2
+#define UDC_EPOUT_IX 18
+
+/* general constants -------------------------------------------------------*/
+#define UDC_DWORD_BYTES 4
+#define UDC_BITS_PER_BYTE_SHIFT 3
+#define UDC_BYTE_MASK 0xff
+#define UDC_BITS_PER_BYTE 8
+
+/*---------------------------------------------------------------------------*/
+/* UDC CSR's */
+struct udc_csrs {
+
+ /* sca - setup command address */
+ u32 sca;
+
+ /* ep ne's */
+ u32 ne[UDC_USED_EP_NUM];
+} __attribute__ ((packed));
+
+/* AHB subsystem CSR registers */
+struct udc_regs {
+
+ /* device configuration */
+ u32 cfg;
+
+ /* device control */
+ u32 ctl;
+
+ /* device status */
+ u32 sts;
+
+ /* device interrupt */
+ u32 irqsts;
+
+ /* device interrupt mask */
+ u32 irqmsk;
+
+ /* endpoint interrupt */
+ u32 ep_irqsts;
+
+ /* endpoint interrupt mask */
+ u32 ep_irqmsk;
+} __attribute__ ((packed));
+
+/* endpoint specific registers */
+struct udc_ep_regs {
+
+ /* endpoint control */
+ u32 ctl;
+
+ /* endpoint status */
+ u32 sts;
+
+ /* endpoint buffer size in/ receive packet frame number out */
+ u32 bufin_framenum;
+
+ /* endpoint buffer size out/max packet size */
+ u32 bufout_maxpkt;
+
+ /* endpoint setup buffer pointer */
+ u32 subptr;
+
+ /* endpoint data descriptor pointer */
+ u32 desptr;
+
+ /* reserved */
+ u32 reserved;
+
+ /* write/read confirmation */
+ u32 confirm;
+
+} __attribute__ ((packed));
+
+/* control data DMA desc */
+struct udc_stp_dma {
+ /* status quadlet */
+ u32 status;
+ /* reserved */
+ u32 _reserved;
+ /* first setup word */
+ u32 data12;
+ /* second setup word */
+ u32 data34;
+} __attribute__ ((aligned (16)));
+
+/* normal data DMA desc */
+struct udc_data_dma {
+ /* status quadlet */
+ u32 status;
+ /* reserved */
+ u32 _reserved;
+ /* buffer pointer */
+ u32 bufptr;
+ /* next descriptor pointer */
+ u32 next;
+} __attribute__ ((aligned (16)));
+
+/* request packet */
+struct udc_request {
+ /* embedded gadget request */
+ struct usb_request req;
+
+ /* flags */
+ unsigned dma_going : 1,
+ dma_done : 1;
+ /* phys. address */
+ dma_addr_t td_phys;
+ /* first dma desc. of chain */
+ struct udc_data_dma *td_data;
+ /* last dma desc. of chain */
+ struct udc_data_dma *td_data_last;
+ struct list_head queue;
+
+ /* chain length */
+ unsigned chain_len;
+
+};
+
+/* UDC specific endpoint parameters */
+struct udc_ep {
+ struct usb_ep ep;
+ struct udc_ep_regs __iomem *regs;
+ u32 __iomem *txfifo;
+ u32 __iomem *dma;
+ dma_addr_t td_phys;
+ dma_addr_t td_stp_dma;
+ struct udc_stp_dma *td_stp;
+ struct udc_data_dma *td;
+ /* temp request */
+ struct udc_request *req;
+ unsigned req_used;
+ unsigned req_completed;
+ /* dummy DMA desc for BNA dummy */
+ struct udc_request *bna_dummy_req;
+ unsigned bna_occurred;
+
+ /* NAK state */
+ unsigned naking;
+
+ struct udc *dev;
+
+ /* queue for requests */
+ struct list_head queue;
+ unsigned halted;
+ unsigned cancel_transfer;
+ unsigned num : 5,
+ fifo_depth : 14,
+ in : 1;
+};
+
+/* device struct */
+struct udc {
+ struct usb_gadget gadget;
+ spinlock_t lock; /* protects all state */
+ /* all endpoints */
+ struct udc_ep ep[UDC_EP_NUM];
+ struct usb_gadget_driver *driver;
+ /* operational flags */
+ unsigned stall_ep0in : 1,
+ waiting_zlp_ack_ep0in : 1,
+ set_cfg_not_acked : 1,
+ data_ep_enabled : 1,
+ data_ep_queued : 1,
+ sys_suspended : 1,
+ connected;
+
+ u16 chiprev;
+
+ /* registers */
+ struct pci_dev *pdev;
+ struct udc_csrs __iomem *csr;
+ struct udc_regs __iomem *regs;
+ struct udc_ep_regs __iomem *ep_regs;
+ u32 __iomem *rxfifo;
+ u32 __iomem *txfifo;
+
+ /* DMA desc pools */
+ struct dma_pool *data_requests;
+ struct dma_pool *stp_requests;
+
+ /* device data */
+ unsigned long phys_addr;
+ void __iomem *virt_addr;
+ unsigned irq;
+
+ /* states */
+ u16 cur_config;
+ u16 cur_intf;
+ u16 cur_alt;
+
+ /* for platform device and extcon support */
+ struct device *dev;
+ struct phy *udc_phy;
+ struct extcon_dev *edev;
+ struct extcon_specific_cable_nb extcon_nb;
+ struct notifier_block nb;
+ struct delayed_work drd_work;
+ u32 conn_type;
+};
+
+#define to_amd5536_udc(g) (container_of((g), struct udc, gadget))
+
+/* setup request data */
+union udc_setup_data {
+ u32 data[2];
+ struct usb_ctrlrequest request;
+};
+
+/* Function declarations */
+int udc_enable_dev_setup_interrupts(struct udc *dev);
+int udc_mask_unused_interrupts(struct udc *dev);
+irqreturn_t udc_irq(int irq, void *pdev);
+void gadget_release(struct device *pdev);
+void empty_req_queue(struct udc_ep *ep);
+void udc_basic_init(struct udc *dev);
+void free_dma_pools(struct udc *dev);
+int init_dma_pools(struct udc *dev);
+void udc_remove(struct udc *dev);
+int udc_probe(struct udc *dev);
+
+/* DMA usage flag */
+static bool use_dma = 1;
+/* packet per buffer dma */
+static bool use_dma_ppb = 1;
+/* with per descr. update */
+static bool use_dma_ppb_du;
+/* full speed only mode */
+static bool use_fullspeed;
+
+/* module parameters */
+module_param(use_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma, "true for DMA");
+module_param(use_dma_ppb, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
+module_param(use_dma_ppb_du, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb_du,
+ "true for DMA in packet per buffer mode with descriptor update");
+module_param(use_fullspeed, bool, S_IRUGO);
+MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
+/*
+ *---------------------------------------------------------------------------
+ * SET and GET bitfields in u32 values
+ * via constants for mask/offset:
+ * <bit_field_stub_name> is the text between
+ * UDC_ and _MASK|_OFS of appropriate
+ * constant
+ *
+ * set bitfield value in u32 u32Val
+ */
+#define AMD_ADDBITS(u32Val, bitfield_val, bitfield_stub_name) \
+ (((u32Val) & (((u32) ~((u32) bitfield_stub_name##_MASK)))) \
+ | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \
+ & ((u32) bitfield_stub_name##_MASK)))
+
+/*
+ * set bitfield value in zero-initialized u32 u32Val
+ * => bitfield bits in u32Val are all zero
+ */
+#define AMD_INIT_SETBITS(u32Val, bitfield_val, bitfield_stub_name) \
+ ((u32Val) \
+ | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \
+ & ((u32) bitfield_stub_name##_MASK)))
+
+/* get bitfield value from u32 u32Val */
+#define AMD_GETBITS(u32Val, bitfield_stub_name) \
+ ((u32Val & ((u32) bitfield_stub_name##_MASK)) \
+ >> ((u32) bitfield_stub_name##_OFS))
+
+/* SET and GET bits in u32 values ------------------------------------------*/
+#define AMD_BIT(bit_stub_name) (1 << bit_stub_name)
+#define AMD_UNMASK_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
+#define AMD_CLEAR_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
+
+/* debug macros ------------------------------------------------------------*/
+
+#define DBG(udc, args...) dev_dbg(udc->dev, args)
+
+#ifdef UDC_VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(udc, args...) do {} while (0)
+#endif
+
+#endif /* #ifdef AMD5536UDC_H */
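[Editor's note: as a usage sketch for the AMD_* bitfield helpers defined above -- not part of the header -- compose a value with AMD_INIT_SETBITS/AMD_ADDBITS, read a field back with AMD_GETBITS, and test a single-bit flag with AMD_BIT. Only the UDC_* constants are real; the function itself is hypothetical and assumes a probed struct udc.]

/*
 * Hypothetical usage of the bitfield helpers above; only the UDC_*
 * constants come from this header. Assumes a probed 'struct udc *dev'.
 */
static u32 example_ne_entry(struct udc *dev)
{
	u32 tmp = 0;

	tmp = AMD_INIT_SETBITS(tmp, 1, UDC_CSR_NE_NUM);     /* endpoint 1 */
	tmp = AMD_ADDBITS(tmp, UDC_EPCTL_ET_BULK, UDC_CSR_NE_TYPE);
	tmp = AMD_ADDBITS(tmp, 512, UDC_CSR_NE_MAX_PKT);    /* wMaxPacketSize */

	/* single-bit test: is the device suspended? */
	if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_SUSP))
		DBG(dev, "device is suspended\n");

	return AMD_GETBITS(tmp, UDC_CSR_NE_MAX_PKT);        /* yields 512 */
}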
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
new file mode 100644
index 000000000..a36913ae3
--- /dev/null
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * amd5536udc_pci.c -- AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2007 AMD (https://www.amd.com)
+ * Author: Thomas Dahlmann
+ */
+
+/*
+ * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
+ * It is a high speed, DMA capable USB device controller. Besides ep0 it
+ * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+ *
+ * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
+ * be used as host port) and UOC bits PAD_EN and APU are set (should be done
+ * by BIOS init).
+ *
+ * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
+ * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
+ * can be used with gadget ether.
+ *
+ * This file does pci device registration, and the core driver implementation
+ * is done in snps_udc_core.c.
+ *
+ * The driver is split so as to use the core UDC driver which is based on
+ * Synopsys device controller IP (different from the HS OTG IP) in UDCs
+ * integrated to SoC platforms.
+ *
+ */
+
+/* Driver strings */
+#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
+
+/* system */
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/pci.h>
+
+/* udc specific */
+#include "amd5536udc.h"
+
+/* pointer to device object */
+static struct udc *udc;
+
+/* description */
+static const char name[] = "amd5536udc-pci";
+
+/* Reset all pci context */
+static void udc_pci_remove(struct pci_dev *pdev)
+{
+ struct udc *dev;
+
+ dev = pci_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+ /* gadget driver must not be registered */
+ if (WARN_ON(dev->driver))
+ return;
+
+ /* dma pool cleanup */
+ free_dma_pools(dev);
+
+ /* reset controller */
+ writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+ free_irq(pdev->irq, dev);
+ iounmap(dev->virt_addr);
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ pci_disable_device(pdev);
+
+ udc_remove(dev);
+}
+
+/* Called by pci bus driver to init pci context */
+static int udc_pci_probe(
+ struct pci_dev *pdev,
+ const struct pci_device_id *id
+)
+{
+ struct udc *dev;
+ unsigned long resource;
+ unsigned long len;
+ int retval = 0;
+
+ /* one udc only */
+ if (udc) {
+ dev_dbg(&pdev->dev, "already probed\n");
+ return -EBUSY;
+ }
+
+ /* init */
+ dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* pci setup */
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto err_pcidev;
+ }
+
+ /* PCI resource allocation */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+ if (!request_mem_region(resource, len, name)) {
+ dev_dbg(&pdev->dev, "pci device used already\n");
+ retval = -EBUSY;
+ goto err_memreg;
+ }
+
+ dev->virt_addr = ioremap(resource, len);
+ if (!dev->virt_addr) {
+ dev_dbg(&pdev->dev, "start address cannot be mapped\n");
+ retval = -EFAULT;
+ goto err_ioremap;
+ }
+
+ if (!pdev->irq) {
+ dev_err(&pdev->dev, "irq not set\n");
+ retval = -ENODEV;
+ goto err_irq;
+ }
+
+ spin_lock_init(&dev->lock);
+ /* udc csr registers base */
+ dev->csr = dev->virt_addr + UDC_CSR_ADDR;
+ /* dev registers base */
+ dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
+ /* ep registers base */
+ dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
+ /* fifo's base */
+ dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
+ dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
+
+ if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
+ dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+ retval = -EBUSY;
+ goto err_irq;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* chip revision for Hs AMD5536 */
+ dev->chiprev = pdev->revision;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ dev->phys_addr = resource;
+ dev->irq = pdev->irq;
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+
+ /* init dma pools */
+ if (use_dma) {
+ retval = init_dma_pools(dev);
+ if (retval != 0)
+ goto err_dma;
+ }
+
+ /* general probing */
+ if (udc_probe(dev)) {
+ retval = -ENODEV;
+ goto err_probe;
+ }
+
+ udc = dev;
+
+ return 0;
+
+err_probe:
+ if (use_dma)
+ free_dma_pools(dev);
+err_dma:
+ free_irq(pdev->irq, dev);
+err_irq:
+ iounmap(dev->virt_addr);
+err_ioremap:
+ release_mem_region(resource, len);
+err_memreg:
+ pci_disable_device(pdev);
+err_pcidev:
+ kfree(dev);
+ return retval;
+}
+
+/* PCI device parameters */
+static const struct pci_device_id pci_id[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, pci_id);
+
+/* PCI functions */
+static struct pci_driver udc_pci_driver = {
+ .name = name,
+ .id_table = pci_id,
+ .probe = udc_pci_probe,
+ .remove = udc_pci_remove,
+};
+module_pci_driver(udc_pci_driver);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Thomas Dahlmann");
+MODULE_LICENSE("GPL");
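[Editor's note: the header comment above says the core implementation is shared with SoC platform glue (snps_udc_plat.c in this series). Below is a condensed, hypothetical sketch of that reuse, assuming register sub-block setup and error unwinding are elided: a platform probe only has to fill in struct udc and hand off to the shared entry points declared in amd5536udc.h.]

/*
 * Condensed, hypothetical sketch of platform-side reuse of the shared
 * core (the real glue is snps_udc_plat.c). Register sub-block setup
 * and error unwinding are elided.
 */
static int example_udc_plat_probe(struct platform_device *pdev)
{
	struct udc *dev;
	int irq, ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);
	dev->dev = &pdev->dev;

	dev->virt_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->virt_addr))
		return PTR_ERR(dev->virt_addr);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	dev->irq = irq;

	ret = devm_request_irq(&pdev->dev, irq, udc_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), dev);
	if (ret)
		return ret;

	return udc_probe(dev);		/* shared core takes over */
}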
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
new file mode 100644
index 000000000..605500b19
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+config USB_ASPEED_VHUB
+ tristate "Aspeed vHub UDC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on USB_LIBCOMPOSITE
+ help
+ USB peripheral controller for the Aspeed AST2400, AST2500 and
+ AST2600 family SoCs supporting the "vHub" functionality and USB2.0.
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Makefile b/drivers/usb/gadget/udc/aspeed-vhub/Makefile
new file mode 100644
index 000000000..9f3add605
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
+obj-$(CONFIG_USB_ASPEED_VHUB) += aspeed-vhub.o
+aspeed-vhub-y := core.o ep0.o epn.o dev.o hub.o
+
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
new file mode 100644
index 000000000..7a635c499
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
+ *
+ * core.c - Top level support
+ *
+ * Copyright 2017 IBM Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/dma-mapping.h>
+
+#include "vhub.h"
+
+void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ int status)
+{
+ bool internal = req->internal;
+ struct ast_vhub *vhub = ep->vhub;
+
+ EPVDBG(ep, "completing request @%p, status %d\n", req, status);
+
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+
+ if (req->req.dma) {
+ if (!WARN_ON(!ep->dev))
+ usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
+ &req->req, ep->epn.is_in);
+ req->req.dma = 0;
+ }
+
+ /*
+ * If this isn't an internal EP0 request, call the core
+ * to call the gadget completion.
+ */
+ if (!internal) {
+ spin_unlock(&ep->vhub->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->vhub->lock);
+ }
+}
+
+void ast_vhub_nuke(struct ast_vhub_ep *ep, int status)
+{
+ struct ast_vhub_req *req;
+ int count = 0;
+
+ /* Beware, lock will be dropped & re-acquired by done() */
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct ast_vhub_req, queue);
+ ast_vhub_done(ep, req, status);
+ count++;
+ }
+ if (count)
+ EPDBG(ep, "Nuked %d request(s)\n", count);
+}
+
+struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep,
+ gfp_t gfp_flags)
+{
+ struct ast_vhub_req *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+ return &req->req;
+}
+
+void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req)
+{
+ struct ast_vhub_req *req = to_ast_req(u_req);
+
+ kfree(req);
+}
+
+static irqreturn_t ast_vhub_irq(int irq, void *data)
+{
+ struct ast_vhub *vhub = data;
+ irqreturn_t iret = IRQ_NONE;
+ u32 i, istat;
+
+ /* Stale interrupt while tearing down */
+ if (!vhub->ep0_bufs)
+ return IRQ_NONE;
+
+ spin_lock(&vhub->lock);
+
+ /* Read and ACK interrupts */
+ istat = readl(vhub->regs + AST_VHUB_ISR);
+ if (!istat)
+ goto bail;
+ writel(istat, vhub->regs + AST_VHUB_ISR);
+ iret = IRQ_HANDLED;
+
+ UDCVDBG(vhub, "irq status=%08x, ep_acks=%08x ep_nacks=%08x\n",
+ istat,
+ readl(vhub->regs + AST_VHUB_EP_ACK_ISR),
+ readl(vhub->regs + AST_VHUB_EP_NACK_ISR));
+
+ /* Handle generic EPs first */
+ if (istat & VHUB_IRQ_EP_POOL_ACK_STALL) {
+ u32 ep_acks = readl(vhub->regs + AST_VHUB_EP_ACK_ISR);
+ writel(ep_acks, vhub->regs + AST_VHUB_EP_ACK_ISR);
+
+ for (i = 0; ep_acks && i < vhub->max_epns; i++) {
+ u32 mask = VHUB_EP_IRQ(i);
+ if (ep_acks & mask) {
+ ast_vhub_epn_ack_irq(&vhub->epns[i]);
+ ep_acks &= ~mask;
+ }
+ }
+ }
+
+ /* Handle device interrupts */
+ if (istat & vhub->port_irq_mask) {
+ for (i = 0; i < vhub->max_ports; i++) {
+ if (istat & VHUB_DEV_IRQ(i))
+ ast_vhub_dev_irq(&vhub->ports[i].dev);
+ }
+ }
+
+ /* Handle top-level vHub EP0 interrupts */
+ if (istat & (VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
+ VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
+ VHUB_IRQ_HUB_EP0_SETUP)) {
+ if (istat & VHUB_IRQ_HUB_EP0_IN_ACK_STALL)
+ ast_vhub_ep0_handle_ack(&vhub->ep0, true);
+ if (istat & VHUB_IRQ_HUB_EP0_OUT_ACK_STALL)
+ ast_vhub_ep0_handle_ack(&vhub->ep0, false);
+ if (istat & VHUB_IRQ_HUB_EP0_SETUP)
+ ast_vhub_ep0_handle_setup(&vhub->ep0);
+ }
+
+ /* Various top level bus events */
+ if (istat & (VHUB_IRQ_BUS_RESUME |
+ VHUB_IRQ_BUS_SUSPEND |
+ VHUB_IRQ_BUS_RESET)) {
+ if (istat & VHUB_IRQ_BUS_RESUME)
+ ast_vhub_hub_resume(vhub);
+ if (istat & VHUB_IRQ_BUS_SUSPEND)
+ ast_vhub_hub_suspend(vhub);
+ if (istat & VHUB_IRQ_BUS_RESET)
+ ast_vhub_hub_reset(vhub);
+ }
+
+ bail:
+ spin_unlock(&vhub->lock);
+ return iret;
+}
+
+void ast_vhub_init_hw(struct ast_vhub *vhub)
+{
+ u32 ctrl, port_mask, epn_mask;
+
+ UDCDBG(vhub, "(Re)Starting HW ...\n");
+
+ /* Enable PHY */
+ ctrl = VHUB_CTRL_PHY_CLK |
+ VHUB_CTRL_PHY_RESET_DIS;
+
+ /*
+ * We do *NOT* set the VHUB_CTRL_CLK_STOP_SUSPEND bit
+ * to stop the logic clock during suspend because
+ * it causes the registers to become inaccessible and
+ * we haven't yet figured out a good way to bring the
+ * controller back into life to issue a wakeup.
+ */
+
+ /*
+ * Set some ISO & split control bits according to Aspeed
+ * recommendation
+ *
+ * VHUB_CTRL_ISO_RSP_CTRL: When set tells the HW to respond
+ * with 0 bytes data packet to ISO IN endpoints when no data
+ * is available.
+ *
+ * VHUB_CTRL_SPLIT_IN: This makes a SOF complete a split IN
+ * transaction.
+ */
+ ctrl |= VHUB_CTRL_ISO_RSP_CTRL | VHUB_CTRL_SPLIT_IN;
+ writel(ctrl, vhub->regs + AST_VHUB_CTRL);
+ udelay(1);
+
+ /* Set descriptor ring size */
+ if (AST_VHUB_DESCS_COUNT == 256) {
+ ctrl |= VHUB_CTRL_LONG_DESC;
+ writel(ctrl, vhub->regs + AST_VHUB_CTRL);
+ } else {
+ BUILD_BUG_ON(AST_VHUB_DESCS_COUNT != 32);
+ }
+
+ /* Reset all devices */
+ port_mask = GENMASK(vhub->max_ports, 1);
+ writel(VHUB_SW_RESET_ROOT_HUB |
+ VHUB_SW_RESET_DMA_CONTROLLER |
+ VHUB_SW_RESET_EP_POOL |
+ port_mask, vhub->regs + AST_VHUB_SW_RESET);
+ udelay(1);
+ writel(0, vhub->regs + AST_VHUB_SW_RESET);
+
+ /* Disable and cleanup EP ACK/NACK interrupts */
+ epn_mask = GENMASK(vhub->max_epns - 1, 0);
+ writel(0, vhub->regs + AST_VHUB_EP_ACK_IER);
+ writel(0, vhub->regs + AST_VHUB_EP_NACK_IER);
+ writel(epn_mask, vhub->regs + AST_VHUB_EP_ACK_ISR);
+ writel(epn_mask, vhub->regs + AST_VHUB_EP_NACK_ISR);
+
+ /* Default settings for EP0, enable HW hub EP1 */
+ writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
+ writel(VHUB_EP1_CTRL_RESET_TOGGLE |
+ VHUB_EP1_CTRL_ENABLE,
+ vhub->regs + AST_VHUB_EP1_CTRL);
+ writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
+
+ /* Configure EP0 DMA buffer */
+ writel(vhub->ep0.buf_dma, vhub->regs + AST_VHUB_EP0_DATA);
+
+ /* Clear address */
+ writel(0, vhub->regs + AST_VHUB_CONF);
+
+ /* Pullup hub (activate on host) */
+ if (vhub->force_usb1)
+ ctrl |= VHUB_CTRL_FULL_SPEED_ONLY;
+
+ ctrl |= VHUB_CTRL_UPSTREAM_CONNECT;
+ writel(ctrl, vhub->regs + AST_VHUB_CTRL);
+
+ /* Enable some interrupts */
+ writel(VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
+ VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
+ VHUB_IRQ_HUB_EP0_SETUP |
+ VHUB_IRQ_EP_POOL_ACK_STALL |
+ VHUB_IRQ_BUS_RESUME |
+ VHUB_IRQ_BUS_SUSPEND |
+ VHUB_IRQ_BUS_RESET,
+ vhub->regs + AST_VHUB_IER);
+}
+
+static int ast_vhub_remove(struct platform_device *pdev)
+{
+ struct ast_vhub *vhub = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int i;
+
+ if (!vhub || !vhub->regs)
+ return 0;
+
+ /* Remove devices */
+ for (i = 0; i < vhub->max_ports; i++)
+ ast_vhub_del_dev(&vhub->ports[i].dev);
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* Mask & ack all interrupts */
+ writel(0, vhub->regs + AST_VHUB_IER);
+ writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);
+
+ /* Pull device, leave PHY enabled */
+ writel(VHUB_CTRL_PHY_CLK |
+ VHUB_CTRL_PHY_RESET_DIS,
+ vhub->regs + AST_VHUB_CTRL);
+
+ if (vhub->clk)
+ clk_disable_unprepare(vhub->clk);
+
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ if (vhub->ep0_bufs)
+ dma_free_coherent(&pdev->dev,
+ AST_VHUB_EP0_MAX_PACKET *
+ (vhub->max_ports + 1),
+ vhub->ep0_bufs,
+ vhub->ep0_bufs_dma);
+ vhub->ep0_bufs = NULL;
+
+ return 0;
+}
+
+static int ast_vhub_probe(struct platform_device *pdev)
+{
+ enum usb_device_speed max_speed;
+ struct ast_vhub *vhub;
+ struct resource *res;
+ int i, rc = 0;
+ const struct device_node *np = pdev->dev.of_node;
+
+ vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL);
+ if (!vhub)
+ return -ENOMEM;
+
+ rc = of_property_read_u32(np, "aspeed,vhub-downstream-ports",
+ &vhub->max_ports);
+ if (rc < 0)
+ vhub->max_ports = AST_VHUB_NUM_PORTS;
+
+ vhub->ports = devm_kcalloc(&pdev->dev, vhub->max_ports,
+ sizeof(*vhub->ports), GFP_KERNEL);
+ if (!vhub->ports)
+ return -ENOMEM;
+
+ rc = of_property_read_u32(np, "aspeed,vhub-generic-endpoints",
+ &vhub->max_epns);
+ if (rc < 0)
+ vhub->max_epns = AST_VHUB_NUM_GEN_EPs;
+
+ vhub->epns = devm_kcalloc(&pdev->dev, vhub->max_epns,
+ sizeof(*vhub->epns), GFP_KERNEL);
+ if (!vhub->epns)
+ return -ENOMEM;
+
+ spin_lock_init(&vhub->lock);
+ vhub->pdev = pdev;
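+ /*
+ * Per-device interrupts occupy consecutive bits starting at
+ * VHUB_IRQ_DEV1_BIT, one bit per downstream port.
+ */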
+ vhub->port_irq_mask = GENMASK(VHUB_IRQ_DEV1_BIT + vhub->max_ports - 1,
+ VHUB_IRQ_DEV1_BIT);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vhub->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vhub->regs)) {
+ dev_err(&pdev->dev, "Failed to map resources\n");
+ return PTR_ERR(vhub->regs);
+ }
+ UDCDBG(vhub, "vHub@%pR mapped @%p\n", res, vhub->regs);
+
+ platform_set_drvdata(pdev, vhub);
+
+ vhub->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vhub->clk)) {
+ rc = PTR_ERR(vhub->clk);
+ goto err;
+ }
+ rc = clk_prepare_enable(vhub->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock (%d)\n", rc);
+ goto err;
+ }
+
+ /* Check if we need to limit the HW to USB1 */
+ max_speed = usb_get_maximum_speed(&pdev->dev);
+ if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
+ vhub->force_usb1 = true;
+
+ /* Mask & ack all interrupts before installing the handler */
+ writel(0, vhub->regs + AST_VHUB_IER);
+ writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);
+
+ /* Find interrupt and install handler */
+ vhub->irq = platform_get_irq(pdev, 0);
+ if (vhub->irq < 0) {
+ rc = vhub->irq;
+ goto err;
+ }
+ rc = devm_request_irq(&pdev->dev, vhub->irq, ast_vhub_irq, 0,
+ KBUILD_MODNAME, vhub);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ goto err;
+ }
+
+ /*
+ * Allocate DMA buffers for all EP0s in one chunk,
+ * one per port and one for the vHub itself
+ */
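+ /*
+ * Layout: slot 0 is the vHub's own EP0 buffer and slots
+ * 1..max_ports belong to the per-port devices, each slot
+ * AST_VHUB_EP0_MAX_PACKET bytes long.
+ */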
+ vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev,
+ AST_VHUB_EP0_MAX_PACKET *
+ (vhub->max_ports + 1),
+ &vhub->ep0_bufs_dma, GFP_KERNEL);
+ if (!vhub->ep0_bufs) {
+ dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ UDCVDBG(vhub, "EP0 DMA buffers @%p (DMA 0x%08x)\n",
+ vhub->ep0_bufs, (u32)vhub->ep0_bufs_dma);
+
+ /* Init vHub EP0 */
+ ast_vhub_init_ep0(vhub, &vhub->ep0, NULL);
+
+ /* Init devices */
+ for (i = 0; i < vhub->max_ports && rc == 0; i++)
+ rc = ast_vhub_init_dev(vhub, i);
+ if (rc)
+ goto err;
+
+ /* Init hub emulation */
+ rc = ast_vhub_init_hub(vhub);
+ if (rc)
+ goto err;
+
+ /* Initialize HW */
+ ast_vhub_init_hw(vhub);
+
+ dev_info(&pdev->dev, "Initialized virtual hub in USB%d mode\n",
+ vhub->force_usb1 ? 1 : 2);
+
+ return 0;
+ err:
+ ast_vhub_remove(pdev);
+ return rc;
+}
+
+static const struct of_device_id ast_vhub_dt_ids[] = {
+ {
+ .compatible = "aspeed,ast2400-usb-vhub",
+ },
+ {
+ .compatible = "aspeed,ast2500-usb-vhub",
+ },
+ {
+ .compatible = "aspeed,ast2600-usb-vhub",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ast_vhub_dt_ids);
+
+static struct platform_driver ast_vhub_driver = {
+ .probe = ast_vhub_probe,
+ .remove = ast_vhub_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ast_vhub_dt_ids,
+ },
+};
+module_platform_driver(ast_vhub_driver);
+
+MODULE_DESCRIPTION("Aspeed vHub udc driver");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
new file mode 100644
index 000000000..4f3bc27c1
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
+ *
+ * dev.c - Individual device/gadget management (i.e., a port = a gadget)
+ *
+ * Copyright 2017 IBM Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "vhub.h"
+
+void ast_vhub_dev_irq(struct ast_vhub_dev *d)
+{
+ u32 istat = readl(d->regs + AST_VHUB_DEV_ISR);
+
+ writel(istat, d->regs + AST_VHUB_DEV_ISR);
+
+ if (istat & VHUV_DEV_IRQ_EP0_IN_ACK_STALL)
+ ast_vhub_ep0_handle_ack(&d->ep0, true);
+ if (istat & VHUV_DEV_IRQ_EP0_OUT_ACK_STALL)
+ ast_vhub_ep0_handle_ack(&d->ep0, false);
+ if (istat & VHUV_DEV_IRQ_EP0_SETUP)
+ ast_vhub_ep0_handle_setup(&d->ep0);
+}
+
+static void ast_vhub_dev_enable(struct ast_vhub_dev *d)
+{
+ u32 reg, hmsk, i;
+
+ if (d->enabled)
+ return;
+
+ /* Cleanup EP0 state */
+ ast_vhub_reset_ep0(d);
+
+ /* Enable device and its EP0 interrupts */
+ reg = VHUB_DEV_EN_ENABLE_PORT |
+ VHUB_DEV_EN_EP0_IN_ACK_IRQEN |
+ VHUB_DEV_EN_EP0_OUT_ACK_IRQEN |
+ VHUB_DEV_EN_EP0_SETUP_IRQEN;
+ if (d->gadget.speed == USB_SPEED_HIGH)
+ reg |= VHUB_DEV_EN_SPEED_SEL_HIGH;
+ writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL);
+
+ /* Enable device interrupt in the hub as well */
+ hmsk = VHUB_IRQ_DEVICE1 << d->index;
+ reg = readl(d->vhub->regs + AST_VHUB_IER);
+ reg |= hmsk;
+ writel(reg, d->vhub->regs + AST_VHUB_IER);
+
+ /* Set EP0 DMA buffer address */
+ writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA);
+
+ /* Clear stall on all EPs */
+ for (i = 0; i < d->max_epns; i++) {
+ struct ast_vhub_ep *ep = d->epns[i];
+
+ if (ep && (ep->epn.stalled || ep->epn.wedged)) {
+ ep->epn.stalled = false;
+ ep->epn.wedged = false;
+ ast_vhub_update_epn_stall(ep);
+ }
+ }
+
+ /* Additional cleanups */
+ d->wakeup_en = false;
+ d->enabled = true;
+}
+
+static void ast_vhub_dev_disable(struct ast_vhub_dev *d)
+{
+ u32 reg, hmsk;
+
+ if (!d->enabled)
+ return;
+
+ /* Disable device interrupt in the hub */
+ hmsk = VHUB_IRQ_DEVICE1 << d->index;
+ reg = readl(d->vhub->regs + AST_VHUB_IER);
+ reg &= ~hmsk;
+ writel(reg, d->vhub->regs + AST_VHUB_IER);
+
+ /* Then disable device */
+ writel(0, d->regs + AST_VHUB_DEV_EN_CTRL);
+ d->gadget.speed = USB_SPEED_UNKNOWN;
+ d->enabled = false;
+}
+
+static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
+ u16 wIndex, u16 wValue,
+ bool is_set)
+{
+ u32 val;
+
+ DDBG(d, "%s_FEATURE(dev val=%02x)\n",
+ is_set ? "SET" : "CLEAR", wValue);
+
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ d->wakeup_en = is_set;
+ return std_req_complete;
+ }
+
+ if (wValue == USB_DEVICE_TEST_MODE) {
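+ /*
+ * The test selector for SET_FEATURE(TEST_MODE) is carried
+ * in the high byte of wIndex (USB 2.0 ch9); program it
+ * into the test mode field of the vHub control register.
+ */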
+ val = readl(d->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+ writel(val, d->vhub->regs + AST_VHUB_CTRL);
+
+ return std_req_complete;
+ }
+
+ return std_req_driver;
+}
+
+static int ast_vhub_ep_feature(struct ast_vhub_dev *d,
+ u16 wIndex, u16 wValue, bool is_set)
+{
+ struct ast_vhub_ep *ep;
+ int ep_num;
+
+ ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ DDBG(d, "%s_FEATURE(ep%d val=%02x)\n",
+ is_set ? "SET" : "CLEAR", ep_num, wValue);
+ if (ep_num == 0)
+ return std_req_complete;
+ if (ep_num >= d->max_epns || !d->epns[ep_num - 1])
+ return std_req_stall;
+ if (wValue != USB_ENDPOINT_HALT)
+ return std_req_driver;
+
+ ep = d->epns[ep_num - 1];
+ if (WARN_ON(!ep))
+ return std_req_stall;
+
+ if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso ||
+ ep->epn.is_in != !!(wIndex & USB_DIR_IN))
+ return std_req_stall;
+
+ DDBG(d, "%s stall on EP %d\n",
+ is_set ? "setting" : "clearing", ep_num);
+ ep->epn.stalled = is_set;
+ ast_vhub_update_epn_stall(ep);
+
+ return std_req_complete;
+}
+
+static int ast_vhub_dev_status(struct ast_vhub_dev *d,
+ u16 wIndex, u16 wValue)
+{
+ u8 st0;
+
+ DDBG(d, "GET_STATUS(dev)\n");
+
+ st0 = d->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED;
+ if (d->wakeup_en)
+ st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+
+ return ast_vhub_simple_reply(&d->ep0, st0, 0);
+}
+
+static int ast_vhub_ep_status(struct ast_vhub_dev *d,
+ u16 wIndex, u16 wValue)
+{
+ int ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ struct ast_vhub_ep *ep;
+ u8 st0 = 0;
+
+ DDBG(d, "GET_STATUS(ep%d)\n", ep_num);
+
+ if (ep_num >= d->max_epns)
+ return std_req_stall;
+ if (ep_num != 0) {
+ ep = d->epns[ep_num - 1];
+ if (!ep)
+ return std_req_stall;
+ if (!ep->epn.enabled || !ep->ep.desc || ep->epn.is_iso ||
+ ep->epn.is_in != !!(wIndex & USB_DIR_IN))
+ return std_req_stall;
+ if (ep->epn.stalled)
+ st0 |= 1 << USB_ENDPOINT_HALT;
+ }
+
+ return ast_vhub_simple_reply(&d->ep0, st0, 0);
+}
+
+static void ast_vhub_dev_set_address(struct ast_vhub_dev *d, u8 addr)
+{
+ u32 reg;
+
+ DDBG(d, "SET_ADDRESS: Got address %x\n", addr);
+
+ reg = readl(d->regs + AST_VHUB_DEV_EN_CTRL);
+ reg &= ~VHUB_DEV_EN_ADDR_MASK;
+ reg |= VHUB_DEV_EN_SET_ADDR(addr);
+ writel(reg, d->regs + AST_VHUB_DEV_EN_CTRL);
+}
+
+int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
+ struct usb_ctrlrequest *crq)
+{
+ struct ast_vhub_dev *d = ep->dev;
+ u16 wValue, wIndex;
+
+ /* No driver, we shouldn't be enabled ... */
+ if (!d->driver || !d->enabled) {
+ EPDBG(ep,
+ "Device is wrong state driver=%p enabled=%d\n",
+ d->driver, d->enabled);
+ return std_req_stall;
+ }
+
+ /*
+ * Note: we used to reject/stall requests while suspended;
+ * we don't do that anymore as some mass storage hosts got
+ * very upset by it.
+ */
+
+ /* First packet, grab speed */
+ if (d->gadget.speed == USB_SPEED_UNKNOWN) {
+ d->gadget.speed = ep->vhub->speed;
+ if (d->gadget.speed > d->driver->max_speed)
+ d->gadget.speed = d->driver->max_speed;
+ DDBG(d, "first packet, captured speed %d\n",
+ d->gadget.speed);
+ }
+
+ wValue = le16_to_cpu(crq->wValue);
+ wIndex = le16_to_cpu(crq->wIndex);
+
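+ /*
+ * Combine bRequestType and bRequest into a single value so
+ * the standard shorthands below (DeviceOutRequest & co. are
+ * defined as bRequestType << 8 combos) match in one switch.
+ */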
+ switch ((crq->bRequestType << 8) | crq->bRequest) {
+ /* SET_ADDRESS */
+ case DeviceOutRequest | USB_REQ_SET_ADDRESS:
+ ast_vhub_dev_set_address(d, wValue);
+ return std_req_complete;
+
+ /* GET_STATUS */
+ case DeviceRequest | USB_REQ_GET_STATUS:
+ return ast_vhub_dev_status(d, wIndex, wValue);
+ case InterfaceRequest | USB_REQ_GET_STATUS:
+ return ast_vhub_simple_reply(ep, 0, 0);
+ case EndpointRequest | USB_REQ_GET_STATUS:
+ return ast_vhub_ep_status(d, wIndex, wValue);
+
+ /* SET/CLEAR_FEATURE */
+ case DeviceOutRequest | USB_REQ_SET_FEATURE:
+ return ast_vhub_dev_feature(d, wIndex, wValue, true);
+ case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
+ return ast_vhub_dev_feature(d, wIndex, wValue, false);
+ case EndpointOutRequest | USB_REQ_SET_FEATURE:
+ return ast_vhub_ep_feature(d, wIndex, wValue, true);
+ case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
+ return ast_vhub_ep_feature(d, wIndex, wValue, false);
+ }
+ return std_req_driver;
+}
+
+static int ast_vhub_udc_wakeup(struct usb_gadget* gadget)
+{
+ struct ast_vhub_dev *d = to_ast_dev(gadget);
+ unsigned long flags;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&d->vhub->lock, flags);
+ if (!d->wakeup_en)
+ goto err;
+
+ DDBG(d, "Device initiated wakeup\n");
+
+ /* Wakeup the host */
+ ast_vhub_hub_wake_all(d->vhub);
+ rc = 0;
+ err:
+ spin_unlock_irqrestore(&d->vhub->lock, flags);
+ return rc;
+}
+
+static int ast_vhub_udc_get_frame(struct usb_gadget* gadget)
+{
+ struct ast_vhub_dev *d = to_ast_dev(gadget);
+
+ return (readl(d->vhub->regs + AST_VHUB_USBSTS) >> 16) & 0x7ff;
+}
+
+static void ast_vhub_dev_nuke(struct ast_vhub_dev *d)
+{
+ unsigned int i;
+
+ for (i = 0; i < d->max_epns; i++) {
+ if (!d->epns[i])
+ continue;
+ ast_vhub_nuke(d->epns[i], -ESHUTDOWN);
+ }
+}
+
+static int ast_vhub_udc_pullup(struct usb_gadget* gadget, int on)
+{
+ struct ast_vhub_dev *d = to_ast_dev(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->vhub->lock, flags);
+
+ DDBG(d, "pullup(%d)\n", on);
+
+ /* Mark disconnected in the hub */
+ ast_vhub_device_connect(d->vhub, d->index, on);
+
+ /*
+ * If enabled, nuke all requests if any (there shouldn't be)
+ * and disable the port. This will clear the address too.
+ */
+ if (d->enabled) {
+ ast_vhub_dev_nuke(d);
+ ast_vhub_dev_disable(d);
+ }
+
+ spin_unlock_irqrestore(&d->vhub->lock, flags);
+
+ return 0;
+}
+
+static int ast_vhub_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct ast_vhub_dev *d = to_ast_dev(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->vhub->lock, flags);
+
+ DDBG(d, "start\n");
+
+ /* We don't do much more until the hub enables us */
+ d->driver = driver;
+ d->gadget.is_selfpowered = 1;
+
+ spin_unlock_irqrestore(&d->vhub->lock, flags);
+
+ return 0;
+}
+
+static struct usb_ep *ast_vhub_udc_match_ep(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ss)
+{
+ struct ast_vhub_dev *d = to_ast_dev(gadget);
+ struct ast_vhub_ep *ep;
+ struct usb_ep *u_ep;
+ unsigned int max, addr, i;
+
+ DDBG(d, "Match EP type %d\n", usb_endpoint_type(desc));
+
+ /*
+ * First we need to look for an existing unclaimed EP as another
+ * configuration may have already associated a bunch of EPs with
+ * this gadget. This duplicates the code in usb_ep_autoconfig_ss()
+ * unfortunately.
+ */
+ list_for_each_entry(u_ep, &gadget->ep_list, ep_list) {
+ if (usb_gadget_ep_match_desc(gadget, u_ep, desc, ss)) {
+ DDBG(d, " -> using existing EP%d\n",
+ to_ast_ep(u_ep)->d_idx);
+ return u_ep;
+ }
+ }
+
+ /*
+ * We didn't find one, we need to grab one from the pool.
+ *
+ * First let's do some sanity checking
+ */
+ switch(usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* Only EP0 can be a control endpoint */
+ return NULL;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
+ if (gadget_is_dualspeed(gadget))
+ max = 1024;
+ else
+ max = 1023;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (gadget_is_dualspeed(gadget))
+ max = 512;
+ else
+ max = 64;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (gadget_is_dualspeed(gadget))
+ max = 1024;
+ else
+ max = 64;
+ break;
+ }
+ if (usb_endpoint_maxp(desc) > max)
+ return NULL;
+
+ /*
+ * Find a free EP address for that device. We can't
+ * let the generic code assign these as it would
+ * create overlapping numbers for IN and OUT which
+ * we don't support, so also create a suitable name
+ * that will allow the generic code to use our
+ * assigned address.
+ */
+ for (i = 0; i < d->max_epns; i++)
+ if (d->epns[i] == NULL)
+ break;
+ if (i >= d->max_epns)
+ return NULL;
+ addr = i + 1;
+
+ /*
+ * Now grab an EP from the shared pool and associate
+ * it with our device
+ */
+ ep = ast_vhub_alloc_epn(d, addr);
+ if (!ep)
+ return NULL;
+ DDBG(d, "Allocated epn#%d for port EP%d\n",
+ ep->epn.g_idx, addr);
+
+ return &ep->ep;
+}
+
+static int ast_vhub_udc_stop(struct usb_gadget *gadget)
+{
+ struct ast_vhub_dev *d = to_ast_dev(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->vhub->lock, flags);
+
+ DDBG(d, "stop\n");
+
+ d->driver = NULL;
+ d->gadget.speed = USB_SPEED_UNKNOWN;
+
+ ast_vhub_dev_nuke(d);
+
+ if (d->enabled)
+ ast_vhub_dev_disable(d);
+
+ spin_unlock_irqrestore(&d->vhub->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops ast_vhub_udc_ops = {
+ .get_frame = ast_vhub_udc_get_frame,
+ .wakeup = ast_vhub_udc_wakeup,
+ .pullup = ast_vhub_udc_pullup,
+ .udc_start = ast_vhub_udc_start,
+ .udc_stop = ast_vhub_udc_stop,
+ .match_ep = ast_vhub_udc_match_ep,
+};
+
+void ast_vhub_dev_suspend(struct ast_vhub_dev *d)
+{
+ if (d->driver && d->driver->suspend) {
+ spin_unlock(&d->vhub->lock);
+ d->driver->suspend(&d->gadget);
+ spin_lock(&d->vhub->lock);
+ }
+}
+
+void ast_vhub_dev_resume(struct ast_vhub_dev *d)
+{
+ if (d->driver && d->driver->resume) {
+ spin_unlock(&d->vhub->lock);
+ d->driver->resume(&d->gadget);
+ spin_lock(&d->vhub->lock);
+ }
+}
+
+void ast_vhub_dev_reset(struct ast_vhub_dev *d)
+{
+ /* No driver, just disable the device and return */
+ if (!d->driver) {
+ ast_vhub_dev_disable(d);
+ return;
+ }
+
+ /* If the port isn't enabled, just enable it */
+ if (!d->enabled) {
+ DDBG(d, "Reset of disabled device, enabling...\n");
+ ast_vhub_dev_enable(d);
+ } else {
+ DDBG(d, "Reset of enabled device, resetting...\n");
+ spin_unlock(&d->vhub->lock);
+ usb_gadget_udc_reset(&d->gadget, d->driver);
+ spin_lock(&d->vhub->lock);
+
+ /*
+ * Disable then re-enable the HW; this clears the address
+ * and speed settings.
+ */
+ ast_vhub_dev_disable(d);
+ ast_vhub_dev_enable(d);
+ }
+}
+
+void ast_vhub_del_dev(struct ast_vhub_dev *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->vhub->lock, flags);
+ if (!d->registered) {
+ spin_unlock_irqrestore(&d->vhub->lock, flags);
+ return;
+ }
+ d->registered = false;
+ spin_unlock_irqrestore(&d->vhub->lock, flags);
+
+ usb_del_gadget_udc(&d->gadget);
+ device_unregister(d->port_dev);
+ kfree(d->epns);
+}
+
+static void ast_vhub_dev_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
+{
+ struct ast_vhub_dev *d = &vhub->ports[idx].dev;
+ struct device *parent = &vhub->pdev->dev;
+ int rc;
+
+ d->vhub = vhub;
+ d->index = idx;
+ d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx + 1);
+ d->regs = vhub->regs + 0x100 + 0x10 * idx;
+
+ ast_vhub_init_ep0(vhub, &d->ep0, d);
+
+ /*
+ * A USB device can have up to 30 endpoints besides control
+ * endpoint 0.
+ */
+ d->max_epns = min_t(u32, vhub->max_epns, 30);
+ d->epns = kcalloc(d->max_epns, sizeof(*d->epns), GFP_KERNEL);
+ if (!d->epns)
+ return -ENOMEM;
+
+ /*
+ * The UDC core really needs us to have separate and uniquely
+ * named "parent" devices for each port so we create a sub device
+ * here for that purpose
+ */
+ d->port_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!d->port_dev) {
+ rc = -ENOMEM;
+ goto fail_alloc;
+ }
+ device_initialize(d->port_dev);
+ d->port_dev->release = ast_vhub_dev_release;
+ d->port_dev->parent = parent;
+ dev_set_name(d->port_dev, "%s:p%d", dev_name(parent), idx + 1);
+ rc = device_add(d->port_dev);
+ if (rc)
+ goto fail_add;
+
+ /* Populate gadget */
+ INIT_LIST_HEAD(&d->gadget.ep_list);
+ d->gadget.ops = &ast_vhub_udc_ops;
+ d->gadget.ep0 = &d->ep0.ep;
+ d->gadget.name = KBUILD_MODNAME;
+ if (vhub->force_usb1)
+ d->gadget.max_speed = USB_SPEED_FULL;
+ else
+ d->gadget.max_speed = USB_SPEED_HIGH;
+ d->gadget.speed = USB_SPEED_UNKNOWN;
+ d->gadget.dev.of_node = vhub->pdev->dev.of_node;
+ d->gadget.dev.of_node_reused = true;
+
+ rc = usb_add_gadget_udc(d->port_dev, &d->gadget);
+ if (rc != 0)
+ goto fail_udc;
+ d->registered = true;
+
+ return 0;
+ fail_udc:
+ device_del(d->port_dev);
+ fail_add:
+ put_device(d->port_dev);
+ fail_alloc:
+ kfree(d->epns);
+
+ return rc;
+}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
new file mode 100644
index 000000000..b4cf46249
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
+ *
+ * ep0.c - Endpoint 0 handling
+ *
+ * Copyright 2017 IBM Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/dma-mapping.h>
+
+#include "vhub.h"
+
+int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
+{
+ struct usb_request *req = &ep->ep0.req.req;
+ int rc;
+
+ if (WARN_ON(ep->d_idx != 0))
+ return std_req_stall;
+ if (WARN_ON(!ep->ep0.dir_in))
+ return std_req_stall;
+ if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
+ return std_req_stall;
+ if (WARN_ON(req->status == -EINPROGRESS))
+ return std_req_stall;
+
+ req->buf = ptr;
+ req->length = len;
+ req->complete = NULL;
+ req->zero = true;
+
+ /*
+ * Call internal queue directly after dropping the lock. This is
+ * safe to do as the reply is always the last thing done when
+ * processing a SETUP packet, usually as a tail call
+ */
+ spin_unlock(&ep->vhub->lock);
+ if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
+ rc = std_req_stall;
+ else
+ rc = std_req_data;
+ spin_lock(&ep->vhub->lock);
+ return rc;
+}
+
+int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
+{
+ u8 *buffer = ep->buf;
+ unsigned int i;
+ va_list args;
+
+ va_start(args, len);
+
+ /* Copy data directly into EP buffer */
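+ /* (u8 varargs are promoted to int, hence va_arg(args, int)) */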
+ for (i = 0; i < len; i++)
+ buffer[i] = va_arg(args, int);
+ va_end(args);
+
+ /* req->buf NULL means data is already there */
+ return ast_vhub_reply(ep, NULL, len);
+}
+
+void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
+{
+ struct usb_ctrlrequest crq;
+ enum std_req_rc std_req_rc;
+ int rc = -ENODEV;
+
+ if (WARN_ON(ep->d_idx != 0))
+ return;
+
+ /*
+ * Grab the setup packet from the chip and byteswap
+ * interesting fields
+ */
+ memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));
+
+ EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
+ crq.bRequestType, crq.bRequest,
+ le16_to_cpu(crq.wValue),
+ le16_to_cpu(crq.wIndex),
+ le16_to_cpu(crq.wLength),
+ (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
+ ep->ep0.state);
+
+ /*
+ * Check our state, cancel pending requests if needed
+ *
+ * Note: Under some circumstances, we can get a new setup
+ * packet while waiting for the stall ack, just accept it.
+ *
+ * In any case, a SETUP packet in wrong state should have
+ * reset the HW state machine, so let's just log, nuke
+ * requests, move on.
+ */
+ if (ep->ep0.state != ep0_state_token &&
+ ep->ep0.state != ep0_state_stall) {
+ EPDBG(ep, "wrong state\n");
+ ast_vhub_nuke(ep, -EIO);
+ }
+
+ /* Calculate next state for EP0 */
+ ep->ep0.state = ep0_state_data;
+ ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);
+
+ /* If this is the vHub, we handle requests differently */
+ std_req_rc = std_req_driver;
+ if (ep->dev == NULL) {
+ if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ std_req_rc = ast_vhub_std_hub_request(ep, &crq);
+ else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ std_req_rc = ast_vhub_class_hub_request(ep, &crq);
+ else
+ std_req_rc = std_req_stall;
+ } else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ std_req_rc = ast_vhub_std_dev_request(ep, &crq);
+
+ /* Act upon result */
+ switch(std_req_rc) {
+ case std_req_complete:
+ goto complete;
+ case std_req_stall:
+ goto stall;
+ case std_req_driver:
+ break;
+ case std_req_data:
+ return;
+ }
+
+ /* Pass request up to the gadget driver */
+ if (WARN_ON(!ep->dev))
+ goto stall;
+ if (ep->dev->driver) {
+ EPDBG(ep, "forwarding to gadget...\n");
+ spin_unlock(&ep->vhub->lock);
+ rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
+ spin_lock(&ep->vhub->lock);
+ EPDBG(ep, "driver returned %d\n", rc);
+ } else {
+ EPDBG(ep, "no gadget for request !\n");
+ }
+ if (rc >= 0)
+ return;
+
+ stall:
+ EPDBG(ep, "stalling\n");
+ writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
+ ep->ep0.state = ep0_state_stall;
+ ep->ep0.dir_in = false;
+ return;
+
+ complete:
+ EPVDBG(ep, "sending [in] status with no data\n");
+ writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
+ ep->ep0.state = ep0_state_status;
+ ep->ep0.dir_in = false;
+}
+
+
+static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
+ struct ast_vhub_req *req)
+{
+ unsigned int chunk;
+ u32 reg;
+
+ /* If this is a 0-length request, it's the gadget trying to
+ * send a status on our behalf. We take it from here.
+ */
+ if (req->req.length == 0)
+ req->last_desc = 1;
+
+ /* Are we done ? Complete request, otherwise wait for next interrupt */
+ if (req->last_desc >= 0) {
+ EPVDBG(ep, "complete send %d/%d\n",
+ req->req.actual, req->req.length);
+ ep->ep0.state = ep0_state_status;
+ writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
+ ast_vhub_done(ep, req, 0);
+ return;
+ }
+
+ /*
+ * Next chunk cropped to max packet size. Also check if this
+ * is the last packet
+ */
+ chunk = req->req.length - req->req.actual;
+ if (chunk > ep->ep.maxpacket)
+ chunk = ep->ep.maxpacket;
+ else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
+ req->last_desc = 1;
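+ /*
+ * Example: a 100-byte reply with maxpacket 64 goes out as a
+ * 64-byte chunk then a short 36-byte one, which marks the
+ * transfer done; a terminating ZLP is only queued when
+ * req.zero is set and the length is a maxpacket multiple.
+ */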
+
+ EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
+ chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);
+
+ /*
+ * Copy data if any (internal requests already have data
+ * in the EP buffer)
+ */
+ if (chunk && req->req.buf)
+ memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
+
+ vhub_dma_workaround(ep->buf);
+
+ /* Remember chunk size and trigger send */
+ reg = VHUB_EP0_SET_TX_LEN(chunk);
+ writel(reg, ep->ep0.ctlstat);
+ writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
+ req->req.actual += chunk;
+}
+
+static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
+{
+ EPVDBG(ep, "rx prime\n");
+
+ /* Prime endpoint for receiving data */
+ writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
+}
+
+static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ unsigned int len)
+{
+ unsigned int remain;
+ int rc = 0;
+
+ /* We are receiving... grab request */
+ remain = req->req.length - req->req.actual;
+
+ EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);
+
+ /* Are we getting more than asked ? */
+ if (len > remain) {
+ EPDBG(ep, "receiving too much (ovf: %d) !\n",
+ len - remain);
+ len = remain;
+ rc = -EOVERFLOW;
+ }
+
+ /* The hardware returned a wrong data length */
+ if (len < ep->ep.maxpacket && len != remain) {
+ EPDBG(ep, "using expected data len instead\n");
+ len = remain;
+ }
+
+ if (len && req->req.buf)
+ memcpy(req->req.buf + req->req.actual, ep->buf, len);
+ req->req.actual += len;
+
+ /* Done ? */
+ if (len < ep->ep.maxpacket || len == remain) {
+ ep->ep0.state = ep0_state_status;
+ writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
+ ast_vhub_done(ep, req, rc);
+ } else
+ ast_vhub_ep0_rx_prime(ep);
+}
+
+void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
+{
+ struct ast_vhub_req *req;
+ struct ast_vhub *vhub = ep->vhub;
+ struct device *dev = &vhub->pdev->dev;
+ bool stall = false;
+ u32 stat;
+
+ /* Read EP0 status */
+ stat = readl(ep->ep0.ctlstat);
+
+ /* Grab current request if any */
+ req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
+
+ EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
+ stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);
+
+ switch(ep->ep0.state) {
+ case ep0_state_token:
+ /* There should be no request queued in that state... */
+ if (req) {
+ dev_warn(dev, "request present while in TOKEN state\n");
+ ast_vhub_nuke(ep, -EINVAL);
+ }
+ dev_warn(dev, "ack while in TOKEN state\n");
+ stall = true;
+ break;
+ case ep0_state_data:
+ /* Check the state bits corresponding to our direction */
+ if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
+ (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
+ (ep->ep0.dir_in != in_ack)) {
+ /* In that case, ignore interrupt */
+ dev_warn(dev, "irq state mismatch\n");
+ break;
+ }
+ /*
+ * We are in data phase and there's no request, something is
+ * wrong, stall
+ */
+ if (!req) {
+ dev_warn(dev, "data phase, no request\n");
+ stall = true;
+ break;
+ }
+
+ /* We have a request, handle data transfers */
+ if (ep->ep0.dir_in)
+ ast_vhub_ep0_do_send(ep, req);
+ else
+ ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
+ return;
+ case ep0_state_status:
+ /* Nuke stale requests */
+ if (req) {
+ dev_warn(dev, "request present while in STATUS state\n");
+ ast_vhub_nuke(ep, -EINVAL);
+ }
+
+ /*
+ * If the status phase completes with the wrong ack, stall
+ * the endpoint just in case, to abort whatever the host
+ * was doing.
+ */
+ if (ep->ep0.dir_in == in_ack) {
+ dev_warn(dev, "status direction mismatch\n");
+ stall = true;
+ }
+ break;
+ case ep0_state_stall:
+ /*
+ * There shouldn't be any request left, but nuke just in case
+ * otherwise the stale request will block subsequent ones
+ */
+ ast_vhub_nuke(ep, -EIO);
+ break;
+ }
+
+ /* Reset to token state or stall */
+ if (stall) {
+ writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
+ ep->ep0.state = ep0_state_stall;
+ } else
+ ep->ep0.state = ep0_state_token;
+}
+
+static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ gfp_t gfp_flags)
+{
+ struct ast_vhub_req *req = to_ast_req(u_req);
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub *vhub = ep->vhub;
+ struct device *dev = &vhub->pdev->dev;
+ unsigned long flags;
+
+ /* Paranoid checks */
+ if (!u_req || (!u_req->complete && !req->internal)) {
+ dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
+ if (u_req) {
+ dev_warn(dev, "complete=%p internal=%d\n",
+ u_req->complete, req->internal);
+ }
+ return -EINVAL;
+ }
+
+ /* Not endpoint 0 ? */
+ if (WARN_ON(ep->d_idx != 0))
+ return -EINVAL;
+
+ /* Disabled device */
+ if (ep->dev && !ep->dev->enabled)
+ return -ESHUTDOWN;
+
+ /* Data, no buffer and not internal ? */
+ if (u_req->length && !u_req->buf && !req->internal) {
+ dev_warn(dev, "Request with no buffer !\n");
+ return -EINVAL;
+ }
+
+ EPVDBG(ep, "enqueue req @%p\n", req);
+ EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
+ u_req->length, u_req->zero,
+ u_req->short_not_ok, ep->ep0.dir_in);
+
+ /* Initialize request progress fields */
+ u_req->status = -EINPROGRESS;
+ u_req->actual = 0;
+ req->last_desc = -1;
+ req->active = false;
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* EP0 can only support a single request at a time */
+ if (!list_empty(&ep->queue) ||
+ ep->ep0.state == ep0_state_token ||
+ ep->ep0.state == ep0_state_stall) {
+ dev_warn(dev, "EP0: Request in wrong state\n");
+ EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
+ list_empty(&ep->queue), ep->ep0.state);
+ spin_unlock_irqrestore(&vhub->lock, flags);
+ return -EBUSY;
+ }
+
+ /* Add request to list and kick processing if empty */
+ list_add_tail(&req->queue, &ep->queue);
+
+ if (ep->ep0.dir_in) {
+ /* IN request, send data */
+ ast_vhub_ep0_do_send(ep, req);
+ } else if (u_req->length == 0) {
+ /* 0-len request, send completion as rx */
+ EPVDBG(ep, "0-length rx completion\n");
+ ep->ep0.state = ep0_state_status;
+ writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
+ ast_vhub_done(ep, req, 0);
+ } else {
+ /* OUT request, start receiver */
+ ast_vhub_ep0_rx_prime(ep);
+ }
+
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ return 0;
+}
+
+static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
+{
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub *vhub = ep->vhub;
+ struct ast_vhub_req *req;
+ unsigned long flags;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* Only one request can be in the queue */
+ req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
+
+ /* Is it ours ? */
+ if (req && u_req == &req->req) {
+ EPVDBG(ep, "dequeue req @%p\n", req);
+
+ /*
+ * We don't have to deal with "active" as all
+ * DMAs go to the EP buffers, not the request.
+ */
+ ast_vhub_done(ep, req, -ECONNRESET);
+
+ /* We do stall the EP to clean things up in HW */
+ writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
+ ep->ep0.state = ep0_state_status;
+ ep->ep0.dir_in = false;
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&vhub->lock, flags);
+ return rc;
+}
+
+
+static const struct usb_ep_ops ast_vhub_ep0_ops = {
+ .queue = ast_vhub_ep0_queue,
+ .dequeue = ast_vhub_ep0_dequeue,
+ .alloc_request = ast_vhub_alloc_request,
+ .free_request = ast_vhub_free_request,
+};
+
+void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
+{
+ struct ast_vhub_ep *ep = &dev->ep0;
+
+ ast_vhub_nuke(ep, -EIO);
+ ep->ep0.state = ep0_state_token;
+}
+
+
+void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
+ struct ast_vhub_dev *dev)
+{
+ memset(ep, 0, sizeof(*ep));
+
+ INIT_LIST_HEAD(&ep->ep.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.ops = &ast_vhub_ep0_ops;
+ ep->ep.name = "ep0";
+ ep->ep.caps.type_control = true;
+ usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
+ ep->d_idx = 0;
+ ep->dev = dev;
+ ep->vhub = vhub;
+ ep->ep0.state = ep0_state_token;
+ INIT_LIST_HEAD(&ep->ep0.req.queue);
+ ep->ep0.req.internal = true;
+
+ /* Small difference between vHub and devices */
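+ /*
+ * The vHub's own EP0 uses slot 0 of the shared SETUP and
+ * buffer areas; each downstream device uses slot index + 1.
+ */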
+ if (dev) {
+ ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
+ ep->ep0.setup = vhub->regs +
+ AST_VHUB_SETUP0 + 8 * (dev->index + 1);
+ ep->buf = vhub->ep0_bufs +
+ AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
+ ep->buf_dma = vhub->ep0_bufs_dma +
+ AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
+ } else {
+ ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
+ ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
+ ep->buf = vhub->ep0_bufs;
+ ep->buf_dma = vhub->ep0_bufs_dma;
+ }
+}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
new file mode 100644
index 000000000..b5252880b
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
+ *
+ * epn.c - Generic endpoints management
+ *
+ * Copyright 2017 IBM Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/dma-mapping.h>
+
+#include "vhub.h"
+
+#define EXTRA_CHECKS
+
+#ifdef EXTRA_CHECKS
+#define CHECK(ep, expr, fmt...) \
+ do { \
+ if (!(expr)) EPDBG(ep, "CHECK:" fmt); \
+ } while(0)
+#else
+#define CHECK(ep, expr, fmt...) do { } while(0)
+#endif
+
+static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
+{
+ unsigned int act = req->req.actual;
+ unsigned int len = req->req.length;
+ unsigned int chunk;
+
+ /* There should be no DMA ongoing */
+ WARN_ON(req->active);
+
+ /* Calculate next chunk size */
+ chunk = len - act;
+ if (chunk > ep->ep.maxpacket)
+ chunk = ep->ep.maxpacket;
+ else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
+ req->last_desc = 1;
+
+ EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
+ req, act, len, chunk, req->last_desc);
+
+ /* If DMA is unavailable, use the staging EP buffer */
+ if (!req->req.dma) {
+
+ /* For IN transfers, copy data over first */
+ if (ep->epn.is_in) {
+ memcpy(ep->buf, req->req.buf + act, chunk);
+ vhub_dma_workaround(ep->buf);
+ }
+ writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+ } else {
+ if (ep->epn.is_in)
+ vhub_dma_workaround(req->req.buf);
+ writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+ }
+
+ /* Start DMA */
+ req->active = true;
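+ /*
+ * Two-step kick: latch the transfer size first, then write
+ * the same value with the kick bit set to start the DMA.
+ */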
+ writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
+ ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+ writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
+ ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+}
+
+static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
+{
+ struct ast_vhub_req *req;
+ unsigned int len;
+ u32 stat;
+
+ /* Read EP status */
+ stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+
+ /* Grab current request if any */
+ req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
+
+ EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
+ stat, ep->epn.is_in, req, req ? req->active : 0);
+
+ /* If there is no request, bail out: it must have been dequeued */
+ if (!req)
+ return;
+
+ /*
+ * The head request isn't active: the previously active one
+ * was probably dequeued, so move on and kick this one
+ */
+ if (!req->active)
+ goto next_chunk;
+
+ /* Check if HW has moved on */
+ if (VHUB_EP_DMA_RPTR(stat) != 0) {
+ EPDBG(ep, "DMA read pointer not 0 !\n");
+ return;
+ }
+
+ /* No current DMA ongoing */
+ req->active = false;
+
+ /* Grab length out of HW */
+ len = VHUB_EP_DMA_TX_SIZE(stat);
+
+ /* If not using DMA, copy data out if needed */
+ if (!req->req.dma && !ep->epn.is_in && len)
+ memcpy(req->req.buf + req->req.actual, ep->buf, len);
+
+ /* Adjust size */
+ req->req.actual += len;
+
+ /* Check for short packet */
+ if (len < ep->ep.maxpacket)
+ req->last_desc = 1;
+
+ /* That's it ? complete the request and pick a new one */
+ if (req->last_desc >= 0) {
+ ast_vhub_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
+ queue);
+
+ /*
+ * Due to lock dropping inside "done" the next request could
+ * already be active, so check for that and bail if needed.
+ */
+ if (!req || req->active)
+ return;
+ }
+
+ next_chunk:
+ ast_vhub_epn_kick(ep, req);
+}
+
+static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
+{
+ /*
+ * d_next == d_last means descriptor list empty to HW,
+ * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
+ * in the list
+ */
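+ /*
+ * Example: with 256 descriptors and d_next == d_last this
+ * returns 255; one slot always stays unused so a full ring
+ * can be told apart from an empty one.
+ */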
+ return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
+ (AST_VHUB_DESCS_COUNT - 1);
+}
+
+static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
+ struct ast_vhub_req *req)
+{
+ struct ast_vhub_desc *desc = NULL;
+ unsigned int act = req->act_count;
+ unsigned int len = req->req.length;
+ unsigned int chunk;
+
+ /* Mark request active if not already */
+ req->active = true;
+
+ /* If the request was already completely written, do nothing */
+ if (req->last_desc >= 0)
+ return;
+
+ EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
+ act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));
+
+ /* While we can create descriptors */
+ while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
+ unsigned int d_num;
+
+ /* Grab next free descriptor */
+ d_num = ep->epn.d_next;
+ desc = &ep->epn.descs[d_num];
+ ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
+
+ /* Calculate next chunk size */
+ chunk = len - act;
+ if (chunk <= ep->epn.chunk_max) {
+ /*
+ * Is this the last packet ? Because of having up to 8
+ * packets in a descriptor we can't just compare "chunk"
+ * with ep.maxpacket. We have to see if it's a multiple
+ * of it to know if we have to send a zero packet.
+ * Sadly that involves a modulo which is a bit expensive
+ * but probably still better than not doing it.
+ */
+ if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
+ req->last_desc = d_num;
+ } else {
+ chunk = ep->epn.chunk_max;
+ }
+
+ EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
+ act, len, chunk, req->last_desc, d_num,
+ ast_vhub_count_free_descs(ep));
+
+ /* Populate descriptor */
+ desc->w0 = cpu_to_le32(req->req.dma + act);
+
+ /* Interrupt if end of request or no more descriptors */
+
+ /*
+ * TODO: Be smarter about it, if we don't have enough
+ * descriptors request an interrupt before queue empty
+ * or so in order to be able to populate more before
+ * the HW runs out. This isn't a problem at the moment
+ * as we use 256 descriptors and only put at most one
+ * request in the ring.
+ */
+ desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
+ if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
+ desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);
+
+ /* Account packet */
+ req->act_count = act = act + chunk;
+ }
+
+ if (likely(desc))
+ vhub_dma_workaround(desc);
+
+ /* Tell HW about new descriptors */
+ writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
+ ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+
+ EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
+ ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
+}
+
+static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
+{
+ struct ast_vhub_req *req;
+ unsigned int len, d_last;
+ u32 stat, stat1;
+
+ /* Read EP status twice until stable to work around a HW race */
+ do {
+ stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+ stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+ } while(stat != stat1);
+
+ /* Extract RPTR */
+ d_last = VHUB_EP_DMA_RPTR(stat);
+
+ /* Grab current request if any */
+ req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
+
+ EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
+ stat, ep->epn.is_in, ep->epn.d_last, d_last);
+
+ /* Check all completed descriptors */
+ while (ep->epn.d_last != d_last) {
+ struct ast_vhub_desc *desc;
+ unsigned int d_num;
+ bool is_last_desc;
+
+ /* Grab next completed descriptor */
+ d_num = ep->epn.d_last;
+ desc = &ep->epn.descs[d_num];
+ ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
+
+ /* Grab len out of descriptor */
+ len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));
+
+ EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
+ d_num, len, req, req ? req->active : 0);
+
+ /* If no active request pending, move on */
+ if (!req || !req->active)
+ continue;
+
+ /* Adjust size */
+ req->req.actual += len;
+
+ /* Is that the last chunk ? */
+ is_last_desc = req->last_desc == d_num;
+ CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
+ (req->req.actual >= req->req.length &&
+ !req->req.zero)),
+ "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
+ "r.len=%d r.zero=%d mp=%d\n",
+ is_last_desc, len, req->req.actual, req->req.length,
+ req->req.zero, ep->ep.maxpacket);
+
+ if (is_last_desc) {
+ /*
+ * Because we can only have one request at a time
+ * in our descriptor list in this implementation,
+ * d_last and ep->d_last should now be equal
+ */
+ CHECK(ep, d_last == ep->epn.d_last,
+ "DMA read ptr mismatch %d vs %d\n",
+ d_last, ep->epn.d_last);
+
+ /* Note: done will drop and re-acquire the lock */
+ ast_vhub_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_vhub_req,
+ queue);
+ break;
+ }
+ }
+
+ /* More work ? */
+ if (req)
+ ast_vhub_epn_kick_desc(ep, req);
+}
+
+void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
+{
+ if (ep->epn.desc_mode)
+ ast_vhub_epn_handle_ack_desc(ep);
+ else
+ ast_vhub_epn_handle_ack(ep);
+}
+
+static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ gfp_t gfp_flags)
+{
+ struct ast_vhub_req *req = to_ast_req(u_req);
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub *vhub = ep->vhub;
+ unsigned long flags;
+ bool empty;
+ int rc;
+
+ /* Paranoid checks */
+ if (!u_req || !u_req->complete || !u_req->buf) {
+ dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
+ if (u_req) {
+ dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
+ u_req->complete, req->internal);
+ }
+ return -EINVAL;
+ }
+
+ /* Endpoint enabled ? */
+ if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
+ !ep->dev->enabled) {
+ EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
+ return -ESHUTDOWN;
+ }
+
+ /* Map request for DMA if possible. For now, the rule for DMA is
+ * that:
+ *
+ * * For single stage mode (no descriptors):
+ *
+ * - The buffer is aligned to a 8 bytes boundary (HW requirement)
+ * - For an OUT endpoint, the request size is a multiple of the EP
+ * packet size (otherwise the controller will DMA past the end
+ * of the buffer if the host is sending a too long packet).
+ *
+ * * For descriptor mode (tx only for now), always.
+ *
+ * We could relax the latter by making the decision to use the bounce
+ * buffer based on the size of a given *segment* of the request rather
+ * than the whole request.
+ */
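+ /*
+ * Example: a 13-byte OUT request on a 64-byte bulk endpoint
+ * is not a packet-size multiple and thus takes the bounce
+ * buffer path (u_req->dma left at 0), while an IN request
+ * only needs the 8-byte alignment to be mapped.
+ */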
+ if (ep->epn.desc_mode ||
+ ((((unsigned long)u_req->buf & 7) == 0) &&
+ (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
+ rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
+ ep->epn.is_in);
+ if (rc) {
+ dev_warn(&vhub->pdev->dev,
+ "Request mapping failure %d\n", rc);
+ return rc;
+ }
+ } else
+ u_req->dma = 0;
+
+ EPVDBG(ep, "enqueue req @%p\n", req);
+ EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
+ u_req->length, (u32)u_req->dma, u_req->zero,
+ u_req->short_not_ok, u_req->no_interrupt,
+ ep->epn.is_in);
+
+ /* Initialize request progress fields */
+ u_req->status = -EINPROGRESS;
+ u_req->actual = 0;
+ req->act_count = 0;
+ req->active = false;
+ req->last_desc = -1;
+ spin_lock_irqsave(&vhub->lock, flags);
+ empty = list_empty(&ep->queue);
+
+ /* Add request to list and kick processing if empty */
+ list_add_tail(&req->queue, &ep->queue);
+ if (empty) {
+ if (ep->epn.desc_mode)
+ ast_vhub_epn_kick_desc(ep, req);
+ else
+ ast_vhub_epn_kick(ep, req);
+ }
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ return 0;
+}
+
+static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
+ bool restart_ep)
+{
+ u32 state, reg, loops;
+
+ /* Stop DMA activity */
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+
+ /* Wait for it to complete */
+ for (loops = 0; loops < 1000; loops++) {
+ state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ state = VHUB_EP_DMA_PROC_STATUS(state);
+ if (state == EP_DMA_PROC_RX_IDLE ||
+ state == EP_DMA_PROC_TX_IDLE)
+ break;
+ udelay(1);
+ }
+ if (loops >= 1000)
+ dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");
+
+ /* If we don't have to restart the endpoint, that's it */
+ if (!restart_ep)
+ return;
+
+ /* Restart the endpoint */
+ if (ep->epn.desc_mode) {
+ /*
+ * Take out descriptors by resetting the DMA read
+ * pointer to be equal to the CPU write pointer.
+ *
+ * Note: If we ever support creating descriptors for
+ * requests that aren't the head of the queue, we
+ * may have to do something more complex here,
+ * especially if the request being taken out is
+ * not the current head descriptors.
+ */
+ reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
+ VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
+ writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+
+ /* Then turn it back on */
+ writel(ep->epn.dma_conf,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ } else {
+ /* Single mode: just turn it back on */
+ writel(ep->epn.dma_conf,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ }
+}
+
+static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
+{
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub *vhub = ep->vhub;
+ struct ast_vhub_req *req = NULL, *iter;
+ unsigned long flags;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* Make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != u_req)
+ continue;
+ req = iter;
+ break;
+ }
+
+ if (req) {
+ EPVDBG(ep, "dequeue req @%p active=%d\n",
+ req, req->active);
+ if (req->active)
+ ast_vhub_stop_active_req(ep, true);
+ ast_vhub_done(ep, req, -ECONNRESET);
+ rc = 0;
+ }
+
+ spin_unlock_irqrestore(&vhub->lock, flags);
+ return rc;
+}
+
+void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
+{
+ u32 reg;
+
+ if (WARN_ON(ep->d_idx == 0))
+ return;
+ reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
+ if (ep->epn.stalled || ep->epn.wedged)
+ reg |= VHUB_EP_CFG_STALL_CTRL;
+ else
+ reg &= ~VHUB_EP_CFG_STALL_CTRL;
+ writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);
+
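+ /*
+ * Per the USB spec, clearing an endpoint halt resets its
+ * data toggle to DATA0, so ask the HW to reset the toggle
+ * whenever the stall is lifted.
+ */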
+ if (!ep->epn.stalled && !ep->epn.wedged)
+ writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
+ ep->vhub->regs + AST_VHUB_EP_TOGGLE);
+}
+
+static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt,
+ bool wedge)
+{
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub *vhub = ep->vhub;
+ unsigned long flags;
+
+ EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);
+
+ if (!u_ep || !u_ep->desc)
+ return -EINVAL;
+ if (ep->d_idx == 0)
+ return 0;
+ if (ep->epn.is_iso)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* Fail with still-busy IN endpoints */
+ if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
+ spin_unlock_irqrestore(&vhub->lock, flags);
+ return -EAGAIN;
+ }
+ ep->epn.stalled = halt;
+ ep->epn.wedged = wedge;
+ ast_vhub_update_epn_stall(ep);
+
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ return 0;
+}
+
+static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
+{
+ return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
+}
+
+static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
+{
+ return ast_vhub_set_halt_and_wedge(u_ep, true, true);
+}
+
+static int ast_vhub_epn_disable(struct usb_ep* u_ep)
+{
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub *vhub = ep->vhub;
+ unsigned long flags;
+ u32 imask, ep_ier;
+
+ EPDBG(ep, "Disabling !\n");
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ ep->epn.enabled = false;
+
+ /* Stop active DMA if any */
+ ast_vhub_stop_active_req(ep, false);
+
+ /* Disable endpoint */
+ writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
+
+ /* Disable ACK interrupt */
+ imask = VHUB_EP_IRQ(ep->epn.g_idx);
+ ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
+ ep_ier &= ~imask;
+ writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
+ writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
+
+ /* Nuke all pending requests */
+ ast_vhub_nuke(ep, -ESHUTDOWN);
+
+ /* No more endpoint descriptor associated with this EP */
+ ep->ep.desc = NULL;
+
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ return 0;
+}
+
+static int ast_vhub_epn_enable(struct usb_ep* u_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+ struct ast_vhub_dev *dev;
+ struct ast_vhub *vhub;
+ u16 maxpacket, type;
+ unsigned long flags;
+ u32 ep_conf, ep_ier, imask;
+
+ /* Check arguments */
+ if (!u_ep || !desc)
+ return -EINVAL;
+
+ maxpacket = usb_endpoint_maxp(desc);
+ if (!ep->d_idx || !ep->dev ||
+ desc->bDescriptorType != USB_DT_ENDPOINT ||
+ maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
+ EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
+ ep->d_idx, ep->dev, desc->bDescriptorType,
+ maxpacket, ep->ep.maxpacket);
+ return -EINVAL;
+ }
+ if (ep->d_idx != usb_endpoint_num(desc)) {
+ EPDBG(ep, "EP number mismatch !\n");
+ return -EINVAL;
+ }
+
+ if (ep->epn.enabled) {
+ EPDBG(ep, "Already enabled\n");
+ return -EBUSY;
+ }
+ dev = ep->dev;
+ vhub = ep->vhub;
+
+ /* Check device state */
+ if (!dev->driver) {
+ EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
+ dev->driver, dev->gadget.speed);
+ return -ESHUTDOWN;
+ }
+
+ /* Grab some info from the descriptor */
+ ep->epn.is_in = usb_endpoint_dir_in(desc);
+ ep->ep.maxpacket = maxpacket;
+ type = usb_endpoint_type(desc);
+ ep->epn.d_next = ep->epn.d_last = 0;
+ ep->epn.is_iso = false;
+ ep->epn.stalled = false;
+ ep->epn.wedged = false;
+
+ EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
+ ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
+ usb_endpoint_num(desc), maxpacket);
+
+ /* Can we use DMA descriptor mode ? */
+ ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
+ if (ep->epn.desc_mode)
+ memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);
+
+ /*
+ * Large send function can send up to 8 packets from
+ * one descriptor with a limit of 4095 bytes.
+ */
+ ep->epn.chunk_max = ep->ep.maxpacket;
+ if (ep->epn.is_in) {
+ ep->epn.chunk_max <<= 3;
+ while (ep->epn.chunk_max > 4095)
+ ep->epn.chunk_max -= ep->ep.maxpacket;
+ }
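+ /*
+ * Example: a 512-byte bulk IN endpoint starts at 8 * 512 =
+ * 4096, one byte over the 4095 limit, and gets trimmed to
+ * 3584, i.e. 7 packets per descriptor.
+ */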
+
+ switch(type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ EPDBG(ep, "Only one control endpoint\n");
+ return -EINVAL;
+ case USB_ENDPOINT_XFER_INT:
+ ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
+ ep->epn.is_iso = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Encode the rest of the EP config register */
+ if (maxpacket < 1024)
+ ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
+ if (!ep->epn.is_in)
+ ep_conf |= VHUB_EP_CFG_DIR_OUT;
+ ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
+ ep_conf |= VHUB_EP_CFG_ENABLE;
+ ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
+ EPVDBG(ep, "config=%08x\n", ep_conf);
+
+ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* Disable HW and reset DMA */
+ writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
+ writel(VHUB_EP_DMA_CTRL_RESET,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+
+ /* Configure and enable */
+ writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);
+
+ if (ep->epn.desc_mode) {
+ /* Clear DMA status, including the DMA read ptr */
+ writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+
+ /* Set descriptor base */
+ writel(ep->epn.descs_dma,
+ ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+
+ /* Set base DMA config value */
+ ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
+ if (ep->epn.is_in)
+ ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;
+
+ /* First reset and disable all operations */
+ writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+
+ /* Enable descriptor mode */
+ writel(ep->epn.dma_conf,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ } else {
+ /* Set base DMA config value */
+ ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;
+
+ /* Reset and switch to single stage mode */
+ writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ writel(ep->epn.dma_conf,
+ ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
+ }
+
+ /* Cleanup data toggle just in case */
+ writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
+ vhub->regs + AST_VHUB_EP_TOGGLE);
+
+ /* Cleanup and enable ACK interrupt */
+ imask = VHUB_EP_IRQ(ep->epn.g_idx);
+ writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
+ ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
+ ep_ier |= imask;
+ writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
+
+ /* Woot, we are online ! */
+ ep->epn.enabled = true;
+
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ return 0;
+}
+
+static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
+{
+ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
+
+ if (WARN_ON(!ep->dev || !ep->d_idx))
+ return;
+
+ EPDBG(ep, "Releasing endpoint\n");
+
+ /* Take it out of the EP list */
+ list_del_init(&ep->ep.ep_list);
+
+ /* Mark the address free in the device */
+ ep->dev->epns[ep->d_idx - 1] = NULL;
+
+ /* Free name & DMA buffers */
+ kfree(ep->ep.name);
+ ep->ep.name = NULL;
+ dma_free_coherent(&ep->vhub->pdev->dev,
+ AST_VHUB_EPn_MAX_PACKET +
+ 8 * AST_VHUB_DESCS_COUNT,
+ ep->buf, ep->buf_dma);
+ ep->buf = NULL;
+ ep->epn.descs = NULL;
+
+ /* Mark free */
+ ep->dev = NULL;
+}
+
+static const struct usb_ep_ops ast_vhub_epn_ops = {
+ .enable = ast_vhub_epn_enable,
+ .disable = ast_vhub_epn_disable,
+ .dispose = ast_vhub_epn_dispose,
+ .queue = ast_vhub_epn_queue,
+ .dequeue = ast_vhub_epn_dequeue,
+ .set_halt = ast_vhub_epn_set_halt,
+ .set_wedge = ast_vhub_epn_set_wedge,
+ .alloc_request = ast_vhub_alloc_request,
+ .free_request = ast_vhub_free_request,
+};
+
+struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
+{
+ struct ast_vhub *vhub = d->vhub;
+ struct ast_vhub_ep *ep;
+ unsigned long flags;
+ int i;
+
+ /* Find a free one (no device) */
+ spin_lock_irqsave(&vhub->lock, flags);
+ for (i = 0; i < vhub->max_epns; i++)
+ if (vhub->epns[i].dev == NULL)
+ break;
+ if (i >= vhub->max_epns) {
+ spin_unlock_irqrestore(&vhub->lock, flags);
+ return NULL;
+ }
+
+ /* Set it up */
+ ep = &vhub->epns[i];
+ ep->dev = d;
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
+ INIT_LIST_HEAD(&ep->queue);
+ ep->d_idx = addr;
+ ep->vhub = vhub;
+ ep->ep.ops = &ast_vhub_epn_ops;
+ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
+ d->epns[addr-1] = ep;
+ ep->epn.g_idx = i;
+ ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);
+
+ ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
+ AST_VHUB_EPn_MAX_PACKET +
+ 8 * AST_VHUB_DESCS_COUNT,
+ &ep->buf_dma, GFP_KERNEL);
+ if (!ep->buf) {
+ kfree(ep->ep.name);
+ ep->ep.name = NULL;
+ return NULL;
+ }
+ ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
+ ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;
+
+ usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
+ list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
+ return ep;
+}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
new file mode 100644
index 000000000..e2207d014
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
+ *
+ * hub.c - virtual hub handling
+ *
+ * Copyright 2017 IBM Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bcd.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "vhub.h"
+
+/* usb 2.0 hub device descriptor
+ *
+ * A few things we may want to improve here:
+ *
+ * - We may need to indicate TT support
+ * - We may need a device qualifier descriptor
+ * as devices can pretend to be usb1 or 2
+ * - Make vid/did overridable
+ * - Make it look like usb1 if usb1 mode is forced
+ */
+#define KERNEL_REL bin2bcd(LINUX_VERSION_MAJOR)
+#define KERNEL_VER bin2bcd(LINUX_VERSION_PATCHLEVEL)
+
+enum {
+ AST_VHUB_STR_INDEX_MAX = 4,
+ AST_VHUB_STR_MANUF = 3,
+ AST_VHUB_STR_PRODUCT = 2,
+ AST_VHUB_STR_SERIAL = 1,
+};
+
+static const struct usb_device_descriptor ast_vhub_dev_desc = {
+ .bLength = USB_DT_DEVICE_SIZE,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_HUB,
+ .bDeviceSubClass = 0,
+ .bDeviceProtocol = 1,
+ .bMaxPacketSize0 = 64,
+ .idVendor = cpu_to_le16(0x1d6b),
+ .idProduct = cpu_to_le16(0x0107),
+ .bcdDevice = cpu_to_le16(0x0100),
+ .iManufacturer = AST_VHUB_STR_MANUF,
+ .iProduct = AST_VHUB_STR_PRODUCT,
+ .iSerialNumber = AST_VHUB_STR_SERIAL,
+ .bNumConfigurations = 1,
+};
+
+static const struct usb_qualifier_descriptor ast_vhub_qual_desc = {
+ .bLength = 0xA,
+ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_HUB,
+ .bDeviceSubClass = 0,
+ .bDeviceProtocol = 0,
+ .bMaxPacketSize0 = 64,
+ .bNumConfigurations = 1,
+ .bRESERVED = 0,
+};
+
+/*
+ * Configuration descriptor: same comments as above
+ * regarding handling USB1 mode.
+ */
+
+/*
+ * We don't use sizeof() as the Linux definition of
+ * struct usb_endpoint_descriptor contains 2 extra
+ * bytes (the audio-endpoint-only bRefresh and
+ * bSynchAddress fields) beyond USB_DT_ENDPOINT_SIZE
+ */
+#define AST_VHUB_CONF_DESC_SIZE (USB_DT_CONFIG_SIZE + \
+ USB_DT_INTERFACE_SIZE + \
+ USB_DT_ENDPOINT_SIZE)
+
+static const struct ast_vhub_full_cdesc ast_vhub_conf_desc = {
+ .cfg = {
+ .bLength = USB_DT_CONFIG_SIZE,
+ .bDescriptorType = USB_DT_CONFIG,
+ .wTotalLength = cpu_to_le16(AST_VHUB_CONF_DESC_SIZE),
+ .bNumInterfaces = 1,
+ .bConfigurationValue = 1,
+ .iConfiguration = 0,
+ .bmAttributes = USB_CONFIG_ATT_ONE |
+ USB_CONFIG_ATT_SELFPOWER |
+ USB_CONFIG_ATT_WAKEUP,
+ .bMaxPower = 0,
+ },
+ .intf = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_HUB,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ .iInterface = 0,
+ },
+ .ep = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0x81,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(1),
+ .bInterval = 0x0c,
+ },
+};
+
+#define AST_VHUB_HUB_DESC_SIZE (USB_DT_HUB_NONVAR_SIZE + 2)
+
+static const struct usb_hub_descriptor ast_vhub_hub_desc = {
+ .bDescLength = AST_VHUB_HUB_DESC_SIZE,
+ .bDescriptorType = USB_DT_HUB,
+ .bNbrPorts = AST_VHUB_NUM_PORTS,
+ .wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM),
+ .bPwrOn2PwrGood = 10,
+ .bHubContrCurrent = 0,
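+	/*
+	 * Per the USB 2.0 hub descriptor layout, DeviceRemovable[0] is
+	 * the removable-device bitmap (bit 0 reserved, one bit per
+	 * port) and the following byte carries the legacy
+	 * PortPwrCtrlMask, which the spec requires to be all ones.
+	 */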
+ .u.hs.DeviceRemovable[0] = 0,
+ .u.hs.DeviceRemovable[1] = 0xff,
+};
+
+/*
+ * These strings converted to UTF-16 must be smaller than
+ * our EP0 buffer.
+ */
+static const struct usb_string ast_vhub_str_array[] = {
+ {
+ .id = AST_VHUB_STR_SERIAL,
+ .s = "00000000"
+ },
+ {
+ .id = AST_VHUB_STR_PRODUCT,
+ .s = "USB Virtual Hub"
+ },
+ {
+ .id = AST_VHUB_STR_MANUF,
+ .s = "Aspeed"
+ },
+ { }
+};
+
+static const struct usb_gadget_strings ast_vhub_strings = {
+ .language = 0x0409,
+ .strings = (struct usb_string *)ast_vhub_str_array
+};
+
+static int ast_vhub_hub_dev_status(struct ast_vhub_ep *ep,
+ u16 wIndex, u16 wValue)
+{
+ u8 st0;
+
+ EPDBG(ep, "GET_STATUS(dev)\n");
+
+ /*
+	 * Mark it as self-powered; I doubt the BMC is powered off
+ * the USB bus ...
+ */
+ st0 = 1 << USB_DEVICE_SELF_POWERED;
+
+ /*
+ * Need to double check how remote wakeup actually works
+ * on that chip and what triggers it.
+ */
+ if (ep->vhub->wakeup_en)
+ st0 |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+
+ return ast_vhub_simple_reply(ep, st0, 0);
+}
+
+static int ast_vhub_hub_ep_status(struct ast_vhub_ep *ep,
+ u16 wIndex, u16 wValue)
+{
+ int ep_num;
+ u8 st0 = 0;
+
+ ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ EPDBG(ep, "GET_STATUS(ep%d)\n", ep_num);
+
+ /* On the hub we have only EP 0 and 1 */
+ if (ep_num == 1) {
+ if (ep->vhub->ep1_stalled)
+ st0 |= 1 << USB_ENDPOINT_HALT;
+ } else if (ep_num != 0)
+ return std_req_stall;
+
+ return ast_vhub_simple_reply(ep, st0, 0);
+}
+
+static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
+ u16 wIndex, u16 wValue,
+ bool is_set)
+{
+ u32 val;
+
+ EPDBG(ep, "%s_FEATURE(dev val=%02x)\n",
+ is_set ? "SET" : "CLEAR", wValue);
+
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ ep->vhub->wakeup_en = is_set;
+ EPDBG(ep, "Hub remote wakeup %s\n",
+ is_set ? "enabled" : "disabled");
+ return std_req_complete;
+ }
+
+ if (wValue == USB_DEVICE_TEST_MODE) {
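+		/*
+		 * For SET_FEATURE(TEST_MODE), the test selector is
+		 * carried in the high byte of wIndex (USB 2.0 ch9).
+		 */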
+ val = readl(ep->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+ writel(val, ep->vhub->regs + AST_VHUB_CTRL);
+
+ return std_req_complete;
+ }
+
+ return std_req_stall;
+}
+
+static int ast_vhub_hub_ep_feature(struct ast_vhub_ep *ep,
+ u16 wIndex, u16 wValue,
+ bool is_set)
+{
+ int ep_num;
+ u32 reg;
+
+ ep_num = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ EPDBG(ep, "%s_FEATURE(ep%d val=%02x)\n",
+ is_set ? "SET" : "CLEAR", ep_num, wValue);
+
+ if (ep_num > 1)
+ return std_req_stall;
+ if (wValue != USB_ENDPOINT_HALT)
+ return std_req_stall;
+ if (ep_num == 0)
+ return std_req_complete;
+
+ EPDBG(ep, "%s stall on EP 1\n",
+ is_set ? "setting" : "clearing");
+
+ ep->vhub->ep1_stalled = is_set;
+ reg = readl(ep->vhub->regs + AST_VHUB_EP1_CTRL);
+ if (is_set) {
+ reg |= VHUB_EP1_CTRL_STALL;
+ } else {
+ reg &= ~VHUB_EP1_CTRL_STALL;
+ reg |= VHUB_EP1_CTRL_RESET_TOGGLE;
+ }
+ writel(reg, ep->vhub->regs + AST_VHUB_EP1_CTRL);
+
+ return std_req_complete;
+}
+
+static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
+ u8 desc_type, u16 len)
+{
+ size_t dsize;
+ struct ast_vhub *vhub = ep->vhub;
+
+ EPDBG(ep, "GET_DESCRIPTOR(type:%d)\n", desc_type);
+
+ /*
+ * Copy first to EP buffer and send from there, so
+ * we can do some in-place patching if needed. We know
+ * the EP buffer is big enough but ensure that doesn't
+ * change. We do that now rather than later after we
+ * have checked sizes etc... to avoid a gcc bug where
+ * it thinks len is constant and barfs about read
+ * overflows in memcpy.
+ */
+	switch (desc_type) {
+ case USB_DT_DEVICE:
+ dsize = USB_DT_DEVICE_SIZE;
+ memcpy(ep->buf, &vhub->vhub_dev_desc, dsize);
+ BUILD_BUG_ON(dsize > sizeof(vhub->vhub_dev_desc));
+ BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET);
+ break;
+ case USB_DT_OTHER_SPEED_CONFIG:
+ case USB_DT_CONFIG:
+ dsize = AST_VHUB_CONF_DESC_SIZE;
+ memcpy(ep->buf, &vhub->vhub_conf_desc, dsize);
+ ((u8 *)ep->buf)[1] = desc_type;
+ BUILD_BUG_ON(dsize > sizeof(vhub->vhub_conf_desc));
+ BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
+ break;
+ case USB_DT_HUB:
+ dsize = AST_VHUB_HUB_DESC_SIZE;
+ memcpy(ep->buf, &vhub->vhub_hub_desc, dsize);
+ BUILD_BUG_ON(dsize > sizeof(vhub->vhub_hub_desc));
+ BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
+ break;
+ case USB_DT_DEVICE_QUALIFIER:
+ dsize = sizeof(vhub->vhub_qual_desc);
+ memcpy(ep->buf, &vhub->vhub_qual_desc, dsize);
+ break;
+ default:
+ return std_req_stall;
+ }
+
+ /* Crop requested length */
+ if (len > dsize)
+ len = dsize;
+
+ /* Shoot it from the EP buffer */
+ return ast_vhub_reply(ep, NULL, len);
+}
+
+static struct usb_gadget_strings*
+ast_vhub_str_of_container(struct usb_gadget_string_container *container)
+{
+ return (struct usb_gadget_strings *)container->stash;
+}
+
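+/*
+ * Build string descriptor zero, i.e. the language ID list: a 2-byte
+ * header followed by one le16 LANGID per language, as many as fit
+ * in the supplied buffer.
+ */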
+static int ast_vhub_collect_languages(struct ast_vhub *vhub, void *buf,
+ size_t size)
+{
+ int rc, hdr_len, nlangs, max_langs;
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+ struct usb_string_descriptor *sdesc = buf;
+
+ nlangs = 0;
+ hdr_len = sizeof(struct usb_descriptor_header);
+ max_langs = (size - hdr_len) / sizeof(sdesc->wData[0]);
+ list_for_each_entry(container, &vhub->vhub_str_desc, list) {
+ if (nlangs >= max_langs)
+ break;
+
+ lang_str = ast_vhub_str_of_container(container);
+ sdesc->wData[nlangs++] = cpu_to_le16(lang_str->language);
+ }
+
+ rc = hdr_len + nlangs * sizeof(sdesc->wData[0]);
+ sdesc->bLength = rc;
+ sdesc->bDescriptorType = USB_DT_STRING;
+
+ return rc;
+}
+
+static struct usb_gadget_strings *ast_vhub_lookup_string(struct ast_vhub *vhub,
+ u16 lang_id)
+{
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+
+ list_for_each_entry(container, &vhub->vhub_str_desc, list) {
+ lang_str = ast_vhub_str_of_container(container);
+ if (lang_str->language == lang_id)
+ return lang_str;
+ }
+
+ return NULL;
+}
+
+static int ast_vhub_rep_string(struct ast_vhub_ep *ep,
+ u8 string_id, u16 lang_id,
+ u16 len)
+{
+ int rc;
+ u8 buf[256];
+ struct ast_vhub *vhub = ep->vhub;
+ struct usb_gadget_strings *lang_str;
+
+ if (string_id == 0) {
+ rc = ast_vhub_collect_languages(vhub, buf, sizeof(buf));
+ } else {
+ lang_str = ast_vhub_lookup_string(vhub, lang_id);
+ if (!lang_str)
+ return std_req_stall;
+
+ rc = usb_gadget_get_string(lang_str, string_id, buf);
+ }
+
+ if (rc < 0 || rc >= AST_VHUB_EP0_MAX_PACKET)
+ return std_req_stall;
+
+ /* Shoot it from the EP buffer */
+ memcpy(ep->buf, buf, rc);
+ return ast_vhub_reply(ep, NULL, min_t(u16, rc, len));
+}
+
+enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
+ struct usb_ctrlrequest *crq)
+{
+ struct ast_vhub *vhub = ep->vhub;
+ u16 wValue, wIndex, wLength;
+
+ wValue = le16_to_cpu(crq->wValue);
+ wIndex = le16_to_cpu(crq->wIndex);
+ wLength = le16_to_cpu(crq->wLength);
+
+ /* First packet, grab speed */
+ if (vhub->speed == USB_SPEED_UNKNOWN) {
+ u32 ustat = readl(vhub->regs + AST_VHUB_USBSTS);
+ if (ustat & VHUB_USBSTS_HISPEED)
+ vhub->speed = USB_SPEED_HIGH;
+ else
+ vhub->speed = USB_SPEED_FULL;
+ UDCDBG(vhub, "USB status=%08x speed=%s\n", ustat,
+ vhub->speed == USB_SPEED_HIGH ? "high" : "full");
+ }
+
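+	/*
+	 * Dispatch on (bRequestType << 8) | bRequest: the
+	 * DeviceRequest/DeviceOutRequest/... macros from
+	 * <linux/usb/hcd.h> already encode bRequestType pre-shifted
+	 * by 8, so they OR directly with the USB_REQ_* codes.
+	 */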
+ switch ((crq->bRequestType << 8) | crq->bRequest) {
+ /* SET_ADDRESS */
+ case DeviceOutRequest | USB_REQ_SET_ADDRESS:
+ EPDBG(ep, "SET_ADDRESS: Got address %x\n", wValue);
+ writel(wValue, vhub->regs + AST_VHUB_CONF);
+ return std_req_complete;
+
+ /* GET_STATUS */
+ case DeviceRequest | USB_REQ_GET_STATUS:
+ return ast_vhub_hub_dev_status(ep, wIndex, wValue);
+ case InterfaceRequest | USB_REQ_GET_STATUS:
+ return ast_vhub_simple_reply(ep, 0, 0);
+ case EndpointRequest | USB_REQ_GET_STATUS:
+ return ast_vhub_hub_ep_status(ep, wIndex, wValue);
+
+ /* SET/CLEAR_FEATURE */
+ case DeviceOutRequest | USB_REQ_SET_FEATURE:
+ return ast_vhub_hub_dev_feature(ep, wIndex, wValue, true);
+ case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
+ return ast_vhub_hub_dev_feature(ep, wIndex, wValue, false);
+ case EndpointOutRequest | USB_REQ_SET_FEATURE:
+ return ast_vhub_hub_ep_feature(ep, wIndex, wValue, true);
+ case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
+ return ast_vhub_hub_ep_feature(ep, wIndex, wValue, false);
+
+ /* GET/SET_CONFIGURATION */
+ case DeviceRequest | USB_REQ_GET_CONFIGURATION:
+ return ast_vhub_simple_reply(ep, 1);
+ case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
+ if (wValue != 1)
+ return std_req_stall;
+ return std_req_complete;
+
+ /* GET_DESCRIPTOR */
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ switch (wValue >> 8) {
+ case USB_DT_DEVICE:
+ case USB_DT_CONFIG:
+ case USB_DT_DEVICE_QUALIFIER:
+ case USB_DT_OTHER_SPEED_CONFIG:
+ return ast_vhub_rep_desc(ep, wValue >> 8,
+ wLength);
+ case USB_DT_STRING:
+ return ast_vhub_rep_string(ep, wValue & 0xff,
+ wIndex, wLength);
+ }
+ return std_req_stall;
+
+ /* GET/SET_INTERFACE */
+ case DeviceRequest | USB_REQ_GET_INTERFACE:
+ return ast_vhub_simple_reply(ep, 0);
+ case DeviceOutRequest | USB_REQ_SET_INTERFACE:
+ if (wValue != 0 || wIndex != 0)
+ return std_req_stall;
+ return std_req_complete;
+ }
+ return std_req_stall;
+}
+
+static void ast_vhub_update_hub_ep1(struct ast_vhub *vhub,
+ unsigned int port)
+{
+ /* Update HW EP1 response */
+ u32 reg = readl(vhub->regs + AST_VHUB_EP1_STS_CHG);
+ u32 pmask = (1 << (port + 1));
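+
+	/* As on the wire, bit 0 of the bitmap is the hub itself, port N is bit N */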
+ if (vhub->ports[port].change)
+ reg |= pmask;
+ else
+ reg &= ~pmask;
+ writel(reg, vhub->regs + AST_VHUB_EP1_STS_CHG);
+}
+
+static void ast_vhub_change_port_stat(struct ast_vhub *vhub,
+ unsigned int port,
+ u16 clr_flags,
+ u16 set_flags,
+ bool set_c)
+{
+ struct ast_vhub_port *p = &vhub->ports[port];
+ u16 prev;
+
+ /* Update port status */
+ prev = p->status;
+ p->status = (prev & ~clr_flags) | set_flags;
+ DDBG(&p->dev, "port %d status %04x -> %04x (C=%d)\n",
+ port + 1, prev, p->status, set_c);
+
+ /* Update change bits if needed */
+ if (set_c) {
+ u16 chg = p->status ^ prev;
+
+ /* Only these are relevant for change */
+ chg &= USB_PORT_STAT_C_CONNECTION |
+ USB_PORT_STAT_C_ENABLE |
+ USB_PORT_STAT_C_SUSPEND |
+ USB_PORT_STAT_C_OVERCURRENT |
+ USB_PORT_STAT_C_RESET |
+ USB_PORT_STAT_C_L1;
+
+ /*
+ * We only set USB_PORT_STAT_C_ENABLE if we are disabling
+ * the port as per USB spec, otherwise MacOS gets upset
+ */
+ if (p->status & USB_PORT_STAT_ENABLE)
+ chg &= ~USB_PORT_STAT_C_ENABLE;
+
+ p->change = chg;
+ ast_vhub_update_hub_ep1(vhub, port);
+ }
+}
+
+static void ast_vhub_send_host_wakeup(struct ast_vhub *vhub)
+{
+ u32 reg = readl(vhub->regs + AST_VHUB_CTRL);
+ UDCDBG(vhub, "Waking up host !\n");
+ reg |= VHUB_CTRL_MANUAL_REMOTE_WAKEUP;
+ writel(reg, vhub->regs + AST_VHUB_CTRL);
+}
+
+void ast_vhub_device_connect(struct ast_vhub *vhub,
+ unsigned int port, bool on)
+{
+ if (on)
+ ast_vhub_change_port_stat(vhub, port, 0,
+ USB_PORT_STAT_CONNECTION, true);
+ else
+ ast_vhub_change_port_stat(vhub, port,
+ USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE,
+ 0, true);
+
+ /*
+	 * If the hub is set to wake the host up on connection events,
+ * then send a wakeup.
+ */
+ if (vhub->wakeup_en)
+ ast_vhub_send_host_wakeup(vhub);
+}
+
+static void ast_vhub_wake_work(struct work_struct *work)
+{
+ struct ast_vhub *vhub = container_of(work,
+ struct ast_vhub,
+ wake_work);
+ unsigned long flags;
+ unsigned int i;
+
+ /*
+ * Wake all sleeping ports. If a port is suspended by
+ * the host suspend (without explicit state suspend),
+ * we let the normal host wake path deal with it later.
+ */
+ spin_lock_irqsave(&vhub->lock, flags);
+ for (i = 0; i < vhub->max_ports; i++) {
+ struct ast_vhub_port *p = &vhub->ports[i];
+
+ if (!(p->status & USB_PORT_STAT_SUSPEND))
+ continue;
+ ast_vhub_change_port_stat(vhub, i,
+ USB_PORT_STAT_SUSPEND,
+ 0, true);
+ ast_vhub_dev_resume(&p->dev);
+ }
+ ast_vhub_send_host_wakeup(vhub);
+ spin_unlock_irqrestore(&vhub->lock, flags);
+}
+
+void ast_vhub_hub_wake_all(struct ast_vhub *vhub)
+{
+	/*
+	 * A device is trying to wake the world. Because this
+	 * can recurse into the device, we break the call chain
+	 * using a work queue.
+	 */
+ schedule_work(&vhub->wake_work);
+}
+
+static void ast_vhub_port_reset(struct ast_vhub *vhub, u8 port)
+{
+ struct ast_vhub_port *p = &vhub->ports[port];
+ u16 set, clr, speed;
+
+ /* First mark disabled */
+ ast_vhub_change_port_stat(vhub, port,
+ USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_SUSPEND,
+ USB_PORT_STAT_RESET,
+ false);
+
+ if (!p->dev.driver)
+ return;
+
+ /*
+ * This will either "start" the port or reset the
+ * device if already started...
+ */
+ ast_vhub_dev_reset(&p->dev);
+
+ /* Grab the right speed */
+ speed = p->dev.driver->max_speed;
+ if (speed == USB_SPEED_UNKNOWN || speed > vhub->speed)
+ speed = vhub->speed;
+
+ switch (speed) {
+ case USB_SPEED_LOW:
+ set = USB_PORT_STAT_LOW_SPEED;
+ clr = USB_PORT_STAT_HIGH_SPEED;
+ break;
+ case USB_SPEED_FULL:
+ set = 0;
+ clr = USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED;
+ break;
+ case USB_SPEED_HIGH:
+ set = USB_PORT_STAT_HIGH_SPEED;
+ clr = USB_PORT_STAT_LOW_SPEED;
+ break;
+ default:
+ UDCDBG(vhub, "Unsupported speed %d when"
+ " connecting device\n",
+ speed);
+ return;
+ }
+ clr |= USB_PORT_STAT_RESET;
+ set |= USB_PORT_STAT_ENABLE;
+
+ /* This should ideally be delayed ... */
+ ast_vhub_change_port_stat(vhub, port, clr, set, true);
+}
+
+static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep,
+ u8 port, u16 feat)
+{
+ struct ast_vhub *vhub = ep->vhub;
+ struct ast_vhub_port *p;
+
+ if (port == 0 || port > vhub->max_ports)
+ return std_req_stall;
+ port--;
+ p = &vhub->ports[port];
+
+	switch (feat) {
+ case USB_PORT_FEAT_SUSPEND:
+ if (!(p->status & USB_PORT_STAT_ENABLE))
+ return std_req_complete;
+ ast_vhub_change_port_stat(vhub, port,
+ 0, USB_PORT_STAT_SUSPEND,
+ false);
+ ast_vhub_dev_suspend(&p->dev);
+ return std_req_complete;
+ case USB_PORT_FEAT_RESET:
+ EPDBG(ep, "Port reset !\n");
+ ast_vhub_port_reset(vhub, port);
+ return std_req_complete;
+ case USB_PORT_FEAT_POWER:
+		/*
+		 * On power-on, if there's a connected device, we mark
+		 * the connection flag as changed; some hosts otherwise
+		 * fail to detect the device.
+		 */
+ if (p->status & USB_PORT_STAT_CONNECTION) {
+ p->change |= USB_PORT_STAT_C_CONNECTION;
+ ast_vhub_update_hub_ep1(vhub, port);
+ }
+ return std_req_complete;
+ case USB_PORT_FEAT_TEST:
+ case USB_PORT_FEAT_INDICATOR:
+ /* We don't do anything with these */
+ return std_req_complete;
+ }
+ return std_req_stall;
+}
+
+static enum std_req_rc ast_vhub_clr_port_feature(struct ast_vhub_ep *ep,
+ u8 port, u16 feat)
+{
+ struct ast_vhub *vhub = ep->vhub;
+ struct ast_vhub_port *p;
+
+ if (port == 0 || port > vhub->max_ports)
+ return std_req_stall;
+ port--;
+ p = &vhub->ports[port];
+
+	switch (feat) {
+ case USB_PORT_FEAT_ENABLE:
+ ast_vhub_change_port_stat(vhub, port,
+ USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_SUSPEND, 0,
+ false);
+ ast_vhub_dev_suspend(&p->dev);
+ return std_req_complete;
+ case USB_PORT_FEAT_SUSPEND:
+ if (!(p->status & USB_PORT_STAT_SUSPEND))
+ return std_req_complete;
+ ast_vhub_change_port_stat(vhub, port,
+ USB_PORT_STAT_SUSPEND, 0,
+ false);
+ ast_vhub_dev_resume(&p->dev);
+ return std_req_complete;
+ case USB_PORT_FEAT_POWER:
+ /* We don't do power control */
+ return std_req_complete;
+ case USB_PORT_FEAT_INDICATOR:
+ /* We don't have indicators */
+ return std_req_complete;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+		/*
+		 * Clear the requested state-change bit: the C_* feature
+		 * selectors (16..20) map to bits 0..4 of wPortChange,
+		 * hence the "- 16".
+		 */
+ p->change &= ~(1u << (feat - 16));
+ ast_vhub_update_hub_ep1(vhub, port);
+ return std_req_complete;
+ }
+ return std_req_stall;
+}
+
+static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep,
+ u8 port)
+{
+ struct ast_vhub *vhub = ep->vhub;
+ u16 stat, chg;
+
+ if (port == 0 || port > vhub->max_ports)
+ return std_req_stall;
+ port--;
+
+ stat = vhub->ports[port].status;
+ chg = vhub->ports[port].change;
+
+ /* We always have power */
+ stat |= USB_PORT_STAT_POWER;
+
+ EPDBG(ep, " port status=%04x change=%04x\n", stat, chg);
+
+ return ast_vhub_simple_reply(ep,
+ stat & 0xff,
+ stat >> 8,
+ chg & 0xff,
+ chg >> 8);
+}
+
+enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
+ struct usb_ctrlrequest *crq)
+{
+ u16 wValue, wIndex, wLength;
+
+ wValue = le16_to_cpu(crq->wValue);
+ wIndex = le16_to_cpu(crq->wIndex);
+ wLength = le16_to_cpu(crq->wLength);
+
+ switch ((crq->bRequestType << 8) | crq->bRequest) {
+ case GetHubStatus:
+ EPDBG(ep, "GetHubStatus\n");
+ return ast_vhub_simple_reply(ep, 0, 0, 0, 0);
+ case GetPortStatus:
+ EPDBG(ep, "GetPortStatus(%d)\n", wIndex & 0xff);
+ return ast_vhub_get_port_stat(ep, wIndex & 0xf);
+ case GetHubDescriptor:
+ if (wValue != (USB_DT_HUB << 8))
+ return std_req_stall;
+ EPDBG(ep, "GetHubDescriptor(%d)\n", wIndex & 0xff);
+ return ast_vhub_rep_desc(ep, USB_DT_HUB, wLength);
+ case SetHubFeature:
+ case ClearHubFeature:
+ EPDBG(ep, "Get/SetHubFeature(%d)\n", wValue);
+ /* No feature, just complete the requests */
+ if (wValue == C_HUB_LOCAL_POWER ||
+ wValue == C_HUB_OVER_CURRENT)
+ return std_req_complete;
+ return std_req_stall;
+ case SetPortFeature:
+ EPDBG(ep, "SetPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
+ return ast_vhub_set_port_feature(ep, wIndex & 0xf, wValue);
+ case ClearPortFeature:
+ EPDBG(ep, "ClearPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
+ return ast_vhub_clr_port_feature(ep, wIndex & 0xf, wValue);
+ case ClearTTBuffer:
+ case ResetTT:
+ case StopTT:
+ return std_req_complete;
+ case GetTTState:
+ return ast_vhub_simple_reply(ep, 0, 0, 0, 0);
+ default:
+ EPDBG(ep, "Unknown class request\n");
+ }
+ return std_req_stall;
+}
+
+void ast_vhub_hub_suspend(struct ast_vhub *vhub)
+{
+ unsigned int i;
+
+ UDCDBG(vhub, "USB bus suspend\n");
+
+ if (vhub->suspended)
+ return;
+
+ vhub->suspended = true;
+
+ /*
+ * Forward to unsuspended ports without changing
+ * their connection status.
+ */
+ for (i = 0; i < vhub->max_ports; i++) {
+ struct ast_vhub_port *p = &vhub->ports[i];
+
+ if (!(p->status & USB_PORT_STAT_SUSPEND))
+ ast_vhub_dev_suspend(&p->dev);
+ }
+}
+
+void ast_vhub_hub_resume(struct ast_vhub *vhub)
+{
+ unsigned int i;
+
+ UDCDBG(vhub, "USB bus resume\n");
+
+ if (!vhub->suspended)
+ return;
+
+ vhub->suspended = false;
+
+ /*
+ * Forward to unsuspended ports without changing
+ * their connection status.
+ */
+ for (i = 0; i < vhub->max_ports; i++) {
+ struct ast_vhub_port *p = &vhub->ports[i];
+
+ if (!(p->status & USB_PORT_STAT_SUSPEND))
+ ast_vhub_dev_resume(&p->dev);
+ }
+}
+
+void ast_vhub_hub_reset(struct ast_vhub *vhub)
+{
+ unsigned int i;
+
+ UDCDBG(vhub, "USB bus reset\n");
+
+	/*
+	 * Is the speed known? If not, we don't care: we aren't
+	 * initialized yet and ports haven't been enabled.
+	 */
+ if (vhub->speed == USB_SPEED_UNKNOWN)
+ return;
+
+ /* We aren't suspended anymore obviously */
+ vhub->suspended = false;
+
+ /* No speed set */
+ vhub->speed = USB_SPEED_UNKNOWN;
+
+ /* Wakeup not enabled anymore */
+ vhub->wakeup_en = false;
+
+ /*
+ * Clear all port status, disable gadgets and "suspend"
+ * them. They will be woken up by a port reset.
+ */
+ for (i = 0; i < vhub->max_ports; i++) {
+ struct ast_vhub_port *p = &vhub->ports[i];
+
+ /* Only keep the connected flag */
+ p->status &= USB_PORT_STAT_CONNECTION;
+ p->change = 0;
+
+ /* Suspend the gadget if any */
+ ast_vhub_dev_suspend(&p->dev);
+ }
+
+ /* Cleanup HW */
+ writel(0, vhub->regs + AST_VHUB_CONF);
+ writel(0, vhub->regs + AST_VHUB_EP0_CTRL);
+ writel(VHUB_EP1_CTRL_RESET_TOGGLE |
+ VHUB_EP1_CTRL_ENABLE,
+ vhub->regs + AST_VHUB_EP1_CTRL);
+ writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
+}
+
+static void ast_vhub_of_parse_dev_desc(struct ast_vhub *vhub,
+ const struct device_node *vhub_np)
+{
+ u16 id;
+ u32 data;
+
+ if (!of_property_read_u32(vhub_np, "vhub-vendor-id", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.idVendor = cpu_to_le16(id);
+ }
+ if (!of_property_read_u32(vhub_np, "vhub-product-id", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.idProduct = cpu_to_le16(id);
+ }
+ if (!of_property_read_u32(vhub_np, "vhub-device-revision", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.bcdDevice = cpu_to_le16(id);
+ }
+}
+
+static void ast_vhub_fixup_usb1_dev_desc(struct ast_vhub *vhub)
+{
+ vhub->vhub_dev_desc.bcdUSB = cpu_to_le16(0x0100);
+ vhub->vhub_dev_desc.bDeviceProtocol = 0;
+}
+
+static struct usb_gadget_string_container*
+ast_vhub_str_container_alloc(struct ast_vhub *vhub)
+{
+ unsigned int size;
+ struct usb_string *str_array;
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+
+ size = sizeof(*container);
+ size += sizeof(struct usb_gadget_strings);
+ size += sizeof(struct usb_string) * AST_VHUB_STR_INDEX_MAX;
+ container = devm_kzalloc(&vhub->pdev->dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ lang_str = ast_vhub_str_of_container(container);
+ str_array = (struct usb_string *)(lang_str + 1);
+ lang_str->strings = str_array;
+ return container;
+}
+
+static void ast_vhub_str_deep_copy(struct usb_gadget_strings *dest,
+ const struct usb_gadget_strings *src)
+{
+ struct usb_string *src_array = src->strings;
+ struct usb_string *dest_array = dest->strings;
+
+ dest->language = src->language;
+ if (src_array && dest_array) {
+ do {
+ *dest_array = *src_array;
+ dest_array++;
+ src_array++;
+ } while (src_array->s);
+ }
+}
+
+static int ast_vhub_str_alloc_add(struct ast_vhub *vhub,
+ const struct usb_gadget_strings *src_str)
+{
+ struct usb_gadget_strings *dest_str;
+ struct usb_gadget_string_container *container;
+
+ container = ast_vhub_str_container_alloc(vhub);
+ if (IS_ERR(container))
+ return PTR_ERR(container);
+
+ dest_str = ast_vhub_str_of_container(container);
+ ast_vhub_str_deep_copy(dest_str, src_str);
+ list_add_tail(&container->list, &vhub->vhub_str_desc);
+
+ return 0;
+}
+
+static const struct {
+ const char *name;
+ u8 id;
+} str_id_map[] = {
+ {"manufacturer", AST_VHUB_STR_MANUF},
+ {"product", AST_VHUB_STR_PRODUCT},
+ {"serial-number", AST_VHUB_STR_SERIAL},
+ {},
+};
+
+static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
+ const struct device_node *desc_np)
+{
+ u32 langid;
+ int ret = 0;
+ int i, offset;
+ const char *str;
+ struct device_node *child;
+ struct usb_string str_array[AST_VHUB_STR_INDEX_MAX];
+ struct usb_gadget_strings lang_str = {
+ .strings = (struct usb_string *)str_array,
+ };
+
+ for_each_child_of_node(desc_np, child) {
+ if (of_property_read_u32(child, "reg", &langid))
+ continue; /* no language identifier specified */
+
+ if (!usb_validate_langid(langid))
+ continue; /* invalid language identifier */
+
+ lang_str.language = langid;
+ for (i = offset = 0; str_id_map[i].name; i++) {
+ str = of_get_property(child, str_id_map[i].name, NULL);
+ if (str) {
+ str_array[offset].s = str;
+ str_array[offset].id = str_id_map[i].id;
+ offset++;
+ }
+ }
+ str_array[offset].id = 0;
+ str_array[offset].s = NULL;
+
+ ret = ast_vhub_str_alloc_add(vhub, &lang_str);
+ if (ret) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ast_vhub_init_desc(struct ast_vhub *vhub)
+{
+ int ret;
+ struct device_node *desc_np;
+ const struct device_node *vhub_np = vhub->pdev->dev.of_node;
+
+ /* Initialize vhub Device Descriptor. */
+ memcpy(&vhub->vhub_dev_desc, &ast_vhub_dev_desc,
+ sizeof(vhub->vhub_dev_desc));
+ ast_vhub_of_parse_dev_desc(vhub, vhub_np);
+ if (vhub->force_usb1)
+ ast_vhub_fixup_usb1_dev_desc(vhub);
+
+ /* Initialize vhub Configuration Descriptor. */
+ memcpy(&vhub->vhub_conf_desc, &ast_vhub_conf_desc,
+ sizeof(vhub->vhub_conf_desc));
+
+ /* Initialize vhub Hub Descriptor. */
+ memcpy(&vhub->vhub_hub_desc, &ast_vhub_hub_desc,
+ sizeof(vhub->vhub_hub_desc));
+ vhub->vhub_hub_desc.bNbrPorts = vhub->max_ports;
+
+ /* Initialize vhub String Descriptors. */
+ INIT_LIST_HEAD(&vhub->vhub_str_desc);
+ desc_np = of_get_child_by_name(vhub_np, "vhub-strings");
+ if (desc_np) {
+ ret = ast_vhub_of_parse_str_desc(vhub, desc_np);
+ of_node_put(desc_np);
+	} else {
+		ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
+	}
+
+ /* Initialize vhub Qualifier Descriptor. */
+ memcpy(&vhub->vhub_qual_desc, &ast_vhub_qual_desc,
+ sizeof(vhub->vhub_qual_desc));
+
+ return ret;
+}
+
+int ast_vhub_init_hub(struct ast_vhub *vhub)
+{
+ vhub->speed = USB_SPEED_UNKNOWN;
+ INIT_WORK(&vhub->wake_work, ast_vhub_wake_work);
+
+ return ast_vhub_init_desc(vhub);
+}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
new file mode 100644
index 000000000..6b9dfa6e1
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __ASPEED_VHUB_H
+#define __ASPEED_VHUB_H
+
+#include <linux/usb.h>
+#include <linux/usb/ch11.h>
+
+/*****************************
+ * *
+ * VHUB register definitions *
+ * *
+ *****************************/
+
+#define AST_VHUB_CTRL 0x00 /* Root Function Control & Status Register */
+#define AST_VHUB_CONF 0x04 /* Root Configuration Setting Register */
+#define AST_VHUB_IER 0x08 /* Interrupt Ctrl Register */
+#define AST_VHUB_ISR 0x0C /* Interrupt Status Register */
+#define AST_VHUB_EP_ACK_IER 0x10 /* Programmable Endpoint Pool ACK Interrupt Enable Register */
+#define AST_VHUB_EP_NACK_IER 0x14 /* Programmable Endpoint Pool NACK Interrupt Enable Register */
+#define AST_VHUB_EP_ACK_ISR 0x18 /* Programmable Endpoint Pool ACK Interrupt Status Register */
+#define AST_VHUB_EP_NACK_ISR 0x1C /* Programmable Endpoint Pool NACK Interrupt Status Register */
+#define AST_VHUB_SW_RESET 0x20 /* Device Controller Soft Reset Enable Register */
+#define AST_VHUB_USBSTS 0x24 /* USB Status Register */
+#define AST_VHUB_EP_TOGGLE 0x28 /* Programmable Endpoint Pool Data Toggle Value Set */
+#define AST_VHUB_ISO_FAIL_ACC 0x2C /* Isochronous Transaction Fail Accumulator */
+#define AST_VHUB_EP0_CTRL	0x30	/* Endpoint 0 Control/Status Register */
+#define AST_VHUB_EP0_DATA	0x34	/* Base Address of Endpoint 0 IN/OUT Data Buffer Register */
+#define AST_VHUB_EP1_CTRL	0x38	/* Endpoint 1 Control/Status Register */
+#define AST_VHUB_EP1_STS_CHG 0x3C /* Endpoint 1 Status Change Bitmap Data */
+#define AST_VHUB_SETUP0 0x80 /* Root Device Setup Data Buffer0 */
+#define AST_VHUB_SETUP1 0x84 /* Root Device Setup Data Buffer1 */
+
+/* Main control reg */
+#define VHUB_CTRL_PHY_CLK (1 << 31)
+#define VHUB_CTRL_PHY_LOOP_TEST (1 << 25)
+#define VHUB_CTRL_DN_PWN (1 << 24)
+#define VHUB_CTRL_DP_PWN (1 << 23)
+#define VHUB_CTRL_LONG_DESC (1 << 18)
+#define VHUB_CTRL_ISO_RSP_CTRL (1 << 17)
+#define VHUB_CTRL_SPLIT_IN (1 << 16)
+#define VHUB_CTRL_LOOP_T_RESULT (1 << 15)
+#define VHUB_CTRL_LOOP_T_STS (1 << 14)
+#define VHUB_CTRL_PHY_BIST_RESULT (1 << 13)
+#define VHUB_CTRL_PHY_BIST_CTRL (1 << 12)
+#define VHUB_CTRL_PHY_RESET_DIS (1 << 11)
+#define VHUB_CTRL_SET_TEST_MODE(x) ((x) << 8)
+#define VHUB_CTRL_MANUAL_REMOTE_WAKEUP (1 << 4)
+#define VHUB_CTRL_AUTO_REMOTE_WAKEUP (1 << 3)
+#define VHUB_CTRL_CLK_STOP_SUSPEND (1 << 2)
+#define VHUB_CTRL_FULL_SPEED_ONLY (1 << 1)
+#define VHUB_CTRL_UPSTREAM_CONNECT (1 << 0)
+
+/* IER & ISR */
+#define VHUB_IRQ_DEV1_BIT 9
+#define VHUB_IRQ_USB_CMD_DEADLOCK (1 << 18)
+#define VHUB_IRQ_EP_POOL_NAK (1 << 17)
+#define VHUB_IRQ_EP_POOL_ACK_STALL (1 << 16)
+#define VHUB_IRQ_DEVICE1 (1 << (VHUB_IRQ_DEV1_BIT))
+#define VHUB_IRQ_BUS_RESUME (1 << 8)
+#define VHUB_IRQ_BUS_SUSPEND (1 << 7)
+#define VHUB_IRQ_BUS_RESET (1 << 6)
+#define VHUB_IRQ_HUB_EP1_IN_DATA_ACK (1 << 5)
+#define VHUB_IRQ_HUB_EP0_IN_DATA_NAK (1 << 4)
+#define VHUB_IRQ_HUB_EP0_IN_ACK_STALL (1 << 3)
+#define VHUB_IRQ_HUB_EP0_OUT_NAK (1 << 2)
+#define VHUB_IRQ_HUB_EP0_OUT_ACK_STALL (1 << 1)
+#define VHUB_IRQ_HUB_EP0_SETUP (1 << 0)
+#define VHUB_IRQ_ACK_ALL 0x1ff
+
+/* Downstream device IRQ mask. */
+#define VHUB_DEV_IRQ(n) (VHUB_IRQ_DEVICE1 << (n))
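+/* e.g. VHUB_DEV_IRQ(0) is device 1 (bit 9), VHUB_DEV_IRQ(4) is device 5 (bit 13) */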
+
+/* SW reset reg */
+#define VHUB_SW_RESET_EP_POOL (1 << 9)
+#define VHUB_SW_RESET_DMA_CONTROLLER (1 << 8)
+#define VHUB_SW_RESET_DEVICE5 (1 << 5)
+#define VHUB_SW_RESET_DEVICE4 (1 << 4)
+#define VHUB_SW_RESET_DEVICE3 (1 << 3)
+#define VHUB_SW_RESET_DEVICE2 (1 << 2)
+#define VHUB_SW_RESET_DEVICE1 (1 << 1)
+#define VHUB_SW_RESET_ROOT_HUB (1 << 0)
+
+/* EP ACK/NACK IRQ masks */
+#define VHUB_EP_IRQ(n) (1 << (n))
+
+/* USB status reg */
+#define VHUB_USBSTS_HISPEED (1 << 27)
+
+/* EP toggle */
+#define VHUB_EP_TOGGLE_VALUE (1 << 8)
+#define VHUB_EP_TOGGLE_SET_EPNUM(x) ((x) & 0x1f)
+
+/* HUB EP0 control */
+#define VHUB_EP0_CTRL_STALL (1 << 0)
+#define VHUB_EP0_TX_BUFF_RDY (1 << 1)
+#define VHUB_EP0_RX_BUFF_RDY (1 << 2)
+#define VHUB_EP0_RX_LEN(x) (((x) >> 16) & 0x7f)
+#define VHUB_EP0_SET_TX_LEN(x) (((x) & 0x7f) << 8)
+
+/* HUB EP1 control */
+#define VHUB_EP1_CTRL_RESET_TOGGLE (1 << 2)
+#define VHUB_EP1_CTRL_STALL (1 << 1)
+#define VHUB_EP1_CTRL_ENABLE (1 << 0)
+
+/***********************************
+ * *
+ * per-device register definitions *
+ * *
+ ***********************************/
+#define AST_VHUB_DEV_EN_CTRL 0x00
+#define AST_VHUB_DEV_ISR 0x04
+#define AST_VHUB_DEV_EP0_CTRL 0x08
+#define AST_VHUB_DEV_EP0_DATA 0x0c
+
+/* Device enable control */
+#define VHUB_DEV_EN_SET_ADDR(x) ((x) << 8)
+#define VHUB_DEV_EN_ADDR_MASK ((0xff) << 8)
+#define VHUB_DEV_EN_EP0_NAK_IRQEN (1 << 6)
+#define VHUB_DEV_EN_EP0_IN_ACK_IRQEN (1 << 5)
+#define VHUB_DEV_EN_EP0_OUT_NAK_IRQEN (1 << 4)
+#define VHUB_DEV_EN_EP0_OUT_ACK_IRQEN (1 << 3)
+#define VHUB_DEV_EN_EP0_SETUP_IRQEN (1 << 2)
+#define VHUB_DEV_EN_SPEED_SEL_HIGH (1 << 1)
+#define VHUB_DEV_EN_ENABLE_PORT (1 << 0)
+
+/* Interrupt status */
+#define VHUV_DEV_IRQ_EP0_IN_DATA_NACK (1 << 4)
+#define VHUV_DEV_IRQ_EP0_IN_ACK_STALL (1 << 3)
+#define VHUV_DEV_IRQ_EP0_OUT_DATA_NACK (1 << 2)
+#define VHUV_DEV_IRQ_EP0_OUT_ACK_STALL (1 << 1)
+#define VHUV_DEV_IRQ_EP0_SETUP (1 << 0)
+
+/* Control bits.
+ *
+ * Note: The driver relies on the bulk of those bits
+ * matching corresponding vHub EP0 control bits
+ */
+#define VHUB_DEV_EP0_CTRL_STALL VHUB_EP0_CTRL_STALL
+#define VHUB_DEV_EP0_TX_BUFF_RDY VHUB_EP0_TX_BUFF_RDY
+#define VHUB_DEV_EP0_RX_BUFF_RDY VHUB_EP0_RX_BUFF_RDY
+#define VHUB_DEV_EP0_RX_LEN(x) VHUB_EP0_RX_LEN(x)
+#define VHUB_DEV_EP0_SET_TX_LEN(x) VHUB_EP0_SET_TX_LEN(x)
+
+/*************************************
+ * *
+ * per-endpoint register definitions *
+ * *
+ *************************************/
+
+#define AST_VHUB_EP_CONFIG 0x00
+#define AST_VHUB_EP_DMA_CTLSTAT 0x04
+#define AST_VHUB_EP_DESC_BASE 0x08
+#define AST_VHUB_EP_DESC_STATUS 0x0C
+
+/* EP config reg */
+#define VHUB_EP_CFG_SET_MAX_PKT(x) (((x) & 0x3ff) << 16)
+#define VHUB_EP_CFG_AUTO_DATA_DISABLE (1 << 13)
+#define VHUB_EP_CFG_STALL_CTRL (1 << 12)
+#define VHUB_EP_CFG_SET_EP_NUM(x) (((x) & 0xf) << 8)
+#define VHUB_EP_CFG_SET_TYPE(x) ((x) << 5)
+#define EP_TYPE_OFF 0
+#define EP_TYPE_BULK 1
+#define EP_TYPE_INT 2
+#define EP_TYPE_ISO 3
+#define VHUB_EP_CFG_DIR_OUT (1 << 4)
+#define VHUB_EP_CFG_SET_DEV(x) ((x) << 1)
+#define VHUB_EP_CFG_ENABLE (1 << 0)
+
+/* EP DMA control */
+#define VHUB_EP_DMA_PROC_STATUS(x) (((x) >> 4) & 0xf)
+#define EP_DMA_PROC_RX_IDLE 0
+#define EP_DMA_PROC_TX_IDLE 8
+#define VHUB_EP_DMA_IN_LONG_MODE (1 << 3)
+#define VHUB_EP_DMA_OUT_CONTIG_MODE (1 << 3)
+#define VHUB_EP_DMA_CTRL_RESET (1 << 2)
+#define VHUB_EP_DMA_SINGLE_STAGE (1 << 1)
+#define VHUB_EP_DMA_DESC_MODE (1 << 0)
+
+/* EP DMA status */
+#define VHUB_EP_DMA_SET_TX_SIZE(x) ((x) << 16)
+#define VHUB_EP_DMA_TX_SIZE(x) (((x) >> 16) & 0x7ff)
+#define VHUB_EP_DMA_RPTR(x) (((x) >> 8) & 0xff)
+#define VHUB_EP_DMA_SET_RPTR(x) (((x) & 0xff) << 8)
+#define VHUB_EP_DMA_SET_CPU_WPTR(x) (x)
+#define VHUB_EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */
+
+/*******************************
+ * *
+ * DMA descriptors definitions *
+ * *
+ *******************************/
+
+/* Desc W1 IN */
+#define VHUB_DSC1_IN_INTERRUPT (1 << 31)
+#define VHUB_DSC1_IN_SPID_DATA0 (0 << 14)
+#define VHUB_DSC1_IN_SPID_DATA2 (1 << 14)
+#define VHUB_DSC1_IN_SPID_DATA1 (2 << 14)
+#define VHUB_DSC1_IN_SPID_MDATA (3 << 14)
+#define VHUB_DSC1_IN_SET_LEN(x) ((x) & 0xfff)
+#define VHUB_DSC1_IN_LEN(x) ((x) & 0xfff)
+
+/****************************************
+ * *
+ * Data structures and misc definitions *
+ * *
+ ****************************************/
+
+/*
+ * AST_VHUB_NUM_GEN_EPs and AST_VHUB_NUM_PORTS are kept to avoid breaking
+ * existing AST2400/AST2500 platforms. AST2600 and future vhub revisions
+ * should define number of downstream ports and endpoints in device tree.
+ */
+#define AST_VHUB_NUM_GEN_EPs 15 /* Generic non-0 EPs */
+#define AST_VHUB_NUM_PORTS 5 /* vHub ports */
+#define AST_VHUB_EP0_MAX_PACKET 64 /* EP0's max packet size */
+#define AST_VHUB_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */
+#define AST_VHUB_DESCS_COUNT 256 /* Use 256 descriptor mode (valid
+ * values are 256 and 32)
+ */
+
+struct ast_vhub;
+struct ast_vhub_dev;
+
+/*
+ * DMA descriptor (generic EPs only, currently only used
+ * for IN endpoints
+ */
+struct ast_vhub_desc {
+ __le32 w0;
+ __le32 w1;
+};
+
+/* A transfer request, either core-originated or internal */
+struct ast_vhub_req {
+ struct usb_request req;
+ struct list_head queue;
+
+ /* Actual count written to descriptors (desc mode only) */
+ unsigned int act_count;
+
+ /*
+ * Desc number of the final packet or -1. For non-desc
+ * mode (or ep0), any >= 0 value means "last packet"
+ */
+ int last_desc;
+
+ /* Request active (pending DMAs) */
+ bool active : 1;
+
+ /* Internal request (don't call back core) */
+ bool internal : 1;
+};
+#define to_ast_req(__ureq) container_of(__ureq, struct ast_vhub_req, req)
+
+/* Current state of an EP0 */
+enum ep0_state {
+ ep0_state_token,
+ ep0_state_data,
+ ep0_state_status,
+ ep0_state_stall,
+};
+
+/*
+ * An endpoint, either generic, ep0, actual gadget EP
+ * or internal use vhub EP0. vhub EP1 doesn't have an
+ * associated structure as it's mostly HW managed.
+ */
+struct ast_vhub_ep {
+ struct usb_ep ep;
+
+ /* Request queue */
+ struct list_head queue;
+
+ /* EP index in the device, 0 means this is an EP0 */
+ unsigned int d_idx;
+
+ /* Dev pointer or NULL for vHub EP0 */
+ struct ast_vhub_dev *dev;
+
+ /* vHub itself */
+ struct ast_vhub *vhub;
+
+ /*
+ * DMA buffer for EP0, fallback DMA buffer for misaligned
+ * OUT transfers for generic EPs
+ */
+ void *buf;
+ dma_addr_t buf_dma;
+
+ /* The rest depends on the EP type */
+ union {
+ /* EP0 (either device or vhub) */
+ struct {
+ /*
+ * EP0 registers are "similar" for
+ * vHub and devices but located in
+ * different places.
+ */
+ void __iomem *ctlstat;
+ void __iomem *setup;
+
+ /* Current state & direction */
+ enum ep0_state state;
+ bool dir_in;
+
+ /* Internal use request */
+ struct ast_vhub_req req;
+ } ep0;
+
+ /* Generic endpoint (aka EPn) */
+ struct {
+ /* Registers */
+ void __iomem *regs;
+
+ /* Index in global pool (zero-based) */
+ unsigned int g_idx;
+
+ /* DMA Descriptors */
+ struct ast_vhub_desc *descs;
+ dma_addr_t descs_dma;
+ unsigned int d_next;
+ unsigned int d_last;
+ unsigned int dma_conf;
+
+ /* Max chunk size for IN EPs */
+ unsigned int chunk_max;
+
+ /* State flags */
+ bool is_in : 1;
+ bool is_iso : 1;
+ bool stalled : 1;
+ bool wedged : 1;
+ bool enabled : 1;
+ bool desc_mode : 1;
+ } epn;
+ };
+};
+#define to_ast_ep(__uep) container_of(__uep, struct ast_vhub_ep, ep)
+
+/* A device attached to a vHub port */
+struct ast_vhub_dev {
+ struct ast_vhub *vhub;
+ void __iomem *regs;
+
+ /* Device index (zero-based) and name string */
+ unsigned int index;
+ const char *name;
+
+ /* sysfs enclosure for the gadget gunk */
+ struct device *port_dev;
+
+ /* Link to gadget core */
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ bool registered : 1;
+ bool wakeup_en : 1;
+ bool enabled : 1;
+
+ /* Endpoint structures */
+ struct ast_vhub_ep ep0;
+ struct ast_vhub_ep **epns;
+ u32 max_epns;
+
+};
+#define to_ast_dev(__g) container_of(__g, struct ast_vhub_dev, gadget)
+
+/* Per vhub port state information structure */
+struct ast_vhub_port {
+ /* Port status & status change registers */
+ u16 status;
+ u16 change;
+
+ /* Associated device slot */
+ struct ast_vhub_dev dev;
+};
+
+struct ast_vhub_full_cdesc {
+ struct usb_config_descriptor cfg;
+ struct usb_interface_descriptor intf;
+ struct usb_endpoint_descriptor ep;
+} __packed;
+
+/* Global vhub structure */
+struct ast_vhub {
+ struct platform_device *pdev;
+ void __iomem *regs;
+ int irq;
+ spinlock_t lock;
+ struct work_struct wake_work;
+ struct clk *clk;
+
+ /* EP0 DMA buffers allocated in one chunk */
+ void *ep0_bufs;
+ dma_addr_t ep0_bufs_dma;
+
+ /* EP0 of the vhub itself */
+ struct ast_vhub_ep ep0;
+
+ /* State of vhub ep1 */
+ bool ep1_stalled : 1;
+
+ /* Per-port info */
+ struct ast_vhub_port *ports;
+ u32 max_ports;
+ u32 port_irq_mask;
+
+ /* Generic EP data structures */
+ struct ast_vhub_ep *epns;
+ u32 max_epns;
+
+ /* Upstream bus is suspended ? */
+ bool suspended : 1;
+
+ /* Hub itself can signal remote wakeup */
+ bool wakeup_en : 1;
+
+ /* Force full speed only */
+ bool force_usb1 : 1;
+
+ /* Upstream bus speed captured at bus reset */
+ unsigned int speed;
+
+ /* Standard USB Descriptors of the vhub. */
+ struct usb_device_descriptor vhub_dev_desc;
+ struct ast_vhub_full_cdesc vhub_conf_desc;
+ struct usb_hub_descriptor vhub_hub_desc;
+ struct list_head vhub_str_desc;
+ struct usb_qualifier_descriptor vhub_qual_desc;
+};
+
+/* Standard request handlers result codes */
+enum std_req_rc {
+ std_req_stall = -1, /* Stall requested */
+ std_req_complete = 0, /* Request completed with no data */
+ std_req_data = 1, /* Request completed with data */
+	std_req_driver = 2,	/* Pass on to the gadget driver */
+};
+
+#ifdef CONFIG_USB_GADGET_VERBOSE
+#define UDCVDBG(u, fmt...) dev_dbg(&(u)->pdev->dev, fmt)
+
+#define EPVDBG(ep, fmt, ...) do { \
+ dev_dbg(&(ep)->vhub->pdev->dev, \
+ "%s:EP%d " fmt, \
+ (ep)->dev ? (ep)->dev->name : "hub", \
+ (ep)->d_idx, ##__VA_ARGS__); \
+ } while(0)
+
+#define DVDBG(d, fmt, ...) do { \
+ dev_dbg(&(d)->vhub->pdev->dev, \
+ "%s " fmt, (d)->name, \
+ ##__VA_ARGS__); \
+ } while(0)
+
+#else
+#define UDCVDBG(u, fmt...) do { } while(0)
+#define EPVDBG(ep, fmt, ...) do { } while(0)
+#define DVDBG(d, fmt, ...) do { } while(0)
+#endif
+
+#ifdef CONFIG_USB_GADGET_DEBUG
+#define UDCDBG(u, fmt...) dev_dbg(&(u)->pdev->dev, fmt)
+
+#define EPDBG(ep, fmt, ...) do { \
+ dev_dbg(&(ep)->vhub->pdev->dev, \
+ "%s:EP%d " fmt, \
+ (ep)->dev ? (ep)->dev->name : "hub", \
+ (ep)->d_idx, ##__VA_ARGS__); \
+ } while(0)
+
+#define DDBG(d, fmt, ...) do { \
+ dev_dbg(&(d)->vhub->pdev->dev, \
+ "%s " fmt, (d)->name, \
+ ##__VA_ARGS__); \
+ } while(0)
+#else
+#define UDCDBG(u, fmt...) do { } while(0)
+#define EPDBG(ep, fmt, ...) do { } while(0)
+#define DDBG(d, fmt, ...) do { } while(0)
+#endif
+
+static inline void vhub_dma_workaround(void *addr)
+{
+ /*
+ * This works around a confirmed HW issue with the Aspeed chip.
+ *
+ * The core uses a different bus to memory than the AHB going to
+ * the USB device controller. Due to the latter having a higher
+ * priority than the core for arbitration on that bus, it's
+	 * possible for an MMIO write to the device, followed by a DMA
+	 * read by the device from memory, to both be performed and
+	 * serviced before a previous store to memory has completed.
+	 *
+	 * Thus the following scenario can happen:
+ *
+ * - Driver writes to a DMA descriptor (Mbus)
+ * - Driver writes to the MMIO register to start the DMA (AHB)
+ * - The gadget sees the second write and sends a read of the
+ * descriptor to the memory controller (Mbus)
+ * - The gadget hits memory before the descriptor write
+ * causing it to read an obsolete value.
+ *
+	 * Thankfully the problem is limited to the USB gadget device; other
+ * masters in the SoC all have a lower priority than the core, thus
+ * ensuring that the store by the core arrives first.
+ *
+ * The workaround consists of using a dummy read of the memory before
+ * doing the MMIO writes. This will ensure that the previous writes
+ * have been "pushed out".
+ */
+ mb();
+ (void)__raw_readl((void __iomem *)addr);
+}
+
+/* core.c */
+void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ int status);
+void ast_vhub_nuke(struct ast_vhub_ep *ep, int status);
+struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep,
+ gfp_t gfp_flags);
+void ast_vhub_free_request(struct usb_ep *u_ep, struct usb_request *u_req);
+void ast_vhub_init_hw(struct ast_vhub *vhub);
+
+/* ep0.c */
+void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack);
+void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep);
+void ast_vhub_reset_ep0(struct ast_vhub_dev *dev);
+void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
+ struct ast_vhub_dev *dev);
+int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len);
+int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...);
+#define ast_vhub_simple_reply(udc, ...) \
+ __ast_vhub_simple_reply((udc), \
+ sizeof((u8[]) { __VA_ARGS__ })/sizeof(u8), \
+ __VA_ARGS__)
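+/*
+ * The compound-literal sizeof() above counts the variadic byte
+ * arguments at compile time, so e.g. ast_vhub_simple_reply(ep, st0, 0)
+ * expands to __ast_vhub_simple_reply(ep, 2, st0, 0).
+ */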
+
+/* hub.c */
+int ast_vhub_init_hub(struct ast_vhub *vhub);
+enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
+ struct usb_ctrlrequest *crq);
+enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
+ struct usb_ctrlrequest *crq);
+void ast_vhub_device_connect(struct ast_vhub *vhub, unsigned int port,
+ bool on);
+void ast_vhub_hub_suspend(struct ast_vhub *vhub);
+void ast_vhub_hub_resume(struct ast_vhub *vhub);
+void ast_vhub_hub_reset(struct ast_vhub *vhub);
+void ast_vhub_hub_wake_all(struct ast_vhub *vhub);
+
+/* dev.c */
+int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx);
+void ast_vhub_del_dev(struct ast_vhub_dev *d);
+void ast_vhub_dev_irq(struct ast_vhub_dev *d);
+int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
+ struct usb_ctrlrequest *crq);
+
+/* epn.c */
+void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep);
+void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep);
+struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr);
+void ast_vhub_dev_suspend(struct ast_vhub_dev *d);
+void ast_vhub_dev_resume(struct ast_vhub_dev *d);
+void ast_vhub_dev_reset(struct ast_vhub_dev *d);
+
+#endif /* __ASPEED_VHUB_H */
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
new file mode 100644
index 000000000..01968e216
--- /dev/null
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -0,0 +1,1597 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/slab.h>
+
+#define AST_UDC_NUM_ENDPOINTS (1 + 4)
+#define AST_UDC_EP0_MAX_PACKET 64 /* EP0's max packet size */
+#define AST_UDC_EPn_MAX_PACKET 1024 /* Generic EPs max packet size */
+#define AST_UDC_DESCS_COUNT	256	/* Use 256-stage descriptor mode (valid values: 32/256) */
+#define AST_UDC_DESC_MODE 1 /* Single/Multiple Stage(s) Descriptor Mode */
+
+#define AST_UDC_EP_DMA_SIZE (AST_UDC_EPn_MAX_PACKET + 8 * AST_UDC_DESCS_COUNT)
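+/*
+ * Per-EP DMA area: one max-sized packet buffer followed by
+ * AST_UDC_DESCS_COUNT descriptors of 8 bytes each (struct
+ * ast_dma_desc below).
+ */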
+
+/*****************************
+ * *
+ * UDC register definitions *
+ * *
+ *****************************/
+
+#define AST_UDC_FUNC_CTRL 0x00 /* Root Function Control & Status Register */
+#define AST_UDC_CONFIG 0x04 /* Root Configuration Setting Register */
+#define AST_UDC_IER 0x08 /* Interrupt Control Register */
+#define AST_UDC_ISR 0x0C /* Interrupt Status Register */
+#define AST_UDC_EP_ACK_IER 0x10 /* Programmable ep Pool ACK Interrupt Enable Reg */
+#define AST_UDC_EP_NAK_IER 0x14 /* Programmable ep Pool NAK Interrupt Enable Reg */
+#define AST_UDC_EP_ACK_ISR 0x18 /* Programmable ep Pool ACK Interrupt Status Reg */
+#define AST_UDC_EP_NAK_ISR 0x1C /* Programmable ep Pool NAK Interrupt Status Reg */
+#define AST_UDC_DEV_RESET 0x20 /* Device Controller Soft Reset Enable Register */
+#define AST_UDC_STS 0x24 /* USB Status Register */
+#define AST_VHUB_EP_DATA 0x28 /* Programmable ep Pool Data Toggle Value Set */
+#define AST_VHUB_ISO_TX_FAIL 0x2C /* Isochronous Transaction Fail Accumulator */
+#define AST_UDC_EP0_CTRL 0x30 /* Endpoint 0 Control/Status Register */
+#define AST_UDC_EP0_DATA_BUFF 0x34 /* Base Address of ep0 IN/OUT Data Buffer Reg */
+#define AST_UDC_SETUP0 0x80 /* Root Device Setup Data Buffer0 */
+#define AST_UDC_SETUP1 0x84 /* Root Device Setup Data Buffer1 */
+
+
+/* Main control reg */
+#define USB_PHY_CLK_EN BIT(31)
+#define USB_FIFO_DYN_PWRD_EN BIT(19)
+#define USB_EP_LONG_DESC BIT(18)
+#define USB_BIST_TEST_PASS BIT(13)
+#define USB_BIST_TURN_ON BIT(12)
+#define USB_PHY_RESET_DIS BIT(11)
+#define USB_TEST_MODE(x) ((x) << 8)
+#define USB_FORCE_TIMER_HS BIT(7)
+#define USB_FORCE_HS BIT(6)
+#define USB_REMOTE_WAKEUP_12MS BIT(5)
+#define USB_REMOTE_WAKEUP_EN BIT(4)
+#define USB_AUTO_REMOTE_WAKEUP_EN BIT(3)
+#define USB_STOP_CLK_IN_SUPEND BIT(2)
+#define USB_UPSTREAM_FS BIT(1)
+#define USB_UPSTREAM_EN BIT(0)
+
+/* Main config reg */
+#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
+#define UDC_CFG_ADDR_MASK (0x3f)
+
+/* Interrupt ctrl & status reg */
+#define UDC_IRQ_EP_POOL_NAK BIT(17)
+#define UDC_IRQ_EP_POOL_ACK_STALL BIT(16)
+#define UDC_IRQ_BUS_RESUME BIT(8)
+#define UDC_IRQ_BUS_SUSPEND BIT(7)
+#define UDC_IRQ_BUS_RESET BIT(6)
+#define UDC_IRQ_EP0_IN_DATA_NAK BIT(4)
+#define UDC_IRQ_EP0_IN_ACK_STALL BIT(3)
+#define UDC_IRQ_EP0_OUT_NAK BIT(2)
+#define UDC_IRQ_EP0_OUT_ACK_STALL BIT(1)
+#define UDC_IRQ_EP0_SETUP BIT(0)
+#define UDC_IRQ_ACK_ALL (0x1ff)
+
+/* EP isr reg */
+#define USB_EP3_ISR BIT(3)
+#define USB_EP2_ISR BIT(2)
+#define USB_EP1_ISR BIT(1)
+#define USB_EP0_ISR BIT(0)
+#define UDC_IRQ_EP_ACK_ALL (0xf)
+
+/* Soft reset reg */
+#define ROOT_UDC_SOFT_RESET BIT(0)
+
+/* USB status reg */
+#define UDC_STS_HIGHSPEED BIT(27)
+
+/* Programmable EP data toggle */
+#define EP_TOGGLE_SET_EPNUM(x) ((x) & 0x3)
+
+/* EP0 ctrl reg */
+#define EP0_GET_RX_LEN(x) ((x >> 16) & 0x7f)
+#define EP0_TX_LEN(x) ((x & 0x7f) << 8)
+#define EP0_RX_BUFF_RDY BIT(2)
+#define EP0_TX_BUFF_RDY BIT(1)
+#define EP0_STALL BIT(0)
+
+/*************************************
+ * *
+ * per-endpoint register definitions *
+ * *
+ *************************************/
+
+#define AST_UDC_EP_CONFIG 0x00 /* Endpoint Configuration Register */
+#define AST_UDC_EP_DMA_CTRL 0x04 /* DMA Descriptor List Control/Status Register */
+#define AST_UDC_EP_DMA_BUFF 0x08 /* DMA Descriptor/Buffer Base Address */
+#define AST_UDC_EP_DMA_STS 0x0C /* DMA Descriptor List R/W Pointer and Status */
+
+#define AST_UDC_EP_BASE 0x200
+#define AST_UDC_EP_OFFSET 0x10
+
+/* EP config reg */
+#define EP_SET_MAX_PKT(x) ((x & 0x3ff) << 16)
+#define EP_DATA_FETCH_CTRL(x) ((x & 0x3) << 14)
+#define EP_AUTO_DATA_DISABLE (0x1 << 13)
+#define EP_SET_EP_STALL (0x1 << 12)
+#define EP_SET_EP_NUM(x) ((x & 0xf) << 8)
+#define EP_SET_TYPE_MASK(x) ((x) << 5)
+#define EP_TYPE_BULK (0x1)
+#define EP_TYPE_INT (0x2)
+#define EP_TYPE_ISO (0x3)
+#define EP_DIR_OUT (0x1 << 4)
+#define EP_ALLOCATED_MASK (0x7 << 1)
+#define EP_ENABLE BIT(0)
+
+/* EP DMA ctrl reg */
+#define EP_DMA_CTRL_GET_PROC_STS(x) ((x >> 4) & 0xf)
+#define EP_DMA_CTRL_STS_RX_IDLE 0x0
+#define EP_DMA_CTRL_STS_TX_IDLE 0x8
+#define EP_DMA_CTRL_IN_LONG_MODE (0x1 << 3)
+#define EP_DMA_CTRL_RESET (0x1 << 2)
+#define EP_DMA_SINGLE_STAGE (0x1 << 1)
+#define EP_DMA_DESC_MODE (0x1 << 0)
+
+/* EP DMA status reg */
+#define EP_DMA_SET_TX_SIZE(x) ((x & 0x7ff) << 16)
+#define EP_DMA_GET_TX_SIZE(x) (((x) >> 16) & 0x7ff)
+#define EP_DMA_GET_RPTR(x) (((x) >> 8) & 0xff)
+#define EP_DMA_GET_WPTR(x) ((x) & 0xff)
+#define EP_DMA_SINGLE_KICK (1 << 0) /* WPTR = 1 for single mode */
+
+/* EP desc reg */
+#define AST_EP_DMA_DESC_INTR_ENABLE BIT(31)
+#define AST_EP_DMA_DESC_PID_DATA0 (0 << 14)
+#define AST_EP_DMA_DESC_PID_DATA2 BIT(14)
+#define AST_EP_DMA_DESC_PID_DATA1 (2 << 14)
+#define AST_EP_DMA_DESC_PID_MDATA (3 << 14)
+#define EP_DESC1_IN_LEN(x) ((x) & 0x1fff)
+#define AST_EP_DMA_DESC_MAX_LEN		(7680)	/* Max packet length for transmit in 1 desc */
+
+struct ast_udc_request {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned mapped:1;
+ unsigned int actual_dma_length;
+ u32 saved_dma_wptr;
+};
+
+#define to_ast_req(__req) container_of(__req, struct ast_udc_request, req)
+
+struct ast_dma_desc {
+ u32 des_0;
+ u32 des_1;
+};
+
+struct ast_udc_ep {
+ struct usb_ep ep;
+
+ /* Request queue */
+ struct list_head queue;
+
+ struct ast_udc_dev *udc;
+ void __iomem *ep_reg;
+ void *epn_buf;
+ dma_addr_t epn_buf_dma;
+ const struct usb_endpoint_descriptor *desc;
+
+ /* DMA Descriptors */
+ struct ast_dma_desc *descs;
+ dma_addr_t descs_dma;
+ u32 descs_wptr;
+ u32 chunk_max;
+
+ bool dir_in:1;
+ unsigned stopped:1;
+ bool desc_mode:1;
+};
+
+#define to_ast_ep(__ep) container_of(__ep, struct ast_udc_ep, ep)
+
+struct ast_udc_dev {
+ struct platform_device *pdev;
+ void __iomem *reg;
+ int irq;
+ spinlock_t lock;
+ struct clk *clk;
+ struct work_struct wake_work;
+
+ /* EP0 DMA buffers allocated in one chunk */
+ void *ep0_buf;
+ dma_addr_t ep0_buf_dma;
+ struct ast_udc_ep ep[AST_UDC_NUM_ENDPOINTS];
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ void __iomem *creq;
+ enum usb_device_state suspended_from;
+ int desc_mode;
+
+ /* Force full speed only */
+ bool force_usb1:1;
+ unsigned is_control_tx:1;
+ bool wakeup_en:1;
+};
+
+#define to_ast_dev(__g) container_of(__g, struct ast_udc_dev, gadget)
+
+static const char * const ast_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4"
+};
+
+#ifdef AST_UDC_DEBUG_ALL
+#define AST_UDC_DEBUG
+#define AST_SETUP_DEBUG
+#define AST_EP_DEBUG
+#define AST_ISR_DEBUG
+#endif
+
+#ifdef AST_SETUP_DEBUG
+#define SETUP_DBG(u, fmt, ...) \
+ dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define SETUP_DBG(u, fmt, ...)
+#endif
+
+#ifdef AST_EP_DEBUG
+#define EP_DBG(e, fmt, ...) \
+ dev_dbg(&(e)->udc->pdev->dev, "%s():%s " fmt, __func__, \
+ (e)->ep.name, ##__VA_ARGS__)
+#else
+#define EP_DBG(ep, fmt, ...) ((void)(ep))
+#endif
+
+#ifdef AST_UDC_DEBUG
+#define UDC_DBG(u, fmt, ...) \
+ dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define UDC_DBG(u, fmt, ...)
+#endif
+
+#ifdef AST_ISR_DEBUG
+#define ISR_DBG(u, fmt, ...) \
+ dev_dbg(&(u)->pdev->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define ISR_DBG(u, fmt, ...)
+#endif
+
+/*-------------------------------------------------------------------------*/
+#define ast_udc_read(udc, offset) \
+ readl((udc)->reg + (offset))
+#define ast_udc_write(udc, val, offset) \
+ writel((val), (udc)->reg + (offset))
+
+#define ast_ep_read(ep, reg) \
+ readl((ep)->ep_reg + (reg))
+#define ast_ep_write(ep, val, reg) \
+ writel((val), (ep)->ep_reg + (reg))
+
+/*-------------------------------------------------------------------------*/
+
+static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req,
+ int status)
+{
+ struct ast_udc_dev *udc = ep->udc;
+
+ EP_DBG(ep, "req @%p, len (%d/%d), buf:0x%x, dir:0x%x\n",
+ req, req->req.actual, req->req.length,
+ (u32)req->req.buf, ep->dir_in);
+
+ list_del(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (status && status != -ESHUTDOWN)
+ EP_DBG(ep, "done req:%p, status:%d\n", req, status);
+
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+}
+
+static void ast_udc_nuke(struct ast_udc_ep *ep, int status)
+{
+ int count = 0;
+
+ while (!list_empty(&ep->queue)) {
+ struct ast_udc_request *req;
+
+ req = list_entry(ep->queue.next, struct ast_udc_request,
+ queue);
+ ast_udc_done(ep, req, status);
+ count++;
+ }
+
+ if (count)
+ EP_DBG(ep, "Nuked %d request(s)\n", count);
+}
+
+/*
+ * Stop activity on all endpoints of the given device controller.
+ *
+ * All endpoints are stopped and any pending transfer requests on
+ * them are terminated.
+ */
+static void ast_udc_stop_activity(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep;
+ int i;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->stopped = 1;
+ ast_udc_nuke(ep, -ESHUTDOWN);
+ }
+}
+
+static int ast_udc_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ u16 maxpacket = usb_endpoint_maxp(desc);
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ u8 epnum = usb_endpoint_num(desc);
+ unsigned long flags;
+ u32 ep_conf = 0;
+ u8 dir_in;
+ u8 type;
+
+ if (!_ep || !ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
+ maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
+ EP_DBG(ep, "Failed, invalid EP enable param\n");
+ return -EINVAL;
+ }
+
+ if (!udc->driver) {
+ EP_DBG(ep, "bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ EP_DBG(ep, "maxpacket:0x%x\n", maxpacket);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ep->desc = desc;
+ ep->stopped = 0;
+ ep->ep.maxpacket = maxpacket;
+ ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN;
+
+ if (maxpacket < AST_UDC_EPn_MAX_PACKET)
+ ep_conf = EP_SET_MAX_PKT(maxpacket);
+
+ ep_conf |= EP_SET_EP_NUM(epnum);
+
+ type = usb_endpoint_type(desc);
+ dir_in = usb_endpoint_dir_in(desc);
+ ep->dir_in = dir_in;
+ if (!ep->dir_in)
+ ep_conf |= EP_DIR_OUT;
+
+ EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in);
+ switch (type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO);
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_BULK);
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_INT);
+ break;
+ }
+
+ ep->desc_mode = udc->desc_mode && ep->descs_dma && ep->dir_in;
+ if (ep->desc_mode) {
+ ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
+ ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
+ ast_ep_write(ep, ep->descs_dma, AST_UDC_EP_DMA_BUFF);
+
+ /* Enable Long Descriptor Mode */
+ ast_ep_write(ep, EP_DMA_CTRL_IN_LONG_MODE | EP_DMA_DESC_MODE,
+ AST_UDC_EP_DMA_CTRL);
+
+ ep->descs_wptr = 0;
+
+ } else {
+ ast_ep_write(ep, EP_DMA_CTRL_RESET, AST_UDC_EP_DMA_CTRL);
+ ast_ep_write(ep, EP_DMA_SINGLE_STAGE, AST_UDC_EP_DMA_CTRL);
+ ast_ep_write(ep, 0, AST_UDC_EP_DMA_STS);
+ }
+
+ /* Cleanup data toggle just in case */
+ ast_udc_write(udc, EP_TOGGLE_SET_EPNUM(epnum), AST_VHUB_EP_DATA);
+
+ /* Enable EP */
+ ast_ep_write(ep, ep_conf | EP_ENABLE, AST_UDC_EP_CONFIG);
+
+ EP_DBG(ep, "ep_config: 0x%x\n", ast_ep_read(ep, AST_UDC_EP_CONFIG));
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int ast_udc_ep_disable(struct usb_ep *_ep)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+
+ ast_udc_nuke(ep, -ESHUTDOWN);
+ ast_ep_write(ep, 0, AST_UDC_EP_CONFIG);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static struct usb_request *ast_udc_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_request *req;
+
+ req = kzalloc(sizeof(struct ast_udc_request), gfp_flags);
+ if (!req) {
+ EP_DBG(ep, "request allocation failed\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void ast_udc_ep_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct ast_udc_request *req = to_ast_req(_req);
+
+ kfree(req);
+}
+
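+/*
+ * Build DMA descriptor ring entries for one transfer chunk.
+ *
+ * The tx_len bytes at dma_buf are split into pieces of at most
+ * ep->chunk_max bytes; each piece becomes one descriptor, with des_0
+ * holding the buffer address and des_1 the length. The write pointer
+ * wraps at AST_UDC_DESCS_COUNT, and the index of the first descriptor
+ * is saved in req->saved_dma_wptr so the completion handler can walk
+ * the ring and sum up the transferred lengths.
+ */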
+static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
+ u16 tx_len, struct ast_udc_request *req)
+{
+ struct ast_udc_dev *udc = ep->udc;
+ struct device *dev = &udc->pdev->dev;
+ bool last = false;
+ int chunk, count;
+ u32 offset;
+
+ if (!ep->descs) {
+ dev_warn(dev, "%s: Empty DMA descs list failure\n",
+ ep->ep.name);
+ return -EINVAL;
+ }
+
+ chunk = tx_len;
+ offset = count = 0;
+
+ EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req,
+ "wptr", ep->descs_wptr, "dma_buf", dma_buf,
+ "tx_len", tx_len);
+
+ /* Create Descriptor Lists */
+ while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) {
+
+ ep->descs[ep->descs_wptr].des_0 = dma_buf + offset;
+
+ if (chunk > ep->chunk_max) {
+ ep->descs[ep->descs_wptr].des_1 = ep->chunk_max;
+ } else {
+ ep->descs[ep->descs_wptr].des_1 = chunk;
+ last = true;
+ }
+
+ chunk -= ep->chunk_max;
+
+ EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n",
+ ep->descs_wptr,
+ ep->descs[ep->descs_wptr].des_0,
+ ep->descs[ep->descs_wptr].des_1);
+
+ if (count == 0)
+ req->saved_dma_wptr = ep->descs_wptr;
+
+ ep->descs_wptr++;
+ count++;
+
+ if (ep->descs_wptr >= AST_UDC_DESCS_COUNT)
+ ep->descs_wptr = 0;
+
+ offset = ep->chunk_max * count;
+ }
+
+ return 0;
+}
+
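+/*
+ * Kick a single-stage (non-descriptor) DMA transfer: program the
+ * buffer address, then write the transfer size twice, the second
+ * time with the kick bit set to start the engine. At most one
+ * max-packet worth of data is transferred per kick.
+ */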
+static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
+{
+ u32 tx_len;
+ u32 last;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;
+
+ EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n",
+ req, tx_len, ep->dir_in);
+
+ ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF);
+
+ /* Start DMA */
+ ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len), AST_UDC_EP_DMA_STS);
+ ast_ep_write(ep, EP_DMA_SET_TX_SIZE(tx_len) | EP_DMA_SINGLE_KICK,
+ AST_UDC_EP_DMA_STS);
+}
+
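+/*
+ * Kick a descriptor-mode DMA transfer: build descriptors for up to
+ * AST_UDC_DESCS_COUNT * AST_EP_DMA_DESC_MAX_LEN bytes in one go,
+ * then advance the hardware write pointer to start the engine.
+ */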
+static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep,
+ struct ast_udc_request *req)
+{
+ u32 descs_max_size;
+ u32 tx_len;
+ u32 last;
+
+ descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > descs_max_size ? descs_max_size : last;
+
+ EP_DBG(ep, "kick req @%p, %s:%d, %s:0x%x, %s:0x%x (%d/%d), %s:0x%x\n",
+ req, "tx_len", tx_len, "dir_in", ep->dir_in,
+ "dma", req->req.dma + req->req.actual,
+ req->req.actual, req->req.length,
+ "descs_max_size", descs_max_size);
+
+ if (!ast_dma_descriptor_setup(ep, req->req.dma + req->req.actual,
+ tx_len, req))
+ req->actual_dma_length += tx_len;
+
+	/* Make sure the CPU has finished everything before triggering DMA */
+ mb();
+
+ ast_ep_write(ep, ep->descs_wptr, AST_UDC_EP_DMA_STS);
+
+ EP_DBG(ep, "descs_wptr:%d, dstat:0x%x, dctrl:0x%x\n",
+ ep->descs_wptr,
+ ast_ep_read(ep, AST_UDC_EP_DMA_STS),
+ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
+}
+
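+/*
+ * Queue the next chunk of an EP0 data stage. IN transfers load the
+ * TX length and set the TX buffer-ready bit; OUT transfers either arm
+ * the RX buffer or, for zero-length requests, send TX as the status
+ * stage completion.
+ */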
+static void ast_udc_ep0_queue(struct ast_udc_ep *ep,
+ struct ast_udc_request *req)
+{
+ struct ast_udc_dev *udc = ep->udc;
+ u32 tx_len;
+ u32 last;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > ep->ep.maxpacket ? ep->ep.maxpacket : last;
+
+ ast_udc_write(udc, req->req.dma + req->req.actual,
+ AST_UDC_EP0_DATA_BUFF);
+
+ if (ep->dir_in) {
+ /* IN requests, send data */
+ SETUP_DBG(udc, "IN: %s:0x%x, %s:0x%x, %s:%d (%d/%d), %s:%d\n",
+ "buf", (u32)req->req.buf,
+ "dma", req->req.dma + req->req.actual,
+ "tx_len", tx_len,
+ req->req.actual, req->req.length,
+ "dir_in", ep->dir_in);
+
+ req->req.actual += tx_len;
+ ast_udc_write(udc, EP0_TX_LEN(tx_len), AST_UDC_EP0_CTRL);
+ ast_udc_write(udc, EP0_TX_LEN(tx_len) | EP0_TX_BUFF_RDY,
+ AST_UDC_EP0_CTRL);
+
+ } else {
+ /* OUT requests, receive data */
+ SETUP_DBG(udc, "OUT: %s:%x, %s:%x, %s:(%d/%d), %s:%d\n",
+ "buf", (u32)req->req.buf,
+ "dma", req->req.dma + req->req.actual,
+ "len", req->req.actual, req->req.length,
+ "dir_in", ep->dir_in);
+
+ if (!req->req.length) {
+ /* 0 len request, send tx as completion */
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+ ep->dir_in = 0x1;
+ } else
+ ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
+ }
+}
+
+static int ast_udc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct ast_udc_request *req = to_ast_req(_req);
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ struct device *dev = &udc->pdev->dev;
+ unsigned long flags;
+ int rc;
+
+ if (unlikely(!_req || !_req->complete || !_req->buf || !_ep)) {
+ dev_warn(dev, "Invalid EP request !\n");
+ return -EINVAL;
+ }
+
+ if (ep->stopped) {
+ dev_warn(dev, "%s is already stopped !\n", _ep->name);
+ return -ESHUTDOWN;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+ req->actual_dma_length = 0;
+
+ rc = usb_gadget_map_request(&udc->gadget, &req->req, ep->dir_in);
+ if (rc) {
+ EP_DBG(ep, "Request mapping failure %d\n", rc);
+ dev_warn(dev, "Request mapping failure %d\n", rc);
+ goto end;
+ }
+
+ EP_DBG(ep, "enqueue req @%p\n", req);
+ EP_DBG(ep, "l=%d, dma:0x%x, zero:%d, is_in:%d\n",
+ _req->length, _req->dma, _req->zero, ep->dir_in);
+
+ /* EP0 request enqueue */
+ if (ep->ep.desc == NULL) {
+ if ((req->req.dma % 4) != 0) {
+ dev_warn(dev, "EP0 req dma alignment error\n");
+ rc = -ESHUTDOWN;
+ goto end;
+ }
+
+ ast_udc_ep0_queue(ep, req);
+ goto end;
+ }
+
+ /* EPn request enqueue */
+ if (list_is_singular(&ep->queue)) {
+ if (ep->desc_mode)
+ ast_udc_epn_kick_desc(ep, req);
+ else
+ ast_udc_epn_kick(ep, req);
+ }
+
+end:
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return rc;
+}
+
+static int ast_udc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ struct ast_udc_request *req;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req) {
+			list_del_init(&req->queue);
+			ast_udc_done(ep, req, -ECONNRESET);
+			break;
+		}
+	}
+
+ /* dequeue request not found */
+ if (&req->req != _req)
+ rc = -EINVAL;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return rc;
+}
+
+static int ast_udc_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct ast_udc_ep *ep = to_ast_ep(_ep);
+ struct ast_udc_dev *udc = ep->udc;
+ unsigned long flags;
+ int epnum;
+ u32 ctrl;
+
+ EP_DBG(ep, "val:%d\n", value);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ epnum = usb_endpoint_num(ep->desc);
+
+ /* EP0 */
+ if (epnum == 0) {
+ ctrl = ast_udc_read(udc, AST_UDC_EP0_CTRL);
+ if (value)
+ ctrl |= EP0_STALL;
+ else
+ ctrl &= ~EP0_STALL;
+
+ ast_udc_write(udc, ctrl, AST_UDC_EP0_CTRL);
+
+ } else {
+ /* EPn */
+ ctrl = ast_udc_read(udc, AST_UDC_EP_CONFIG);
+ if (value)
+ ctrl |= EP_SET_EP_STALL;
+ else
+ ctrl &= ~EP_SET_EP_STALL;
+
+ ast_ep_write(ep, ctrl, AST_UDC_EP_CONFIG);
+
+		/* Only EPn is stopped; it waits for the halt to be cleared */
+ ep->stopped = value ? 1 : 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_ep_ops ast_udc_ep_ops = {
+ .enable = ast_udc_ep_enable,
+ .disable = ast_udc_ep_disable,
+ .alloc_request = ast_udc_ep_alloc_request,
+ .free_request = ast_udc_ep_free_request,
+ .queue = ast_udc_ep_queue,
+ .dequeue = ast_udc_ep_dequeue,
+ .set_halt = ast_udc_ep_set_halt,
+ /* there's only imprecise fifo status reporting */
+};
+
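+/*
+ * EP0 handshake helpers: point the hardware at the shared EP0 DMA
+ * buffer and mark it ready for the next RX or TX phase.
+ */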
+static void ast_udc_ep0_rx(struct ast_udc_dev *udc)
+{
+ ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
+ ast_udc_write(udc, EP0_RX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_ep0_tx(struct ast_udc_dev *udc)
+{
+ ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_ep0_out(struct ast_udc_dev *udc)
+{
+ struct device *dev = &udc->pdev->dev;
+ struct ast_udc_ep *ep = &udc->ep[0];
+ struct ast_udc_request *req;
+ u16 rx_len;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ req = list_entry(ep->queue.next, struct ast_udc_request, queue);
+
+ rx_len = EP0_GET_RX_LEN(ast_udc_read(udc, AST_UDC_EP0_CTRL));
+ req->req.actual += rx_len;
+
+ SETUP_DBG(udc, "req %p (%d/%d)\n", req,
+ req->req.actual, req->req.length);
+
+ if ((rx_len < ep->ep.maxpacket) ||
+ (req->req.actual == req->req.length)) {
+ ast_udc_ep0_tx(udc);
+ if (!ep->dir_in)
+ ast_udc_done(ep, req, 0);
+
+ } else {
+		if (rx_len > req->req.length) {
+			/* Received more data than requested; complete early */
+			dev_warn(dev, "RX length exceeds request (%d/%d)\n",
+				 req->req.actual, req->req.length);
+			ast_udc_ep0_tx(udc);
+			ast_udc_done(ep, req, 0);
+			return;
+		}
+
+ ep->dir_in = 0;
+
+		/* More work to do */
+ ast_udc_ep0_queue(ep, req);
+ }
+}
+
+static void ast_udc_ep0_in(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep = &udc->ep[0];
+ struct ast_udc_request *req;
+
+ if (list_empty(&ep->queue)) {
+ if (udc->is_control_tx) {
+ ast_udc_ep0_rx(udc);
+ udc->is_control_tx = 0;
+ }
+
+ return;
+ }
+
+ req = list_entry(ep->queue.next, struct ast_udc_request, queue);
+
+ SETUP_DBG(udc, "req %p (%d/%d)\n", req,
+ req->req.actual, req->req.length);
+
+ if (req->req.length == req->req.actual) {
+ if (req->req.length)
+ ast_udc_ep0_rx(udc);
+
+ if (ep->dir_in)
+ ast_udc_done(ep, req, 0);
+
+ } else {
+		/* More work to do */
+ ast_udc_ep0_queue(ep, req);
+ }
+}
+
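+/*
+ * Completion handling for single-stage mode: account the bytes moved
+ * by the last DMA, complete the request on full length or on a short
+ * packet, then kick the next chunk or the next queued request.
+ */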
+static void ast_udc_epn_handle(struct ast_udc_dev *udc, u16 ep_num)
+{
+ struct ast_udc_ep *ep = &udc->ep[ep_num];
+ struct ast_udc_request *req;
+ u16 len = 0;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ req = list_first_entry(&ep->queue, struct ast_udc_request, queue);
+
+ len = EP_DMA_GET_TX_SIZE(ast_ep_read(ep, AST_UDC_EP_DMA_STS));
+ req->req.actual += len;
+
+ EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
+ req->req.actual, req->req.length, "len", len);
+
+	/* This request is complete */
+ if (req->req.length == req->req.actual) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+
+ } else {
+ /* Check for short packet */
+ if (len < ep->ep.maxpacket) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+ }
+ }
+
+ /* More requests */
+ if (req)
+ ast_udc_epn_kick(ep, req);
+}
+
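+/*
+ * Completion handling for descriptor mode: once the DMA engine is
+ * idle and the ring read pointer has caught up with the write
+ * pointer, walk the descriptors from req->saved_dma_wptr to sum the
+ * transferred length, then complete and/or re-kick as in the
+ * single-stage case.
+ */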
+static void ast_udc_epn_handle_desc(struct ast_udc_dev *udc, u16 ep_num)
+{
+ struct ast_udc_ep *ep = &udc->ep[ep_num];
+ struct device *dev = &udc->pdev->dev;
+ struct ast_udc_request *req;
+ u32 proc_sts, wr_ptr, rd_ptr;
+ u32 len_in_desc, ctrl;
+ u16 total_len = 0;
+ int i;
+
+ if (list_empty(&ep->queue)) {
+ dev_warn(dev, "%s request queue empty!\n", ep->ep.name);
+ return;
+ }
+
+ req = list_first_entry(&ep->queue, struct ast_udc_request, queue);
+
+ ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_CTRL);
+ proc_sts = EP_DMA_CTRL_GET_PROC_STS(ctrl);
+
+	/* Check that the processing status is idle */
+ if (proc_sts != EP_DMA_CTRL_STS_RX_IDLE &&
+ proc_sts != EP_DMA_CTRL_STS_TX_IDLE) {
+ dev_warn(dev, "EP DMA CTRL: 0x%x, PS:0x%x\n",
+ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL),
+ proc_sts);
+ return;
+ }
+
+ ctrl = ast_ep_read(ep, AST_UDC_EP_DMA_STS);
+ rd_ptr = EP_DMA_GET_RPTR(ctrl);
+ wr_ptr = EP_DMA_GET_WPTR(ctrl);
+
+ if (rd_ptr != wr_ptr) {
+ dev_warn(dev, "desc list is not empty ! %s:%d, %s:%d\n",
+ "rptr", rd_ptr, "wptr", wr_ptr);
+ return;
+ }
+
+ EP_DBG(ep, "rd_ptr:%d, wr_ptr:%d\n", rd_ptr, wr_ptr);
+ i = req->saved_dma_wptr;
+
+ do {
+ len_in_desc = EP_DESC1_IN_LEN(ep->descs[i].des_1);
+ EP_DBG(ep, "desc[%d] len: %d\n", i, len_in_desc);
+ total_len += len_in_desc;
+ i++;
+ if (i >= AST_UDC_DESCS_COUNT)
+ i = 0;
+
+ } while (i != wr_ptr);
+
+ req->req.actual += total_len;
+
+ EP_DBG(ep, "req @%p, length:(%d/%d), %s:0x%x\n", req,
+ req->req.actual, req->req.length, "len", total_len);
+
+	/* This request is complete */
+ if (req->req.length == req->req.actual) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+
+ } else {
+ /* Check for short packet */
+ if (total_len < ep->ep.maxpacket) {
+ ast_udc_done(ep, req, 0);
+ req = list_first_entry_or_null(&ep->queue,
+ struct ast_udc_request,
+ queue);
+ }
+ }
+
+ /* More requests & dma descs not setup yet */
+ if (req && (req->actual_dma_length == req->req.actual)) {
+ EP_DBG(ep, "More requests\n");
+ ast_udc_epn_kick_desc(ep, req);
+ }
+}
+
+static void ast_udc_ep0_data_tx(struct ast_udc_dev *udc, u8 *tx_data, u32 len)
+{
+ if (len) {
+ memcpy(udc->ep0_buf, tx_data, len);
+
+ ast_udc_write(udc, udc->ep0_buf_dma, AST_UDC_EP0_DATA_BUFF);
+ ast_udc_write(udc, EP0_TX_LEN(len), AST_UDC_EP0_CTRL);
+ ast_udc_write(udc, EP0_TX_LEN(len) | EP0_TX_BUFF_RDY,
+ AST_UDC_EP0_CTRL);
+ udc->is_control_tx = 1;
+
+ } else
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_getstatus(struct ast_udc_dev *udc)
+{
+ struct usb_ctrlrequest crq;
+ struct ast_udc_ep *ep;
+ u16 status = 0;
+ u16 epnum = 0;
+
+ memcpy_fromio(&crq, udc->creq, sizeof(crq));
+
+ switch (crq.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ /* Get device status */
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ status = udc->ep[epnum].stopped;
+ break;
+ default:
+ goto stall;
+ }
+
+ ep = &udc->ep[epnum];
+ EP_DBG(ep, "status: 0x%x\n", status);
+ ast_udc_ep0_data_tx(udc, (u8 *)&status, sizeof(status));
+
+ return;
+
+stall:
+ EP_DBG(ep, "Can't respond request\n");
+ ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
+ AST_UDC_EP0_CTRL);
+}
+
+static void ast_udc_ep0_handle_setup(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep = &udc->ep[0];
+ struct ast_udc_request *req;
+ struct usb_ctrlrequest crq;
+ int req_num = 0;
+ int rc = 0;
+ u32 reg;
+
+ memcpy_fromio(&crq, udc->creq, sizeof(crq));
+
+ SETUP_DBG(udc, "SETUP packet: %02x/%02x/%04x/%04x/%04x\n",
+ crq.bRequestType, crq.bRequest, le16_to_cpu(crq.wValue),
+ le16_to_cpu(crq.wIndex), le16_to_cpu(crq.wLength));
+
+	/*
+	 * Clean up any ep0 request(s) still in the queue, since a new
+	 * control SETUP packet has arrived.
+	 */
+ list_for_each_entry(req, &udc->ep[0].queue, queue) {
+ req_num++;
+ EP_DBG(ep, "there is req %p in ep0 queue !\n", req);
+ }
+
+ if (req_num)
+ ast_udc_nuke(&udc->ep[0], -ETIMEDOUT);
+
+ udc->ep[0].dir_in = crq.bRequestType & USB_DIR_IN;
+
+ if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (crq.bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ if (ast_udc_read(udc, AST_UDC_STS) & UDC_STS_HIGHSPEED)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ SETUP_DBG(udc, "set addr: 0x%x\n", crq.wValue);
+ reg = ast_udc_read(udc, AST_UDC_CONFIG);
+ reg &= ~UDC_CFG_ADDR_MASK;
+ reg |= UDC_CFG_SET_ADDR(crq.wValue);
+ ast_udc_write(udc, reg, AST_UDC_CONFIG);
+ goto req_complete;
+
+ case USB_REQ_CLEAR_FEATURE:
+ SETUP_DBG(udc, "ep0: CLEAR FEATURE\n");
+ goto req_driver;
+
+ case USB_REQ_SET_FEATURE:
+ SETUP_DBG(udc, "ep0: SET FEATURE\n");
+ goto req_driver;
+
+ case USB_REQ_GET_STATUS:
+ ast_udc_getstatus(udc);
+ return;
+
+ default:
+ goto req_driver;
+ }
+
+ }
+
+req_driver:
+ if (udc->driver) {
+ SETUP_DBG(udc, "Forwarding %s to gadget...\n",
+ udc->gadget.name);
+
+ spin_unlock(&udc->lock);
+ rc = udc->driver->setup(&udc->gadget, &crq);
+ spin_lock(&udc->lock);
+
+ } else {
+ SETUP_DBG(udc, "No gadget for request !\n");
+ }
+
+ if (rc >= 0)
+ return;
+
+ /* Stall if gadget failed */
+ SETUP_DBG(udc, "Stalling, rc:0x%x\n", rc);
+ ast_udc_write(udc, ast_udc_read(udc, AST_UDC_EP0_CTRL) | EP0_STALL,
+ AST_UDC_EP0_CTRL);
+ return;
+
+req_complete:
+ SETUP_DBG(udc, "ep0: Sending IN status without data\n");
+ ast_udc_write(udc, EP0_TX_BUFF_RDY, AST_UDC_EP0_CTRL);
+}
+
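+/*
+ * Top-level interrupt handler: read and ack the global status, then
+ * dispatch in order to bus events (reset/suspend/resume), EP0 events
+ * (IN/OUT ACK and SETUP), and finally the per-endpoint ACK pool,
+ * where each set bit i maps to endpoint i + 1.
+ */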
+static irqreturn_t ast_udc_isr(int irq, void *data)
+{
+ struct ast_udc_dev *udc = (struct ast_udc_dev *)data;
+ struct ast_udc_ep *ep;
+ u32 isr, ep_isr;
+ int i;
+
+ spin_lock(&udc->lock);
+
+ isr = ast_udc_read(udc, AST_UDC_ISR);
+ if (!isr)
+ goto done;
+
+ /* Ack interrupts */
+ ast_udc_write(udc, isr, AST_UDC_ISR);
+
+ if (isr & UDC_IRQ_BUS_RESET) {
+ ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n");
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+
+ ep = &udc->ep[1];
+ EP_DBG(ep, "dctrl:0x%x\n",
+ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL));
+
+ if (udc->driver && udc->driver->reset) {
+ spin_unlock(&udc->lock);
+ udc->driver->reset(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (isr & UDC_IRQ_BUS_SUSPEND) {
+ ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n");
+ udc->suspended_from = udc->gadget.state;
+ usb_gadget_set_state(&udc->gadget, USB_STATE_SUSPENDED);
+
+ if (udc->driver && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (isr & UDC_IRQ_BUS_RESUME) {
+ ISR_DBG(udc, "UDC_IRQ_BUS_RESUME\n");
+ usb_gadget_set_state(&udc->gadget, udc->suspended_from);
+
+ if (udc->driver && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (isr & UDC_IRQ_EP0_IN_ACK_STALL) {
+ ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n");
+ ast_udc_ep0_in(udc);
+ }
+
+ if (isr & UDC_IRQ_EP0_OUT_ACK_STALL) {
+ ISR_DBG(udc, "UDC_IRQ_EP0_OUT_ACK_STALL\n");
+ ast_udc_ep0_out(udc);
+ }
+
+ if (isr & UDC_IRQ_EP0_SETUP) {
+ ISR_DBG(udc, "UDC_IRQ_EP0_SETUP\n");
+ ast_udc_ep0_handle_setup(udc);
+ }
+
+ if (isr & UDC_IRQ_EP_POOL_ACK_STALL) {
+ ISR_DBG(udc, "UDC_IRQ_EP_POOL_ACK_STALL\n");
+ ep_isr = ast_udc_read(udc, AST_UDC_EP_ACK_ISR);
+
+ /* Ack EP interrupts */
+ ast_udc_write(udc, ep_isr, AST_UDC_EP_ACK_ISR);
+
+ /* Handle each EP */
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS - 1; i++) {
+ if (ep_isr & (0x1 << i)) {
+ ep = &udc->ep[i + 1];
+ if (ep->desc_mode)
+ ast_udc_epn_handle_desc(udc, i + 1);
+ else
+ ast_udc_epn_handle(udc, i + 1);
+ }
+ }
+ }
+
+done:
+ spin_unlock(&udc->lock);
+ return IRQ_HANDLED;
+}
+
+static int ast_udc_gadget_getframe(struct usb_gadget *gadget)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+
+ return (ast_udc_read(udc, AST_UDC_STS) >> 16) & 0x7ff;
+}
+
+static void ast_udc_wake_work(struct work_struct *work)
+{
+ struct ast_udc_dev *udc = container_of(work, struct ast_udc_dev,
+ wake_work);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "Wakeup Host !\n");
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL);
+ ast_udc_write(udc, ctrl | USB_REMOTE_WAKEUP_EN, AST_UDC_FUNC_CTRL);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void ast_udc_wakeup_all(struct ast_udc_dev *udc)
+{
+	/*
+	 * A device is trying to wake the world. Because this can
+	 * recurse into the device, break the call chain using a
+	 * work queue.
+	 */
+ schedule_work(&udc->wake_work);
+}
+
+static int ast_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!udc->wakeup_en) {
+ UDC_DBG(udc, "Remote Wakeup is disabled\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ UDC_DBG(udc, "Device initiated wakeup\n");
+ ast_udc_wakeup_all(udc);
+
+err:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return rc;
+}
+
+/*
+ * Activate/Deactivate link with host
+ */
+static int ast_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "is_on: %d\n", is_on);
+ if (is_on)
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) | USB_UPSTREAM_EN;
+ else
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
+
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int ast_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ struct ast_udc_ep *ep;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "\n");
+ udc->driver = driver;
+ udc->gadget.dev.of_node = udc->pdev->dev.of_node;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->stopped = 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int ast_udc_stop(struct usb_gadget *gadget)
+{
+ struct ast_udc_dev *udc = to_ast_dev(gadget);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ UDC_DBG(udc, "\n");
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->driver = NULL;
+
+ ast_udc_stop_activity(udc);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_NOTATTACHED);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops ast_udc_ops = {
+ .get_frame = ast_udc_gadget_getframe,
+ .wakeup = ast_udc_wakeup,
+ .pullup = ast_udc_pullup,
+ .udc_start = ast_udc_start,
+ .udc_stop = ast_udc_stop,
+};
+
+/*
+ * Supports one Control Endpoint.
+ * Supports multiple programmable endpoints that can be configured as
+ * Bulk IN/OUT, Interrupt IN/OUT, or Isochronous IN/OUT endpoints.
+ */
+static void ast_udc_init_ep(struct ast_udc_dev *udc)
+{
+ struct ast_udc_ep *ep;
+ int i;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->ep.name = ast_ep_name[i];
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
+ ep->ep.ops = &ast_udc_ep_ops;
+ ep->udc = udc;
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ if (i == 0) {
+ usb_ep_set_maxpacket_limit(&ep->ep,
+ AST_UDC_EP0_MAX_PACKET);
+ continue;
+ }
+
+ ep->ep_reg = udc->reg + AST_UDC_EP_BASE +
+ (AST_UDC_EP_OFFSET * (i - 1));
+
+ ep->epn_buf = udc->ep0_buf + (i * AST_UDC_EP_DMA_SIZE);
+ ep->epn_buf_dma = udc->ep0_buf_dma + (i * AST_UDC_EP_DMA_SIZE);
+ usb_ep_set_maxpacket_limit(&ep->ep, AST_UDC_EPn_MAX_PACKET);
+
+ ep->descs = ep->epn_buf + AST_UDC_EPn_MAX_PACKET;
+ ep->descs_dma = ep->epn_buf_dma + AST_UDC_EPn_MAX_PACKET;
+ ep->descs_wptr = 0;
+
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
+}
+
+static void ast_udc_init_dev(struct ast_udc_dev *udc)
+{
+ INIT_WORK(&udc->wake_work, ast_udc_wake_work);
+}
+
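+/*
+ * Bring the controller to a known state: enable the PHY clock and
+ * release its reset, release the device reset, pick the descriptor
+ * ring size, mask and ack all interrupts, then enable the bus, EP0
+ * and endpoint-pool interrupts.
+ */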
+static void ast_udc_init_hw(struct ast_udc_dev *udc)
+{
+ u32 ctrl;
+
+ /* Enable PHY */
+ ctrl = USB_PHY_CLK_EN | USB_PHY_RESET_DIS;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ udelay(1);
+ ast_udc_write(udc, 0, AST_UDC_DEV_RESET);
+
+ /* Set descriptor ring size */
+ if (AST_UDC_DESCS_COUNT == 256) {
+ ctrl |= USB_EP_LONG_DESC;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+ }
+
+ /* Mask & ack all interrupts before installing the handler */
+ ast_udc_write(udc, 0, AST_UDC_IER);
+ ast_udc_write(udc, UDC_IRQ_ACK_ALL, AST_UDC_ISR);
+
+ /* Enable some interrupts */
+ ctrl = UDC_IRQ_EP_POOL_ACK_STALL | UDC_IRQ_BUS_RESUME |
+ UDC_IRQ_BUS_SUSPEND | UDC_IRQ_BUS_RESET |
+ UDC_IRQ_EP0_IN_ACK_STALL | UDC_IRQ_EP0_OUT_ACK_STALL |
+ UDC_IRQ_EP0_SETUP;
+ ast_udc_write(udc, ctrl, AST_UDC_IER);
+
+ /* Cleanup and enable ep ACK interrupts */
+ ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_IER);
+ ast_udc_write(udc, UDC_IRQ_EP_ACK_ALL, AST_UDC_EP_ACK_ISR);
+
+ ast_udc_write(udc, 0, AST_UDC_EP0_CTRL);
+}
+
+static int ast_udc_remove(struct platform_device *pdev)
+{
+ struct ast_udc_dev *udc = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u32 ctrl;
+
+ usb_del_gadget_udc(&udc->gadget);
+ if (udc->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Disable upstream port connection */
+ ctrl = ast_udc_read(udc, AST_UDC_FUNC_CTRL) & ~USB_UPSTREAM_EN;
+ ast_udc_write(udc, ctrl, AST_UDC_FUNC_CTRL);
+
+ clk_disable_unprepare(udc->clk);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->ep0_buf)
+ dma_free_coherent(&pdev->dev,
+ AST_UDC_EP_DMA_SIZE * AST_UDC_NUM_ENDPOINTS,
+ udc->ep0_buf,
+ udc->ep0_buf_dma);
+
+ udc->ep0_buf = NULL;
+
+ return 0;
+}
+
+static int ast_udc_probe(struct platform_device *pdev)
+{
+ enum usb_device_speed max_speed;
+ struct device *dev = &pdev->dev;
+ struct ast_udc_dev *udc;
+ struct resource *res;
+ int rc;
+
+ udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ udc->gadget.dev.parent = dev;
+ udc->pdev = pdev;
+ spin_lock_init(&udc->lock);
+
+ udc->gadget.ops = &ast_udc_ops;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+ udc->gadget.name = "aspeed-udc";
+ udc->gadget.dev.init_name = "gadget";
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ udc->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(udc->reg)) {
+ dev_err(&pdev->dev, "Failed to map resources\n");
+ return PTR_ERR(udc->reg);
+ }
+
+ platform_set_drvdata(pdev, udc);
+
+ udc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc->clk)) {
+ rc = PTR_ERR(udc->clk);
+ goto err;
+ }
+ rc = clk_prepare_enable(udc->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock (0x%x)\n", rc);
+ goto err;
+ }
+
+ /* Check if we need to limit the HW to USB1 */
+ max_speed = usb_get_maximum_speed(&pdev->dev);
+ if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
+ udc->force_usb1 = true;
+
+ /*
+ * Allocate DMA buffers for all EPs in one chunk
+ */
+	udc->ep0_buf = dma_alloc_coherent(&pdev->dev,
+					  AST_UDC_EP_DMA_SIZE *
+					  AST_UDC_NUM_ENDPOINTS,
+					  &udc->ep0_buf_dma, GFP_KERNEL);
+	if (!udc->ep0_buf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->creq = udc->reg + AST_UDC_SETUP0;
+
+	/*
+	 * Supports single-stage mode or 32/256-stage descriptor mode.
+	 * Descriptor mode is the default.
+	 */
+ udc->desc_mode = AST_UDC_DESC_MODE;
+
+ dev_info(&pdev->dev, "DMA %s\n", udc->desc_mode ?
+ "descriptor mode" : "single mode");
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+
+	/* Initialize udc endpoints */
+	ast_udc_init_ep(udc);
+
+	/* Initialize udc device */
+	ast_udc_init_dev(udc);
+
+	/* Initialize udc hardware */
+	ast_udc_init_hw(udc);
+
+ /* Find interrupt and install handler */
+ udc->irq = platform_get_irq(pdev, 0);
+ if (udc->irq < 0) {
+ rc = udc->irq;
+ goto err;
+ }
+
+ rc = devm_request_irq(&pdev->dev, udc->irq, ast_udc_isr, 0,
+ KBUILD_MODNAME, udc);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ goto err;
+ }
+
+ rc = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to add gadget udc\n");
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "Initialized udc in USB%s mode\n",
+ udc->force_usb1 ? "1" : "2");
+
+ return 0;
+
+err:
+ dev_err(&pdev->dev, "Failed to udc probe, rc:0x%x\n", rc);
+ ast_udc_remove(pdev);
+
+ return rc;
+}
+
+static const struct of_device_id ast_udc_of_dt_ids[] = {
+ { .compatible = "aspeed,ast2600-udc", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, ast_udc_of_dt_ids);
+
+static struct platform_driver ast_udc_driver = {
+ .probe = ast_udc_probe,
+ .remove = ast_udc_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ast_udc_of_dt_ids,
+ },
+};
+
+module_platform_driver(ast_udc_driver);
+
+MODULE_DESCRIPTION("ASPEED UDC driver");
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
new file mode 100644
index 000000000..a9a7b3fc6
--- /dev/null
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -0,0 +1,2021 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * at91_udc -- driver for at91-series USB peripheral controller
+ *
+ * Copyright (C) 2004 by Thomas Rathbone
+ * Copyright (C) 2005 by HP Labs
+ * Copyright (C) 2005 by David Brownell
+ */
+
+#undef VERBOSE_DEBUG
+#undef PACKET_TRACE
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/clk.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-matrix.h>
+
+#include "at91_udc.h"
+
+
+/*
+ * This controller is simple and PIO-only. It's used in many AT91-series
+ * full speed USB controllers, including the at91rm9200 (arm920T, with MMU),
+ * at91sam926x (arm926ejs, with MMU), and several no-mmu versions.
+ *
+ * This driver expects that the board has been wired with two GPIOs
+ * supporting a VBUS sensing IRQ and a D+ pullup. (They may be
+ * omitted, but such configurations haven't been tested.)
+ *
+ * The pullup is most important (so it's integrated on sam926x parts). It
+ * provides software control over whether the host enumerates the device.
+ *
+ * The VBUS sensing helps during enumeration, and allows both USB clocks
+ * (and the transceiver) to stay gated off until they're necessary, saving
+ * power. During USB suspend, the 48 MHz clock is gated off in hardware;
+ * it may also be gated off by software during some Linux sleep states.
+ */
+
+#define DRIVER_VERSION "3 May 2006"
+
+static const char driver_name [] = "at91_udc";
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO("ep0",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep2",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep3-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep4",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep5",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+
+#undef EP_INFO
+};
+
+#define ep0name ep_info[0].name
+
+#define VBUS_POLL_TIMEOUT msecs_to_jiffies(1000)
+
+#define at91_udp_read(udc, reg) \
+ __raw_readl((udc)->udp_baseaddr + (reg))
+#define at91_udp_write(udc, reg, val) \
+ __raw_writel((val), (udc)->udp_baseaddr + (reg))
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char debug_filename[] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void proc_ep_show(struct seq_file *s, struct at91_ep *ep)
+{
+ static char *types[] = {
+ "control", "out-iso", "out-bulk", "out-int",
+ "BOGUS", "in-iso", "in-bulk", "in-int"};
+
+ u32 csr;
+ struct at91_request *req;
+ unsigned long flags;
+ struct at91_udc *udc = ep->udc;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ csr = __raw_readl(ep->creg);
+
+ /* NOTE: not collecting per-endpoint irq statistics... */
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%s, maxpacket %d %s%s %s%s\n",
+ ep->ep.name, ep->ep.maxpacket,
+ ep->is_in ? "in" : "out",
+ ep->is_iso ? " iso" : "",
+ ep->is_pingpong
+ ? (ep->fifo_bank ? "pong" : "ping")
+ : "",
+ ep->stopped ? " stopped" : "");
+ seq_printf(s, "csr %08x rxbytes=%d %s %s %s" EIGHTBITS "\n",
+ csr,
+ (csr & 0x07ff0000) >> 16,
+ (csr & (1 << 15)) ? "enabled" : "disabled",
+ (csr & (1 << 11)) ? "DATA1" : "DATA0",
+ types[(csr & 0x700) >> 8],
+
+ /* iff type is control then print current direction */
+ (!(csr & 0x700))
+ ? ((csr & (1 << 7)) ? " IN" : " OUT")
+ : "",
+ (csr & (1 << 6)) ? " rxdatabk1" : "",
+ (csr & (1 << 5)) ? " forcestall" : "",
+ (csr & (1 << 4)) ? " txpktrdy" : "",
+
+ (csr & (1 << 3)) ? " stallsent" : "",
+ (csr & (1 << 2)) ? " rxsetup" : "",
+ (csr & (1 << 1)) ? " rxdatabk0" : "",
+ (csr & (1 << 0)) ? " txcomp" : "");
+ if (list_empty (&ep->queue))
+ seq_printf(s, "\t(queue empty)\n");
+
+ else list_for_each_entry (req, &ep->queue, queue) {
+ unsigned length = req->req.actual;
+
+ seq_printf(s, "\treq %p len %d/%d buf %p\n",
+ &req->req, length,
+ req->req.length, req->req.buf);
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static void proc_irq_show(struct seq_file *s, const char *label, u32 mask)
+{
+ int i;
+
+ seq_printf(s, "%s %04x:%s%s" FOURBITS, label, mask,
+ (mask & (1 << 13)) ? " wakeup" : "",
+ (mask & (1 << 12)) ? " endbusres" : "",
+
+ (mask & (1 << 11)) ? " sofint" : "",
+ (mask & (1 << 10)) ? " extrsm" : "",
+ (mask & (1 << 9)) ? " rxrsm" : "",
+ (mask & (1 << 8)) ? " rxsusp" : "");
+ for (i = 0; i < 8; i++) {
+ if (mask & (1 << i))
+ seq_printf(s, " ep%d", i);
+ }
+ seq_printf(s, "\n");
+}
+
+static int proc_udc_show(struct seq_file *s, void *unused)
+{
+ struct at91_udc *udc = s->private;
+ struct at91_ep *ep;
+ u32 tmp;
+
+ seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
+
+ seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
+ udc->vbus ? "present" : "off",
+ udc->enabled
+ ? (udc->vbus ? "active" : "enabled")
+ : "disabled",
+ udc->gadget.is_selfpowered ? "self" : "VBUS",
+ udc->suspended ? ", suspended" : "",
+ udc->driver ? udc->driver->driver.name : "(none)");
+
+ /* don't access registers when interface isn't clocked */
+ if (!udc->clocked) {
+ seq_printf(s, "(not clocked)\n");
+ return 0;
+ }
+
+ tmp = at91_udp_read(udc, AT91_UDP_FRM_NUM);
+ seq_printf(s, "frame %05x:%s%s frame=%d\n", tmp,
+ (tmp & AT91_UDP_FRM_OK) ? " ok" : "",
+ (tmp & AT91_UDP_FRM_ERR) ? " err" : "",
+ (tmp & AT91_UDP_NUM));
+
+ tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+ seq_printf(s, "glbstate %02x:%s" FOURBITS "\n", tmp,
+ (tmp & AT91_UDP_RMWUPE) ? " rmwupe" : "",
+ (tmp & AT91_UDP_RSMINPR) ? " rsminpr" : "",
+ (tmp & AT91_UDP_ESR) ? " esr" : "",
+ (tmp & AT91_UDP_CONFG) ? " confg" : "",
+ (tmp & AT91_UDP_FADDEN) ? " fadden" : "");
+
+ tmp = at91_udp_read(udc, AT91_UDP_FADDR);
+ seq_printf(s, "faddr %03x:%s fadd=%d\n", tmp,
+ (tmp & AT91_UDP_FEN) ? " fen" : "",
+ (tmp & AT91_UDP_FADD));
+
+ proc_irq_show(s, "imr ", at91_udp_read(udc, AT91_UDP_IMR));
+ proc_irq_show(s, "isr ", at91_udp_read(udc, AT91_UDP_ISR));
+
+ if (udc->enabled && udc->vbus) {
+ proc_ep_show(s, &udc->ep[0]);
+ list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
+ if (ep->ep.desc)
+ proc_ep_show(s, ep);
+ }
+ }
+ return 0;
+}
+
+static void create_debug_file(struct at91_udc *udc)
+{
+ udc->pde = proc_create_single_data(debug_filename, 0, NULL,
+ proc_udc_show, udc);
+}
+
+static void remove_debug_file(struct at91_udc *udc)
+{
+ if (udc->pde)
+ remove_proc_entry(debug_filename, NULL);
+}
+
+#else
+
+static inline void create_debug_file(struct at91_udc *udc) {}
+static inline void remove_debug_file(struct at91_udc *udc) {}
+
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+static void done(struct at91_ep *ep, struct at91_request *req, int status)
+{
+ unsigned stopped = ep->stopped;
+ struct at91_udc *udc = ep->udc;
+
+ list_del_init(&req->queue);
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+ if (status && status != -ESHUTDOWN)
+ VDBG("%s done %p, status %d\n", ep->ep.name, req, status);
+
+ ep->stopped = 1;
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+ ep->stopped = stopped;
+
+ /* ep0 is always ready; other endpoints need a non-empty queue */
+ if (list_empty(&ep->queue) && ep->int_mask != (1 << 0))
+ at91_udp_write(udc, AT91_UDP_IDR, ep->int_mask);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* bits indicating OUT fifo has data ready */
+#define RX_DATA_READY (AT91_UDP_RX_DATA_BK0 | AT91_UDP_RX_DATA_BK1)
+
+/*
+ * Endpoint FIFO CSR bits have a mix of bits, making it unsafe to just write
+ * back most of the value you just read (because of side effects, including
+ * bits that may change after reading and before writing).
+ *
+ * Except when changing a specific bit, always write values which:
+ * - clear SET_FX bits (setting them could change something)
+ * - set CLR_FX bits (clearing them could change something)
+ *
+ * There are also state bits like FORCESTALL, EPEDS, DIR, and EPTYPE
+ * that shouldn't normally be changed.
+ *
+ * NOTE at91sam9260 docs mention synch between UDPCK and MCK clock domains,
+ * implying a need to wait for one write to complete (test relevant bits)
+ * before starting the next write. This shouldn't be an issue given how
+ * infrequently we write, except maybe for write-then-read idioms.
+ */
+#define SET_FX (AT91_UDP_TXPKTRDY)
+#define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP \
+ | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)
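+
+/*
+ * The resulting idiom for acking a status bit (here TXCOMP) is:
+ *
+ *	csr |= CLR_FX;
+ *	csr &= ~(SET_FX | AT91_UDP_TXCOMP);
+ *	__raw_writel(csr, creg);
+ */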
+
+/* pull OUT packet data from the endpoint's fifo */
+static int read_fifo (struct at91_ep *ep, struct at91_request *req)
+{
+ u32 __iomem *creg = ep->creg;
+ u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
+ u32 csr;
+ u8 *buf;
+ unsigned int count, bufferspace, is_done;
+
+ buf = req->req.buf + req->req.actual;
+ bufferspace = req->req.length - req->req.actual;
+
+ /*
+ * there might be nothing to read if ep_queue() calls us,
+ * or if we already emptied both pingpong buffers
+ */
+rescan:
+ csr = __raw_readl(creg);
+ if ((csr & RX_DATA_READY) == 0)
+ return 0;
+
+ count = (csr & AT91_UDP_RXBYTECNT) >> 16;
+ if (count > ep->ep.maxpacket)
+ count = ep->ep.maxpacket;
+ if (count > bufferspace) {
+ DBG("%s buffer overflow\n", ep->ep.name);
+ req->req.status = -EOVERFLOW;
+ count = bufferspace;
+ }
+ __raw_readsb(dreg, buf, count);
+
+ /* release and swap pingpong mem bank */
+ csr |= CLR_FX;
+ if (ep->is_pingpong) {
+ if (ep->fifo_bank == 0) {
+ csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
+ ep->fifo_bank = 1;
+ } else {
+ csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK1);
+ ep->fifo_bank = 0;
+ }
+ } else
+ csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
+ __raw_writel(csr, creg);
+
+ req->req.actual += count;
+ is_done = (count < ep->ep.maxpacket);
+ if (count == bufferspace)
+ is_done = 1;
+
+ PACKET("%s %p out/%d%s\n", ep->ep.name, &req->req, count,
+ is_done ? " (done)" : "");
+
+ /*
+ * avoid extra trips through IRQ logic for packets already in
+ * the fifo ... maybe preventing an extra (expensive) OUT-NAK
+ */
+ if (is_done)
+ done(ep, req, 0);
+ else if (ep->is_pingpong) {
+ /*
+ * One dummy read to delay the code because of a HW glitch:
+ * CSR returns bad RXCOUNT when read too soon after updating
+ * RX_DATA_BK flags.
+ */
+ csr = __raw_readl(creg);
+
+ bufferspace -= count;
+ buf += count;
+ goto rescan;
+ }
+
+ return is_done;
+}
+
+/* load fifo for an IN packet */
+static int write_fifo(struct at91_ep *ep, struct at91_request *req)
+{
+ u32 __iomem *creg = ep->creg;
+ u32 csr = __raw_readl(creg);
+ u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
+ unsigned total, count, is_last;
+ u8 *buf;
+
+ /*
+ * TODO: allow for writing two packets to the fifo ... that'll
+ * reduce the amount of IN-NAKing, but probably won't affect
+ * throughput much. (Unlike preventing OUT-NAKing!)
+ */
+
+ /*
+ * If ep_queue() calls us, the queue is empty and possibly in
+ * odd states like TXCOMP not yet cleared (we do it, saving at
+ * least one IRQ) or the fifo not yet being free. Those aren't
+ * issues normally (IRQ handler fast path).
+ */
+ if (unlikely(csr & (AT91_UDP_TXCOMP | AT91_UDP_TXPKTRDY))) {
+ if (csr & AT91_UDP_TXCOMP) {
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_TXCOMP);
+ __raw_writel(csr, creg);
+ csr = __raw_readl(creg);
+ }
+ if (csr & AT91_UDP_TXPKTRDY)
+ return 0;
+ }
+
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+ total = req->req.length - req->req.actual;
+ if (ep->ep.maxpacket < total) {
+ count = ep->ep.maxpacket;
+ is_last = 0;
+ } else {
+ count = total;
+ is_last = (count < ep->ep.maxpacket) || !req->req.zero;
+ }
+
+ /*
+ * Write the packet, maybe it's a ZLP.
+ *
+ * NOTE: incrementing req->actual before we receive the ACK means
+ * gadget driver IN bytecounts can be wrong in fault cases. That's
+ * fixable with PIO drivers like this one (save "count" here, and
+ * do the increment later on TX irq), but not for most DMA hardware.
+ *
+ * So all gadget drivers must accept that potential error. Some
+ * hardware supports precise fifo status reporting, letting them
+ * recover when the actual bytecount matters (e.g. for USB Test
+ * and Measurement Class devices).
+ */
+ __raw_writesb(dreg, buf, count);
+ csr &= ~SET_FX;
+ csr |= CLR_FX | AT91_UDP_TXPKTRDY;
+ __raw_writel(csr, creg);
+ req->req.actual += count;
+
+ PACKET("%s %p in/%d%s\n", ep->ep.name, &req->req, count,
+ is_last ? " (done)" : "");
+ if (is_last)
+ done(ep, req, 0);
+ return is_last;
+}
+
+static void nuke(struct at91_ep *ep, int status)
+{
+ struct at91_request *req;
+
+ /* terminate any request in the queue */
+ ep->stopped = 1;
+ if (list_empty(&ep->queue))
+ return;
+
+ VDBG("%s %s\n", __func__, ep->ep.name);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct at91_request, queue);
+ done(ep, req, status);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int at91_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct at91_ep *ep = container_of(_ep, struct at91_ep, ep);
+ struct at91_udc *udc;
+ u16 maxpacket;
+ u32 tmp;
+ unsigned long flags;
+
+ if (!_ep || !ep
+ || !desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT
+ || (maxpacket = usb_endpoint_maxp(desc)) == 0
+ || maxpacket > ep->maxpacket) {
+ DBG("bad ep or descriptor\n");
+ return -EINVAL;
+ }
+
+ udc = ep->udc;
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ DBG("bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ tmp = usb_endpoint_type(desc);
+ switch (tmp) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ DBG("only one control endpoint\n");
+ return -EINVAL;
+ case USB_ENDPOINT_XFER_INT:
+ if (maxpacket > 64)
+ goto bogus_max;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ switch (maxpacket) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ goto ok;
+ }
+bogus_max:
+ DBG("bogus maxpacket %d\n", maxpacket);
+ return -EINVAL;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!ep->is_pingpong) {
+ DBG("iso requires double buffering\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ok:
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* initialize endpoint to match this descriptor */
+ ep->is_in = usb_endpoint_dir_in(desc);
+ ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
+ ep->stopped = 0;
+ if (ep->is_in)
+ tmp |= 0x04;
+ tmp <<= 8;
+ tmp |= AT91_UDP_EPEDS;
+ __raw_writel(tmp, ep->creg);
+
+ ep->ep.maxpacket = maxpacket;
+
+ /*
+ * reset/init endpoint fifo. NOTE: leaves fifo_bank alone,
+ * since endpoint resets don't reset hw pingpong state.
+ */
+ at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+ at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int at91_ep_disable(struct usb_ep *_ep)
+{
+ struct at91_ep *ep = container_of(_ep, struct at91_ep, ep);
+ struct at91_udc *udc = ep->udc;
+ unsigned long flags;
+
+ if (ep == &ep->udc->ep[0])
+ return -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ nuke(ep, -ESHUTDOWN);
+
+ /* restore the endpoint's pristine config */
+ ep->ep.desc = NULL;
+ ep->ep.maxpacket = ep->maxpacket;
+
+ /* reset fifos and endpoint */
+ if (ep->udc->clocked) {
+ at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+ at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+ __raw_writel(0, ep->creg);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/*
+ * this is a PIO-only driver, so there's nothing
+ * interesting for request or buffer allocation.
+ */
+
+static struct usb_request *
+at91_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct at91_request *req;
+
+ req = kzalloc(sizeof (struct at91_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+static void at91_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct at91_request *req;
+
+ req = container_of(_req, struct at91_request, req);
+ BUG_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+static int at91_ep_queue(struct usb_ep *_ep,
+ struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct at91_request *req;
+ struct at91_ep *ep;
+ struct at91_udc *udc;
+ int status;
+ unsigned long flags;
+
+ req = container_of(_req, struct at91_request, req);
+ ep = container_of(_ep, struct at91_ep, ep);
+
+ if (!_req || !_req->complete
+ || !_req->buf || !list_empty(&req->queue)) {
+ DBG("invalid request\n");
+ return -EINVAL;
+ }
+
+ if (!_ep || (!ep->ep.desc && ep->ep.name != ep0name)) {
+ DBG("invalid ep\n");
+ return -EINVAL;
+ }
+
+ udc = ep->udc;
+
+ if (!udc || !udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ DBG("invalid device\n");
+ return -EINVAL;
+ }
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* try to kickstart any empty and idle queue */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ int is_ep0;
+
+ /*
+ * If this control request has a non-empty DATA stage, this
+ * will start that stage. It works just like a non-control
+ * request (until the status stage starts, maybe early).
+ *
+ * If the data stage is empty, then this starts a successful
+ * IN/STATUS stage. (Unsuccessful ones use set_halt.)
+ */
+ is_ep0 = (ep->ep.name == ep0name);
+ if (is_ep0) {
+ u32 tmp;
+
+ if (!udc->req_pending) {
+ status = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * defer changing CONFG until after the gadget driver
+ * reconfigures the endpoints.
+ */
+ if (udc->wait_for_config_ack) {
+ tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+ tmp ^= AT91_UDP_CONFG;
+ VDBG("toggle config\n");
+ at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+ }
+ if (req->req.length == 0) {
+ep0_in_status:
+ PACKET("ep0 in/status\n");
+ status = 0;
+ tmp = __raw_readl(ep->creg);
+ tmp &= ~SET_FX;
+ tmp |= CLR_FX | AT91_UDP_TXPKTRDY;
+ __raw_writel(tmp, ep->creg);
+ udc->req_pending = 0;
+ goto done;
+ }
+ }
+
+ if (ep->is_in)
+ status = write_fifo(ep, req);
+ else {
+ status = read_fifo(ep, req);
+
+ /* IN/STATUS stage is otherwise triggered by irq */
+ if (status && is_ep0)
+ goto ep0_in_status;
+ }
+ } else
+ status = 0;
+
+ if (req && !status) {
+ list_add_tail (&req->queue, &ep->queue);
+ at91_udp_write(udc, AT91_UDP_IER, ep->int_mask);
+ }
+done:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return (status < 0) ? status : 0;
+}
+
+static int at91_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct at91_ep *ep;
+ struct at91_request *req = NULL, *iter;
+ unsigned long flags;
+ struct at91_udc *udc;
+
+ ep = container_of(_ep, struct at91_ep, ep);
+ if (!_ep || ep->ep.name == ep0name)
+ return -EINVAL;
+
+ udc = ep->udc;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
+ done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int at91_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct at91_ep *ep = container_of(_ep, struct at91_ep, ep);
+ struct at91_udc *udc = ep->udc;
+ u32 __iomem *creg;
+ u32 csr;
+ unsigned long flags;
+ int status = 0;
+
+ if (!_ep || ep->is_iso || !ep->udc->clocked)
+ return -EINVAL;
+
+ creg = ep->creg;
+ spin_lock_irqsave(&udc->lock, flags);
+
+ csr = __raw_readl(creg);
+
+ /*
+ * fail with still-busy IN endpoints, ensuring correct sequencing
+ * of data tx then stall. note that the fifo rx bytecount isn't
+ * completely accurate as a tx bytecount.
+ */
+ if (ep->is_in && (!list_empty(&ep->queue) || (csr >> 16) != 0))
+ status = -EAGAIN;
+ else {
+ csr |= CLR_FX;
+ csr &= ~SET_FX;
+ if (value) {
+ csr |= AT91_UDP_FORCESTALL;
+ VDBG("halt %s\n", ep->ep.name);
+ } else {
+ at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+ at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+ csr &= ~AT91_UDP_FORCESTALL;
+ }
+ __raw_writel(csr, creg);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return status;
+}
+
+static const struct usb_ep_ops at91_ep_ops = {
+ .enable = at91_ep_enable,
+ .disable = at91_ep_disable,
+ .alloc_request = at91_ep_alloc_request,
+ .free_request = at91_ep_free_request,
+ .queue = at91_ep_queue,
+ .dequeue = at91_ep_dequeue,
+ .set_halt = at91_ep_set_halt,
+ /* there's only imprecise fifo status reporting */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int at91_get_frame(struct usb_gadget *gadget)
+{
+ struct at91_udc *udc = to_udc(gadget);
+
+ if (!to_udc(gadget)->clocked)
+ return -EINVAL;
+ return at91_udp_read(udc, AT91_UDP_FRM_NUM) & AT91_UDP_NUM;
+}
+
+static int at91_wakeup(struct usb_gadget *gadget)
+{
+ struct at91_udc *udc = to_udc(gadget);
+ u32 glbstate;
+ unsigned long flags;
+
+ DBG("%s\n", __func__ );
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!udc->clocked || !udc->suspended)
+ goto done;
+
+ /* NOTE: some "early versions" handle ESR differently ... */
+
+ glbstate = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+ if (!(glbstate & AT91_UDP_ESR))
+ goto done;
+ glbstate |= AT91_UDP_ESR;
+ at91_udp_write(udc, AT91_UDP_GLB_STAT, glbstate);
+
+done:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/* reinit == restore initial software state */
+static void udc_reinit(struct at91_udc *udc)
+{
+ u32 i;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+ udc->gadget.quirk_stall_not_supp = 1;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct at91_ep *ep = &udc->ep[i];
+
+ if (i != 0)
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ ep->ep.desc = NULL;
+ ep->stopped = 0;
+ ep->fifo_bank = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
+ ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i);
+ /* initialize one queue per endpoint */
+ INIT_LIST_HEAD(&ep->queue);
+ }
+}
+
+static void reset_gadget(struct at91_udc *udc)
+{
+ struct usb_gadget_driver *driver = udc->driver;
+ int i;
+
+ if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->suspended = 0;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct at91_ep *ep = &udc->ep[i];
+
+ ep->stopped = 1;
+ nuke(ep, -ESHUTDOWN);
+ }
+ if (driver) {
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, driver);
+ spin_lock(&udc->lock);
+ }
+
+ udc_reinit(udc);
+}
+
+static void stop_activity(struct at91_udc *udc)
+{
+ struct usb_gadget_driver *driver = udc->driver;
+ int i;
+
+ if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->suspended = 0;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct at91_ep *ep = &udc->ep[i];
+ ep->stopped = 1;
+ nuke(ep, -ESHUTDOWN);
+ }
+ if (driver) {
+ spin_unlock(&udc->lock);
+ driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+
+ udc_reinit(udc);
+}
+
+static void clk_on(struct at91_udc *udc)
+{
+ if (udc->clocked)
+ return;
+ udc->clocked = 1;
+
+ clk_enable(udc->iclk);
+ clk_enable(udc->fclk);
+}
+
+static void clk_off(struct at91_udc *udc)
+{
+ if (!udc->clocked)
+ return;
+ udc->clocked = 0;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ clk_disable(udc->fclk);
+ clk_disable(udc->iclk);
+}
+
+/*
+ * activate/deactivate link with host; minimize power usage for
+ * inactive links by cutting clocks and transceiver power.
+ */
+static void pullup(struct at91_udc *udc, int is_on)
+{
+ if (!udc->enabled || !udc->vbus)
+ is_on = 0;
+ DBG("%sactive\n", is_on ? "" : "in");
+
+ if (is_on) {
+ clk_on(udc);
+ at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
+ at91_udp_write(udc, AT91_UDP_TXVC, 0);
+ } else {
+ stop_activity(udc);
+ at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
+ at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
+ clk_off(udc);
+ }
+
+ if (udc->caps && udc->caps->pullup)
+ udc->caps->pullup(udc, is_on);
+}
+
+/* vbus is here! turn everything on that's ready */
+static int at91_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct at91_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ /* VDBG("vbus %s\n", is_active ? "on" : "off"); */
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->vbus = (is_active != 0);
+ if (udc->driver)
+ pullup(udc, is_active);
+ else
+ pullup(udc, 0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int at91_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct at91_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->enabled = is_on = !!is_on;
+ pullup(udc, is_on);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ struct at91_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ gadget->is_selfpowered = (is_on != 0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int at91_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+static int at91_stop(struct usb_gadget *gadget);
+
+static const struct usb_gadget_ops at91_udc_ops = {
+ .get_frame = at91_get_frame,
+ .wakeup = at91_wakeup,
+ .set_selfpowered = at91_set_selfpowered,
+ .vbus_session = at91_vbus_session,
+ .pullup = at91_pullup,
+ .udc_start = at91_start,
+ .udc_stop = at91_stop,
+
+ /*
+ * VBUS-powered devices may also want to support bigger
+ * power budgets after an appropriate SET_CONFIGURATION.
+ */
+ /* .vbus_power = at91_vbus_power, */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int handle_ep(struct at91_ep *ep)
+{
+ struct at91_request *req;
+ u32 __iomem *creg = ep->creg;
+ u32 csr = __raw_readl(creg);
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct at91_request, queue);
+ else
+ req = NULL;
+
+ if (ep->is_in) {
+ if (csr & (AT91_UDP_STALLSENT | AT91_UDP_TXCOMP)) {
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP);
+ __raw_writel(csr, creg);
+ }
+ if (req)
+ return write_fifo(ep, req);
+
+ } else {
+ if (csr & AT91_UDP_STALLSENT) {
+ /* STALLSENT bit == ISOERR */
+ if (ep->is_iso && req)
+ req->req.status = -EILSEQ;
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_STALLSENT);
+ __raw_writel(csr, creg);
+ csr = __raw_readl(creg);
+ }
+ if (req && (csr & RX_DATA_READY))
+ return read_fifo(ep, req);
+ }
+ return 0;
+}
+
+union setup {
+ u8 raw[8];
+ struct usb_ctrlrequest r;
+};
+
+static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr)
+{
+ u32 __iomem *creg = ep->creg;
+ u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0));
+ unsigned rxcount, i = 0;
+ u32 tmp;
+ union setup pkt;
+ int status = 0;
+
+ /* read and ack SETUP; hard-fail for bogus packets */
+ rxcount = (csr & AT91_UDP_RXBYTECNT) >> 16;
+ if (likely(rxcount == 8)) {
+ while (rxcount--)
+ pkt.raw[i++] = __raw_readb(dreg);
+ if (pkt.r.bRequestType & USB_DIR_IN) {
+ csr |= AT91_UDP_DIR;
+ ep->is_in = 1;
+ } else {
+ csr &= ~AT91_UDP_DIR;
+ ep->is_in = 0;
+ }
+ } else {
+ /* REVISIT this happens sometimes under load; why?? */
+ ERR("SETUP len %d, csr %08x\n", rxcount, csr);
+ status = -EINVAL;
+ }
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_RXSETUP);
+ __raw_writel(csr, creg);
+ udc->wait_for_addr_ack = 0;
+ udc->wait_for_config_ack = 0;
+ ep->stopped = 0;
+ if (unlikely(status != 0))
+ goto stall;
+
+#define w_index le16_to_cpu(pkt.r.wIndex)
+#define w_value le16_to_cpu(pkt.r.wValue)
+#define w_length le16_to_cpu(pkt.r.wLength)
+
+ VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
+ pkt.r.bRequestType, pkt.r.bRequest,
+ w_value, w_index, w_length);
+
+ /*
+ * A few standard requests get handled here, ones that touch
+ * hardware ... notably for device and endpoint features.
+ */
+ udc->req_pending = 1;
+ csr = __raw_readl(creg);
+ csr |= CLR_FX;
+ csr &= ~SET_FX;
+ switch ((pkt.r.bRequestType << 8) | pkt.r.bRequest) {
+
+ case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+ | USB_REQ_SET_ADDRESS:
+ __raw_writel(csr | AT91_UDP_TXPKTRDY, creg);
+ udc->addr = w_value;
+ udc->wait_for_addr_ack = 1;
+ udc->req_pending = 0;
+ /* FADDR is set later, when we ack host STATUS */
+ return;
+
+ case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+ | USB_REQ_SET_CONFIGURATION:
+ tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_CONFG;
+ if (pkt.r.wValue)
+ udc->wait_for_config_ack = (tmp == 0);
+ else
+ udc->wait_for_config_ack = (tmp != 0);
+ if (udc->wait_for_config_ack)
+ VDBG("wait for config\n");
+ /* CONFG is toggled later, if gadget driver succeeds */
+ break;
+
+ /*
+ * Hosts may set or clear remote wakeup status, and
+ * devices may report they're VBUS powered.
+ */
+ case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+ | USB_REQ_GET_STATUS:
+ tmp = (udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED);
+ if (at91_udp_read(udc, AT91_UDP_GLB_STAT) & AT91_UDP_ESR)
+ tmp |= (1 << USB_DEVICE_REMOTE_WAKEUP);
+ PACKET("get device status\n");
+ __raw_writeb(tmp, dreg);
+ __raw_writeb(0, dreg);
+ goto write_in;
+ /* then STATUS starts later, automatically */
+ case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+ | USB_REQ_SET_FEATURE:
+ if (w_value != USB_DEVICE_REMOTE_WAKEUP)
+ goto stall;
+ tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+ tmp |= AT91_UDP_ESR;
+ at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+ goto succeed;
+ case ((USB_TYPE_STANDARD|USB_RECIP_DEVICE) << 8)
+ | USB_REQ_CLEAR_FEATURE:
+ if (w_value != USB_DEVICE_REMOTE_WAKEUP)
+ goto stall;
+ tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+ tmp &= ~AT91_UDP_ESR;
+ at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+ goto succeed;
+
+ /*
+ * Interfaces have no feature settings; this is pretty useless.
+ * We won't even insist that the interface exists...
+ */
+ case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
+ | USB_REQ_GET_STATUS:
+ PACKET("get interface status\n");
+ __raw_writeb(0, dreg);
+ __raw_writeb(0, dreg);
+ goto write_in;
+ /* then STATUS starts later, automatically */
+ case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
+ | USB_REQ_SET_FEATURE:
+ case ((USB_TYPE_STANDARD|USB_RECIP_INTERFACE) << 8)
+ | USB_REQ_CLEAR_FEATURE:
+ goto stall;
+
+ /*
+ * Hosts may clear bulk/intr endpoint halt after the gadget
+ * driver sets it (not widely used); or set it (for testing)
+ */
+ case ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
+ | USB_REQ_GET_STATUS:
+ tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (tmp >= NUM_ENDPOINTS)
+ goto stall;
+ ep = &udc->ep[tmp];
+ if (tmp && !ep->ep.desc)
+ goto stall;
+
+ if (tmp) {
+ if ((w_index & USB_DIR_IN)) {
+ if (!ep->is_in)
+ goto stall;
+ } else if (ep->is_in)
+ goto stall;
+ }
+ PACKET("get %s status\n", ep->ep.name);
+ if (__raw_readl(ep->creg) & AT91_UDP_FORCESTALL)
+ tmp = (1 << USB_ENDPOINT_HALT);
+ else
+ tmp = 0;
+ __raw_writeb(tmp, dreg);
+ __raw_writeb(0, dreg);
+ goto write_in;
+ /* then STATUS starts later, automatically */
+ case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
+ | USB_REQ_SET_FEATURE:
+ tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS)
+ goto stall;
+ ep = &udc->ep[tmp];
+ if (!ep->ep.desc || ep->is_iso)
+ goto stall;
+ if ((w_index & USB_DIR_IN)) {
+ if (!ep->is_in)
+ goto stall;
+ } else if (ep->is_in)
+ goto stall;
+
+ tmp = __raw_readl(ep->creg);
+ tmp &= ~SET_FX;
+ tmp |= CLR_FX | AT91_UDP_FORCESTALL;
+ __raw_writel(tmp, ep->creg);
+ goto succeed;
+ case ((USB_TYPE_STANDARD|USB_RECIP_ENDPOINT) << 8)
+ | USB_REQ_CLEAR_FEATURE:
+ tmp = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (w_value != USB_ENDPOINT_HALT || tmp >= NUM_ENDPOINTS)
+ goto stall;
+ ep = &udc->ep[tmp];
+ if (tmp == 0)
+ goto succeed;
+ if (!ep->ep.desc || ep->is_iso)
+ goto stall;
+ if ((w_index & USB_DIR_IN)) {
+ if (!ep->is_in)
+ goto stall;
+ } else if (ep->is_in)
+ goto stall;
+
+ at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask);
+ at91_udp_write(udc, AT91_UDP_RST_EP, 0);
+ tmp = __raw_readl(ep->creg);
+ tmp |= CLR_FX;
+ tmp &= ~(SET_FX | AT91_UDP_FORCESTALL);
+ __raw_writel(tmp, ep->creg);
+ if (!list_empty(&ep->queue))
+ handle_ep(ep);
+ goto succeed;
+ }
+
+#undef w_value
+#undef w_index
+#undef w_length
+
+ /* pass request up to the gadget driver */
+ if (udc->driver) {
+ spin_unlock(&udc->lock);
+ status = udc->driver->setup(&udc->gadget, &pkt.r);
+ spin_lock(&udc->lock);
+ } else
+ status = -ENODEV;
+ if (status < 0) {
+stall:
+ VDBG("req %02x.%02x protocol STALL; stat %d\n",
+ pkt.r.bRequestType, pkt.r.bRequest, status);
+ csr |= AT91_UDP_FORCESTALL;
+ __raw_writel(csr, creg);
+ udc->req_pending = 0;
+ }
+ return;
+
+succeed:
+ /* immediate successful (IN) STATUS after zero length DATA */
+ PACKET("ep0 in/status\n");
+write_in:
+ csr |= AT91_UDP_TXPKTRDY;
+ __raw_writel(csr, creg);
+ udc->req_pending = 0;
+}
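+
+/*
+ * Note on the switch above: keying on ((bRequestType << 8) | bRequest)
+ * lets a single integer compare match both bytes of a standard request.
+ * SET_ADDRESS (bRequestType 0x00, bRequest 0x05) becomes key 0x0005,
+ * a device GET_STATUS (0x80, 0x00) becomes 0x8000, and requests the
+ * switch doesn't fully handle are passed on to the gadget driver's
+ * setup() callback.
+ */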
+
+static void handle_ep0(struct at91_udc *udc)
+{
+ struct at91_ep *ep0 = &udc->ep[0];
+ u32 __iomem *creg = ep0->creg;
+ u32 csr = __raw_readl(creg);
+ struct at91_request *req;
+
+ if (unlikely(csr & AT91_UDP_STALLSENT)) {
+ nuke(ep0, -EPROTO);
+ udc->req_pending = 0;
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_STALLSENT | AT91_UDP_FORCESTALL);
+ __raw_writel(csr, creg);
+ VDBG("ep0 stalled\n");
+ csr = __raw_readl(creg);
+ }
+ if (csr & AT91_UDP_RXSETUP) {
+ nuke(ep0, 0);
+ udc->req_pending = 0;
+ handle_setup(udc, ep0, csr);
+ return;
+ }
+
+ if (list_empty(&ep0->queue))
+ req = NULL;
+ else
+ req = list_entry(ep0->queue.next, struct at91_request, queue);
+
+ /* host ACKed an IN packet that we sent */
+ if (csr & AT91_UDP_TXCOMP) {
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_TXCOMP);
+
+ /* write more IN DATA? */
+ if (req && ep0->is_in) {
+ if (handle_ep(ep0))
+ udc->req_pending = 0;
+
+ /*
+ * Ack after:
+ * - last IN DATA packet (including GET_STATUS)
+ * - IN/STATUS for OUT DATA
+ * - IN/STATUS for any zero-length DATA stage
+ * except for the IN DATA case, the host should send
+ * an OUT status later, which we'll ack.
+ */
+ } else {
+ udc->req_pending = 0;
+ __raw_writel(csr, creg);
+
+ /*
+ * SET_ADDRESS takes effect only after the STATUS
+ * (to the original address) gets acked.
+ */
+ if (udc->wait_for_addr_ack) {
+ u32 tmp;
+
+ at91_udp_write(udc, AT91_UDP_FADDR,
+ AT91_UDP_FEN | udc->addr);
+ tmp = at91_udp_read(udc, AT91_UDP_GLB_STAT);
+ tmp &= ~AT91_UDP_FADDEN;
+ if (udc->addr)
+ tmp |= AT91_UDP_FADDEN;
+ at91_udp_write(udc, AT91_UDP_GLB_STAT, tmp);
+
+ udc->wait_for_addr_ack = 0;
+ VDBG("address %d\n", udc->addr);
+ }
+ }
+ }
+
+ /* OUT packet arrived ... */
+ else if (csr & AT91_UDP_RX_DATA_BK0) {
+ csr |= CLR_FX;
+ csr &= ~(SET_FX | AT91_UDP_RX_DATA_BK0);
+
+ /* OUT DATA stage */
+ if (!ep0->is_in) {
+ if (req) {
+ if (handle_ep(ep0)) {
+ /* send IN/STATUS */
+ PACKET("ep0 in/status\n");
+ csr = __raw_readl(creg);
+ csr &= ~SET_FX;
+ csr |= CLR_FX | AT91_UDP_TXPKTRDY;
+ __raw_writel(csr, creg);
+ udc->req_pending = 0;
+ }
+ } else if (udc->req_pending) {
+ /*
+ * AT91 hardware has a hard time with this
+ * "deferred response" mode for control-OUT
+ * transfers. (For control-IN it's fine.)
+ *
+ * The normal solution leaves OUT data in the
+ * fifo until the gadget driver is ready.
+ * We couldn't do that here without disabling
+ * the IRQ that tells about SETUP packets,
+ * e.g. when the host gets impatient...
+ *
+ * Working around it by copying into a buffer
+ * would almost be a non-deferred response,
+ * except that it wouldn't permit reliable
+ * stalling of the request. Instead, demand
+ * that gadget drivers not use this mode.
+ */
+ DBG("no control-OUT deferred responses!\n");
+ __raw_writel(csr | AT91_UDP_FORCESTALL, creg);
+ udc->req_pending = 0;
+ }
+
+ /* STATUS stage for control-IN; ack. */
+ } else {
+ PACKET("ep0 out/status ACK\n");
+ __raw_writel(csr, creg);
+
+ /* "early" status stage */
+ if (req)
+ done(ep0, req, 0);
+ }
+ }
+}
+
+static irqreturn_t at91_udc_irq(int irq, void *_udc)
+{
+ struct at91_udc *udc = _udc;
+ u32 rescans = 5;
+ int disable_clock = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!udc->clocked) {
+ clk_on(udc);
+ disable_clock = 1;
+ }
+
+ while (rescans--) {
+ u32 status;
+
+ status = at91_udp_read(udc, AT91_UDP_ISR)
+ & at91_udp_read(udc, AT91_UDP_IMR);
+ if (!status)
+ break;
+
+ /* USB reset irq: not maskable */
+ if (status & AT91_UDP_ENDBUSRES) {
+ at91_udp_write(udc, AT91_UDP_IDR, ~MINIMUS_INTERRUPTUS);
+ at91_udp_write(udc, AT91_UDP_IER, MINIMUS_INTERRUPTUS);
+ /* Atmel code clears this irq twice */
+ at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
+ at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_ENDBUSRES);
+ VDBG("end bus reset\n");
+ udc->addr = 0;
+ reset_gadget(udc);
+
+ /* enable ep0 */
+ at91_udp_write(udc, AT91_UDP_CSR(0),
+ AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL);
+ udc->gadget.speed = USB_SPEED_FULL;
+ udc->suspended = 0;
+ at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_EP(0));
+
+ /*
+ * NOTE: this driver keeps clocks off unless the
+ * USB host is present. That saves power, but for
+ * boards that don't support VBUS detection, both
+ * clocks need to be active most of the time.
+ */
+
+ /* host initiated suspend (3+ms bus idle) */
+ } else if (status & AT91_UDP_RXSUSP) {
+ at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXSUSP);
+ at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXRSM);
+ at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXSUSP);
+ /* VDBG("bus suspend\n"); */
+ if (udc->suspended)
+ continue;
+ udc->suspended = 1;
+
+ /*
+ * NOTE: when suspending a VBUS-powered device, the
+ * gadget driver should switch into slow clock mode
+ * and then into standby to avoid drawing more than
+ * 500uA power (2500uA for some high-power configs).
+ */
+ if (udc->driver && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+
+ /* host initiated resume */
+ } else if (status & AT91_UDP_RXRSM) {
+ at91_udp_write(udc, AT91_UDP_IDR, AT91_UDP_RXRSM);
+ at91_udp_write(udc, AT91_UDP_IER, AT91_UDP_RXSUSP);
+ at91_udp_write(udc, AT91_UDP_ICR, AT91_UDP_RXRSM);
+ /* VDBG("bus resume\n"); */
+ if (!udc->suspended)
+ continue;
+ udc->suspended = 0;
+
+ /*
+ * NOTE: for a VBUS-powered device, the gadget driver
+ * would normally want to switch out of slow clock
+ * mode into normal mode.
+ */
+ if (udc->driver && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+
+ /* endpoint IRQs are cleared by handling them */
+ } else {
+ int i;
+ unsigned mask = 1;
+ struct at91_ep *ep = &udc->ep[1];
+
+ if (status & mask)
+ handle_ep0(udc);
+ for (i = 1; i < NUM_ENDPOINTS; i++) {
+ mask <<= 1;
+ if (status & mask)
+ handle_ep(ep);
+ ep++;
+ }
+ }
+ }
+
+ if (disable_clock)
+ clk_off(udc);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return IRQ_HANDLED;
+}
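+
+/*
+ * The ISR & IMR loop above rescans at most five times, so events that
+ * arrive while an earlier one is being serviced are handled in the
+ * same invocation, while the fixed bound keeps a misbehaving host or
+ * device from pinning the CPU in hard-irq context.
+ */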
+
+/*-------------------------------------------------------------------------*/
+
+static void at91_vbus_update(struct at91_udc *udc, unsigned value)
+{
+ if (value != udc->vbus)
+ at91_vbus_session(&udc->gadget, value);
+}
+
+static irqreturn_t at91_vbus_irq(int irq, void *_udc)
+{
+ struct at91_udc *udc = _udc;
+
+ /* vbus needs at least brief debouncing */
+ udelay(10);
+ at91_vbus_update(udc, gpiod_get_value(udc->board.vbus_pin));
+
+ return IRQ_HANDLED;
+}
+
+static void at91_vbus_timer_work(struct work_struct *work)
+{
+ struct at91_udc *udc = container_of(work, struct at91_udc,
+ vbus_timer_work);
+
+ at91_vbus_update(udc, gpiod_get_value_cansleep(udc->board.vbus_pin));
+
+ if (!timer_pending(&udc->vbus_timer))
+ mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
+}
+
+static void at91_vbus_timer(struct timer_list *t)
+{
+ struct at91_udc *udc = from_timer(udc, t, vbus_timer);
+
+ /*
+ * If we are polling vbus, the gpio is likely on a bus such as
+ * i2c or spi which may sleep, so schedule some work to read
+ * the vbus gpio.
+ */
+ schedule_work(&udc->vbus_timer_work);
+}
+
+static int at91_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct at91_udc *udc;
+
+ udc = container_of(gadget, struct at91_udc, gadget);
+ udc->driver = driver;
+ udc->gadget.dev.of_node = udc->pdev->dev.of_node;
+ udc->enabled = 1;
+ udc->gadget.is_selfpowered = 1;
+
+ return 0;
+}
+
+static int at91_stop(struct usb_gadget *gadget)
+{
+ struct at91_udc *udc;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct at91_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->enabled = 0;
+ at91_udp_write(udc, AT91_UDP_IDR, ~0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ udc->driver = NULL;
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void at91udc_shutdown(struct platform_device *dev)
+{
+ struct at91_udc *udc = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ /* force disconnect on reboot */
+ spin_lock_irqsave(&udc->lock, flags);
+ pullup(udc, 0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static int at91rm9200_udc_init(struct at91_udc *udc)
+{
+ struct at91_ep *ep;
+ int i;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+
+ switch (i) {
+ case 0:
+ case 3:
+ ep->maxpacket = 8;
+ break;
+ case 1 ... 2:
+ ep->maxpacket = 64;
+ break;
+ case 4 ... 5:
+ ep->maxpacket = 256;
+ break;
+ }
+ }
+
+ if (!udc->board.pullup_pin) {
+ DBG("no D+ pullup?\n");
+ return -ENODEV;
+ }
+
+ gpiod_direction_output(udc->board.pullup_pin,
+ gpiod_is_active_low(udc->board.pullup_pin));
+
+ return 0;
+}
+
+static void at91rm9200_udc_pullup(struct at91_udc *udc, int is_on)
+{
+ gpiod_set_value(udc->board.pullup_pin, !!is_on);
+}
+
+static const struct at91_udc_caps at91rm9200_udc_caps = {
+ .init = at91rm9200_udc_init,
+ .pullup = at91rm9200_udc_pullup,
+};
+
+static int at91sam9260_udc_init(struct at91_udc *udc)
+{
+ struct at91_ep *ep;
+ int i;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+
+ switch (i) {
+ case 0 ... 3:
+ ep->maxpacket = 64;
+ break;
+ case 4 ... 5:
+ ep->maxpacket = 512;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void at91sam9260_udc_pullup(struct at91_udc *udc, int is_on)
+{
+ u32 txvc = at91_udp_read(udc, AT91_UDP_TXVC);
+
+ if (is_on)
+ txvc |= AT91_UDP_TXVC_PUON;
+ else
+ txvc &= ~AT91_UDP_TXVC_PUON;
+
+ at91_udp_write(udc, AT91_UDP_TXVC, txvc);
+}
+
+static const struct at91_udc_caps at91sam9260_udc_caps = {
+ .init = at91sam9260_udc_init,
+ .pullup = at91sam9260_udc_pullup,
+};
+
+static int at91sam9261_udc_init(struct at91_udc *udc)
+{
+ struct at91_ep *ep;
+ int i;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+
+ switch (i) {
+ case 0:
+ ep->maxpacket = 8;
+ break;
+ case 1 ... 3:
+ ep->maxpacket = 64;
+ break;
+ case 4 ... 5:
+ ep->maxpacket = 256;
+ break;
+ }
+ }
+
+ udc->matrix = syscon_regmap_lookup_by_phandle(udc->pdev->dev.of_node,
+ "atmel,matrix");
+ return PTR_ERR_OR_ZERO(udc->matrix);
+}
+
+static void at91sam9261_udc_pullup(struct at91_udc *udc, int is_on)
+{
+ u32 usbpucr = 0;
+
+ if (is_on)
+ usbpucr = AT91_MATRIX_USBPUCR_PUON;
+
+ regmap_update_bits(udc->matrix, AT91SAM9261_MATRIX_USBPUCR,
+ AT91_MATRIX_USBPUCR_PUON, usbpucr);
+}
+
+static const struct at91_udc_caps at91sam9261_udc_caps = {
+ .init = at91sam9261_udc_init,
+ .pullup = at91sam9261_udc_pullup,
+};
+
+static int at91sam9263_udc_init(struct at91_udc *udc)
+{
+ struct at91_ep *ep;
+ int i;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+
+ switch (i) {
+ case 0 ... 3:
+ ep->maxpacket = 64;
+ break;
+ case 4 ... 5:
+ ep->maxpacket = 256;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct at91_udc_caps at91sam9263_udc_caps = {
+ .init = at91sam9263_udc_init,
+ .pullup = at91sam9260_udc_pullup,
+};
+
+static const struct of_device_id at91_udc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-udc",
+ .data = &at91rm9200_udc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9260-udc",
+ .data = &at91sam9260_udc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9261-udc",
+ .data = &at91sam9261_udc_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-udc",
+ .data = &at91sam9263_udc_caps,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, at91_udc_dt_ids);
+
+static void at91udc_of_init(struct at91_udc *udc, struct device_node *np)
+{
+ struct at91_udc_data *board = &udc->board;
+ const struct of_device_id *match;
+
+ /* boolean DT property: present means "poll vbus" */
+ if (of_property_read_bool(np, "atmel,vbus-polled"))
+ board->vbus_polled = 1;
+
+ board->vbus_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
+ "atmel,vbus", 0, GPIOD_IN,
+ "udc_vbus");
+ if (IS_ERR(board->vbus_pin))
+ board->vbus_pin = NULL;
+
+ board->pullup_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
+ "atmel,pullup", 0,
+ GPIOD_ASIS, "udc_pullup");
+ if (IS_ERR(board->pullup_pin))
+ board->pullup_pin = NULL;
+
+ match = of_match_node(at91_udc_dt_ids, np);
+ if (match)
+ udc->caps = match->data;
+}
+
+static int at91udc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct at91_udc *udc;
+ int retval;
+ struct at91_ep *ep;
+ int i;
+
+ udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ /* init software state */
+ udc->gadget.dev.parent = dev;
+ at91udc_of_init(udc, pdev->dev.of_node);
+ udc->pdev = pdev;
+ udc->enabled = 0;
+ spin_lock_init(&udc->lock);
+
+ udc->gadget.ops = &at91_udc_ops;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+ udc->gadget.name = driver_name;
+ udc->gadget.dev.init_name = "gadget";
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ ep->ep.name = ep_info[i].name;
+ ep->ep.caps = ep_info[i].caps;
+ ep->ep.ops = &at91_ep_ops;
+ ep->udc = udc;
+ ep->int_mask = BIT(i);
+ if (i != 0 && i != 3)
+ ep->is_pingpong = 1;
+ }
+
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(udc->udp_baseaddr))
+ return PTR_ERR(udc->udp_baseaddr);
+
+ if (udc->caps && udc->caps->init) {
+ retval = udc->caps->init(udc);
+ if (retval)
+ return retval;
+ }
+
+ udc_reinit(udc);
+
+ /* get interface and function clocks */
+ udc->iclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(udc->iclk))
+ return PTR_ERR(udc->iclk);
+
+ udc->fclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(udc->fclk))
+ return PTR_ERR(udc->fclk);
+
+ /* don't do anything until we have both gadget driver and VBUS */
+ clk_set_rate(udc->fclk, 48000000);
+ retval = clk_prepare(udc->fclk);
+ if (retval)
+ return retval;
+
+ retval = clk_prepare_enable(udc->iclk);
+ if (retval)
+ goto err_unprepare_fclk;
+
+ at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
+ at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
+ /* Clear all pending interrupts - UDP may be used by bootloader. */
+ at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
+ clk_disable(udc->iclk);
+
+ /* request UDC and maybe VBUS irqs */
+ udc->udp_irq = retval = platform_get_irq(pdev, 0);
+ if (retval < 0)
+ goto err_unprepare_iclk;
+ retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
+ driver_name, udc);
+ if (retval) {
+ DBG("request irq %d failed\n", udc->udp_irq);
+ goto err_unprepare_iclk;
+ }
+
+ if (udc->board.vbus_pin) {
+ gpiod_direction_input(udc->board.vbus_pin);
+
+ /*
+ * Get the initial state of VBUS - we cannot expect
+ * a pending interrupt.
+ */
+ udc->vbus = gpiod_get_value_cansleep(udc->board.vbus_pin);
+
+ if (udc->board.vbus_polled) {
+ INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
+ timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
+ mod_timer(&udc->vbus_timer,
+ jiffies + VBUS_POLL_TIMEOUT);
+ } else {
+ retval = devm_request_irq(dev,
+ gpiod_to_irq(udc->board.vbus_pin),
+ at91_vbus_irq, 0, driver_name, udc);
+ if (retval) {
+ DBG("request vbus irq %d failed\n",
+ desc_to_gpio(udc->board.vbus_pin));
+ goto err_unprepare_iclk;
+ }
+ }
+ } else {
+ DBG("no VBUS detection, assuming always-on\n");
+ udc->vbus = 1;
+ }
+ retval = usb_add_gadget_udc(dev, &udc->gadget);
+ if (retval)
+ goto err_unprepare_iclk;
+ dev_set_drvdata(dev, udc);
+ device_init_wakeup(dev, 1);
+ create_debug_file(udc);
+
+ INFO("%s version %s\n", driver_name, DRIVER_VERSION);
+ return 0;
+
+err_unprepare_iclk:
+ clk_unprepare(udc->iclk);
+err_unprepare_fclk:
+ clk_unprepare(udc->fclk);
+
+ DBG("%s probe failed, %d\n", driver_name, retval);
+
+ return retval;
+}
+
+static int at91udc_remove(struct platform_device *pdev)
+{
+ struct at91_udc *udc = platform_get_drvdata(pdev);
+ unsigned long flags;
+
+ DBG("remove\n");
+
+ usb_del_gadget_udc(&udc->gadget);
+ if (udc->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ pullup(udc, 0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ device_init_wakeup(&pdev->dev, 0);
+ remove_debug_file(udc);
+ clk_unprepare(udc->fclk);
+ clk_unprepare(udc->iclk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct at91_udc *udc = platform_get_drvdata(pdev);
+ int wake = udc->driver && device_may_wakeup(&pdev->dev);
+ unsigned long flags;
+
+ /* Unless we can respond normally to the host (letting it wake us
+ * up whenever it has work for us), force disconnect. Wakeup requires
+ * PLLB for USB events (signaling for reset, wakeup, or incoming
+ * tokens) and VBUS irqs (on systems which support them).
+ */
+ if ((!udc->suspended && udc->addr)
+ || !wake
+ || at91_suspend_entering_slow_clock()) {
+ spin_lock_irqsave(&udc->lock, flags);
+ pullup(udc, 0);
+ wake = 0;
+ spin_unlock_irqrestore(&udc->lock, flags);
+ } else
+ enable_irq_wake(udc->udp_irq);
+
+ udc->active_suspend = wake;
+ if (udc->board.vbus_pin && !udc->board.vbus_polled && wake)
+ enable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
+ return 0;
+}
+
+static int at91udc_resume(struct platform_device *pdev)
+{
+ struct at91_udc *udc = platform_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (udc->board.vbus_pin && !udc->board.vbus_polled &&
+ udc->active_suspend)
+ disable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
+
+ /* maybe reconnect to host; if so, clocks on */
+ if (udc->active_suspend)
+ disable_irq_wake(udc->udp_irq);
+ else {
+ spin_lock_irqsave(&udc->lock, flags);
+ pullup(udc, 1);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ }
+ return 0;
+}
+#else
+#define at91udc_suspend NULL
+#define at91udc_resume NULL
+#endif
+
+static struct platform_driver at91_udc_driver = {
+ .remove = at91udc_remove,
+ .shutdown = at91udc_shutdown,
+ .suspend = at91udc_suspend,
+ .resume = at91udc_resume,
+ .driver = {
+ .name = driver_name,
+ .of_match_table = at91_udc_dt_ids,
+ },
+};
+
+module_platform_driver_probe(at91_udc_driver, at91udc_probe);
+
+MODULE_DESCRIPTION("AT91 udc driver");
+MODULE_AUTHOR("Thomas Rathbone, David Brownell");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_udc");
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
new file mode 100644
index 000000000..28c1042f8
--- /dev/null
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2004 by Thomas Rathbone, HP Labs
+ * Copyright (C) 2005 by Ivan Kokshaysky
+ * Copyright (C) 2006 by SAN People
+ */
+
+#ifndef AT91_UDC_H
+#define AT91_UDC_H
+
+/*
+ * USB Device Port (UDP) registers.
+ * Based on AT91RM9200 datasheet revision E.
+ */
+
+#define AT91_UDP_FRM_NUM 0x00 /* Frame Number Register */
+#define AT91_UDP_NUM (0x7ff << 0) /* Frame Number */
+#define AT91_UDP_FRM_ERR (1 << 16) /* Frame Error */
+#define AT91_UDP_FRM_OK (1 << 17) /* Frame OK */
+
+#define AT91_UDP_GLB_STAT 0x04 /* Global State Register */
+#define AT91_UDP_FADDEN (1 << 0) /* Function Address Enable */
+#define AT91_UDP_CONFG (1 << 1) /* Configured */
+#define AT91_UDP_ESR (1 << 2) /* Enable Send Resume */
+#define AT91_UDP_RSMINPR (1 << 3) /* Resume has been sent */
+#define AT91_UDP_RMWUPE (1 << 4) /* Remote Wake Up Enable */
+
+#define AT91_UDP_FADDR 0x08 /* Function Address Register */
+#define AT91_UDP_FADD (0x7f << 0) /* Function Address Value */
+#define AT91_UDP_FEN (1 << 8) /* Function Enable */
+
+#define AT91_UDP_IER 0x10 /* Interrupt Enable Register */
+#define AT91_UDP_IDR 0x14 /* Interrupt Disable Register */
+#define AT91_UDP_IMR 0x18 /* Interrupt Mask Register */
+
+#define AT91_UDP_ISR 0x1c /* Interrupt Status Register */
+#define AT91_UDP_EP(n) (1 << (n)) /* Endpoint Interrupt Status */
+#define AT91_UDP_RXSUSP (1 << 8) /* USB Suspend Interrupt Status */
+#define AT91_UDP_RXRSM (1 << 9) /* USB Resume Interrupt Status */
+#define AT91_UDP_EXTRSM (1 << 10) /* External Resume Interrupt Status [AT91RM9200 only] */
+#define AT91_UDP_SOFINT (1 << 11) /* Start of Frame Interrupt Status */
+#define AT91_UDP_ENDBUSRES (1 << 12) /* End of Bus Reset Interrupt Status */
+#define AT91_UDP_WAKEUP (1 << 13) /* USB Wakeup Interrupt Status [AT91RM9200 only] */
+
+#define AT91_UDP_ICR 0x20 /* Interrupt Clear Register */
+#define AT91_UDP_RST_EP 0x28 /* Reset Endpoint Register */
+
+#define AT91_UDP_CSR(n) (0x30+((n)*4)) /* Endpoint Control/Status Registers 0-7 */
+#define AT91_UDP_TXCOMP (1 << 0) /* Generates IN packet with data previously written in DPR */
+#define AT91_UDP_RX_DATA_BK0 (1 << 1) /* Receive Data Bank 0 */
+#define AT91_UDP_RXSETUP (1 << 2) /* SETUP packet received */
+#define AT91_UDP_STALLSENT (1 << 3) /* Stall Sent / Isochronous error (Isochronous endpoints) */
+#define AT91_UDP_TXPKTRDY (1 << 4) /* Transmit Packet Ready */
+#define AT91_UDP_FORCESTALL (1 << 5) /* Force Stall */
+#define AT91_UDP_RX_DATA_BK1 (1 << 6) /* Receive Data Bank 1 */
+#define AT91_UDP_DIR (1 << 7) /* Transfer Direction */
+#define AT91_UDP_EPTYPE (7 << 8) /* Endpoint Type */
+#define AT91_UDP_EPTYPE_CTRL (0 << 8)
+#define AT91_UDP_EPTYPE_ISO_OUT (1 << 8)
+#define AT91_UDP_EPTYPE_BULK_OUT (2 << 8)
+#define AT91_UDP_EPTYPE_INT_OUT (3 << 8)
+#define AT91_UDP_EPTYPE_ISO_IN (5 << 8)
+#define AT91_UDP_EPTYPE_BULK_IN (6 << 8)
+#define AT91_UDP_EPTYPE_INT_IN (7 << 8)
+#define AT91_UDP_DTGLE (1 << 11) /* Data Toggle */
+#define AT91_UDP_EPEDS (1 << 15) /* Endpoint Enable/Disable */
+#define AT91_UDP_RXBYTECNT (0x7ff << 16) /* Number of bytes in FIFO */
+
+#define AT91_UDP_FDR(n) (0x50+((n)*4)) /* Endpoint FIFO Data Registers 0-7 */
+
+#define AT91_UDP_TXVC 0x74 /* Transceiver Control Register */
+#define AT91_UDP_TXVC_TXVDIS (1 << 8) /* Transceiver Disable */
+#define AT91_UDP_TXVC_PUON (1 << 9) /* PullUp On [AT91SAM9260 only] */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * controller driver data structures
+ */
+
+#define NUM_ENDPOINTS 6
+
+/*
+ * hardware won't disable bus reset, or resume while the controller
+ * is suspended ... watching suspend helps keep the logic symmetric.
+ */
+#define MINIMUS_INTERRUPTUS \
+ (AT91_UDP_ENDBUSRES | AT91_UDP_RXRSM | AT91_UDP_RXSUSP)
+
+struct at91_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct at91_udc *udc;
+ void __iomem *creg;
+
+ unsigned maxpacket:16;
+ u8 int_mask;
+ unsigned is_pingpong:1;
+
+ unsigned stopped:1;
+ unsigned is_in:1;
+ unsigned is_iso:1;
+ unsigned fifo_bank:1;
+};
+
+struct at91_udc_caps {
+ int (*init)(struct at91_udc *udc);
+ void (*pullup)(struct at91_udc *udc, int is_on);
+};
+
+struct at91_udc_data {
+ struct gpio_desc *vbus_pin; /* high == host powering us */
+ u8 vbus_polled; /* Use polling, not interrupt */
+ struct gpio_desc *pullup_pin; /* active == D+ pulled up */
+};
+
+/*
+ * driver is non-SMP, and just blocks IRQs whenever it needs
+ * access protection for chip registers or driver state
+ */
+struct at91_udc {
+ struct usb_gadget gadget;
+ struct at91_ep ep[NUM_ENDPOINTS];
+ struct usb_gadget_driver *driver;
+ const struct at91_udc_caps *caps;
+ unsigned vbus:1;
+ unsigned enabled:1;
+ unsigned clocked:1;
+ unsigned suspended:1;
+ unsigned req_pending:1;
+ unsigned wait_for_addr_ack:1;
+ unsigned wait_for_config_ack:1;
+ unsigned active_suspend:1;
+ u8 addr;
+ struct at91_udc_data board;
+ struct clk *iclk, *fclk;
+ struct platform_device *pdev;
+ struct proc_dir_entry *pde;
+ void __iomem *udp_baseaddr;
+ int udp_irq;
+ spinlock_t lock;
+ struct timer_list vbus_timer;
+ struct work_struct vbus_timer_work;
+ struct regmap *matrix;
+};
+
+static inline struct at91_udc *to_udc(struct usb_gadget *g)
+{
+ return container_of(g, struct at91_udc, gadget);
+}
+
+struct at91_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef VERBOSE_DEBUG
+# define VDBG DBG
+#else
+# define VDBG(stuff...) do {} while (0)
+#endif
+
+#ifdef PACKET_TRACE
+# define PACKET VDBG
+#else
+# define PACKET(stuff...) do {} while (0)
+#endif
+
+#define ERR(stuff...) pr_err("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
+#define INFO(stuff...) pr_info("udc: " stuff)
+#define DBG(stuff...) pr_debug("udc: " stuff)
+
+#endif
+
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
new file mode 100644
index 000000000..53ca38c4b
--- /dev/null
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -0,0 +1,2465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Atmel USBA high speed USB device controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ */
+#include <linux/clk.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/ctype.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/gpio/consumer.h>
+
+#include "atmel_usba_udc.h"
+#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
+ | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+static int queue_dbg_open(struct inode *inode, struct file *file)
+{
+ struct usba_ep *ep = inode->i_private;
+ struct usba_request *req, *req_copy;
+ struct list_head *queue_data;
+
+ queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
+ if (!queue_data)
+ return -ENOMEM;
+ INIT_LIST_HEAD(queue_data);
+
+ spin_lock_irq(&ep->udc->lock);
+ list_for_each_entry(req, &ep->queue, queue) {
+ req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
+ if (!req_copy)
+ goto fail;
+ list_add_tail(&req_copy->queue, queue_data);
+ }
+ spin_unlock_irq(&ep->udc->lock);
+
+ file->private_data = queue_data;
+ return 0;
+
+fail:
+ spin_unlock_irq(&ep->udc->lock);
+ list_for_each_entry_safe(req, req_copy, queue_data, queue) {
+ list_del(&req->queue);
+ kfree(req);
+ }
+ kfree(queue_data);
+ return -ENOMEM;
+}
+
+/*
+ * bbbbbbbb llllllll IZS sssss FDL\n\0
+ *
+ * b: buffer address
+ * l: buffer length
+ * I/i: interrupt/no interrupt
+ * Z/z: zero/no zero
+ * S/s: short ok/short not ok
+ * s: status
+ * F/f: submitted/not submitted to FIFO
+ * D/d: using/not using DMA
+ * L/l: last transaction/not last transaction
+ */
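+/*
+ * A hypothetical sample line, for a 1 KiB request still in progress:
+ *
+ *	c1a3f000 00000400 IzS  -115 fDl
+ */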
+static ssize_t queue_dbg_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct list_head *queue = file->private_data;
+ struct usba_request *req, *tmp_req;
+ size_t len, remaining, actual = 0;
+ char tmpbuf[38];
+
+ if (!access_ok(buf, nbytes))
+ return -EFAULT;
+
+ inode_lock(file_inode(file));
+ list_for_each_entry_safe(req, tmp_req, queue, queue) {
+ len = snprintf(tmpbuf, sizeof(tmpbuf),
+ "%8p %08x %c%c%c %5d %c%c%c\n",
+ req->req.buf, req->req.length,
+ req->req.no_interrupt ? 'i' : 'I',
+ req->req.zero ? 'Z' : 'z',
+ req->req.short_not_ok ? 's' : 'S',
+ req->req.status,
+ req->submitted ? 'F' : 'f',
+ req->using_dma ? 'D' : 'd',
+ req->last_transaction ? 'L' : 'l');
+ len = min(len, sizeof(tmpbuf));
+ if (len > nbytes)
+ break;
+
+ list_del(&req->queue);
+ kfree(req);
+
+ remaining = __copy_to_user(buf, tmpbuf, len);
+ actual += len - remaining;
+ if (remaining)
+ break;
+
+ nbytes -= len;
+ buf += len;
+ }
+ inode_unlock(file_inode(file));
+
+ return actual;
+}
+
+static int queue_dbg_release(struct inode *inode, struct file *file)
+{
+ struct list_head *queue_data = file->private_data;
+ struct usba_request *req, *tmp_req;
+
+ list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
+ list_del(&req->queue);
+ kfree(req);
+ }
+ kfree(queue_data);
+ return 0;
+}
+
+static int regs_dbg_open(struct inode *inode, struct file *file)
+{
+ struct usba_udc *udc;
+ unsigned int i;
+ u32 *data;
+ int ret = -ENOMEM;
+
+ inode_lock(inode);
+ udc = inode->i_private;
+ data = kmalloc(inode->i_size, GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ spin_lock_irq(&udc->lock);
+ for (i = 0; i < inode->i_size / 4; i++)
+ data[i] = readl_relaxed(udc->regs + i * 4);
+ spin_unlock_irq(&udc->lock);
+
+ file->private_data = data;
+ ret = 0;
+
+out:
+ inode_unlock(inode);
+
+ return ret;
+}
+
+static ssize_t regs_dbg_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct inode *inode = file_inode(file);
+ int ret;
+
+ inode_lock(inode);
+ ret = simple_read_from_buffer(buf, nbytes, ppos,
+ file->private_data,
+ file_inode(file)->i_size);
+ inode_unlock(inode);
+
+ return ret;
+}
+
+static int regs_dbg_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations queue_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = queue_dbg_open,
+ .llseek = no_llseek,
+ .read = queue_dbg_read,
+ .release = queue_dbg_release,
+};
+
+static const struct file_operations regs_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = regs_dbg_open,
+ .llseek = generic_file_llseek,
+ .read = regs_dbg_read,
+ .release = regs_dbg_release,
+};
+
+static void usba_ep_init_debugfs(struct usba_udc *udc,
+ struct usba_ep *ep)
+{
+ struct dentry *ep_root;
+
+ ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
+ ep->debugfs_dir = ep_root;
+
+ debugfs_create_file("queue", 0400, ep_root, ep, &queue_dbg_fops);
+ if (ep->can_dma)
+ debugfs_create_u32("dma_status", 0400, ep_root,
+ &ep->last_dma_status);
+ if (ep_is_control(ep))
+ debugfs_create_u32("state", 0400, ep_root, &ep->state);
+}
+
+static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
+{
+ debugfs_remove_recursive(ep->debugfs_dir);
+}
+
+static void usba_init_debugfs(struct usba_udc *udc)
+{
+ struct dentry *root;
+ struct resource *regs_resource;
+
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
+ udc->debugfs_root = root;
+
+ regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
+ CTRL_IOMEM_ID);
+
+ if (regs_resource) {
+ debugfs_create_file_size("regs", 0400, root, udc,
+ &regs_dbg_fops,
+ resource_size(regs_resource));
+ }
+
+ usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
+}
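+
+/*
+ * With debugfs mounted at /sys/kernel/debug, the files created above
+ * are expected to land under the usb directory, roughly:
+ *
+ *	/sys/kernel/debug/usb/atmel_usba_udc/regs
+ *	/sys/kernel/debug/usb/atmel_usba_udc/ep0/queue
+ *	/sys/kernel/debug/usb/atmel_usba_udc/ep0/state
+ *
+ * (paths assume the default gadget name; per-endpoint directories are
+ * named after ep->ep.name).
+ */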
+
+static void usba_cleanup_debugfs(struct usba_udc *udc)
+{
+ usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
+ debugfs_remove_recursive(udc->debugfs_root);
+}
+#else
+static inline void usba_ep_init_debugfs(struct usba_udc *udc,
+ struct usba_ep *ep)
+{
+}
+
+static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
+{
+}
+
+static inline void usba_init_debugfs(struct usba_udc *udc)
+{
+}
+
+static inline void usba_cleanup_debugfs(struct usba_udc *udc)
+{
+}
+#endif
+
+static ushort fifo_mode;
+
+module_param(fifo_mode, ushort, 0x0);
+MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode");
+
+/* mode 0 - uses autoconfig */
+
+/* mode 1 - fits in 8KB, generic max fifo configuration */
+static struct usba_fifo_cfg mode_1_cfg[] = {
+{ .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
+{ .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, },
+{ .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 1, },
+{ .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 1, },
+{ .hw_ep_num = 4, .fifo_size = 1024, .nr_banks = 1, },
+{ .hw_ep_num = 5, .fifo_size = 1024, .nr_banks = 1, },
+{ .hw_ep_num = 6, .fifo_size = 1024, .nr_banks = 1, },
+};
+
+/* mode 2 - fits in 8KB, performance max fifo configuration */
+static struct usba_fifo_cfg mode_2_cfg[] = {
+{ .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
+{ .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 3, },
+{ .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 2, },
+{ .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 2, },
+};
+
+/* mode 3 - fits in 8KB, mixed fifo configuration */
+static struct usba_fifo_cfg mode_3_cfg[] = {
+{ .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
+{ .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, },
+{ .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 3, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 6, .fifo_size = 512, .nr_banks = 2, },
+};
+
+/* mode 4 - fits in 8KB, custom fifo configuration */
+static struct usba_fifo_cfg mode_4_cfg[] = {
+{ .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
+{ .hw_ep_num = 1, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 3, .fifo_size = 8, .nr_banks = 2, },
+{ .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, },
+{ .hw_ep_num = 6, .fifo_size = 16, .nr_banks = 2, },
+{ .hw_ep_num = 7, .fifo_size = 8, .nr_banks = 2, },
+{ .hw_ep_num = 8, .fifo_size = 8, .nr_banks = 2, },
+};
+/* Add additional configurations here */
+
+static int usba_config_fifo_table(struct usba_udc *udc)
+{
+ int n;
+
+ switch (fifo_mode) {
+ default:
+ fifo_mode = 0;
+ fallthrough;
+ case 0:
+ udc->fifo_cfg = NULL;
+ n = 0;
+ break;
+ case 1:
+ udc->fifo_cfg = mode_1_cfg;
+ n = ARRAY_SIZE(mode_1_cfg);
+ break;
+ case 2:
+ udc->fifo_cfg = mode_2_cfg;
+ n = ARRAY_SIZE(mode_2_cfg);
+ break;
+ case 3:
+ udc->fifo_cfg = mode_3_cfg;
+ n = ARRAY_SIZE(mode_3_cfg);
+ break;
+ case 4:
+ udc->fifo_cfg = mode_4_cfg;
+ n = ARRAY_SIZE(mode_4_cfg);
+ break;
+ }
+ DBG(DBG_HW, "Setup fifo_mode %d\n", fifo_mode);
+
+ return n;
+}
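+
+/*
+ * The fifo table is chosen at load time, e.g. when built as a module:
+ *
+ *	modprobe atmel_usba_udc fifo_mode=2
+ *
+ * (or "atmel_usba_udc.fifo_mode=2" on the kernel command line when
+ * built in).  Out-of-range values silently fall back to mode 0,
+ * i.e. autoconfiguration.
+ */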
+
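+/*
+ * INT_ENB shadowing: the current enable mask is cached in the udc so
+ * that the helpers below can read-modify-write it without touching
+ * the hardware register for the read half.
+ */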
+static inline u32 usba_int_enb_get(struct usba_udc *udc)
+{
+ return udc->int_enb_cache;
+}
+
+static inline void usba_int_enb_set(struct usba_udc *udc, u32 mask)
+{
+ u32 val;
+
+ val = udc->int_enb_cache | mask;
+ usba_writel(udc, INT_ENB, val);
+ udc->int_enb_cache = val;
+}
+
+static inline void usba_int_enb_clear(struct usba_udc *udc, u32 mask)
+{
+ u32 val;
+
+ val = udc->int_enb_cache & ~mask;
+ usba_writel(udc, INT_ENB, val);
+ udc->int_enb_cache = val;
+}
+
+static int vbus_is_present(struct usba_udc *udc)
+{
+ if (udc->vbus_pin)
+ return gpiod_get_value(udc->vbus_pin);
+
+ /* No Vbus detection: Assume always present */
+ return 1;
+}
+
+static void toggle_bias(struct usba_udc *udc, int is_on)
+{
+ if (udc->errata && udc->errata->toggle_bias)
+ udc->errata->toggle_bias(udc, is_on);
+}
+
+static void generate_bias_pulse(struct usba_udc *udc)
+{
+ if (!udc->bias_pulse_needed)
+ return;
+
+ if (udc->errata && udc->errata->pulse_bias)
+ udc->errata->pulse_bias(udc);
+
+ udc->bias_pulse_needed = false;
+}
+
+static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
+{
+ unsigned int transaction_len;
+
+ transaction_len = req->req.length - req->req.actual;
+ req->last_transaction = 1;
+ if (transaction_len > ep->ep.maxpacket) {
+ transaction_len = ep->ep.maxpacket;
+ req->last_transaction = 0;
+ } else if (transaction_len == ep->ep.maxpacket && req->req.zero)
+ req->last_transaction = 0;
+
+ DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
+ ep->ep.name, req, transaction_len,
+ req->last_transaction ? ", done" : "");
+
+ memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
+ usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+ req->req.actual += transaction_len;
+}
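+
+/*
+ * For example, a 100-byte IN request on a 64-byte endpoint is sent as
+ * 64 + 36 bytes, with last_transaction set on the second write; a
+ * 128-byte request with req.zero set goes out as 64 + 64 + 0, the
+ * trailing zero-length packet marking the end of a transfer that
+ * would otherwise finish exactly on a maxpacket boundary.
+ */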
+
+static void submit_request(struct usba_ep *ep, struct usba_request *req)
+{
+ DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
+ ep->ep.name, req, req->req.length);
+
+ req->req.actual = 0;
+ req->submitted = 1;
+
+ if (req->using_dma) {
+ if (req->req.length == 0) {
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
+ return;
+ }
+
+ if (req->req.zero)
+ usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
+ else
+ usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
+
+ usba_dma_writel(ep, ADDRESS, req->req.dma);
+ usba_dma_writel(ep, CONTROL, req->ctrl);
+ } else {
+ next_fifo_transaction(ep, req);
+ if (req->last_transaction) {
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+ if (ep_is_control(ep))
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+ } else {
+ if (ep_is_control(ep))
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
+ }
+ }
+}
+
+static void submit_next_request(struct usba_ep *ep)
+{
+ struct usba_request *req;
+
+ if (list_empty(&ep->queue)) {
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
+ return;
+ }
+
+ req = list_entry(ep->queue.next, struct usba_request, queue);
+ if (!req->submitted)
+ submit_request(ep, req);
+}
+
+static void send_status(struct usba_udc *udc, struct usba_ep *ep)
+{
+ ep->state = STATUS_STAGE_IN;
+ usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+}
+
+static void receive_data(struct usba_ep *ep)
+{
+ struct usba_udc *udc = ep->udc;
+ struct usba_request *req;
+ unsigned long status;
+ unsigned int bytecount, nr_busy;
+ int is_complete = 0;
+
+ status = usba_ep_readl(ep, STA);
+ nr_busy = USBA_BFEXT(BUSY_BANKS, status);
+
+ DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
+
+ while (nr_busy > 0) {
+ if (list_empty(&ep->queue)) {
+ usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+ break;
+ }
+ req = list_entry(ep->queue.next,
+ struct usba_request, queue);
+
+ bytecount = USBA_BFEXT(BYTE_COUNT, status);
+
+ if (status & (1 << 31))
+ is_complete = 1;
+ if (req->req.actual + bytecount >= req->req.length) {
+ is_complete = 1;
+ bytecount = req->req.length - req->req.actual;
+ }
+
+ memcpy_fromio(req->req.buf + req->req.actual,
+ ep->fifo, bytecount);
+ req->req.actual += bytecount;
+
+ usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+
+ if (is_complete) {
+ DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
+ req->req.status = 0;
+ list_del_init(&req->queue);
+ usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+ }
+
+ status = usba_ep_readl(ep, STA);
+ nr_busy = USBA_BFEXT(BUSY_BANKS, status);
+
+ if (is_complete && ep_is_control(ep)) {
+ send_status(udc, ep);
+ break;
+ }
+ }
+}
+
+static void
+request_complete(struct usba_ep *ep, struct usba_request *req, int status)
+{
+ struct usba_udc *udc = ep->udc;
+
+ WARN_ON(!list_empty(&req->queue));
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+
+ if (req->using_dma)
+ usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
+
+ DBG(DBG_GADGET | DBG_REQ,
+ "%s: req %p complete: status %d, actual %u\n",
+ ep->ep.name, req, req->req.status, req->req.actual);
+
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+}
+
+static void
+request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
+{
+ struct usba_request *req, *tmp_req;
+
+ list_for_each_entry_safe(req, tmp_req, list, queue) {
+ list_del_init(&req->queue);
+ request_complete(ep, req, status);
+ }
+}
+
+static int
+usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct usba_ep *ep = to_usba_ep(_ep);
+ struct usba_udc *udc = ep->udc;
+ unsigned long flags, maxpacket;
+ unsigned int nr_trans;
+
+ DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
+
+ maxpacket = usb_endpoint_maxp(desc);
+
+ if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
+ || ep->index == 0
+ || desc->bDescriptorType != USB_DT_ENDPOINT
+ || maxpacket == 0
+ || maxpacket > ep->fifo_size) {
+ DBG(DBG_ERR, "ep_enable: Invalid argument");
+ return -EINVAL;
+ }
+
+ ep->is_isoc = 0;
+ ep->is_in = 0;
+
+ DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
+ ep->ep.name, ep->ept_cfg, maxpacket);
+
+ if (usb_endpoint_dir_in(desc)) {
+ ep->is_in = 1;
+ ep->ept_cfg |= USBA_EPT_DIR_IN;
+ }
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!ep->can_isoc) {
+ DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
+ ep->ep.name);
+ return -EINVAL;
+ }
+
+ /*
+ * Bits 11:12 specify number of _additional_
+ * transactions per microframe.
+ */
+ nr_trans = usb_endpoint_maxp_mult(desc);
+ if (nr_trans > 3)
+ return -EINVAL;
+
+ ep->is_isoc = 1;
+ ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
+ ep->ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
+
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
+ break;
+ }
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ ep->ep.desc = desc;
+ ep->ep.maxpacket = maxpacket;
+
+ usba_ep_writel(ep, CFG, ep->ept_cfg);
+ usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+
+ if (ep->can_dma) {
+ u32 ctrl;
+
+ usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index) |
+ USBA_BF(DMA_INT, 1 << ep->index));
+ ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
+ usba_ep_writel(ep, CTL_ENB, ctrl);
+ } else {
+ usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index));
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
+ (unsigned long)usba_ep_readl(ep, CFG));
+ DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
+ (unsigned long)usba_int_enb_get(udc));
+
+ return 0;
+}
+
+static int usba_ep_disable(struct usb_ep *_ep)
+{
+ struct usba_ep *ep = to_usba_ep(_ep);
+ struct usba_udc *udc = ep->udc;
+ LIST_HEAD(req_list);
+ unsigned long flags;
+
+ DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!ep->ep.desc) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
+ return -EINVAL;
+ }
+ ep->ep.desc = NULL;
+
+ list_splice_init(&ep->queue, &req_list);
+ if (ep->can_dma) {
+ usba_dma_writel(ep, CONTROL, 0);
+ usba_dma_writel(ep, ADDRESS, 0);
+ usba_dma_readl(ep, STATUS);
+ }
+ usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
+ usba_int_enb_clear(udc, USBA_BF(EPT_INT, 1 << ep->index));
+
+ request_complete_list(ep, &req_list, -ESHUTDOWN);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static struct usb_request *
+usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct usba_request *req;
+
+ DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void
+usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct usba_request *req = to_usba_req(_req);
+
+ DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
+
+ kfree(req);
+}
+
+static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
+ struct usba_request *req, gfp_t gfp_flags)
+{
+ unsigned long flags;
+ int ret;
+
+ DBG(DBG_DMA, "%s: req l/%u d/%pad %c%c%c\n",
+ ep->ep.name, req->req.length, &req->req.dma,
+ req->req.zero ? 'Z' : 'z',
+ req->req.short_not_ok ? 'S' : 's',
+ req->req.no_interrupt ? 'I' : 'i');
+
+ if (req->req.length > 0x10000) {
+ /* Lengths from 0 to 65536 (inclusive) are supported */
+ DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
+ return -EINVAL;
+ }
+
+ ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
+ if (ret)
+ return ret;
+
+ req->using_dma = 1;
+ req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
+ | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
+ | USBA_DMA_END_BUF_EN;
+
+ if (!ep->is_in)
+ req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+
+ /*
+ * Add this request to the queue and submit for DMA if
+ * possible. Check if we're still alive first -- we may have
+ * received a reset since last time we checked.
+ */
+ ret = -ESHUTDOWN;
+ spin_lock_irqsave(&udc->lock, flags);
+ if (ep->ep.desc) {
+ if (list_empty(&ep->queue))
+ submit_request(ep, req);
+
+ list_add_tail(&req->queue, &ep->queue);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
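+
+/*
+ * For a 512-byte OUT request, for example, the control word built
+ * above amounts to DMA_BUF_LEN=512 | CH_EN | END_BUF_EN | END_BUF_IE
+ * | END_TR_EN | END_TR_IE: the channel stops and interrupts either
+ * when the buffer fills or when the host ends the transfer early with
+ * a short packet.  IN requests omit the END_TR_* bits because the
+ * device decides where its own data ends.
+ */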
+
+static int
+usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct usba_request *req = to_usba_req(_req);
+ struct usba_ep *ep = to_usba_ep(_ep);
+ struct usba_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
+ ep->ep.name, req, _req->length);
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
+ !ep->ep.desc)
+ return -ESHUTDOWN;
+
+ req->submitted = 0;
+ req->using_dma = 0;
+ req->last_transaction = 0;
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ if (ep->can_dma)
+ return queue_dma(udc, ep, req, gfp_flags);
+
+ /* May have received a reset since last time we checked */
+ ret = -ESHUTDOWN;
+ spin_lock_irqsave(&udc->lock, flags);
+ if (ep->ep.desc) {
+ list_add_tail(&req->queue, &ep->queue);
+
+ if ((!ep_is_control(ep) && ep->is_in) ||
+ (ep_is_control(ep)
+ && (ep->state == DATA_STAGE_IN
+ || ep->state == STATUS_STAGE_IN)))
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
+ else
+ usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
+
+static void
+usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
+{
+ req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
+}
+
+static int stop_dma(struct usba_ep *ep, u32 *pstatus)
+{
+ unsigned int timeout;
+ u32 status;
+
+ /*
+ * Stop the DMA controller. When writing both CH_EN
+ * and LINK to 0, the other bits are not affected.
+ */
+ usba_dma_writel(ep, CONTROL, 0);
+
+ /* Wait for the FIFO to empty */
+ for (timeout = 40; timeout; --timeout) {
+ status = usba_dma_readl(ep, STATUS);
+ if (!(status & USBA_DMA_CH_EN))
+ break;
+ udelay(1);
+ }
+
+ if (pstatus)
+ *pstatus = status;
+
+ if (timeout == 0) {
+ dev_err(&ep->udc->pdev->dev,
+ "%s: timed out waiting for DMA FIFO to empty\n",
+ ep->ep.name);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct usba_ep *ep = to_usba_ep(_ep);
+ struct usba_udc *udc = ep->udc;
+ struct usba_request *req = NULL;
+ struct usba_request *iter;
+ unsigned long flags;
+ u32 status;
+
+ DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
+ ep->ep.name, _req);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+
+ if (!req) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
+ if (req->using_dma) {
+ /*
+ * If this request is currently being transferred,
+ * stop the DMA controller and reset the FIFO.
+ */
+ if (ep->queue.next == &req->queue) {
+ status = usba_dma_readl(ep, STATUS);
+ if (status & USBA_DMA_CH_EN)
+ stop_dma(ep, &status);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+ ep->last_dma_status = status;
+#endif
+
+ usba_writel(udc, EPT_RST, 1 << ep->index);
+
+ usba_update_req(ep, req, status);
+ }
+ }
+
+ /*
+ * Errors should stop the queue from advancing until the
+ * completion function returns.
+ */
+ list_del_init(&req->queue);
+
+ request_complete(ep, req, -ECONNRESET);
+
+ /* Process the next request if any */
+ submit_next_request(ep);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usba_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct usba_ep *ep = to_usba_ep(_ep);
+ struct usba_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret = 0;
+
+ DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
+ value ? "set" : "clear");
+
+ if (!ep->ep.desc) {
+ DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
+ ep->ep.name);
+ return -ENODEV;
+ }
+ if (ep->is_isoc) {
+ DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
+ ep->ep.name);
+ return -ENOTTY;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /*
+ * We can't halt IN endpoints while there is still data to be
+ * transferred.
+ */
+ if (!list_empty(&ep->queue)
+ || ((value && ep->is_in && (usba_ep_readl(ep, STA)
+ & USBA_BF(BUSY_BANKS, -1L))))) {
+ ret = -EAGAIN;
+ } else {
+ if (value)
+ usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
+ else
+ usba_ep_writel(ep, CLR_STA,
+ USBA_FORCE_STALL | USBA_TOGGLE_CLR);
+ usba_ep_readl(ep, STA);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
+
+static int usba_ep_fifo_status(struct usb_ep *_ep)
+{
+ struct usba_ep *ep = to_usba_ep(_ep);
+
+ return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
+}
+
+static void usba_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct usba_ep *ep = to_usba_ep(_ep);
+ struct usba_udc *udc = ep->udc;
+
+ usba_writel(udc, EPT_RST, 1 << ep->index);
+}
+
+static const struct usb_ep_ops usba_ep_ops = {
+ .enable = usba_ep_enable,
+ .disable = usba_ep_disable,
+ .alloc_request = usba_ep_alloc_request,
+ .free_request = usba_ep_free_request,
+ .queue = usba_ep_queue,
+ .dequeue = usba_ep_dequeue,
+ .set_halt = usba_ep_set_halt,
+ .fifo_status = usba_ep_fifo_status,
+ .fifo_flush = usba_ep_fifo_flush,
+};
+
+static int usba_udc_get_frame(struct usb_gadget *gadget)
+{
+ struct usba_udc *udc = to_usba_udc(gadget);
+
+ return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
+}
+
+static int usba_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct usba_udc *udc = to_usba_udc(gadget);
+ unsigned long flags;
+ u32 ctrl;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
+ ctrl = usba_readl(udc, CTRL);
+ usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
+
+static int
+usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ struct usba_udc *udc = to_usba_udc(gadget);
+ unsigned long flags;
+
+ gadget->is_selfpowered = (is_selfpowered != 0);
+ spin_lock_irqsave(&udc->lock, flags);
+ if (is_selfpowered)
+ udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
+ else
+ udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int atmel_usba_pullup(struct usb_gadget *gadget, int is_on);
+static int atmel_usba_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+static int atmel_usba_stop(struct usb_gadget *gadget);
+
+static struct usb_ep *atmel_usba_match_ep(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ struct usb_ep *_ep;
+ struct usba_ep *ep;
+
+ /* Look at endpoints until an unclaimed one looks usable */
+ list_for_each_entry(_ep, &gadget->ep_list, ep_list) {
+ if (usb_gadget_ep_match_desc(gadget, _ep, desc, ep_comp))
+ goto found_ep;
+ }
+ /* Fail */
+ return NULL;
+
+found_ep:
+
+ if (fifo_mode == 0) {
+ /* Optimize hw fifo size based on ep type and other info */
+ ep = to_usba_ep(_ep);
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ep->nr_banks = 1;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ ep->fifo_size = 1024;
+ if (ep->udc->ep_prealloc)
+ ep->nr_banks = 2;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ ep->fifo_size = 512;
+ if (ep->udc->ep_prealloc)
+ ep->nr_banks = 1;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ if (desc->wMaxPacketSize == 0)
+ ep->fifo_size =
+ roundup_pow_of_two(_ep->maxpacket_limit);
+ else
+ ep->fifo_size =
+ roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize));
+ if (ep->udc->ep_prealloc)
+ ep->nr_banks = 1;
+ break;
+ }
+
+ /* It might be a little bit late to set this */
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
+
+ /* Generate ept_cfg based on FIFO size and number of banks */
+ if (ep->fifo_size <= 8)
+ ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
+ else
+ /* LSB is bit 1, not 0 */
+ ep->ept_cfg =
+ USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);
+
+ ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
+ }
+
+ return _ep;
+}
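+
+/*
+ * EPT_SIZE encodes log2(fifo_size) - 3: a 512-byte FIFO yields
+ * fls(512 - 1) - 3 = 9 - 3 = 6, while the minimum 8-byte FIFO is
+ * the special-cased USBA_EPT_SIZE_8 (value 0).
+ */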
+
+static const struct usb_gadget_ops usba_udc_ops = {
+ .get_frame = usba_udc_get_frame,
+ .wakeup = usba_udc_wakeup,
+ .set_selfpowered = usba_udc_set_selfpowered,
+ .pullup = atmel_usba_pullup,
+ .udc_start = atmel_usba_start,
+ .udc_stop = atmel_usba_stop,
+ .match_ep = atmel_usba_match_ep,
+};
+
+static struct usb_endpoint_descriptor usba_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64),
+ /* FIXME: I have no idea what to put here */
+ .bInterval = 1,
+};
+
+static const struct usb_gadget usba_gadget_template = {
+ .ops = &usba_udc_ops,
+ .max_speed = USB_SPEED_HIGH,
+ .name = "atmel_usba_udc",
+};
+
+/*
+ * Called with interrupts disabled and udc->lock held.
+ */
+static void reset_all_endpoints(struct usba_udc *udc)
+{
+ struct usba_ep *ep;
+ struct usba_request *req, *tmp_req;
+
+ usba_writel(udc, EPT_RST, ~0UL);
+
+ ep = to_usba_ep(udc->gadget.ep0);
+ list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
+ list_del_init(&req->queue);
+ request_complete(ep, req, -ECONNRESET);
+ }
+}
+
+static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
+{
+ struct usba_ep *ep;
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return to_usba_ep(udc->gadget.ep0);
+
+	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+ u8 bEndpointAddress;
+
+ if (!ep->ep.desc)
+ continue;
+ bEndpointAddress = ep->ep.desc->bEndpointAddress;
+ if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+ continue;
+ if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
+ == (wIndex & USB_ENDPOINT_NUMBER_MASK))
+ return ep;
+ }
+
+ return NULL;
+}
+
+/* Called with interrupts disabled and udc->lock held */
+static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
+{
+ usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
+ ep->state = WAIT_FOR_SETUP;
+}
+
+static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
+{
+ if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
+ return 1;
+ return 0;
+}
+
+static inline void set_address(struct usba_udc *udc, unsigned int addr)
+{
+ u32 regval;
+
+ DBG(DBG_BUS, "setting address %u...\n", addr);
+ regval = usba_readl(udc, CTRL);
+ regval = USBA_BFINS(DEV_ADDR, addr, regval);
+ usba_writel(udc, CTRL, regval);
+}
+
+static int do_test_mode(struct usba_udc *udc)
+{
+ static const char test_packet_buffer[] = {
+ /* JKJKJKJK * 9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK * 8 */
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+		/* JJJJKKKK * 8 */
+ 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+ /* JJJJJJJKKKKKKK * 8 */
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ /* JJJJJJJK * 8 */
+ 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
+ /* {JKKKKKKK * 10}, JK */
+ 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
+ };
+ struct usba_ep *ep;
+ struct device *dev = &udc->pdev->dev;
+ int test_mode;
+
+ test_mode = udc->test_mode;
+
+ /* Start from a clean slate */
+ reset_all_endpoints(udc);
+
+ switch (test_mode) {
+ case 0x0100:
+ /* Test_J */
+ usba_writel(udc, TST, USBA_TST_J_MODE);
+ dev_info(dev, "Entering Test_J mode...\n");
+ break;
+ case 0x0200:
+ /* Test_K */
+ usba_writel(udc, TST, USBA_TST_K_MODE);
+ dev_info(dev, "Entering Test_K mode...\n");
+ break;
+ case 0x0300:
+ /*
+ * Test_SE0_NAK: Force high-speed mode and set up ep0
+ * for Bulk IN transfers
+ */
+ ep = &udc->usba_ep[0];
+ usba_writel(udc, TST,
+ USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
+ usba_ep_writel(ep, CFG,
+ USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
+ | USBA_EPT_DIR_IN
+ | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
+ | USBA_BF(BK_NUMBER, 1));
+ if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
+ set_protocol_stall(udc, ep);
+ dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
+ } else {
+ usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+ dev_info(dev, "Entering Test_SE0_NAK mode...\n");
+ }
+ break;
+ case 0x0400:
+ /* Test_Packet */
+ ep = &udc->usba_ep[0];
+ usba_ep_writel(ep, CFG,
+ USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
+ | USBA_EPT_DIR_IN
+ | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
+ | USBA_BF(BK_NUMBER, 1));
+ if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
+ set_protocol_stall(udc, ep);
+ dev_err(dev, "Test_Packet: ep0 not mapped\n");
+ } else {
+ usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
+ usba_writel(udc, TST, USBA_TST_PKT_MODE);
+ memcpy_toio(ep->fifo, test_packet_buffer,
+ sizeof(test_packet_buffer));
+ usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+ dev_info(dev, "Entering Test_Packet mode...\n");
+ }
+ break;
+ default:
+ dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
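+
+/*
+ * Illustrative note (per USB 2.0 section 9.4.9): the test selector is
+ * carried in the high byte of wIndex of SET_FEATURE(TEST_MODE), so
+ * selector 1 (Test_J) arrives as wIndex 0x0100, selector 2 (Test_K) as
+ * 0x0200, and so on, which is what the switch above matches against.
+ */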
+
+/* Avoid overly long expressions */
+static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
+{
+ if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
+ return true;
+ return false;
+}
+
+static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
+{
+ if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
+ return true;
+ return false;
+}
+
+static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
+{
+ if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
+ return true;
+ return false;
+}
+
+static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
+ struct usb_ctrlrequest *crq)
+{
+ int retval = 0;
+
+ switch (crq->bRequest) {
+ case USB_REQ_GET_STATUS: {
+ u16 status;
+
+ if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
+ status = cpu_to_le16(udc->devstatus);
+ } else if (crq->bRequestType
+ == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
+ status = cpu_to_le16(0);
+ } else if (crq->bRequestType
+ == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
+ struct usba_ep *target;
+
+ target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
+ if (!target)
+ goto stall;
+
+ status = 0;
+ if (is_stalled(udc, target))
+ status |= cpu_to_le16(1);
+ } else
+ goto delegate;
+
+ /* Write directly to the FIFO. No queueing is done. */
+ if (crq->wLength != cpu_to_le16(sizeof(status)))
+ goto stall;
+ ep->state = DATA_STAGE_IN;
+ writew_relaxed(status, ep->fifo);
+ usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
+ break;
+ }
+
+ case USB_REQ_CLEAR_FEATURE: {
+ if (crq->bRequestType == USB_RECIP_DEVICE) {
+ if (feature_is_dev_remote_wakeup(crq))
+ udc->devstatus
+ &= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
+ else
+ /* Can't CLEAR_FEATURE TEST_MODE */
+ goto stall;
+ } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
+ struct usba_ep *target;
+
+ if (crq->wLength != cpu_to_le16(0)
+ || !feature_is_ep_halt(crq))
+ goto stall;
+ target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
+ if (!target)
+ goto stall;
+
+ usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
+ if (target->index != 0)
+ usba_ep_writel(target, CLR_STA,
+ USBA_TOGGLE_CLR);
+ } else {
+ goto delegate;
+ }
+
+ send_status(udc, ep);
+ break;
+ }
+
+ case USB_REQ_SET_FEATURE: {
+ if (crq->bRequestType == USB_RECIP_DEVICE) {
+ if (feature_is_dev_test_mode(crq)) {
+ send_status(udc, ep);
+ ep->state = STATUS_STAGE_TEST;
+ udc->test_mode = le16_to_cpu(crq->wIndex);
+ return 0;
+ } else if (feature_is_dev_remote_wakeup(crq)) {
+ udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+ } else {
+ goto stall;
+ }
+ } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
+ struct usba_ep *target;
+
+ if (crq->wLength != cpu_to_le16(0)
+ || !feature_is_ep_halt(crq))
+ goto stall;
+
+ target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
+ if (!target)
+ goto stall;
+
+ usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
+ } else
+ goto delegate;
+
+ send_status(udc, ep);
+ break;
+ }
+
+ case USB_REQ_SET_ADDRESS:
+ if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
+ goto delegate;
+
+ set_address(udc, le16_to_cpu(crq->wValue));
+ send_status(udc, ep);
+ ep->state = STATUS_STAGE_ADDR;
+ break;
+
+ default:
+delegate:
+ spin_unlock(&udc->lock);
+ retval = udc->driver->setup(&udc->gadget, crq);
+ spin_lock(&udc->lock);
+ }
+
+ return retval;
+
+stall:
+ pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
+ "halting endpoint...\n",
+ ep->ep.name, crq->bRequestType, crq->bRequest,
+ le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
+ le16_to_cpu(crq->wLength));
+ set_protocol_stall(udc, ep);
+ return -1;
+}
+
+static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
+{
+ struct usba_request *req;
+ u32 epstatus;
+ u32 epctrl;
+
+restart:
+ epstatus = usba_ep_readl(ep, STA);
+ epctrl = usba_ep_readl(ep, CTL);
+
+ DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
+ ep->ep.name, ep->state, epstatus, epctrl);
+
+ req = NULL;
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct usba_request, queue);
+
+ if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
+ if (req->submitted)
+ next_fifo_transaction(ep, req);
+ else
+ submit_request(ep, req);
+
+ if (req->last_transaction) {
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+ }
+ goto restart;
+ }
+ if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
+ usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);
+
+ switch (ep->state) {
+ case DATA_STAGE_IN:
+ usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ ep->state = STATUS_STAGE_OUT;
+ break;
+ case STATUS_STAGE_ADDR:
+ /* Activate our new address */
+ usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
+ | USBA_FADDR_EN));
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ ep->state = WAIT_FOR_SETUP;
+ break;
+ case STATUS_STAGE_IN:
+ if (req) {
+ list_del_init(&req->queue);
+ request_complete(ep, req, 0);
+ submit_next_request(ep);
+ }
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ ep->state = WAIT_FOR_SETUP;
+ break;
+ case STATUS_STAGE_TEST:
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ ep->state = WAIT_FOR_SETUP;
+ if (do_test_mode(udc))
+ set_protocol_stall(udc, ep);
+ break;
+ default:
+ pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
+ "halting endpoint...\n",
+ ep->ep.name, ep->state);
+ set_protocol_stall(udc, ep);
+ break;
+ }
+
+ goto restart;
+ }
+ if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
+ switch (ep->state) {
+ case STATUS_STAGE_OUT:
+ usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+ usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+
+ if (req) {
+ list_del_init(&req->queue);
+ request_complete(ep, req, 0);
+ }
+ ep->state = WAIT_FOR_SETUP;
+ break;
+
+ case DATA_STAGE_OUT:
+ receive_data(ep);
+ break;
+
+ default:
+ usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
+ usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+ pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
+ "halting endpoint...\n",
+ ep->ep.name, ep->state);
+ set_protocol_stall(udc, ep);
+ break;
+ }
+
+ goto restart;
+ }
+ if (epstatus & USBA_RX_SETUP) {
+ union {
+ struct usb_ctrlrequest crq;
+ unsigned long data[2];
+ } crq;
+ unsigned int pkt_len;
+ int ret;
+
+ if (ep->state != WAIT_FOR_SETUP) {
+			/*
+			 * Didn't expect a SETUP packet at this
+			 * point. Clean up any pending requests (which
+			 * may already have completed successfully).
+			 */
+ int status = -EPROTO;
+
+ /*
+ * RXRDY and TXCOMP are dropped when SETUP
+ * packets arrive. Just pretend we received
+ * the status packet.
+ */
+ if (ep->state == STATUS_STAGE_OUT
+ || ep->state == STATUS_STAGE_IN) {
+ usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
+ status = 0;
+ }
+
+ if (req) {
+ list_del_init(&req->queue);
+ request_complete(ep, req, status);
+ }
+ }
+
+ pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
+ DBG(DBG_HW, "Packet length: %u\n", pkt_len);
+ if (pkt_len != sizeof(crq)) {
+ pr_warn("udc: Invalid packet length %u (expected %zu)\n",
+ pkt_len, sizeof(crq));
+ set_protocol_stall(udc, ep);
+ return;
+ }
+
+ DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
+ memcpy_fromio(crq.data, ep->fifo, sizeof(crq));
+
+		/*
+		 * Free up one bank in the FIFO so that we can
+		 * generate or receive a reply right away.
+		 */
+ usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);
+
+ /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
+ ep->state, crq.crq.bRequestType,
+ crq.crq.bRequest); */
+
+ if (crq.crq.bRequestType & USB_DIR_IN) {
+ /*
+ * The USB 2.0 spec states that "if wLength is
+ * zero, there is no data transfer phase."
+ * However, testusb #14 seems to actually
+ * expect a data phase even if wLength = 0...
+ */
+ ep->state = DATA_STAGE_IN;
+ } else {
+ if (crq.crq.wLength != cpu_to_le16(0))
+ ep->state = DATA_STAGE_OUT;
+ else
+ ep->state = STATUS_STAGE_IN;
+ }
+
+ ret = -1;
+ if (ep->index == 0)
+ ret = handle_ep0_setup(udc, ep, &crq.crq);
+ else {
+ spin_unlock(&udc->lock);
+ ret = udc->driver->setup(&udc->gadget, &crq.crq);
+ spin_lock(&udc->lock);
+ }
+
+ DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
+ crq.crq.bRequestType, crq.crq.bRequest,
+ le16_to_cpu(crq.crq.wLength), ep->state, ret);
+
+ if (ret < 0) {
+ /* Let the host know that we failed */
+ set_protocol_stall(udc, ep);
+ }
+ }
+}
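+
+/*
+ * Rough control-transfer walkthrough (illustrative): a GET_DESCRIPTOR
+ * SETUP arrives as RX_SETUP (WAIT_FOR_SETUP -> DATA_STAGE_IN), IN data
+ * is fed to the FIFO until the final TX_COMPLETE fires
+ * (-> STATUS_STAGE_OUT), and the host's zero-length OUT status packet
+ * shows up as RX_BK_RDY, returning the endpoint to WAIT_FOR_SETUP.
+ */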
+
+static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
+{
+ struct usba_request *req;
+ u32 epstatus;
+ u32 epctrl;
+
+ epstatus = usba_ep_readl(ep, STA);
+ epctrl = usba_ep_readl(ep, CTL);
+
+ DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);
+
+ while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
+ DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);
+
+ if (list_empty(&ep->queue)) {
+ dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+ return;
+ }
+
+ req = list_entry(ep->queue.next, struct usba_request, queue);
+
+ if (req->using_dma) {
+ /* Send a zero-length packet */
+ usba_ep_writel(ep, SET_STA,
+ USBA_TX_PK_RDY);
+ usba_ep_writel(ep, CTL_DIS,
+ USBA_TX_PK_RDY);
+ list_del_init(&req->queue);
+ submit_next_request(ep);
+ request_complete(ep, req, 0);
+ } else {
+ if (req->submitted)
+ next_fifo_transaction(ep, req);
+ else
+ submit_request(ep, req);
+
+ if (req->last_transaction) {
+ list_del_init(&req->queue);
+ submit_next_request(ep);
+ request_complete(ep, req, 0);
+ }
+ }
+
+ epstatus = usba_ep_readl(ep, STA);
+ epctrl = usba_ep_readl(ep, CTL);
+ }
+ if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
+ DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
+ receive_data(ep);
+ }
+}
+
+static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
+{
+ struct usba_request *req;
+ u32 status, control, pending;
+
+ status = usba_dma_readl(ep, STATUS);
+ control = usba_dma_readl(ep, CONTROL);
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+ ep->last_dma_status = status;
+#endif
+ pending = status & control;
+ DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);
+
+ if (status & USBA_DMA_CH_EN) {
+ dev_err(&udc->pdev->dev,
+ "DMA_CH_EN is set after transfer is finished!\n");
+ dev_err(&udc->pdev->dev,
+ "status=%#08x, pending=%#08x, control=%#08x\n",
+ status, pending, control);
+
+ /*
+ * try to pretend nothing happened. We might have to
+ * do something here...
+ */
+ }
+
+ if (list_empty(&ep->queue))
+ /* Might happen if a reset comes along at the right moment */
+ return;
+
+ if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
+ req = list_entry(ep->queue.next, struct usba_request, queue);
+ usba_update_req(ep, req, status);
+
+ list_del_init(&req->queue);
+ submit_next_request(ep);
+ request_complete(ep, req, 0);
+ }
+}
+
+static int start_clock(struct usba_udc *udc);
+static void stop_clock(struct usba_udc *udc);
+
+static irqreturn_t usba_udc_irq(int irq, void *devid)
+{
+ struct usba_udc *udc = devid;
+ u32 status, int_enb;
+ u32 dma_status;
+ u32 ep_status;
+
+ spin_lock(&udc->lock);
+
+ int_enb = usba_int_enb_get(udc);
+ status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
+ DBG(DBG_INT, "irq, status=%#08x\n", status);
+
+ if (status & USBA_DET_SUSPEND) {
+ usba_writel(udc, INT_CLR, USBA_DET_SUSPEND|USBA_WAKE_UP);
+ usba_int_enb_set(udc, USBA_WAKE_UP);
+ usba_int_enb_clear(udc, USBA_DET_SUSPEND);
+ udc->suspended = true;
+ toggle_bias(udc, 0);
+ udc->bias_pulse_needed = true;
+ stop_clock(udc);
+ DBG(DBG_BUS, "Suspend detected\n");
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN
+ && udc->driver && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (status & USBA_WAKE_UP) {
+ start_clock(udc);
+ toggle_bias(udc, 1);
+ usba_writel(udc, INT_CLR, USBA_WAKE_UP);
+ DBG(DBG_BUS, "Wake Up CPU detected\n");
+ }
+
+ if (status & USBA_END_OF_RESUME) {
+ udc->suspended = false;
+ usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
+ usba_int_enb_clear(udc, USBA_WAKE_UP);
+ usba_int_enb_set(udc, USBA_DET_SUSPEND);
+ generate_bias_pulse(udc);
+ DBG(DBG_BUS, "Resume detected\n");
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN
+ && udc->driver && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ dma_status = USBA_BFEXT(DMA_INT, status);
+ if (dma_status) {
+ int i;
+
+ usba_int_enb_set(udc, USBA_DET_SUSPEND);
+
+ for (i = 1; i <= USBA_NR_DMAS; i++)
+ if (dma_status & (1 << i))
+ usba_dma_irq(udc, &udc->usba_ep[i]);
+ }
+
+ ep_status = USBA_BFEXT(EPT_INT, status);
+ if (ep_status) {
+ int i;
+
+ usba_int_enb_set(udc, USBA_DET_SUSPEND);
+
+ for (i = 0; i < udc->num_ep; i++)
+ if (ep_status & (1 << i)) {
+ if (ep_is_control(&udc->usba_ep[i]))
+ usba_control_irq(udc, &udc->usba_ep[i]);
+ else
+ usba_ep_irq(udc, &udc->usba_ep[i]);
+ }
+ }
+
+ if (status & USBA_END_OF_RESET) {
+ struct usba_ep *ep0, *ep;
+ int i;
+
+ usba_writel(udc, INT_CLR,
+ USBA_END_OF_RESET|USBA_END_OF_RESUME
+ |USBA_DET_SUSPEND|USBA_WAKE_UP);
+ generate_bias_pulse(udc);
+ reset_all_endpoints(udc);
+
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ spin_lock(&udc->lock);
+ }
+
+ if (status & USBA_HIGH_SPEED)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+ DBG(DBG_BUS, "%s bus reset detected\n",
+ usb_speed_string(udc->gadget.speed));
+
+ ep0 = &udc->usba_ep[0];
+ ep0->ep.desc = &usba_ep0_desc;
+ ep0->state = WAIT_FOR_SETUP;
+ usba_ep_writel(ep0, CFG,
+ (USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
+ | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
+ | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
+ usba_ep_writel(ep0, CTL_ENB,
+ USBA_EPT_ENABLE | USBA_RX_SETUP);
+
+ /* If we get reset while suspended... */
+ udc->suspended = false;
+ usba_int_enb_clear(udc, USBA_WAKE_UP);
+
+ usba_int_enb_set(udc, USBA_BF(EPT_INT, 1) |
+ USBA_DET_SUSPEND | USBA_END_OF_RESUME);
+
+ /*
+ * Unclear why we hit this irregularly, e.g. in usbtest,
+ * but it's clearly harmless...
+ */
+ if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
+ dev_err(&udc->pdev->dev,
+ "ODD: EP0 configuration is invalid!\n");
+
+ /* Preallocate other endpoints */
+ for (i = 1; i < udc->num_ep; i++) {
+ ep = &udc->usba_ep[i];
+ if (ep->ep.claimed) {
+ usba_ep_writel(ep, CFG, ep->ept_cfg);
+ if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
+ dev_err(&udc->pdev->dev,
+ "ODD: EP%d configuration is invalid!\n", i);
+ }
+ }
+ }
+
+ spin_unlock(&udc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int start_clock(struct usba_udc *udc)
+{
+ int ret;
+
+ if (udc->clocked)
+ return 0;
+
+ pm_stay_awake(&udc->pdev->dev);
+
+ ret = clk_prepare_enable(udc->pclk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(udc->hclk);
+ if (ret) {
+ clk_disable_unprepare(udc->pclk);
+ return ret;
+ }
+
+ udc->clocked = true;
+ return 0;
+}
+
+static void stop_clock(struct usba_udc *udc)
+{
+ if (!udc->clocked)
+ return;
+
+ clk_disable_unprepare(udc->hclk);
+ clk_disable_unprepare(udc->pclk);
+
+ udc->clocked = false;
+
+ pm_relax(&udc->pdev->dev);
+}
+
+static int usba_start(struct usba_udc *udc)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = start_clock(udc);
+ if (ret)
+ return ret;
+
+ if (udc->suspended)
+ return 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ toggle_bias(udc, 1);
+ usba_writel(udc, CTRL, USBA_ENABLE_MASK);
+ /* Clear all requested and pending interrupts... */
+ usba_writel(udc, INT_ENB, 0);
+ udc->int_enb_cache = 0;
+ usba_writel(udc, INT_CLR,
+ USBA_END_OF_RESET|USBA_END_OF_RESUME
+ |USBA_DET_SUSPEND|USBA_WAKE_UP);
+ /* ...and enable just 'reset' IRQ to get us started */
+ usba_int_enb_set(udc, USBA_END_OF_RESET);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static void usba_stop(struct usba_udc *udc)
+{
+ unsigned long flags;
+
+ if (udc->suspended)
+ return;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ reset_all_endpoints(udc);
+
+ /* This will also disable the DP pullup */
+ toggle_bias(udc, 0);
+ usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ stop_clock(udc);
+}
+
+static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
+{
+ struct usba_udc *udc = devid;
+ int vbus;
+
+ /* debounce */
+ udelay(10);
+
+ mutex_lock(&udc->vbus_mutex);
+
+ vbus = vbus_is_present(udc);
+ if (vbus != udc->vbus_prev) {
+ if (vbus) {
+ usba_start(udc);
+ } else {
+ udc->suspended = false;
+ if (udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+
+ usba_stop(udc);
+ }
+ udc->vbus_prev = vbus;
+ }
+
+ mutex_unlock(&udc->vbus_mutex);
+ return IRQ_HANDLED;
+}
+
+static int atmel_usba_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ctrl = usba_readl(udc, CTRL);
+ if (is_on)
+ ctrl &= ~USBA_DETACH;
+ else
+ ctrl |= USBA_DETACH;
+ usba_writel(udc, CTRL, ctrl);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int atmel_usba_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ int ret;
+ struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
+ udc->driver = driver;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ mutex_lock(&udc->vbus_mutex);
+
+ if (udc->vbus_pin)
+ enable_irq(gpiod_to_irq(udc->vbus_pin));
+
+ /* If Vbus is present, enable the controller and wait for reset */
+ udc->vbus_prev = vbus_is_present(udc);
+ if (udc->vbus_prev) {
+ ret = usba_start(udc);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&udc->vbus_mutex);
+ return 0;
+
+err:
+ if (udc->vbus_pin)
+ disable_irq(gpiod_to_irq(udc->vbus_pin));
+
+ mutex_unlock(&udc->vbus_mutex);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+ udc->driver = NULL;
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+
+static int atmel_usba_stop(struct usb_gadget *gadget)
+{
+ struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
+
+ if (udc->vbus_pin)
+ disable_irq(gpiod_to_irq(udc->vbus_pin));
+
+ udc->suspended = false;
+ usba_stop(udc);
+
+ udc->driver = NULL;
+
+ return 0;
+}
+
+static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
+{
+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
+ is_on ? AT91_PMC_BIASEN : 0);
+}
+
+static void at91sam9g45_pulse_bias(struct usba_udc *udc)
+{
+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
+ AT91_PMC_BIASEN);
+}
+
+static const struct usba_udc_errata at91sam9rl_errata = {
+ .toggle_bias = at91sam9rl_toggle_bias,
+};
+
+static const struct usba_udc_errata at91sam9g45_errata = {
+ .pulse_bias = at91sam9g45_pulse_bias,
+};
+
+static const struct usba_ep_config ep_config_sam9[] = {
+ { .nr_banks = 1 }, /* ep 0 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
+ { .nr_banks = 3, .can_dma = 1 }, /* ep 3 */
+ { .nr_banks = 3, .can_dma = 1 }, /* ep 4 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
+};
+
+static const struct usba_ep_config ep_config_sama5[] = {
+ { .nr_banks = 1 }, /* ep 0 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 3 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 4 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 7 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 8 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 9 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 10 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 11 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 12 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 13 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 14 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 15 */
+};
+
+static const struct usba_udc_config udc_at91sam9rl_cfg = {
+ .errata = &at91sam9rl_errata,
+ .config = ep_config_sam9,
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+ .ep_prealloc = true,
+};
+
+static const struct usba_udc_config udc_at91sam9g45_cfg = {
+ .errata = &at91sam9g45_errata,
+ .config = ep_config_sam9,
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+ .ep_prealloc = true,
+};
+
+static const struct usba_udc_config udc_sama5d3_cfg = {
+ .config = ep_config_sama5,
+ .num_ep = ARRAY_SIZE(ep_config_sama5),
+ .ep_prealloc = true,
+};
+
+static const struct usba_udc_config udc_sam9x60_cfg = {
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+ .config = ep_config_sam9,
+ .ep_prealloc = false,
+};
+
+static const struct of_device_id atmel_udc_dt_ids[] = {
+ { .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg },
+ { .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg },
+ { .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg },
+ { .compatible = "microchip,sam9x60-udc", .data = &udc_sam9x60_cfg },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
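+
+/*
+ * A minimal device tree node sketch binding against the table above;
+ * all addresses, phandles and the interrupt below are placeholders,
+ * not values from a real board. The first reg region is the FIFO
+ * window (FIFO_IOMEM_ID 0), the second the control registers
+ * (CTRL_IOMEM_ID 1), and the clock names match the probe code:
+ *
+ *	gadget@500000 {
+ *		compatible = "atmel,sama5d3-udc";
+ *		reg = <0x00500000 0x100000>, <0xf8030000 0x4000>;
+ *		interrupts = <...>;
+ *		clocks = <&udphs_clk>, <&utmi>;
+ *		clock-names = "pclk", "hclk";
+ *	};
+ */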
+
+static const struct of_device_id atmel_pmc_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g45-pmc" },
+ { .compatible = "atmel,at91sam9rl-pmc" },
+ { .compatible = "atmel,at91sam9x5-pmc" },
+ { /* sentinel */ }
+};
+
+static struct usba_ep *atmel_udc_of_init(struct platform_device *pdev,
+ struct usba_udc *udc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct device_node *pp;
+ int i, ret;
+ struct usba_ep *eps, *ep;
+ const struct usba_udc_config *udc_config;
+
+ match = of_match_node(atmel_udc_dt_ids, np);
+ if (!match)
+ return ERR_PTR(-EINVAL);
+
+ udc_config = match->data;
+ udc->ep_prealloc = udc_config->ep_prealloc;
+ udc->errata = udc_config->errata;
+ if (udc->errata) {
+ pp = of_find_matching_node_and_match(NULL, atmel_pmc_dt_ids,
+ NULL);
+ if (!pp)
+ return ERR_PTR(-ENODEV);
+
+ udc->pmc = syscon_node_to_regmap(pp);
+ of_node_put(pp);
+ if (IS_ERR(udc->pmc))
+ return ERR_CAST(udc->pmc);
+ }
+
+ udc->num_ep = 0;
+
+ udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus",
+ GPIOD_IN);
+ if (IS_ERR(udc->vbus_pin))
+ return ERR_CAST(udc->vbus_pin);
+
+ if (fifo_mode == 0) {
+ udc->num_ep = udc_config->num_ep;
+ } else {
+ udc->num_ep = usba_config_fifo_table(udc);
+ }
+
+ eps = devm_kcalloc(&pdev->dev, udc->num_ep, sizeof(struct usba_ep),
+ GFP_KERNEL);
+ if (!eps)
+ return ERR_PTR(-ENOMEM);
+
+ udc->gadget.ep0 = &eps[0].ep;
+
+ INIT_LIST_HEAD(&eps[0].ep.ep_list);
+
+ i = 0;
+ while (i < udc->num_ep) {
+ const struct usba_ep_config *ep_cfg = &udc_config->config[i];
+
+ ep = &eps[i];
+
+ ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : i;
+
+ /* Only the first EP is 64 bytes */
+ if (ep->index == 0)
+ ep->fifo_size = 64;
+ else
+ ep->fifo_size = 1024;
+
+ if (fifo_mode) {
+ if (ep->fifo_size < udc->fifo_cfg[i].fifo_size)
+ dev_warn(&pdev->dev,
+ "Using default max fifo-size value\n");
+ else
+ ep->fifo_size = udc->fifo_cfg[i].fifo_size;
+ }
+
+ ep->nr_banks = ep_cfg->nr_banks;
+ if (fifo_mode) {
+ if (ep->nr_banks < udc->fifo_cfg[i].nr_banks)
+ dev_warn(&pdev->dev,
+ "Using default max nb-banks value\n");
+ else
+ ep->nr_banks = udc->fifo_cfg[i].nr_banks;
+ }
+
+ ep->can_dma = ep_cfg->can_dma;
+ ep->can_isoc = ep_cfg->can_isoc;
+
+ sprintf(ep->name, "ep%d", ep->index);
+ ep->ep.name = ep->name;
+
+ ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
+ ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
+ ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
+ ep->ep.ops = &usba_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
+ ep->udc = udc;
+ INIT_LIST_HEAD(&ep->queue);
+
+ if (ep->index == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = ep->can_isoc;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
+ if (fifo_mode != 0) {
+			/*
+			 * Generate ept_cfg based on FIFO size and
+			 * number of banks
+			 */
+ if (ep->fifo_size <= 8)
+ ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
+ else
+ /* LSB is bit 1, not 0 */
+ ep->ept_cfg =
+ USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);
+
+ ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
+ }
+
+ if (i)
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+ i++;
+ }
+
+ if (i == 0) {
+ dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return eps;
+err:
+ return ERR_PTR(ret);
+}
+
+static int usba_udc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct clk *pclk, *hclk;
+ struct usba_udc *udc;
+ int irq, ret, i;
+
+ udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ udc->gadget = usba_gadget_template;
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
+ udc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(udc->regs))
+ return PTR_ERR(udc->regs);
+ dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n",
+ res, udc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
+ udc->fifo = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(udc->fifo))
+ return PTR_ERR(udc->fifo);
+ dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
+ hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(hclk))
+ return PTR_ERR(hclk);
+
+ spin_lock_init(&udc->lock);
+ mutex_init(&udc->vbus_mutex);
+ udc->pdev = pdev;
+ udc->pclk = pclk;
+ udc->hclk = hclk;
+
+ platform_set_drvdata(pdev, udc);
+
+ /* Make sure we start from a clean slate */
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
+ return ret;
+ }
+
+ usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+ clk_disable_unprepare(pclk);
+
+ udc->usba_ep = atmel_udc_of_init(pdev, udc);
+
+ toggle_bias(udc, 0);
+
+ if (IS_ERR(udc->usba_ep))
+ return PTR_ERR(udc->usba_ep);
+
+ ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0,
+ "atmel_usba_udc", udc);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
+ irq, ret);
+ return ret;
+ }
+ udc->irq = irq;
+
+ if (udc->vbus_pin) {
+ irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev,
+ gpiod_to_irq(udc->vbus_pin), NULL,
+ usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
+ "atmel_usba_udc", udc);
+ if (ret) {
+ udc->vbus_pin = NULL;
+			dev_warn(&udc->pdev->dev,
+				 "failed to request vbus irq; assuming always on\n");
+ }
+ }
+
+ ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (ret)
+ return ret;
+ device_init_wakeup(&pdev->dev, 1);
+
+ usba_init_debugfs(udc);
+ for (i = 1; i < udc->num_ep; i++)
+ usba_ep_init_debugfs(udc, &udc->usba_ep[i]);
+
+ return 0;
+}
+
+static int usba_udc_remove(struct platform_device *pdev)
+{
+ struct usba_udc *udc;
+ int i;
+
+ udc = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ usb_del_gadget_udc(&udc->gadget);
+
+ for (i = 1; i < udc->num_ep; i++)
+ usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
+ usba_cleanup_debugfs(udc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int usba_udc_suspend(struct device *dev)
+{
+ struct usba_udc *udc = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!udc->driver)
+ return 0;
+
+ mutex_lock(&udc->vbus_mutex);
+
+ if (!device_may_wakeup(dev)) {
+ udc->suspended = false;
+ usba_stop(udc);
+ goto out;
+ }
+
+ /*
+ * Device may wake up. We stay clocked if we failed
+ * to request vbus irq, assuming always on.
+ */
+ if (udc->vbus_pin) {
+ /* FIXME: right to stop here...??? */
+ usba_stop(udc);
+ enable_irq_wake(gpiod_to_irq(udc->vbus_pin));
+ }
+
+ enable_irq_wake(udc->irq);
+
+out:
+ mutex_unlock(&udc->vbus_mutex);
+ return 0;
+}
+
+static int usba_udc_resume(struct device *dev)
+{
+ struct usba_udc *udc = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!udc->driver)
+ return 0;
+
+ if (device_may_wakeup(dev)) {
+ if (udc->vbus_pin)
+ disable_irq_wake(gpiod_to_irq(udc->vbus_pin));
+
+ disable_irq_wake(udc->irq);
+ }
+
+ /* If Vbus is present, enable the controller and wait for reset */
+ mutex_lock(&udc->vbus_mutex);
+ udc->vbus_prev = vbus_is_present(udc);
+ if (udc->vbus_prev)
+ usba_start(udc);
+ mutex_unlock(&udc->vbus_mutex);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
+
+static struct platform_driver udc_driver = {
+ .probe = usba_udc_probe,
+ .remove = usba_udc_remove,
+ .driver = {
+ .name = "atmel_usba_udc",
+ .pm = &usba_udc_pm_ops,
+ .of_match_table = atmel_udc_dt_ids,
+ },
+};
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION("Atmel USBA UDC driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel_usba_udc");
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
new file mode 100644
index 000000000..620472f21
--- /dev/null
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver for the Atmel USBA high speed USB device controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ */
+#ifndef __LINUX_USB_GADGET_USBA_UDC_H__
+#define __LINUX_USB_GADGET_USBA_UDC_H__
+
+#include <linux/gpio/consumer.h>
+
+/* USB register offsets */
+#define USBA_CTRL 0x0000
+#define USBA_FNUM 0x0004
+#define USBA_INT_ENB 0x0010
+#define USBA_INT_STA 0x0014
+#define USBA_INT_CLR 0x0018
+#define USBA_EPT_RST 0x001c
+#define USBA_TST 0x00e0
+
+/* USB endpoint register offsets */
+#define USBA_EPT_CFG 0x0000
+#define USBA_EPT_CTL_ENB 0x0004
+#define USBA_EPT_CTL_DIS 0x0008
+#define USBA_EPT_CTL 0x000c
+#define USBA_EPT_SET_STA 0x0014
+#define USBA_EPT_CLR_STA 0x0018
+#define USBA_EPT_STA 0x001c
+
+/* USB DMA register offsets */
+#define USBA_DMA_NXT_DSC 0x0000
+#define USBA_DMA_ADDRESS 0x0004
+#define USBA_DMA_CONTROL 0x0008
+#define USBA_DMA_STATUS 0x000c
+
+/* Bitfields in CTRL */
+#define USBA_DEV_ADDR_OFFSET 0
+#define USBA_DEV_ADDR_SIZE 7
+#define USBA_FADDR_EN (1 << 7)
+#define USBA_EN_USBA (1 << 8)
+#define USBA_DETACH (1 << 9)
+#define USBA_REMOTE_WAKE_UP (1 << 10)
+#define USBA_PULLD_DIS (1 << 11)
+
+#define USBA_ENABLE_MASK (USBA_EN_USBA | USBA_PULLD_DIS)
+#define USBA_DISABLE_MASK USBA_DETACH
+
+/* Bitfields in FNUM */
+#define USBA_MICRO_FRAME_NUM_OFFSET 0
+#define USBA_MICRO_FRAME_NUM_SIZE 3
+#define USBA_FRAME_NUMBER_OFFSET 3
+#define USBA_FRAME_NUMBER_SIZE 11
+#define USBA_FRAME_NUM_ERROR (1 << 31)
+
+/* Bitfields in INT_ENB/INT_STA/INT_CLR */
+#define USBA_HIGH_SPEED (1 << 0)
+#define USBA_DET_SUSPEND (1 << 1)
+#define USBA_MICRO_SOF (1 << 2)
+#define USBA_SOF (1 << 3)
+#define USBA_END_OF_RESET (1 << 4)
+#define USBA_WAKE_UP (1 << 5)
+#define USBA_END_OF_RESUME (1 << 6)
+#define USBA_UPSTREAM_RESUME (1 << 7)
+#define USBA_EPT_INT_OFFSET 8
+#define USBA_EPT_INT_SIZE 16
+#define USBA_DMA_INT_OFFSET 24
+#define USBA_DMA_INT_SIZE 8
+
+/* Bitfields in EPT_RST */
+#define USBA_RST_OFFSET 0
+#define USBA_RST_SIZE 16
+
+/* Bitfields in USBA_TST */
+#define USBA_SPEED_CFG_OFFSET 0
+#define USBA_SPEED_CFG_SIZE 2
+#define USBA_TST_J_MODE (1 << 2)
+#define USBA_TST_K_MODE (1 << 3)
+#define USBA_TST_PKT_MODE (1 << 4)
+#define USBA_OPMODE2 (1 << 5)
+
+/* Bitfields in EPT_CFG */
+#define USBA_EPT_SIZE_OFFSET 0
+#define USBA_EPT_SIZE_SIZE 3
+#define USBA_EPT_DIR_IN (1 << 3)
+#define USBA_EPT_TYPE_OFFSET 4
+#define USBA_EPT_TYPE_SIZE 2
+#define USBA_BK_NUMBER_OFFSET 6
+#define USBA_BK_NUMBER_SIZE 2
+#define USBA_NB_TRANS_OFFSET 8
+#define USBA_NB_TRANS_SIZE 2
+#define USBA_EPT_MAPPED (1 << 31)
+
+/* Bitfields in EPT_CTL/EPT_CTL_ENB/EPT_CTL_DIS */
+#define USBA_EPT_ENABLE (1 << 0)
+#define USBA_AUTO_VALID (1 << 1)
+#define USBA_INTDIS_DMA (1 << 3)
+#define USBA_NYET_DIS (1 << 4)
+#define USBA_DATAX_RX (1 << 6)
+#define USBA_MDATA_RX (1 << 7)
+/* Bits 8-15 and 31 enable interrupts for respective bits in EPT_STA */
+#define USBA_BUSY_BANK_IE (1 << 18)
+
+/* Bitfields in EPT_SET_STA/EPT_CLR_STA/EPT_STA */
+#define USBA_FORCE_STALL (1 << 5)
+#define USBA_TOGGLE_CLR (1 << 6)
+#define USBA_TOGGLE_SEQ_OFFSET 6
+#define USBA_TOGGLE_SEQ_SIZE 2
+#define USBA_ERR_OVFLW (1 << 8)
+#define USBA_RX_BK_RDY (1 << 9)
+#define USBA_KILL_BANK (1 << 9)
+#define USBA_TX_COMPLETE (1 << 10)
+#define USBA_TX_PK_RDY (1 << 11)
+#define USBA_ISO_ERR_TRANS (1 << 11)
+#define USBA_RX_SETUP (1 << 12)
+#define USBA_ISO_ERR_FLOW (1 << 12)
+#define USBA_STALL_SENT (1 << 13)
+#define USBA_ISO_ERR_CRC (1 << 13)
+#define USBA_ISO_ERR_NBTRANS (1 << 13)
+#define USBA_NAK_IN (1 << 14)
+#define USBA_ISO_ERR_FLUSH (1 << 14)
+#define USBA_NAK_OUT (1 << 15)
+#define USBA_CURRENT_BANK_OFFSET 16
+#define USBA_CURRENT_BANK_SIZE 2
+#define USBA_BUSY_BANKS_OFFSET 18
+#define USBA_BUSY_BANKS_SIZE 2
+#define USBA_BYTE_COUNT_OFFSET 20
+#define USBA_BYTE_COUNT_SIZE 11
+#define USBA_SHORT_PACKET (1 << 31)
+
+/* Bitfields in DMA_CONTROL */
+#define USBA_DMA_CH_EN (1 << 0)
+#define USBA_DMA_LINK (1 << 1)
+#define USBA_DMA_END_TR_EN (1 << 2)
+#define USBA_DMA_END_BUF_EN (1 << 3)
+#define USBA_DMA_END_TR_IE (1 << 4)
+#define USBA_DMA_END_BUF_IE (1 << 5)
+#define USBA_DMA_DESC_LOAD_IE (1 << 6)
+#define USBA_DMA_BURST_LOCK (1 << 7)
+#define USBA_DMA_BUF_LEN_OFFSET 16
+#define USBA_DMA_BUF_LEN_SIZE 16
+
+/* Bitfields in DMA_STATUS */
+#define USBA_DMA_CH_ACTIVE (1 << 1)
+#define USBA_DMA_END_TR_ST (1 << 4)
+#define USBA_DMA_END_BUF_ST (1 << 5)
+#define USBA_DMA_DESC_LOAD_ST (1 << 6)
+
+/* Constants for SPEED_CFG */
+#define USBA_SPEED_CFG_NORMAL 0
+#define USBA_SPEED_CFG_FORCE_HIGH 2
+#define USBA_SPEED_CFG_FORCE_FULL 3
+
+/* Constants for EPT_SIZE */
+#define USBA_EPT_SIZE_8 0
+#define USBA_EPT_SIZE_16 1
+#define USBA_EPT_SIZE_32 2
+#define USBA_EPT_SIZE_64 3
+#define USBA_EPT_SIZE_128 4
+#define USBA_EPT_SIZE_256 5
+#define USBA_EPT_SIZE_512 6
+#define USBA_EPT_SIZE_1024 7
+
+/* Constants for EPT_TYPE */
+#define USBA_EPT_TYPE_CONTROL 0
+#define USBA_EPT_TYPE_ISO 1
+#define USBA_EPT_TYPE_BULK 2
+#define USBA_EPT_TYPE_INT 3
+
+/* Constants for BK_NUMBER */
+#define USBA_BK_NUMBER_ZERO 0
+#define USBA_BK_NUMBER_ONE 1
+#define USBA_BK_NUMBER_DOUBLE 2
+#define USBA_BK_NUMBER_TRIPLE 3
+
+/* Bit manipulation macros */
+#define USBA_BF(name, value) \
+ (((value) & ((1 << USBA_##name##_SIZE) - 1)) \
+ << USBA_##name##_OFFSET)
+#define USBA_BFEXT(name, value) \
+ (((value) >> USBA_##name##_OFFSET) \
+ & ((1 << USBA_##name##_SIZE) - 1))
+#define USBA_BFINS(name, value, old) \
+ (((old) & ~(((1 << USBA_##name##_SIZE) - 1) \
+ << USBA_##name##_OFFSET)) \
+ | USBA_BF(name, value))
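+
+/*
+ * Example expansion (illustrative): USBA_BF(EPT_SIZE, 6) evaluates to
+ * ((6 & ((1 << 3) - 1)) << 0) == 6, while USBA_BFINS(DEV_ADDR, a, v)
+ * clears bits 6:0 of v and ORs in the new 7-bit address a.
+ */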
+
+/* Register access macros */
+#define usba_readl(udc, reg) \
+ readl_relaxed((udc)->regs + USBA_##reg)
+#define usba_writel(udc, reg, value) \
+ writel_relaxed((value), (udc)->regs + USBA_##reg)
+#define usba_ep_readl(ep, reg) \
+ readl_relaxed((ep)->ep_regs + USBA_EPT_##reg)
+#define usba_ep_writel(ep, reg, value) \
+ writel_relaxed((value), (ep)->ep_regs + USBA_EPT_##reg)
+#define usba_dma_readl(ep, reg) \
+ readl_relaxed((ep)->dma_regs + USBA_DMA_##reg)
+#define usba_dma_writel(ep, reg, value) \
+ writel_relaxed((value), (ep)->dma_regs + USBA_DMA_##reg)
+
+/* Calculate base address for a given endpoint or DMA controller */
+#define USBA_EPT_BASE(x) (0x100 + (x) * 0x20)
+#define USBA_DMA_BASE(x) (0x300 + (x) * 0x10)
+#define USBA_FIFO_BASE(x) ((x) << 16)
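+
+/*
+ * For example (illustrative): endpoint 2's registers start at
+ * USBA_EPT_BASE(2) = 0x100 + 2 * 0x20 = 0x140, its DMA registers at
+ * USBA_DMA_BASE(2) = 0x320, and its FIFO window at
+ * USBA_FIFO_BASE(2) = 0x20000 within the separate FIFO resource.
+ */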
+
+/* Synth parameters */
+#define USBA_NR_DMAS 7
+
+#define EP0_FIFO_SIZE 64
+#define EP0_EPT_SIZE USBA_EPT_SIZE_64
+#define EP0_NR_BANKS 1
+
+#define FIFO_IOMEM_ID 0
+#define CTRL_IOMEM_ID 1
+
+#define DBG_ERR 0x0001 /* report all error returns */
+#define DBG_HW 0x0002 /* debug hardware initialization */
+#define DBG_GADGET 0x0004 /* calls to/from gadget driver */
+#define DBG_INT 0x0008 /* interrupts */
+#define DBG_BUS 0x0010 /* report changes in bus state */
+#define DBG_QUEUE 0x0020 /* debug request queue processing */
+#define DBG_FIFO 0x0040 /* debug FIFO contents */
+#define DBG_DMA 0x0080 /* debug DMA handling */
+#define DBG_REQ 0x0100 /* print out queued request length */
+#define DBG_ALL 0xffff
+#define DBG_NONE 0x0000
+
+#define DEBUG_LEVEL (DBG_ERR)
+
+#define DBG(level, fmt, ...) \
+ do { \
+ if ((level) & DEBUG_LEVEL) \
+ pr_debug("udc: " fmt, ## __VA_ARGS__); \
+ } while (0)
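+
+/*
+ * Usage sketch (illustrative): raising DEBUG_LEVEL to, say,
+ * (DBG_ERR | DBG_INT) would let a call such as
+ * DBG(DBG_INT, "irq, status=%#08x\n", status) reach pr_debug().
+ */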
+
+enum usba_ctrl_state {
+ WAIT_FOR_SETUP,
+ DATA_STAGE_IN,
+ DATA_STAGE_OUT,
+ STATUS_STAGE_IN,
+ STATUS_STAGE_OUT,
+ STATUS_STAGE_ADDR,
+ STATUS_STAGE_TEST,
+};
+/*
+ EP_STATE_IDLE,
+ EP_STATE_SETUP,
+ EP_STATE_IN_DATA,
+ EP_STATE_OUT_DATA,
+ EP_STATE_SET_ADDR_STATUS,
+ EP_STATE_RX_STATUS,
+ EP_STATE_TX_STATUS,
+ EP_STATE_HALT,
+*/
+
+struct usba_dma_desc {
+ dma_addr_t next;
+ dma_addr_t addr;
+ u32 ctrl;
+};
+
+struct usba_fifo_cfg {
+ u8 hw_ep_num;
+ u16 fifo_size;
+ u8 nr_banks;
+};
+
+struct usba_ep {
+ int state;
+ void __iomem *ep_regs;
+ void __iomem *dma_regs;
+ void __iomem *fifo;
+ char name[8];
+ struct usb_ep ep;
+ struct usba_udc *udc;
+
+ struct list_head queue;
+
+ u16 fifo_size;
+ u8 nr_banks;
+ u8 index;
+ unsigned int can_dma:1;
+ unsigned int can_isoc:1;
+ unsigned int is_isoc:1;
+ unsigned int is_in:1;
+ unsigned long ept_cfg;
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+ u32 last_dma_status;
+ struct dentry *debugfs_dir;
+#endif
+};
+
+struct usba_ep_config {
+ u8 nr_banks;
+ unsigned int can_dma:1;
+ unsigned int can_isoc:1;
+};
+
+struct usba_request {
+ struct usb_request req;
+ struct list_head queue;
+
+ u32 ctrl;
+
+ unsigned int submitted:1;
+ unsigned int last_transaction:1;
+ unsigned int using_dma:1;
+ unsigned int mapped:1;
+};
+
+struct usba_udc_errata {
+ void (*toggle_bias)(struct usba_udc *udc, int is_on);
+ void (*pulse_bias)(struct usba_udc *udc);
+};
+
+struct usba_udc_config {
+ const struct usba_udc_errata *errata;
+ const struct usba_ep_config *config;
+ const int num_ep;
+ const bool ep_prealloc;
+};
+
+struct usba_udc {
+ /* Protect hw registers from concurrent modifications */
+ spinlock_t lock;
+
+ /* Mutex to prevent concurrent start or stop */
+ struct mutex vbus_mutex;
+
+ void __iomem *regs;
+ void __iomem *fifo;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct platform_device *pdev;
+ const struct usba_udc_errata *errata;
+ int irq;
+ struct gpio_desc *vbus_pin;
+ int num_ep;
+ struct usba_fifo_cfg *fifo_cfg;
+ struct clk *pclk;
+ struct clk *hclk;
+ struct usba_ep *usba_ep;
+ bool bias_pulse_needed;
+ bool clocked;
+ bool suspended;
+ bool ep_prealloc;
+
+ u16 devstatus;
+
+ u16 test_mode;
+ int vbus_prev;
+
+ u32 int_enb_cache;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+
+ struct regmap *pmc;
+};
+
+static inline struct usba_ep *to_usba_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct usba_ep, ep);
+}
+
+static inline struct usba_request *to_usba_req(struct usb_request *req)
+{
+ return container_of(req, struct usba_request, req);
+}
+
+static inline struct usba_udc *to_usba_udc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct usba_udc, gadget);
+}
+
+#define ep_is_control(ep) ((ep)->index == 0)
+#define ep_is_idle(ep) ((ep)->state == EP_STATE_IDLE)
+
+#endif /* __LINUX_USB_GADGET_USBA_UDC_H__ */
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
new file mode 100644
index 000000000..8d5892891
--- /dev/null
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -0,0 +1,2387 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
+ * Copyright (C) 2012 Broadcom Corporation
+ */
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_iudma.h>
+#include <bcm63xx_dev_usb_usbd.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+#define DRV_MODULE_NAME "bcm63xx_udc"
+
+static const char bcm63xx_ep0name[] = "ep0";
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} bcm63xx_ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO(bcm63xx_ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4out-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
+};
+
+static bool use_fullspeed;
+module_param(use_fullspeed, bool, S_IRUGO);
+MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
+
+/*
+ * RX IRQ coalescing options:
+ *
+ * false (default) - one IRQ per DATAx packet. Slow but reliable. The
+ * driver is able to pass the "testusb" suite and recover from conditions like:
+ *
+ * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
+ * 2) Host sends 512 bytes of data
+ * 3) Host decides to reconfigure the device and sends SET_INTERFACE
+ * 4) Device shuts down the endpoint and cancels the RX transaction
+ *
+ * true - one IRQ per transfer, for transfers <= 2048B. Generates
+ * considerably fewer IRQs, but error recovery is less robust. Does not
+ * reliably pass "testusb".
+ *
+ * TX always uses coalescing, because we can cancel partially complete TX
+ * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
+ * this on RX.
+ */
+static bool irq_coalesce;
+module_param(irq_coalesce, bool, S_IRUGO);
+MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
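+
+/*
+ * Both knobs are ordinary module parameters, so (illustrative usage)
+ * they can be set with "modprobe bcm63xx_udc irq_coalesce=1" or on the
+ * kernel command line as "bcm63xx_udc.irq_coalesce=1".
+ */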
+
+#define BCM63XX_NUM_EP 5
+#define BCM63XX_NUM_IUDMA 6
+#define BCM63XX_NUM_FIFO_PAIRS 3
+
+#define IUDMA_RESET_TIMEOUT_US 10000
+
+#define IUDMA_EP0_RXCHAN 0
+#define IUDMA_EP0_TXCHAN 1
+
+#define IUDMA_MAX_FRAGMENT 2048
+#define BCM63XX_MAX_CTRL_PKT 64
+
+#define BCMEP_CTRL 0x00
+#define BCMEP_ISOC 0x01
+#define BCMEP_BULK 0x02
+#define BCMEP_INTR 0x03
+
+#define BCMEP_OUT 0x00
+#define BCMEP_IN 0x01
+
+#define BCM63XX_SPD_FULL 1
+#define BCM63XX_SPD_HIGH 0
+
+#define IUDMA_DMAC_OFFSET 0x200
+#define IUDMA_DMAS_OFFSET 0x400
+
+enum bcm63xx_ep0_state {
+ EP0_REQUEUE,
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE_SETUP,
+ EP0_IN_DATA_PHASE_COMPLETE,
+ EP0_OUT_DATA_PHASE_SETUP,
+ EP0_OUT_DATA_PHASE_COMPLETE,
+ EP0_OUT_STATUS_PHASE,
+ EP0_IN_FAKE_STATUS_PHASE,
+ EP0_SHUTDOWN,
+};
+
+static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
+ "REQUEUE",
+ "IDLE",
+ "IN_DATA_PHASE_SETUP",
+ "IN_DATA_PHASE_COMPLETE",
+ "OUT_DATA_PHASE_SETUP",
+ "OUT_DATA_PHASE_COMPLETE",
+ "OUT_STATUS_PHASE",
+ "IN_FAKE_STATUS_PHASE",
+ "SHUTDOWN",
+};
+
+/**
+ * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
+ * @ep_num: USB endpoint number.
+ * @n_bds: Number of buffer descriptors in the ring.
+ * @ep_type: Endpoint type (control, bulk, interrupt).
+ * @dir: Direction (in, out).
+ * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
+ * @max_pkt_hs: Maximum packet size in high speed mode.
+ * @max_pkt_fs: Maximum packet size in full speed mode.
+ */
+struct iudma_ch_cfg {
+ int ep_num;
+ int n_bds;
+ int ep_type;
+ int dir;
+ int n_fifo_slots;
+ int max_pkt_hs;
+ int max_pkt_fs;
+};
+
+static const struct iudma_ch_cfg iudma_defaults[] = {
+
+ /* This controller was designed to support a CDC/RNDIS application.
+ It may be possible to reconfigure some of the endpoints, but
+ the hardware limitations (FIFO sizing and number of DMA channels)
+ may significantly impact flexibility and/or stability. Change
+ these values at your own risk.
+
+ ep_num ep_type n_fifo_slots max_pkt_fs
+ idx | n_bds | dir | max_pkt_hs |
+ | | | | | | | | */
+ [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
+ [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
+ [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
+ [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
+ [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
+ [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
+};
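+
+/*
+ * Reading the table above (illustrative): IUDMA channels come in RX/TX
+ * pairs sharing a FIFO pair, so channels 2/3 back the bulk endpoints
+ * (ep2 OUT, ep1 IN) with 128 FIFO slots each, while ep0 owns channel 0
+ * for RX (ep_num -1) and channel 1 for TX.
+ */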
+
+struct bcm63xx_udc;
+
+/**
+ * struct iudma_ch - Represents the current state of a single IUDMA channel.
+ * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
+ * @ep_num: USB endpoint number. -1 for ep0 RX.
+ * @enabled: Whether bcm63xx_ep_enable() has been called.
+ * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
+ * @is_tx: true for TX, false for RX.
+ * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
+ * @udc: Reference to the device controller.
+ * @read_bd: Next buffer descriptor to reap from the hardware.
+ * @write_bd: Next BD available for a new packet.
+ * @end_bd: Points to the final BD in the ring.
+ * @n_bds_used: Number of BD entries currently occupied.
+ * @bd_ring: Base pointer to the BD ring.
+ * @bd_ring_dma: Physical (DMA) address of bd_ring.
+ * @n_bds: Total number of BDs in the ring.
+ *
+ * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
+ * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
+ * only.
+ *
+ * Each bulk/intr endpoint has a single IUDMA channel and a single
+ * struct usb_ep.
+ */
+struct iudma_ch {
+ unsigned int ch_idx;
+ int ep_num;
+ bool enabled;
+ int max_pkt;
+ bool is_tx;
+ struct bcm63xx_ep *bep;
+ struct bcm63xx_udc *udc;
+
+ struct bcm_enet_desc *read_bd;
+ struct bcm_enet_desc *write_bd;
+ struct bcm_enet_desc *end_bd;
+ int n_bds_used;
+
+ struct bcm_enet_desc *bd_ring;
+ dma_addr_t bd_ring_dma;
+ unsigned int n_bds;
+};
+
+/**
+ * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
+ * @ep_num: USB endpoint number.
+ * @iudma: Pointer to IUDMA channel state.
+ * @ep: USB gadget layer representation of the EP.
+ * @udc: Reference to the device controller.
+ * @queue: Linked list of outstanding requests for this EP.
+ * @halted: 1 if the EP is stalled; 0 otherwise.
+ */
+struct bcm63xx_ep {
+ unsigned int ep_num;
+ struct iudma_ch *iudma;
+ struct usb_ep ep;
+ struct bcm63xx_udc *udc;
+ struct list_head queue;
+ unsigned halted:1;
+};
+
+/**
+ * struct bcm63xx_req - Internal (driver) state of a single request.
+ * @queue: Links back to the EP's request list.
+ * @req: USB gadget layer representation of the request.
+ * @offset: Current byte offset into the data buffer (next byte to queue).
+ * @bd_bytes: Number of data bytes in outstanding BD entries.
+ * @iudma: IUDMA channel used for the request.
+ */
+struct bcm63xx_req {
+ struct list_head queue; /* ep's requests */
+ struct usb_request req;
+ unsigned int offset;
+ unsigned int bd_bytes;
+ struct iudma_ch *iudma;
+};
+
+/**
+ * struct bcm63xx_udc - Driver/hardware private context.
+ * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
+ * @dev: Generic Linux device structure.
+ * @pd: Platform data (board/port info).
+ * @usbd_clk: Clock descriptor for the USB device block.
+ * @usbh_clk: Clock descriptor for the USB host block.
+ * @gadget: USB device.
+ * @driver: Driver for USB device.
+ * @usbd_regs: Base address of the USBD/USB20D block.
+ * @iudma_regs: Base address of the USBD's associated IUDMA block.
+ * @bep: Array of endpoints, including ep0.
+ * @iudma: Array of all IUDMA channels used by this controller.
+ * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
+ * @iface: USB interface number, from SET_INTERFACE wIndex.
+ * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
+ * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
+ * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
+ * @ep0state: Current state of the ep0 state machine.
+ * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
+ * @wedgemap: Bitmap of wedged endpoints.
+ * @ep0_req_reset: USB reset is pending.
+ * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
+ * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
+ * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
+ * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
+ * @ep0_reply: Pending reply from gadget driver.
+ * @ep0_request: Outstanding ep0 request.
+ */
+struct bcm63xx_udc {
+ spinlock_t lock;
+
+ struct device *dev;
+ struct bcm63xx_usbd_platform_data *pd;
+ struct clk *usbd_clk;
+ struct clk *usbh_clk;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ void __iomem *usbd_regs;
+ void __iomem *iudma_regs;
+
+ struct bcm63xx_ep bep[BCM63XX_NUM_EP];
+ struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
+
+ int cfg;
+ int iface;
+ int alt_iface;
+
+ struct bcm63xx_req ep0_ctrl_req;
+ u8 *ep0_ctrl_buf;
+
+ int ep0state;
+ struct work_struct ep0_wq;
+
+ unsigned long wedgemap;
+
+ unsigned ep0_req_reset:1;
+ unsigned ep0_req_set_cfg:1;
+ unsigned ep0_req_set_iface:1;
+ unsigned ep0_req_shutdown:1;
+
+ unsigned ep0_req_completed:1;
+ struct usb_request *ep0_reply;
+ struct usb_request *ep0_request;
+};
+
+static const struct usb_ep_ops bcm63xx_udc_ep_ops;
+
+/***********************************************************************
+ * Convenience functions
+ ***********************************************************************/
+
+static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
+{
+ return container_of(g, struct bcm63xx_udc, gadget);
+}
+
+static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct bcm63xx_ep, ep);
+}
+
+static inline struct bcm63xx_req *our_req(struct usb_request *req)
+{
+ return container_of(req, struct bcm63xx_req, req);
+}
+
+static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
+{
+ return bcm_readl(udc->usbd_regs + off);
+}
+
+static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+{
+ bcm_writel(val, udc->usbd_regs + off);
+}
+
+static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
+{
+ return bcm_readl(udc->iudma_regs + off);
+}
+
+static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+{
+ bcm_writel(val, udc->iudma_regs + off);
+}
+
+static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
+{
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
+}
+
+static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
+{
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
+}
+
+static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
+{
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
+}
+
+static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
+{
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
+}
+
+static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
+{
+ if (is_enabled) {
+ clk_enable(udc->usbh_clk);
+ clk_enable(udc->usbd_clk);
+ udelay(10);
+ } else {
+ clk_disable(udc->usbd_clk);
+ clk_disable(udc->usbh_clk);
+ }
+}
+
+/***********************************************************************
+ * Low-level IUDMA / FIFO operations
+ ***********************************************************************/
+
+/**
+ * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
+ * @udc: Reference to the device controller.
+ * @idx: Desired init_sel value.
+ *
+ * The "init_sel" signal is used as a selection index for both endpoints
+ * and IUDMA channels. Since these do not map 1:1, the use of this signal
+ * depends on the context.
+ */
+static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
+{
+ u32 val = usbd_readl(udc, USBD_CONTROL_REG);
+
+ val &= ~USBD_CONTROL_INIT_SEL_MASK;
+ val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
+ usbd_writel(udc, val, USBD_CONTROL_REG);
+}
+
+/**
+ * bcm63xx_set_stall - Enable/disable stall on one endpoint.
+ * @udc: Reference to the device controller.
+ * @bep: Endpoint on which to operate.
+ * @is_stalled: true to enable stall, false to disable.
+ *
+ * See notes in bcm63xx_update_wedge() regarding automatic clearing of
+ * halt/stall conditions.
+ */
+static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
+ bool is_stalled)
+{
+ u32 val;
+
+ val = USBD_STALL_UPDATE_MASK |
+ (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
+ (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
+ usbd_writel(udc, val, USBD_STALL_REG);
+}
+
+/**
+ * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
+ * @udc: Reference to the device controller.
+ *
+ * These parameters depend on the USB link speed. Settings are
+ * per-IUDMA-channel-pair.
+ */
+static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
+{
+ int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
+ u32 i, val, rx_fifo_slot, tx_fifo_slot;
+
+ /* set up FIFO boundaries and packet sizes; this is done in pairs */
+ rx_fifo_slot = tx_fifo_slot = 0;
+ for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
+ const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
+ const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
+
+ bcm63xx_ep_dma_select(udc, i >> 1);
+
+ val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
+ ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
+ USBD_RXFIFO_CONFIG_END_SHIFT);
+ rx_fifo_slot += rx_cfg->n_fifo_slots;
+ usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
+ usbd_writel(udc,
+ is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
+ USBD_RXFIFO_EPSIZE_REG);
+
+ val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
+ ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
+ USBD_TXFIFO_CONFIG_END_SHIFT);
+ tx_fifo_slot += tx_cfg->n_fifo_slots;
+ usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
+ usbd_writel(udc,
+ is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
+ USBD_TXFIFO_EPSIZE_REG);
+
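+ /* dummy read to flush the posted register writes */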
+ usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
+ }
+}
+
+/**
+ * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
+ * @udc: Reference to the device controller.
+ * @ep_num: Endpoint number.
+ */
+static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
+{
+ u32 val;
+
+ bcm63xx_ep_dma_select(udc, ep_num);
+
+ val = usbd_readl(udc, USBD_CONTROL_REG);
+ val |= USBD_CONTROL_FIFO_RESET_MASK;
+ usbd_writel(udc, val, USBD_CONTROL_REG);
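+ /* dummy read to flush the posted FIFO reset write */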
+ usbd_readl(udc, USBD_CONTROL_REG);
+}
+
+/**
+ * bcm63xx_fifo_reset - Flush all hardware FIFOs.
+ * @udc: Reference to the device controller.
+ */
+static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
+{
+ int i;
+
+ for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
+ bcm63xx_fifo_reset_ep(udc, i);
+}
+
+/**
+ * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
+ * @udc: Reference to the device controller.
+ */
+static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
+{
+ u32 i, val;
+
+ for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
+ const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
+
+ if (cfg->ep_num < 0)
+ continue;
+
+ bcm63xx_ep_dma_select(udc, cfg->ep_num);
+ val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
+ ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
+ usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
+ }
+}
+
+/**
+ * bcm63xx_ep_setup - Configure per-endpoint settings.
+ * @udc: Reference to the device controller.
+ *
+ * This needs to be rerun if the speed/cfg/intf/altintf changes.
+ */
+static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
+{
+ u32 val, i;
+
+ usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
+
+ for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
+ const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
+ int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
+ cfg->max_pkt_hs : cfg->max_pkt_fs;
+ int idx = cfg->ep_num;
+
+ udc->iudma[i].max_pkt = max_pkt;
+
+ if (idx < 0)
+ continue;
+ usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
+
+ val = (idx << USBD_CSR_EP_LOG_SHIFT) |
+ (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
+ (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
+ (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
+ (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
+ (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
+ (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
+ usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
+ }
+}
+
+/**
+ * iudma_write - Queue a single IUDMA transaction.
+ * @udc: Reference to the device controller.
+ * @iudma: IUDMA channel to use.
+ * @breq: Request containing the transaction data.
+ *
+ * For RX IUDMA, this will queue a single buffer descriptor: RX IUDMA
+ * does not honor SOP/EOP, so the handling of multiple buffers would be
+ * ambiguous. iudma_write() may therefore be called several times to
+ * fulfill a single usb_request.
+ *
+ * For TX IUDMA, this can queue multiple buffer descriptors if needed.
+ */
+static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
+ struct bcm63xx_req *breq)
+{
+ int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
+ unsigned int bytes_left = breq->req.length - breq->offset;
+ const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
+ iudma->max_pkt : IUDMA_MAX_FRAGMENT;
+
+ iudma->n_bds_used = 0;
+ breq->bd_bytes = 0;
+ breq->iudma = iudma;
+
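+ /*
+ * if the transfer length is an exact multiple of the max packet
+ * size and the gadget requested a zero-length terminator, queue
+ * one extra ZLP so the host can detect the end of the transfer
+ */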
+ if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
+ extra_zero_pkt = 1;
+
+ do {
+ struct bcm_enet_desc *d = iudma->write_bd;
+ u32 dmaflags = 0;
+ unsigned int n_bytes;
+
+ if (d == iudma->end_bd) {
+ dmaflags |= DMADESC_WRAP_MASK;
+ iudma->write_bd = iudma->bd_ring;
+ } else {
+ iudma->write_bd++;
+ }
+ iudma->n_bds_used++;
+
+ n_bytes = min_t(int, bytes_left, max_bd_bytes);
+ if (n_bytes)
+ dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
+ else
+ dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
+ DMADESC_USB_ZERO_MASK;
+
+ dmaflags |= DMADESC_OWNER_MASK;
+ if (first_bd) {
+ dmaflags |= DMADESC_SOP_MASK;
+ first_bd = 0;
+ }
+
+ /*
+ * extra_zero_pkt forces one more iteration through the loop
+ * after all data is queued up, to send the zero packet
+ */
+ if (extra_zero_pkt && !bytes_left)
+ extra_zero_pkt = 0;
+
+ if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
+ (n_bytes == bytes_left && !extra_zero_pkt)) {
+ last_bd = 1;
+ dmaflags |= DMADESC_EOP_MASK;
+ }
+
+ d->address = breq->req.dma + breq->offset;
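+ /* make the buffer address visible before OWNER hands the BD to HW */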
+ mb();
+ d->len_stat = dmaflags;
+
+ breq->offset += n_bytes;
+ breq->bd_bytes += n_bytes;
+ bytes_left -= n_bytes;
+ } while (!last_bd);
+
+ usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
+ ENETDMAC_CHANCFG_REG, iudma->ch_idx);
+}
+
+/**
+ * iudma_read - Check for IUDMA buffer completion.
+ * @udc: Reference to the device controller.
+ * @iudma: IUDMA channel to use.
+ *
+ * This checks to see if ALL of the outstanding BDs on the DMA channel
+ * have been filled. If so, it returns the actual transfer length;
+ * otherwise it returns -EBUSY.
+ */
+static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
+{
+ int i, actual_len = 0;
+ struct bcm_enet_desc *d = iudma->read_bd;
+
+ if (!iudma->n_bds_used)
+ return -EINVAL;
+
+ for (i = 0; i < iudma->n_bds_used; i++) {
+ u32 dmaflags;
+
+ dmaflags = d->len_stat;
+
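+ /* hardware still owns this BD, so the transfer is incomplete */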
+ if (dmaflags & DMADESC_OWNER_MASK)
+ return -EBUSY;
+
+ actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
+ DMADESC_LENGTH_SHIFT;
+ if (d == iudma->end_bd)
+ d = iudma->bd_ring;
+ else
+ d++;
+ }
+
+ iudma->read_bd = d;
+ iudma->n_bds_used = 0;
+ return actual_len;
+}
+
+/**
+ * iudma_reset_channel - Stop DMA on a single channel.
+ * @udc: Reference to the device controller.
+ * @iudma: IUDMA channel to reset.
+ */
+static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
+{
+ int timeout = IUDMA_RESET_TIMEOUT_US;
+ struct bcm_enet_desc *d;
+ int ch_idx = iudma->ch_idx;
+
+ if (!iudma->is_tx)
+ bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
+
+ /* stop DMA, then wait for the hardware to wrap up */
+ usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
+
+ while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
+ ENETDMAC_CHANCFG_EN_MASK) {
+ udelay(1);
+
+ /* repeatedly flush the FIFO data until the BD completes */
+ if (iudma->is_tx && iudma->ep_num >= 0)
+ bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
+
+ if (!timeout--) {
+ dev_err(udc->dev, "can't reset IUDMA channel %d\n",
+ ch_idx);
+ break;
+ }
+ if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
+ dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
+ ch_idx);
+ usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
+ ENETDMAC_CHANCFG_REG, ch_idx);
+ }
+ }
+ usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
+
+ /* don't leave "live" HW-owned entries for the next guy to step on */
+ for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
+ d->len_stat = 0;
+ mb();
+
+ iudma->read_bd = iudma->write_bd = iudma->bd_ring;
+ iudma->n_bds_used = 0;
+
+ /* set up IRQs, UBUS burst size, and BD base for this channel */
+ usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
+ ENETDMAC_IRMASK_REG, ch_idx);
+ usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
+
+ usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
+ usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
+}
+
+/**
+ * iudma_init_channel - One-time IUDMA channel initialization.
+ * @udc: Reference to the device controller.
+ * @ch_idx: Channel to initialize.
+ */
+static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
+{
+ struct iudma_ch *iudma = &udc->iudma[ch_idx];
+ const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
+ unsigned int n_bds = cfg->n_bds;
+ struct bcm63xx_ep *bep = NULL;
+
+ iudma->ep_num = cfg->ep_num;
+ iudma->ch_idx = ch_idx;
+ iudma->is_tx = !!(ch_idx & 0x01);
+ if (iudma->ep_num >= 0) {
+ bep = &udc->bep[iudma->ep_num];
+ bep->iudma = iudma;
+ INIT_LIST_HEAD(&bep->queue);
+ }
+
+ iudma->bep = bep;
+ iudma->udc = udc;
+
+ /* ep0 is always active; others are controlled by the gadget driver */
+ if (iudma->ep_num <= 0)
+ iudma->enabled = true;
+
+ iudma->n_bds = n_bds;
+ iudma->bd_ring = dmam_alloc_coherent(udc->dev,
+ n_bds * sizeof(struct bcm_enet_desc),
+ &iudma->bd_ring_dma, GFP_KERNEL);
+ if (!iudma->bd_ring)
+ return -ENOMEM;
+ iudma->end_bd = &iudma->bd_ring[n_bds - 1];
+
+ return 0;
+}
+
+/**
+ * iudma_init - One-time initialization of all IUDMA channels.
+ * @udc: Reference to the device controller.
+ *
+ * Enable DMA, flush channels, and enable global IUDMA IRQs.
+ */
+static int iudma_init(struct bcm63xx_udc *udc)
+{
+ int i, rc;
+
+ usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+
+ for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
+ rc = iudma_init_channel(udc, i);
+ if (rc)
+ return rc;
+ iudma_reset_channel(udc, &udc->iudma[i]);
+ }
+
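+ /* unmask the global IRQ bits for all BCM63XX_NUM_IUDMA channels */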
+ usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA) - 1, ENETDMA_GLB_IRQMASK_REG);
+ return 0;
+}
+
+/**
+ * iudma_uninit - Uninitialize IUDMA channels.
+ * @udc: Reference to the device controller.
+ *
+ * Kill global IUDMA IRQs, flush channels, and kill DMA.
+ */
+static void iudma_uninit(struct bcm63xx_udc *udc)
+{
+ int i;
+
+ usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
+
+ for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
+ iudma_reset_channel(udc, &udc->iudma[i]);
+
+ usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
+}
+
+/***********************************************************************
+ * Other low-level USBD operations
+ ***********************************************************************/
+
+/**
+ * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
+ * @udc: Reference to the device controller.
+ * @enable_irqs: true to enable, false to disable.
+ */
+static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
+{
+ u32 val;
+
+ usbd_writel(udc, 0, USBD_STATUS_REG);
+
+ val = BIT(USBD_EVENT_IRQ_USB_RESET) |
+ BIT(USBD_EVENT_IRQ_SETUP) |
+ BIT(USBD_EVENT_IRQ_SETCFG) |
+ BIT(USBD_EVENT_IRQ_SETINTF) |
+ BIT(USBD_EVENT_IRQ_USB_LINK);
+ usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
+ usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
+}
+
+/**
+ * bcm63xx_select_phy_mode - Select between USB device and host mode.
+ * @udc: Reference to the device controller.
+ * @is_device: true for device, false for host.
+ *
+ * This should probably be reworked to use the drivers/usb/otg
+ * infrastructure.
+ *
+ * By default, the AFE/pullups are disabled in device mode, until
+ * bcm63xx_select_pullup() is called.
+ */
+static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
+{
+ u32 val, portmask = BIT(udc->pd->port_no);
+
+ if (BCMCPU_IS_6328()) {
+ /* configure pinmux to sense VBUS signal */
+ val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
+ val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
+ val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
+ GPIO_PINMUX_OTHR_6328_USB_HOST;
+ bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
+ }
+
+ val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
+ if (is_device) {
+ val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
+ val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
+ } else {
+ val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
+ val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
+ }
+ bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
+
+ val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
+ if (is_device)
+ val |= USBH_PRIV_SWAP_USBD_MASK;
+ else
+ val &= ~USBH_PRIV_SWAP_USBD_MASK;
+ bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
+}
+
+/**
+ * bcm63xx_select_pullup - Enable/disable the pullup on D+
+ * @udc: Reference to the device controller.
+ * @is_on: true to enable the pullup, false to disable.
+ *
+ * If the pullup is active, the host will sense a FS/HS device connected to
+ * the port. If the pullup is inactive, the host will think the USB
+ * device has been disconnected.
+ */
+static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
+{
+ u32 val, portmask = BIT(udc->pd->port_no);
+
+ val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
+ if (is_on)
+ val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
+ else
+ val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
+ bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
+}
+
+/**
+ * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
+ * @udc: Reference to the device controller.
+ *
+ * This just masks the IUDMA IRQs and releases the clocks. It is assumed
+ * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
+ */
+static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
+{
+ set_clocks(udc, true);
+ iudma_uninit(udc);
+ set_clocks(udc, false);
+
+ clk_put(udc->usbd_clk);
+ clk_put(udc->usbh_clk);
+}
+
+/**
+ * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
+ * @udc: Reference to the device controller.
+ */
+static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
+{
+ int i, rc = 0;
+ u32 val;
+
+ udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
+ GFP_KERNEL);
+ if (!udc->ep0_ctrl_buf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ for (i = 0; i < BCM63XX_NUM_EP; i++) {
+ struct bcm63xx_ep *bep = &udc->bep[i];
+
+ bep->ep.name = bcm63xx_ep_info[i].name;
+ bep->ep.caps = bcm63xx_ep_info[i].caps;
+ bep->ep_num = i;
+ bep->ep.ops = &bcm63xx_udc_ep_ops;
+ list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
+ bep->halted = 0;
+ usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
+ bep->udc = udc;
+ bep->ep.desc = NULL;
+ INIT_LIST_HEAD(&bep->queue);
+ }
+
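+ /* ep0 is exposed through gadget.ep0, not the generic ep_list */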
+ udc->gadget.ep0 = &udc->bep[0].ep;
+ list_del(&udc->bep[0].ep.ep_list);
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->ep0state = EP0_SHUTDOWN;
+
+ udc->usbh_clk = clk_get(udc->dev, "usbh");
+ if (IS_ERR(udc->usbh_clk))
+ return -EIO;
+
+ udc->usbd_clk = clk_get(udc->dev, "usbd");
+ if (IS_ERR(udc->usbd_clk)) {
+ clk_put(udc->usbh_clk);
+ return -EIO;
+ }
+
+ set_clocks(udc, true);
+
+ val = USBD_CONTROL_AUTO_CSRS_MASK |
+ USBD_CONTROL_DONE_CSRS_MASK |
+ (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
+ usbd_writel(udc, val, USBD_CONTROL_REG);
+
+ val = USBD_STRAPS_APP_SELF_PWR_MASK |
+ USBD_STRAPS_APP_RAM_IF_MASK |
+ USBD_STRAPS_APP_CSRPRGSUP_MASK |
+ USBD_STRAPS_APP_8BITPHY_MASK |
+ USBD_STRAPS_APP_RMTWKUP_MASK;
+
+ if (udc->gadget.max_speed == USB_SPEED_HIGH)
+ val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
+ else
+ val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
+ usbd_writel(udc, val, USBD_STRAPS_REG);
+
+ bcm63xx_set_ctrl_irqs(udc, false);
+
+ usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
+
+ val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
+ USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
+ usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
+
+ rc = iudma_init(udc);
+ set_clocks(udc, false);
+ if (rc)
+ bcm63xx_uninit_udc_hw(udc);
+
+ return rc;
+}
+
+/***********************************************************************
+ * Standard EP gadget operations
+ ***********************************************************************/
+
+/**
+ * bcm63xx_ep_enable - Enable one endpoint.
+ * @ep: Endpoint to enable.
+ * @desc: Contains max packet, direction, etc.
+ *
+ * Most of the endpoint parameters are fixed in this controller, so there
+ * isn't much for this function to do.
+ */
+static int bcm63xx_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct bcm63xx_ep *bep = our_ep(ep);
+ struct bcm63xx_udc *udc = bep->udc;
+ struct iudma_ch *iudma = bep->iudma;
+ unsigned long flags;
+
+ if (!ep || !desc || ep->name == bcm63xx_ep0name)
+ return -EINVAL;
+
+ if (!udc->driver)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (iudma->enabled) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
+ iudma->enabled = true;
+ BUG_ON(!list_empty(&bep->queue));
+
+ iudma_reset_channel(udc, iudma);
+
+ bep->halted = 0;
+ bcm63xx_set_stall(udc, bep, false);
+ clear_bit(bep->ep_num, &udc->wedgemap);
+
+ ep->desc = desc;
+ ep->maxpacket = usb_endpoint_maxp(desc);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/**
+ * bcm63xx_ep_disable - Disable one endpoint.
+ * @ep: Endpoint to disable.
+ */
+static int bcm63xx_ep_disable(struct usb_ep *ep)
+{
+ struct bcm63xx_ep *bep = our_ep(ep);
+ struct bcm63xx_udc *udc = bep->udc;
+ struct iudma_ch *iudma = bep->iudma;
+ struct bcm63xx_req *breq, *n;
+ unsigned long flags;
+
+ if (!ep || !ep->desc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (!iudma->enabled) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+ iudma->enabled = false;
+
+ iudma_reset_channel(udc, iudma);
+
+ if (!list_empty(&bep->queue)) {
+ list_for_each_entry_safe(breq, n, &bep->queue, queue) {
+ usb_gadget_unmap_request(&udc->gadget, &breq->req,
+ iudma->is_tx);
+ list_del(&breq->queue);
+ breq->req.status = -ESHUTDOWN;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
+ spin_lock_irqsave(&udc->lock, flags);
+ }
+ }
+ ep->desc = NULL;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/**
+ * bcm63xx_udc_alloc_request - Allocate a new request.
+ * @ep: Endpoint associated with the request.
+ * @mem_flags: Flags to pass to kzalloc().
+ */
+static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
+ gfp_t mem_flags)
+{
+ struct bcm63xx_req *breq;
+
+ breq = kzalloc(sizeof(*breq), mem_flags);
+ if (!breq)
+ return NULL;
+ return &breq->req;
+}
+
+/**
+ * bcm63xx_udc_free_request - Free a request.
+ * @ep: Endpoint associated with the request.
+ * @req: Request to free.
+ */
+static void bcm63xx_udc_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct bcm63xx_req *breq = our_req(req);
+ kfree(breq);
+}
+
+/**
+ * bcm63xx_udc_queue - Queue up a new request.
+ * @ep: Endpoint associated with the request.
+ * @req: Request to add.
+ * @mem_flags: Unused.
+ *
+ * If the queue is empty, start this request immediately. Otherwise, add
+ * it to the list.
+ *
+ * ep0 replies are sent through this function from the gadget driver, but
+ * they are treated differently because they need to be handled by the ep0
+ * state machine. (Sometimes they are replies to control requests that
+ * were spoofed by this driver, and so they shouldn't be transmitted at all.)
+ */
+static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t mem_flags)
+{
+ struct bcm63xx_ep *bep = our_ep(ep);
+ struct bcm63xx_udc *udc = bep->udc;
+ struct bcm63xx_req *breq = our_req(req);
+ unsigned long flags;
+ int rc = 0;
+
+ if (unlikely(!req || !req->complete || !req->buf || !ep))
+ return -EINVAL;
+
+ req->actual = 0;
+ req->status = 0;
+ breq->offset = 0;
+
+ if (bep == &udc->bep[0]) {
+ /* only one reply per request, please */
+ if (udc->ep0_reply)
+ return -EINVAL;
+
+ udc->ep0_reply = req;
+ schedule_work(&udc->ep0_wq);
+ return 0;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (!bep->iudma->enabled) {
+ rc = -ESHUTDOWN;
+ goto out;
+ }
+
+ rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
+ if (rc == 0) {
+ list_add_tail(&breq->queue, &bep->queue);
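+ /*
+ * if this is the only queued request, start the DMA now;
+ * otherwise the data ISR starts the next request when the
+ * current queue head completes
+ */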
+ if (list_is_singular(&bep->queue))
+ iudma_write(udc, bep->iudma, breq);
+ }
+
+out:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return rc;
+}
+
+/**
+ * bcm63xx_udc_dequeue - Remove a pending request from the queue.
+ * @ep: Endpoint associated with the request.
+ * @req: Request to remove.
+ *
+ * If the request is not at the head of the queue, this is easy - just nuke
+ * it. If the request is at the head of the queue, we'll need to stop the
+ * DMA transaction and then queue up the successor.
+ */
+static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ struct bcm63xx_ep *bep = our_ep(ep);
+ struct bcm63xx_udc *udc = bep->udc;
+ struct bcm63xx_req *breq = our_req(req), *cur;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (list_empty(&bep->queue)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
+ usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
+
+ if (breq == cur) {
+ iudma_reset_channel(udc, bep->iudma);
+ list_del(&breq->queue);
+
+ if (!list_empty(&bep->queue)) {
+ struct bcm63xx_req *next;
+
+ next = list_first_entry(&bep->queue,
+ struct bcm63xx_req, queue);
+ iudma_write(udc, bep->iudma, next);
+ }
+ } else {
+ list_del(&breq->queue);
+ }
+
+out:
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ req->status = -ESHUTDOWN;
+ req->complete(ep, req);
+
+ return rc;
+}
+
+/**
+ * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
+ * @ep: Endpoint to halt.
+ * @value: Zero to clear halt; nonzero to set halt.
+ *
+ * See comments in bcm63xx_update_wedge().
+ */
+static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
+{
+ struct bcm63xx_ep *bep = our_ep(ep);
+ struct bcm63xx_udc *udc = bep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ bcm63xx_set_stall(udc, bep, !!value);
+ bep->halted = value;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/**
+ * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
+ * @ep: Endpoint to wedge.
+ *
+ * See comments in bcm63xx_update_wedge().
+ */
+static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
+{
+ struct bcm63xx_ep *bep = our_ep(ep);
+ struct bcm63xx_udc *udc = bep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ set_bit(bep->ep_num, &udc->wedgemap);
+ bcm63xx_set_stall(udc, bep, true);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
+ .enable = bcm63xx_ep_enable,
+ .disable = bcm63xx_ep_disable,
+
+ .alloc_request = bcm63xx_udc_alloc_request,
+ .free_request = bcm63xx_udc_free_request,
+
+ .queue = bcm63xx_udc_queue,
+ .dequeue = bcm63xx_udc_dequeue,
+
+ .set_halt = bcm63xx_udc_set_halt,
+ .set_wedge = bcm63xx_udc_set_wedge,
+};
+
+/***********************************************************************
+ * EP0 handling
+ ***********************************************************************/
+
+/**
+ * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
+ * @udc: Reference to the device controller.
+ * @ctrl: 8-byte SETUP request.
+ */
+static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int rc;
+
+ spin_unlock_irq(&udc->lock);
+ rc = udc->driver->setup(&udc->gadget, ctrl);
+ spin_lock_irq(&udc->lock);
+ return rc;
+}
+
+/**
+ * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
+ * @udc: Reference to the device controller.
+ *
+ * Many standard requests are handled automatically in the hardware, but
+ * we still need to pass them to the gadget driver so that it can
+ * reconfigure the interfaces/endpoints if necessary.
+ *
+ * Unfortunately we are not able to send a STALL response if the host
+ * requests an invalid configuration. If this happens, we'll have to be
+ * content with printing a warning.
+ */
+static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
+{
+ struct usb_ctrlrequest ctrl;
+ int rc;
+
+ ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
+ ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
+ ctrl.wValue = cpu_to_le16(udc->cfg);
+ ctrl.wIndex = 0;
+ ctrl.wLength = 0;
+
+ rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
+ if (rc < 0) {
+ dev_warn_ratelimited(udc->dev,
+ "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
+ udc->cfg);
+ }
+ return rc;
+}
+
+/**
+ * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
+ * @udc: Reference to the device controller.
+ */
+static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
+{
+ struct usb_ctrlrequest ctrl;
+ int rc;
+
+ ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
+ ctrl.bRequest = USB_REQ_SET_INTERFACE;
+ ctrl.wValue = cpu_to_le16(udc->alt_iface);
+ ctrl.wIndex = cpu_to_le16(udc->iface);
+ ctrl.wLength = 0;
+
+ rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
+ if (rc < 0) {
+ dev_warn_ratelimited(udc->dev,
+ "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
+ udc->iface, udc->alt_iface);
+ }
+ return rc;
+}
+
+/**
+ * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
+ * @udc: Reference to the device controller.
+ * @ch_idx: IUDMA channel number.
+ * @req: USB gadget layer representation of the request.
+ */
+static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
+ struct usb_request *req)
+{
+ struct bcm63xx_req *breq = our_req(req);
+ struct iudma_ch *iudma = &udc->iudma[ch_idx];
+
+ BUG_ON(udc->ep0_request);
+ udc->ep0_request = req;
+
+ req->actual = 0;
+ breq->offset = 0;
+ usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
+ iudma_write(udc, iudma, breq);
+}
+
+/**
+ * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
+ * @udc: Reference to the device controller.
+ * @req: USB gadget layer representation of the request.
+ * @status: Status to return to the gadget driver.
+ */
+static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
+ struct usb_request *req, int status)
+{
+ req->status = status;
+ if (status)
+ req->actual = 0;
+ if (req->complete) {
+ spin_unlock_irq(&udc->lock);
+ req->complete(&udc->bep[0].ep, req);
+ spin_lock_irq(&udc->lock);
+ }
+}
+
+/**
+ * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
+ * reset/shutdown.
+ * @udc: Reference to the device controller.
+ * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
+ */
+static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
+{
+ struct usb_request *req = udc->ep0_reply;
+
+ udc->ep0_reply = NULL;
+ usb_gadget_unmap_request(&udc->gadget, req, is_tx);
+ if (udc->ep0_request == req) {
+ udc->ep0_req_completed = 0;
+ udc->ep0_request = NULL;
+ }
+ bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
+}
+
+/**
+ * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
+ * transfer len.
+ * @udc: Reference to the device controller.
+ */
+static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
+{
+ struct usb_request *req = udc->ep0_request;
+
+ udc->ep0_req_completed = 0;
+ udc->ep0_request = NULL;
+
+ return req->actual;
+}
+
+/**
+ * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
+ * @udc: Reference to the device controller.
+ * @ch_idx: IUDMA channel number.
+ * @length: Number of bytes to TX/RX.
+ *
+ * Used for simple transfers performed by the ep0 worker. This will always
+ * use ep0_ctrl_req / ep0_ctrl_buf.
+ */
+static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
+ int length)
+{
+ struct usb_request *req = &udc->ep0_ctrl_req.req;
+
+ req->buf = udc->ep0_ctrl_buf;
+ req->length = length;
+ req->complete = NULL;
+
+ bcm63xx_ep0_map_write(udc, ch_idx, req);
+}
+
+/**
+ * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
+ * @udc: Reference to the device controller.
+ *
+ * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
+ * for the next packet. Anything else means the transaction requires multiple
+ * stages of handling.
+ */
+static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
+{
+ int rc;
+ struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
+
+ rc = bcm63xx_ep0_read_complete(udc);
+
+ if (rc < 0) {
+ dev_err(udc->dev, "missing SETUP packet\n");
+ return EP0_IDLE;
+ }
+
+ /*
+ * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
+ * always deliver these, so if we happen to see one, just throw
+ * it away.
+ */
+ if (rc == 0)
+ return EP0_REQUEUE;
+
+ /* Drop malformed SETUP packets */
+ if (rc != sizeof(*ctrl)) {
+ dev_warn_ratelimited(udc->dev,
+ "malformed SETUP packet (%d bytes)\n", rc);
+ return EP0_REQUEUE;
+ }
+
+ /* Process new SETUP packet arriving on ep0 */
+ rc = bcm63xx_ep0_setup_callback(udc, ctrl);
+ if (rc < 0) {
+ bcm63xx_set_stall(udc, &udc->bep[0], true);
+ return EP0_REQUEUE;
+ }
+
+ if (!ctrl->wLength)
+ return EP0_REQUEUE;
+ else if (ctrl->bRequestType & USB_DIR_IN)
+ return EP0_IN_DATA_PHASE_SETUP;
+ else
+ return EP0_OUT_DATA_PHASE_SETUP;
+}
+
+/**
+ * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
+ * @udc: Reference to the device controller.
+ *
+ * In state EP0_IDLE, the RX descriptor is either pending, or has been
+ * filled with a SETUP packet from the host. This function handles new
+ * SETUP packets, control IRQ events (which can generate fake SETUP packets),
+ * and reset/shutdown events.
+ *
+ * Returns 0 if work was done; -EAGAIN if nothing to do.
+ */
+static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
+{
+ if (udc->ep0_req_reset) {
+ udc->ep0_req_reset = 0;
+ } else if (udc->ep0_req_set_cfg) {
+ udc->ep0_req_set_cfg = 0;
+ if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
+ udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
+ } else if (udc->ep0_req_set_iface) {
+ udc->ep0_req_set_iface = 0;
+ if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
+ udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
+ } else if (udc->ep0_req_completed) {
+ udc->ep0state = bcm63xx_ep0_do_setup(udc);
+ return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
+ } else if (udc->ep0_req_shutdown) {
+ udc->ep0_req_shutdown = 0;
+ udc->ep0_req_completed = 0;
+ udc->ep0_request = NULL;
+ iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
+ usb_gadget_unmap_request(&udc->gadget,
+ &udc->ep0_ctrl_req.req, 0);
+
+ /* bcm63xx_udc_pullup() is waiting for this */
+ mb();
+ udc->ep0state = EP0_SHUTDOWN;
+ } else if (udc->ep0_reply) {
+ /*
+ * This could happen if a USB RESET shows up during an ep0
+ * transaction (especially if a laggy driver like gadgetfs
+ * is in use).
+ */
+ dev_warn(udc->dev, "nuking unexpected reply\n");
+ bcm63xx_ep0_nuke_reply(udc, 0);
+ } else {
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/**
+ * bcm63xx_ep0_one_round - Handle the current ep0 state.
+ * @udc: Reference to the device controller.
+ *
+ * Returns 0 if work was done; -EAGAIN if nothing to do.
+ */
+static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
+{
+ enum bcm63xx_ep0_state ep0state = udc->ep0state;
+ bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
+
+ switch (udc->ep0state) {
+ case EP0_REQUEUE:
+ /* set up descriptor to receive SETUP packet */
+ bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
+ BCM63XX_MAX_CTRL_PKT);
+ ep0state = EP0_IDLE;
+ break;
+ case EP0_IDLE:
+ return bcm63xx_ep0_do_idle(udc);
+ case EP0_IN_DATA_PHASE_SETUP:
+ /*
+ * Normal case: TX request is in ep0_reply (queued by the
+ * callback), or will be queued shortly. When it's here,
+ * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
+ *
+ * Shutdown case: Stop waiting for the reply. Just
+ * REQUEUE->IDLE. The gadget driver is NOT expected to
+ * queue anything else now.
+ */
+ if (udc->ep0_reply) {
+ bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
+ udc->ep0_reply);
+ ep0state = EP0_IN_DATA_PHASE_COMPLETE;
+ } else if (shutdown) {
+ ep0state = EP0_REQUEUE;
+ }
+ break;
+ case EP0_IN_DATA_PHASE_COMPLETE: {
+ /*
+ * Normal case: TX packet (ep0_reply) is in flight; wait for
+ * it to finish, then go back to REQUEUE->IDLE.
+ *
+ * Shutdown case: Reset the TX channel, send -ESHUTDOWN
+ * completion to the gadget driver, then REQUEUE->IDLE.
+ */
+ if (udc->ep0_req_completed) {
+ udc->ep0_reply = NULL;
+ bcm63xx_ep0_read_complete(udc);
+ /*
+ * the "ack" sometimes gets eaten (see
+ * bcm63xx_ep0_do_idle)
+ */
+ ep0state = EP0_REQUEUE;
+ } else if (shutdown) {
+ iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
+ bcm63xx_ep0_nuke_reply(udc, 1);
+ ep0state = EP0_REQUEUE;
+ }
+ break;
+ }
+ case EP0_OUT_DATA_PHASE_SETUP:
+ /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
+ if (udc->ep0_reply) {
+ bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
+ udc->ep0_reply);
+ ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
+ } else if (shutdown) {
+ ep0state = EP0_REQUEUE;
+ }
+ break;
+ case EP0_OUT_DATA_PHASE_COMPLETE: {
+ /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
+ if (udc->ep0_req_completed) {
+ udc->ep0_reply = NULL;
+ bcm63xx_ep0_read_complete(udc);
+
+ /* send 0-byte ack to host */
+ bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
+ ep0state = EP0_OUT_STATUS_PHASE;
+ } else if (shutdown) {
+ iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
+ bcm63xx_ep0_nuke_reply(udc, 0);
+ ep0state = EP0_REQUEUE;
+ }
+ break;
+ }
+ case EP0_OUT_STATUS_PHASE:
+ /*
+ * Normal case: 0-byte OUT ack packet is in flight; wait
+ * for it to finish, then go back to REQUEUE->IDLE.
+ *
+ * Shutdown case: just cancel the transmission. Don't bother
+ * calling the completion, because it originated from this
+ * function anyway. Then go back to REQUEUE->IDLE.
+ */
+ if (udc->ep0_req_completed) {
+ bcm63xx_ep0_read_complete(udc);
+ ep0state = EP0_REQUEUE;
+ } else if (shutdown) {
+ iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
+ udc->ep0_request = NULL;
+ ep0state = EP0_REQUEUE;
+ }
+ break;
+ case EP0_IN_FAKE_STATUS_PHASE: {
+ /*
+ * Normal case: we spoofed a SETUP packet and are now
+ * waiting for the gadget driver to send a 0-byte reply.
+ * This doesn't actually get sent to the HW because the
+ * HW has already sent its own reply. Once we get the
+ * response, return to IDLE.
+ *
+ * Shutdown case: return to IDLE immediately.
+ *
+ * Note that the ep0 RX descriptor has remained queued
+ * (and possibly unfilled) during this entire transaction.
+ * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
+ * or SET_INTERFACE transactions.
+ */
+ struct usb_request *r = udc->ep0_reply;
+
+ if (!r) {
+ if (shutdown)
+ ep0state = EP0_IDLE;
+ break;
+ }
+
+ bcm63xx_ep0_complete(udc, r, 0);
+ udc->ep0_reply = NULL;
+ ep0state = EP0_IDLE;
+ break;
+ }
+ case EP0_SHUTDOWN:
+ break;
+ }
+
+ if (udc->ep0state == ep0state)
+ return -EAGAIN;
+
+ udc->ep0state = ep0state;
+ return 0;
+}
+
+/**
+ * bcm63xx_ep0_process - ep0 worker thread / state machine.
+ * @w: Workqueue struct.
+ *
+ * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
+ * is used to synchronize ep0 events and ensure that both HW and SW events
+ * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
+ * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
+ * by the USBD hardware.
+ *
+ * The worker function will continue iterating around the state machine
+ * until there is nothing left to do. Usually "nothing left to do" means
+ * that we're waiting for a new event from the hardware.
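+ *
+ * A typical control-IN transfer walks the states as:
+ * EP0_REQUEUE -> EP0_IDLE (SETUP arrives) -> EP0_IN_DATA_PHASE_SETUP ->
+ * EP0_IN_DATA_PHASE_COMPLETE -> EP0_REQUEUE.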
+ */
+static void bcm63xx_ep0_process(struct work_struct *w)
+{
+ struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
+ spin_lock_irq(&udc->lock);
+ while (bcm63xx_ep0_one_round(udc) == 0)
+ ;
+ spin_unlock_irq(&udc->lock);
+}
+
+/***********************************************************************
+ * Standard UDC gadget operations
+ ***********************************************************************/
+
+/**
+ * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
+ * @gadget: USB device.
+ */
+static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
+{
+ struct bcm63xx_udc *udc = gadget_to_udc(gadget);
+
+ return (usbd_readl(udc, USBD_STATUS_REG) &
+ USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
+}
+
+/**
+ * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
+ * @gadget: USB device.
+ * @is_on: 0 to disable pullup, 1 to enable.
+ *
+ * See notes in bcm63xx_select_pullup().
+ */
+static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct bcm63xx_udc *udc = gadget_to_udc(gadget);
+ unsigned long flags;
+ int i, rc = -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (is_on && udc->ep0state == EP0_SHUTDOWN) {
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->ep0state = EP0_REQUEUE;
+ bcm63xx_fifo_setup(udc);
+ bcm63xx_fifo_reset(udc);
+ bcm63xx_ep_setup(udc);
+
+ bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
+ for (i = 0; i < BCM63XX_NUM_EP; i++)
+ bcm63xx_set_stall(udc, &udc->bep[i], false);
+
+ bcm63xx_set_ctrl_irqs(udc, true);
+ bcm63xx_select_pullup(gadget_to_udc(gadget), true);
+ rc = 0;
+ } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
+ bcm63xx_select_pullup(gadget_to_udc(gadget), false);
+
+ udc->ep0_req_shutdown = 1;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
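+ /*
+ * poll until the ep0 worker sees ep0_req_shutdown and parks
+ * the state machine in EP0_SHUTDOWN
+ */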
+ while (1) {
+ schedule_work(&udc->ep0_wq);
+ if (udc->ep0state == EP0_SHUTDOWN)
+ break;
+ msleep(50);
+ }
+ bcm63xx_set_ctrl_irqs(udc, false);
+ cancel_work_sync(&udc->ep0_wq);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return rc;
+}
+
+/**
+ * bcm63xx_udc_start - Start the controller.
+ * @gadget: USB device.
+ * @driver: Driver for USB device.
+ */
+static int bcm63xx_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct bcm63xx_udc *udc = gadget_to_udc(gadget);
+ unsigned long flags;
+
+ if (!driver || driver->max_speed < USB_SPEED_HIGH ||
+ !driver->setup)
+ return -EINVAL;
+ if (!udc)
+ return -ENODEV;
+ if (udc->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ set_clocks(udc, true);
+ bcm63xx_fifo_setup(udc);
+ bcm63xx_ep_init(udc);
+ bcm63xx_ep_setup(udc);
+ bcm63xx_fifo_reset(udc);
+ bcm63xx_select_phy_mode(udc, true);
+
+ udc->driver = driver;
+ udc->gadget.dev.of_node = udc->dev->of_node;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/**
+ * bcm63xx_udc_stop - Shut down the controller.
+ * @gadget: USB device.
+ */
+static int bcm63xx_udc_stop(struct usb_gadget *gadget)
+{
+ struct bcm63xx_udc *udc = gadget_to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->driver = NULL;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ /*
+ * If we switch the PHY too abruptly after dropping D+, the host
+ * will often complain:
+ *
+ * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
+ *
+ * msleep() can sleep, so drop the spinlock while we wait.
+ */
+ msleep(100);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ bcm63xx_select_phy_mode(udc, false);
+ set_clocks(udc, false);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops bcm63xx_udc_ops = {
+ .get_frame = bcm63xx_udc_get_frame,
+ .pullup = bcm63xx_udc_pullup,
+ .udc_start = bcm63xx_udc_start,
+ .udc_stop = bcm63xx_udc_stop,
+};
+
+/***********************************************************************
+ * IRQ handling
+ ***********************************************************************/
+
+/**
+ * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
+ * @udc: Reference to the device controller.
+ *
+ * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
+ * The driver never sees the raw control packets coming in on the ep0
+ * IUDMA channel, but at least we get an interrupt event to tell us that
+ * new values are waiting in the USBD_STATUS register.
+ */
+static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
+{
+ u32 reg = usbd_readl(udc, USBD_STATUS_REG);
+
+ udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
+ udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
+ udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
+ USBD_STATUS_ALTINTF_SHIFT;
+ bcm63xx_ep_setup(udc);
+}
+
+/**
+ * bcm63xx_update_link_speed - Check to see if the link speed has changed.
+ * @udc: Reference to the device controller.
+ *
+ * The link speed update coincides with a SETUP IRQ. Returns 1 if the
+ * speed has changed, so that the caller can update the endpoint settings.
+ */
+static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
+{
+ u32 reg = usbd_readl(udc, USBD_STATUS_REG);
+ enum usb_device_speed oldspeed = udc->gadget.speed;
+
+ switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
+ case BCM63XX_SPD_HIGH:
+ udc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case BCM63XX_SPD_FULL:
+ udc->gadget.speed = USB_SPEED_FULL;
+ break;
+ default:
+ /* this should never happen */
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ dev_err(udc->dev,
+ "received SETUP packet with invalid link speed\n");
+ return 0;
+ }
+
+ if (udc->gadget.speed != oldspeed) {
+ dev_info(udc->dev, "link up, %s-speed mode\n",
+ udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/**
+ * bcm63xx_update_wedge - Iterate through wedged endpoints.
+ * @udc: Reference to the device controller.
+ * @new_status: true to "refresh" wedge status; false to clear it.
+ *
+ * On a SETUP interrupt, we need to manually "refresh" the wedge status
+ * because the controller hardware is designed to automatically clear
+ * stalls in response to a CLEAR_FEATURE request from the host.
+ *
+ * On a RESET interrupt, we do want to restore all wedged endpoints.
+ */
+static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
+{
+ int i;
+
+ for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
+ bcm63xx_set_stall(udc, &udc->bep[i], new_status);
+ if (!new_status)
+ clear_bit(i, &udc->wedgemap);
+ }
+}
+
+/**
+ * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
+ * @irq: IRQ number (unused).
+ * @dev_id: Reference to the device controller.
+ *
+ * This is where we handle link (VBUS) down, USB reset, speed changes,
+ * SET_CONFIGURATION, and SET_INTERFACE events.
+ */
+static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
+{
+ struct bcm63xx_udc *udc = dev_id;
+ u32 stat;
+ bool disconnected = false, bus_reset = false;
+
+ stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
+ usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
+
+ usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
+
+ spin_lock(&udc->lock);
+ if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
+ /* VBUS toggled */
+
+ if (!(usbd_readl(udc, USBD_EVENTS_REG) &
+ USBD_EVENTS_USB_LINK_MASK) &&
+ udc->gadget.speed != USB_SPEED_UNKNOWN)
+ dev_info(udc->dev, "link down\n");
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ disconnected = true;
+ }
+ if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
+ bcm63xx_fifo_setup(udc);
+ bcm63xx_fifo_reset(udc);
+ bcm63xx_ep_setup(udc);
+
+ bcm63xx_update_wedge(udc, false);
+
+ udc->ep0_req_reset = 1;
+ schedule_work(&udc->ep0_wq);
+ bus_reset = true;
+ }
+ if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
+ if (bcm63xx_update_link_speed(udc)) {
+ bcm63xx_fifo_setup(udc);
+ bcm63xx_ep_setup(udc);
+ }
+ bcm63xx_update_wedge(udc, true);
+ }
+ if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
+ bcm63xx_update_cfg_iface(udc);
+ udc->ep0_req_set_cfg = 1;
+ schedule_work(&udc->ep0_wq);
+ }
+ if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
+ bcm63xx_update_cfg_iface(udc);
+ udc->ep0_req_set_iface = 1;
+ schedule_work(&udc->ep0_wq);
+ }
+ spin_unlock(&udc->lock);
+
+ if (disconnected && udc->driver)
+ udc->driver->disconnect(&udc->gadget);
+ else if (bus_reset && udc->driver)
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
+ * @irq: IRQ number (unused).
+ * @dev_id: Reference to the IUDMA channel that generated the interrupt.
+ *
+ * For the two ep0 channels, we have special handling that triggers the
+ * ep0 worker thread. For normal bulk/intr channels, either queue up
+ * the next buffer descriptor for the transaction (incomplete transaction),
+ * or invoke the completion callback (complete transactions).
+ */
+static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
+{
+ struct iudma_ch *iudma = dev_id;
+ struct bcm63xx_udc *udc = iudma->udc;
+ struct bcm63xx_ep *bep;
+ struct usb_request *req = NULL;
+ struct bcm63xx_req *breq = NULL;
+ int rc;
+ bool is_done = false;
+
+ spin_lock(&udc->lock);
+
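+ /* ack the BUFDONE interrupt before inspecting the descriptors */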
+ usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
+ ENETDMAC_IR_REG, iudma->ch_idx);
+ bep = iudma->bep;
+ rc = iudma_read(udc, iudma);
+
+ /* special handling for EP0 RX (0) and TX (1) */
+ if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
+ iudma->ch_idx == IUDMA_EP0_TXCHAN) {
+ req = udc->ep0_request;
+ breq = our_req(req);
+
+ /* a single request could require multiple submissions */
+ if (rc >= 0) {
+ req->actual += rc;
+
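+ /*
+ * finished if the request is filled, or if the HW returned
+ * fewer bytes than were queued (short packet)
+ */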
+ if (req->actual >= req->length || breq->bd_bytes > rc) {
+ udc->ep0_req_completed = 1;
+ is_done = true;
+ schedule_work(&udc->ep0_wq);
+
+ /* "actual" on a ZLP is 1 byte */
+ req->actual = min(req->actual, req->length);
+ } else {
+ /* queue up the next BD (same request) */
+ iudma_write(udc, iudma, breq);
+ }
+ }
+ } else if (!list_empty(&bep->queue)) {
+ breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
+ req = &breq->req;
+
+ if (rc >= 0) {
+ req->actual += rc;
+
+ if (req->actual >= req->length || breq->bd_bytes > rc) {
+ is_done = true;
+ list_del(&breq->queue);
+
+ req->actual = min(req->actual, req->length);
+
+ if (!list_empty(&bep->queue)) {
+ struct bcm63xx_req *next;
+
+ next = list_first_entry(&bep->queue,
+ struct bcm63xx_req, queue);
+ iudma_write(udc, iudma, next);
+ }
+ } else {
+ iudma_write(udc, iudma, breq);
+ }
+ }
+ }
+ spin_unlock(&udc->lock);
+
+ if (is_done) {
+ usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
+ if (req->complete)
+ req->complete(&bep->ep, req);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/***********************************************************************
+ * Debug filesystem
+ ***********************************************************************/
+
+/*
+ * bcm63xx_usbd_dbg_show - Show USBD controller state.
+ * @s: seq_file to which the information will be written.
+ * @p: Unused.
+ *
+ * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
+ */
+static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
+{
+ struct bcm63xx_udc *udc = s->private;
+
+ if (!udc->driver)
+ return -ENODEV;
+
+ seq_printf(s, "ep0 state: %s\n",
+ bcm63xx_ep0_state_names[udc->ep0state]);
+ seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
+ udc->ep0_req_reset ? "reset " : "",
+ udc->ep0_req_set_cfg ? "set_cfg " : "",
+ udc->ep0_req_set_iface ? "set_iface " : "",
+ udc->ep0_req_shutdown ? "shutdown " : "",
+ udc->ep0_request ? "pending " : "",
+ udc->ep0_req_completed ? "completed " : "",
+ udc->ep0_reply ? "reply " : "");
+ seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
+ udc->cfg, udc->iface, udc->alt_iface);
+ seq_printf(s, "regs:\n");
+ seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
+ usbd_readl(udc, USBD_CONTROL_REG),
+ usbd_readl(udc, USBD_STRAPS_REG),
+ usbd_readl(udc, USBD_STATUS_REG));
+ seq_printf(s, " events: %08x; stall: %08x\n",
+ usbd_readl(udc, USBD_EVENTS_REG),
+ usbd_readl(udc, USBD_STALL_REG));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
+
+/*
+ * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
+ * @s: seq_file to which the information will be written.
+ * @p: Unused.
+ *
+ * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
+ */
+static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
+{
+ struct bcm63xx_udc *udc = s->private;
+ int ch_idx, i;
+ u32 sram2, sram3;
+
+ if (!udc->driver)
+ return -ENODEV;
+
+ for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
+ struct iudma_ch *iudma = &udc->iudma[ch_idx];
+ struct list_head *pos;
+
+ seq_printf(s, "IUDMA channel %d -- ", ch_idx);
+ switch (iudma_defaults[ch_idx].ep_type) {
+ case BCMEP_CTRL:
+ seq_printf(s, "control");
+ break;
+ case BCMEP_BULK:
+ seq_printf(s, "bulk");
+ break;
+ case BCMEP_INTR:
+ seq_printf(s, "interrupt");
+ break;
+ }
+ seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
+ seq_printf(s, " [ep%d]:\n",
+ max_t(int, iudma_defaults[ch_idx].ep_num, 0));
+ seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
+ usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
+
+ sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
+ sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
+ seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
+ usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
+ sram2 >> 16, sram2 & 0xffff,
+ sram3 >> 16, sram3 & 0xffff,
+ usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
+ seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
+ iudma->n_bds);
+
+ if (iudma->bep) {
+ i = 0;
+ list_for_each(pos, &iudma->bep->queue)
+ i++;
+ seq_printf(s, "; %d queued\n", i);
+ } else {
+ seq_printf(s, "\n");
+ }
+
+ for (i = 0; i < iudma->n_bds; i++) {
+ struct bcm_enet_desc *d = &iudma->bd_ring[i];
+
+ seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
+ i * sizeof(*d), i,
+ d->len_stat >> 16, d->len_stat & 0xffff,
+ d->address);
+ if (d == iudma->read_bd)
+ seq_printf(s, " <<RD");
+ if (d == iudma->write_bd)
+ seq_printf(s, " <<WR");
+ seq_printf(s, "\n");
+ }
+
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
+
+/**
+ * bcm63xx_udc_init_debugfs - Create debugfs entries.
+ * @udc: Reference to the device controller.
+ */
+static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
+{
+ struct dentry *root;
+
+ if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
+ return;
+
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
+ debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
+ debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
+}
+
+/**
+ * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
+ * @udc: Reference to the device controller.
+ *
+ * debugfs_lookup_and_remove() is safe to call even if the entry was
+ * never created.
+ */
+static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
+{
+ debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
+}
+
+/***********************************************************************
+ * Driver init/exit
+ ***********************************************************************/
+
+/**
+ * bcm63xx_udc_probe - Initialize a new instance of the UDC.
+ * @pdev: Platform device struct from the bcm63xx BSP code.
+ *
+ * Note that platform data is required, because pd.port_no varies from chip
+ * to chip and is used to switch the correct USB port to device mode.
+ */
+static int bcm63xx_udc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
+ struct bcm63xx_udc *udc;
+ int rc = -ENOMEM, i, irq;
+
+ udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, udc);
+ udc->dev = dev;
+ udc->pd = pd;
+
+ if (!pd) {
+ dev_err(dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(udc->usbd_regs))
+ return PTR_ERR(udc->usbd_regs);
+
+ udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(udc->iudma_regs))
+ return PTR_ERR(udc->iudma_regs);
+
+ spin_lock_init(&udc->lock);
+ INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
+
+ udc->gadget.ops = &bcm63xx_udc_ops;
+ udc->gadget.name = dev_name(dev);
+
+ if (!pd->use_fullspeed && !use_fullspeed)
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.max_speed = USB_SPEED_FULL;
+
+ /* request clocks, allocate buffers, and clear any pending IRQs */
+ rc = bcm63xx_init_udc_hw(udc);
+ if (rc)
+ return rc;
+
+ rc = -ENXIO;
+
+ /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ rc = irq;
+ goto out_uninit;
+ }
+ if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
+ dev_name(dev), udc) < 0)
+ goto report_request_failure;
+
+ /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
+ for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0) {
+ rc = irq;
+ goto out_uninit;
+ }
+ if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
+ dev_name(dev), &udc->iudma[i]) < 0)
+ goto report_request_failure;
+ }
+
+ bcm63xx_udc_init_debugfs(udc);
+ rc = usb_add_gadget_udc(dev, &udc->gadget);
+ if (!rc)
+ return 0;
+
+ bcm63xx_udc_cleanup_debugfs(udc);
+out_uninit:
+ bcm63xx_uninit_udc_hw(udc);
+ return rc;
+
+report_request_failure:
+ dev_err(dev, "error requesting IRQ #%d\n", irq);
+ goto out_uninit;
+}
+
+/**
+ * bcm63xx_udc_remove - Remove the device from the system.
+ * @pdev: Platform device struct from the bcm63xx BSP code.
+ */
+static int bcm63xx_udc_remove(struct platform_device *pdev)
+{
+ struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
+
+ bcm63xx_udc_cleanup_debugfs(udc);
+ usb_del_gadget_udc(&udc->gadget);
+ BUG_ON(udc->driver);
+
+ bcm63xx_uninit_udc_hw(udc);
+
+ return 0;
+}
+
+static struct platform_driver bcm63xx_udc_driver = {
+ .probe = bcm63xx_udc_probe,
+ .remove = bcm63xx_udc_remove,
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+module_platform_driver(bcm63xx_udc_driver);
+
+MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
+MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_MODULE_NAME);
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
new file mode 100644
index 000000000..8bedb7f64
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_BDC_UDC
+ tristate "Broadcom USB3.0 device controller IP driver(BDC)"
+ depends on USB_GADGET && HAS_DMA
+ default ARCH_BRCMSTB
+
+ help
+ BDC is Broadcom's USB3.0 device controller IP. If your SoC has a
+ BDC IP, select this driver.
+
+ Say "y" here to link the driver statically, or "m" to build a dynamically
+ linked module called "bdc".
diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile
new file mode 100644
index 000000000..1b21c9518
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_USB_BDC_UDC) += bdc.o
+bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o
+
+ifneq ($(CONFIG_USB_GADGET_VERBOSE),)
+ bdc-y += bdc_dbg.o
+endif
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
new file mode 100644
index 000000000..8d00b1239
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * bdc.h - header for the BRCM BDC USB3.0 device controller
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+
+#ifndef __LINUX_BDC_H__
+#define __LINUX_BDC_H__
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <asm/unaligned.h>
+
+#define BRCM_BDC_NAME "bdc"
+#define BRCM_BDC_DESC "Broadcom USB Device Controller driver"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+/* BDC command operation timeout in usec */
+#define BDC_CMD_TIMEOUT 1000
+/* BDC controller operation timeout in usec */
+#define BDC_COP_TIMEOUT 500
+
+/*
+ * Maximum size of ep0 response buffer for ch9 requests,
+ * the set_sel request uses 6 so far, the max.
+ */
+#define EP0_RESPONSE_BUFF 6
+/* Start with SS as default */
+#define EP0_MAX_PKT_SIZE 512
+
+/* 64 entries in a SRR */
+#define NUM_SR_ENTRIES 64
+
+/* Num of bds per table */
+#define NUM_BDS_PER_TABLE 64
+
+/* Num of tables in bd list for control, bulk and int ep */
+#define NUM_TABLES 2
+
+/* Num of tables in bd list for Isoch ep */
+#define NUM_TABLES_ISOCH 6
+
+/* U1 Timeout default: 248usec */
+#define U1_TIMEOUT 0xf8
+
+/* Interrupt coalescence in usec */
+#define INT_CLS 500
+
+/* Register offsets */
+/* Configuration and Capability registers */
+#define BDC_BDCCFG0 0x00
+#define BDC_BDCCFG1 0x04
+#define BDC_BDCCAP0 0x08
+#define BDC_BDCCAP1 0x0c
+#define BDC_CMDPAR0 0x10
+#define BDC_CMDPAR1 0x14
+#define BDC_CMDPAR2 0x18
+#define BDC_CMDSC 0x1c
+#define BDC_USPC 0x20
+#define BDC_USPPMS 0x28
+#define BDC_USPPM2 0x2c
+#define BDC_SPBBAL 0x38
+#define BDC_SPBBAH 0x3c
+#define BDC_BDCSC 0x40
+#define BDC_XSFNTF 0x4c
+
+#define BDC_DVCSA 0x50
+#define BDC_DVCSB 0x54
+#define BDC_EPSTS0 0x60
+#define BDC_EPSTS1 0x64
+#define BDC_EPSTS2 0x68
+#define BDC_EPSTS3 0x6c
+#define BDC_EPSTS4 0x70
+#define BDC_EPSTS5 0x74
+#define BDC_EPSTS6 0x78
+#define BDC_EPSTS7 0x7c
+#define BDC_SRRBAL(n) (0x200 + ((n) * 0x10))
+#define BDC_SRRBAH(n) (0x204 + ((n) * 0x10))
+#define BDC_SRRINT(n) (0x208 + ((n) * 0x10))
+#define BDC_INTCTLS(n) (0x20c + ((n) * 0x10))
+
+/* Extended capability regs */
+#define BDC_FSCNOC 0xcd4
+#define BDC_FSCNIC 0xce4
+#define NUM_NCS(p) ((p) >> 28)
+
+/* Register bit fields and Masks */
+/* BDC Configuration 0 */
+#define BDC_PGS(p) (((p) & (0x7 << 8)) >> 8)
+#define BDC_SPB(p) ((p) & 0x7)
+
+/* BDC Capability1 */
+#define BDC_P64 BIT(0)
+
+/* BDC Command register */
+#define BDC_CMD_FH 0xe
+#define BDC_CMD_DNC 0x6
+#define BDC_CMD_EPO 0x4
+#define BDC_CMD_BLA 0x3
+#define BDC_CMD_EPC 0x2
+#define BDC_CMD_DVC 0x1
+#define BDC_CMD_CWS BIT(5)
+#define BDC_CMD_CST(p) (((p) & (0xf << 6))>>6)
+#define BDC_CMD_EPN(p) (((p) & 0x1f) << 10)
+#define BDC_SUB_CMD_ADD (0x1 << 17)
+#define BDC_SUB_CMD_FWK (0x4 << 17)
+/* Reset sequence number */
+#define BDC_CMD_EPO_RST_SN (0x1 << 16)
+#define BDC_CMD_EP0_XSD (0x1 << 16)
+#define BDC_SUB_CMD_ADD_EP (0x1 << 17)
+#define BDC_SUB_CMD_DRP_EP (0x2 << 17)
+#define BDC_SUB_CMD_EP_STP (0x2 << 17)
+#define BDC_SUB_CMD_EP_STL (0x4 << 17)
+#define BDC_SUB_CMD_EP_RST (0x1 << 17)
+#define BDC_CMD_SRD BIT(27)
+
+/* CMD completion status */
+#define BDC_CMDS_SUCC 0x1
+#define BDC_CMDS_PARA 0x3
+#define BDC_CMDS_STAT 0x4
+#define BDC_CMDS_FAIL 0x5
+#define BDC_CMDS_INTL 0x6
+#define BDC_CMDS_BUSY 0xf
+
+/* CMDSC Param 2 shifts */
+#define EPT_SHIFT 22
+#define MP_SHIFT 10
+#define MB_SHIFT 6
+#define EPM_SHIFT 4
+
+/* BDC USPSC */
+#define BDC_VBC BIT(31)
+#define BDC_PRC BIT(30)
+#define BDC_PCE BIT(29)
+#define BDC_CFC BIT(28)
+#define BDC_PCC BIT(27)
+#define BDC_PSC BIT(26)
+#define BDC_VBS BIT(25)
+#define BDC_PRS BIT(24)
+#define BDC_PCS BIT(23)
+#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
+#define BDC_SCN BIT(8)
+#define BDC_SDC BIT(7)
+#define BDC_SWS BIT(4)
+
+#define BDC_USPSC_RW (BDC_SCN|BDC_SDC|BDC_SWS|0xf)
+
+#define BDC_SPEED_FS 0x1
+#define BDC_SPEED_LS 0x2
+#define BDC_SPEED_HS 0x3
+#define BDC_SPEED_SS 0x4
+
+#define BDC_PST(p) ((p) & 0xf)
+#define BDC_PST_MASK 0xf
+
+/* USPPMS */
+#define BDC_U2E BIT(31)
+#define BDC_U1E BIT(30)
+#define BDC_U2A BIT(29)
+#define BDC_PORT_W1S BIT(17)
+#define BDC_U1T(p) ((p) & 0xff)
+#define BDC_U2T(p) (((p) & 0xff) << 8)
+#define BDC_U1T_MASK 0xff
+
+/* USBPM2 */
+/* Hardware LPM Enable */
+#define BDC_HLE BIT(16)
+
+/* BDC Status and Control */
+#define BDC_COP_RST (1 << 29)
+#define BDC_COP_RUN (2 << 29)
+#define BDC_COP_STP (4 << 29)
+
+#define BDC_COP_MASK (BDC_COP_RST|BDC_COP_RUN|BDC_COP_STP)
+
+#define BDC_COS BIT(28)
+#define BDC_CSTS(p) (((p) & (0x7 << 20)) >> 20)
+#define BDC_MASK_MCW BIT(7)
+#define BDC_GIE BIT(1)
+#define BDC_GIP BIT(0)
+
+#define BDC_HLT 1
+#define BDC_NOR 2
+#define BDC_OIP 7
+
+/* Buffer descriptor and Status report bit fields and masks */
+#define BD_TYPE_BITMASK (0xf)
+#define BD_CHAIN 0xf
+
+#define BD_TFS_SHIFT 4
+#define BD_SOT BIT(26)
+#define BD_EOT BIT(27)
+#define BD_ISP BIT(29)
+#define BD_IOC BIT(30)
+#define BD_SBF BIT(31)
+
+#define BD_INTR_TARGET(p) (((p) & 0x1f) << 27)
+
+#define BDC_SRR_RWS BIT(4)
+#define BDC_SRR_RST BIT(3)
+#define BDC_SRR_ISR BIT(2)
+#define BDC_SRR_IE BIT(1)
+#define BDC_SRR_IP BIT(0)
+#define BDC_SRR_EPI(p) (((p) & (0xff << 24)) >> 24)
+#define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16)
+#define BDC_SRR_DPI_MASK 0x00ff0000
+
+#define MARK_CHAIN_BD (BD_CHAIN|BD_EOT|BD_SOT)
+
+/* Control transfer BD specific fields */
+#define BD_DIR_IN BIT(25)
+
+#define BDC_PTC_MASK 0xf0000000
+
+/* status report defines */
+#define SR_XSF 0
+#define SR_USPC 4
+#define SR_BD_LEN(p) ((p) & 0xffffff)
+
+#define XSF_SUCC 0x1
+#define XSF_SHORT 0x3
+#define XSF_BABB 0x4
+#define XSF_SETUP_RECV 0x6
+#define XSF_DATA_START 0x7
+#define XSF_STATUS_START 0x8
+
+#define XSF_STS(p) (((p) >> 28) & 0xf)
+
+/* Transfer BD fields */
+#define BD_LEN(p) ((p) & 0x1ffff)
+#define BD_LTF BIT(25)
+#define BD_TYPE_DS 0x1
+#define BD_TYPE_SS 0x2
+
+#define BDC_EP_ENABLED BIT(0)
+#define BDC_EP_STALL BIT(1)
+#define BDC_EP_STOP BIT(2)
+
+/* One BD can transfer max 65536 bytes */
+#define BD_MAX_BUFF_SIZE (1 << 16)
+/* Maximum bytes in one XFR, Refer to BDC spec */
+#define MAX_XFR_LEN 16777215
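+/*
+ * 16777215 is 0xffffff, i.e. the same 24-bit width as the SR_BD_LEN
+ * residual length field above.
+ */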
+
+/* defines for Force Header command */
+#define DEV_NOTF_TYPE 6
+#define FWK_SUBTYPE 1
+#define TRA_PACKET 4
+
+#define to_bdc_ep(e) container_of(e, struct bdc_ep, usb_ep)
+#define to_bdc_req(r) container_of(r, struct bdc_req, usb_req)
+#define gadget_to_bdc(g) container_of(g, struct bdc, gadget)
+
+/* FUNCTION WAKE DEV NOTIFICATION interval, USB3 spec table 8.13 */
+#define BDC_TNOTIFY 2500 /* in ms */
+/* Devstatus bitfields */
+#define REMOTE_WAKEUP_ISSUED BIT(16)
+#define DEVICE_SUSPENDED BIT(17)
+#define FUNC_WAKE_ISSUED BIT(18)
+#define REMOTE_WAKE_ENABLE (1 << USB_DEVICE_REMOTE_WAKEUP)
+
+/* On disconnect, preserve these bits and clear rest */
+#define DEVSTATUS_CLEAR (1 << USB_DEVICE_SELF_POWERED)
+/* Hardware and software Data structures */
+
+/* Endpoint bd: buffer descriptor */
+struct bdc_bd {
+ __le32 offset[4];
+};
+
+/* Status report in Status report ring (srr) */
+struct bdc_sr {
+ __le32 offset[4];
+};
+
+/* bd_table: contiguous bd's in a table */
+struct bd_table {
+ struct bdc_bd *start_bd;
+ /* dma address of start bd of table*/
+ dma_addr_t dma;
+};
+
+/*
+ * Each endpoint has a bdl (buffer descriptor list); a bdl consists of one
+ * or more bd tables chained to each other through a chain bd, and every
+ * table has an equal number of bds. The software uses the bdi (bd index)
+ * to refer to a particular bd in the list.
+ */
+struct bd_list {
+ /* Array of bd table pointers*/
+ struct bd_table **bd_table_array;
+ /* How many tables chained to each other */
+ int num_tabs;
+ /* Max_bdi = num_tabs * num_bds_table - 1 */
+ int max_bdi;
+ /* current enq bdi from sw point of view */
+ int eqp_bdi;
+ /* current deq bdi from sw point of view */
+ int hwd_bdi;
+ /* numbers of bds per table */
+ int num_bds_table;
+};
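+
+/*
+ * Worked example of the indexing above: with NUM_TABLES = 2 and
+ * NUM_BDS_PER_TABLE = 64, max_bdi = 2 * 64 - 1 = 127, and bdi 70 maps to
+ * table 70 / 64 = 1 at local index 70 % 64 = 6.
+ */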
+
+struct bdc_req;
+
+/* Representation of a transfer, one transfer can have multiple bd's */
+struct bd_transfer {
+ struct bdc_req *req;
+ /* start bd index */
+ int start_bdi;
+ /* this will be the next hw dqp when this transfer completes */
+ int next_hwd_bdi;
+ /* number of bds in this transfer */
+ int num_bds;
+};
+
+/*
+ * Representation of a gadget request, every gadget request is contained
+ * by 1 bd_transfer.
+ */
+struct bdc_req {
+ struct usb_request usb_req;
+ struct list_head queue;
+ struct bdc_ep *ep;
+ /* only one Transfer per request */
+ struct bd_transfer bd_xfr;
+ int epnum;
+};
+
+/* scratchpad buffer needed by bdc hardware */
+struct bdc_scratchpad {
+ dma_addr_t sp_dma;
+ void *buff;
+ u32 size;
+};
+
+/* endpoint representation */
+struct bdc_ep {
+ struct usb_ep usb_ep;
+ struct list_head queue;
+ struct bdc *bdc;
+ u8 ep_type;
+ u8 dir;
+ u8 ep_num;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned int flags;
+ char name[20];
+ /* endpoint bd list*/
+ struct bd_list bd_list;
+ /*
+ * The HW generates an extra event for multi-bd transfers; this flag
+ * helps in ignoring that extra event
+ */
+ bool ignore_next_sr;
+};
+
+/* bdc command parameter structure */
+struct bdc_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
+
+/* status report ring (srr); one srr is currently supported for the whole system */
+struct srr {
+ struct bdc_sr *sr_bds;
+ u16 eqp_index;
+ u16 dqp_index;
+ dma_addr_t dma_addr;
+};
+
+/* EP0 states */
+enum bdc_ep0_state {
+ WAIT_FOR_SETUP = 0,
+ WAIT_FOR_DATA_START,
+ WAIT_FOR_DATA_XMIT,
+ WAIT_FOR_STATUS_START,
+ WAIT_FOR_STATUS_XMIT,
+ STATUS_PENDING
+};
+
+/* Link states */
+enum bdc_link_state {
+ BDC_LINK_STATE_U0 = 0x00,
+ BDC_LINK_STATE_U3 = 0x03,
+ BDC_LINK_STATE_RX_DET = 0x05,
+ BDC_LINK_STATE_RESUME = 0x0f
+};
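+
+/*
+ * The values above appear to follow the standard USB3 port link state
+ * encoding: U0 = 0, U3 = 3, Rx.Detect = 5 and Resume = 15.
+ */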
+
+/* representation of bdc */
+struct bdc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+ struct device *dev;
+ /* device lock */
+ spinlock_t lock;
+
+ /* generic phy */
+ struct phy **phys;
+ int num_phys;
+ /* num of endpoints for a particular instantiation of IP */
+ unsigned int num_eps;
+ /*
+ * Array of eps; it uses the same index convention as the bdc hw, i.e.
+ * 1 for ep0, 2 for 1out, 3 for 1in, ...
+ */
+ struct bdc_ep **bdc_ep_array;
+ void __iomem *regs;
+ struct bdc_scratchpad scratchpad;
+ u32 sp_buff_size;
+ /* current driver supports 1 status ring */
+ struct srr srr;
+ /* Last received setup packet */
+ struct usb_ctrlrequest setup_pkt;
+ struct bdc_req ep0_req;
+ struct bdc_req status_req;
+ enum bdc_ep0_state ep0_state;
+ bool delayed_status;
+ bool zlp_needed;
+ bool reinit;
+ bool pullup;
+ /* Bits 0-15 are standard and 16-31 for proprietary information */
+ u32 devstatus;
+ int irq;
+ void *mem;
+ u32 dev_addr;
+ /* DMA pools */
+ struct dma_pool *bd_table_pool;
+ u8 test_mode;
+ /* array of callbacks for various status report handlers */
+ void (*sr_handler[2])(struct bdc *, struct bdc_sr *);
+ /* ep0 callback handlers */
+ void (*sr_xsf_ep0[3])(struct bdc *, struct bdc_sr *);
+ /* ep0 response buffer for ch9 requests like GET_STATUS and SET_SEL */
+ unsigned char ep0_response_buff[EP0_RESPONSE_BUFF];
+ /*
+ * Timer to check if the host resumed transfer after the bdc sent a Func
+ * Wake notification packet following a remote wakeup. If not, resend the
+ * Func Wake packet every 2.5 secs; refer to USB3 spec section 8.5.6.4.
+ */
+ struct delayed_work func_wake_notify;
+ struct clk *clk;
+};
+
+static inline u32 bdc_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void bdc_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
+
+/* Buffer descriptor list operations */
+void bdc_notify_xfr(struct bdc *bdc, u32 epnum);
+void bdc_softconn(struct bdc *bdc);
+void bdc_softdisconn(struct bdc *bdc);
+int bdc_run(struct bdc *bdc);
+int bdc_stop(struct bdc *bdc);
+int bdc_reset(struct bdc *bdc);
+int bdc_udc_init(struct bdc *bdc);
+void bdc_udc_exit(struct bdc *bdc);
+int bdc_reinit(struct bdc *bdc);
+
+/* Status report handlers */
+/* Upstream port status change sr */
+void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport);
+/* transfer sr */
+void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport);
+/* EP0 XSF handlers */
+void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport);
+void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport);
+void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport);
+
+#endif /* __LINUX_BDC_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
new file mode 100644
index 000000000..1848ced07
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * bdc_cmd.c - BRCM BDC USB3.0 device controller
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "bdc.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+/* Issues a cmd to cmd processor and waits for cmd completion */
+static int bdc_issue_cmd(struct bdc *bdc, u32 cmd_sc, u32 param0,
+ u32 param1, u32 param2)
+{
+ u32 timeout = BDC_CMD_TIMEOUT;
+ u32 cmd_status;
+ u32 temp;
+
+ bdc_writel(bdc->regs, BDC_CMDPAR0, param0);
+ bdc_writel(bdc->regs, BDC_CMDPAR1, param1);
+ bdc_writel(bdc->regs, BDC_CMDPAR2, param2);
+
+ /* Issue the cmd */
+ /* Make sure the cmd params are written before asking HW to exec cmd */
+ wmb();
+ bdc_writel(bdc->regs, BDC_CMDSC, cmd_sc | BDC_CMD_CWS | BDC_CMD_SRD);
+ do {
+ temp = bdc_readl(bdc->regs, BDC_CMDSC);
+ dev_dbg_ratelimited(bdc->dev, "cmdsc=%x", temp);
+ cmd_status = BDC_CMD_CST(temp);
+ if (cmd_status != BDC_CMDS_BUSY) {
+ dev_dbg(bdc->dev,
+ "command completed cmd_sts:%x\n", cmd_status);
+ return cmd_status;
+ }
+ udelay(1);
+ } while (timeout--);
+
+ dev_err(bdc->dev,
+ "command operation timedout cmd_status=%d\n", cmd_status);
+
+ return cmd_status;
+}
+
+/* Submits a cmd and analyzes the return value of bdc_issue_cmd */
+static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
+ u32 param0, u32 param1, u32 param2)
+{
+ u32 temp, cmd_status;
+ int ret;
+
+ temp = bdc_readl(bdc->regs, BDC_CMDSC);
+ dev_dbg(bdc->dev,
+ "%s:CMDSC:%08x cmdsc:%08x param0=%08x param1=%08x param2=%08x\n",
+ __func__, temp, cmd_sc, param0, param1, param2);
+
+ cmd_status = BDC_CMD_CST(temp);
+ if (cmd_status == BDC_CMDS_BUSY) {
+ dev_err(bdc->dev, "command processor busy: %x\n", cmd_status);
+ return -EBUSY;
+ }
+ ret = bdc_issue_cmd(bdc, cmd_sc, param0, param1, param2);
+ switch (ret) {
+ case BDC_CMDS_SUCC:
+ dev_dbg(bdc->dev, "command completed successfully\n");
+ ret = 0;
+ break;
+
+ case BDC_CMDS_PARA:
+ dev_err(bdc->dev, "command parameter error\n");
+ ret = -EINVAL;
+ break;
+
+ case BDC_CMDS_STAT:
+ dev_err(bdc->dev, "Invalid device/ep state\n");
+ ret = -EINVAL;
+ break;
+
+ case BDC_CMDS_FAIL:
+ dev_err(bdc->dev, "Command failed?\n");
+ ret = -EAGAIN;
+ break;
+
+ case BDC_CMDS_INTL:
+ dev_err(bdc->dev, "BDC Internal error\n");
+ ret = -ECONNRESET;
+ break;
+
+ case BDC_CMDS_BUSY:
+ dev_err(bdc->dev,
+ "command timedout waited for %dusec\n",
+ BDC_CMD_TIMEOUT);
+ ret = -ECONNRESET;
+ break;
+ default:
+ dev_dbg(bdc->dev, "Unknown command completion code:%x\n", ret);
+ }
+
+ return ret;
+}
+
+/* Deconfigure the endpoint from HW */
+int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep)
+{
+ u32 cmd_sc;
+
+ cmd_sc = BDC_SUB_CMD_DRP_EP|BDC_CMD_EPN(ep->ep_num)|BDC_CMD_EPC;
+ dev_dbg(bdc->dev, "%s ep->ep_num =%d cmd_sc=%x\n", __func__,
+ ep->ep_num, cmd_sc);
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+}
+
+/* Reinitialize the bd list after a config ep command */
+static void ep_bd_list_reinit(struct bdc_ep *ep)
+{
+ struct bdc *bdc = ep->bdc;
+ struct bdc_bd *bd;
+
+ ep->bd_list.eqp_bdi = 0;
+ ep->bd_list.hwd_bdi = 0;
+ bd = ep->bd_list.bd_table_array[0]->start_bd;
+ dev_dbg(bdc->dev, "%s ep:%p bd:%p\n", __func__, ep, bd);
+ memset(bd, 0, sizeof(struct bdc_bd));
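+ /*
+ * Leave only the Stop BD Fetch bit set on the first bd; it is cleared
+ * again in setup_bd_list_xfr() once a transfer is queued.
+ */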
+ bd->offset[3] |= cpu_to_le32(BD_SBF);
+}
+
+/* Configure an endpoint */
+int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
+{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ u32 param0, param1, param2, cmd_sc;
+ u32 mps, mbs, mul, si;
+ int ret;
+
+ desc = ep->desc;
+ comp_desc = ep->comp_desc;
+ cmd_sc = mul = mbs = param2 = 0;
+ param0 = lower_32_bits(ep->bd_list.bd_table_array[0]->dma);
+ param1 = upper_32_bits(ep->bd_list.bd_table_array[0]->dma);
+ cpu_to_le32s(&param0);
+ cpu_to_le32s(&param1);
+
+ dev_dbg(bdc->dev, "%s: param0=%08x param1=%08x",
+ __func__, param0, param1);
+ si = desc->bInterval;
+ si = clamp_val(si, 1, 16) - 1;
+
+ mps = usb_endpoint_maxp(desc);
+ param2 |= mps << MP_SHIFT;
+ param2 |= usb_endpoint_type(desc) << EPT_SHIFT;
+
+ switch (bdc->gadget.speed) {
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ param2 |= si;
+ if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+ mul = comp_desc->bmAttributes;
+
+ }
+ param2 |= mul << EPM_SHIFT;
+ if (comp_desc)
+ mbs = comp_desc->bMaxBurst;
+ param2 |= mbs << MB_SHIFT;
+ break;
+
+ case USB_SPEED_HIGH:
+ if (usb_endpoint_xfer_isoc(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ param2 |= si;
+
+ mbs = usb_endpoint_maxp_mult(desc);
+ param2 |= mbs << MB_SHIFT;
+ }
+ break;
+
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ /* the hardware expects si in 125 usec units: 1 ms = 2^3 * 125 usec */
+ if (usb_endpoint_xfer_isoc(desc))
+ si += 3;
+
+ /*
+ * FS int endpoints can have a bInterval of 1-255 ms, but the controller
+ * expects 2^si * 125 usec, so convert ms to the nearest power
+ * of 2
+ */
+ if (usb_endpoint_xfer_int(desc))
+ si = fls(desc->bInterval * 8) - 1;
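+ /*
+ * e.g. a 4 ms FS interrupt bInterval: fls(4 * 8) - 1 = 5, and
+ * 2^5 * 125 usec = 4 ms, a minimal sanity check of the math above.
+ */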
+
+ param2 |= si;
+ break;
+ default:
+ dev_err(bdc->dev, "UNKNOWN speed ERR\n");
+ return -EINVAL;
+ }
+
+ cmd_sc |= BDC_CMD_EPC|BDC_CMD_EPN(ep->ep_num)|BDC_SUB_CMD_ADD_EP;
+
+ dev_dbg(bdc->dev, "cmd_sc=%x param2=%08x\n", cmd_sc, param2);
+ ret = bdc_submit_cmd(bdc, cmd_sc, param0, param1, param2);
+ if (ret) {
+ dev_err(bdc->dev, "command failed :%x\n", ret);
+ return ret;
+ }
+ ep_bd_list_reinit(ep);
+
+ return ret;
+}
+
+/*
+ * Change the HW deq pointer, if this command is successful, HW will start
+ * fetching the next bd from address dma_addr.
+ */
+int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr)
+{
+ u32 param0, param1;
+ u32 cmd_sc = 0;
+
+ dev_dbg(bdc->dev, "%s: add=%08llx\n", __func__,
+ (unsigned long long)(dma_addr));
+ param0 = lower_32_bits(dma_addr);
+ param1 = upper_32_bits(dma_addr);
+ cpu_to_le32s(&param0);
+ cpu_to_le32s(&param1);
+
+ cmd_sc |= BDC_CMD_EPN(ep->ep_num)|BDC_CMD_BLA;
+ dev_dbg(bdc->dev, "cmd_sc=%x\n", cmd_sc);
+
+ return bdc_submit_cmd(bdc, cmd_sc, param0, param1, 0);
+}
+
+/* Set the address sent by the host in the SET_ADDRESS request */
+int bdc_address_device(struct bdc *bdc, u32 add)
+{
+ u32 cmd_sc = 0;
+ u32 param2;
+
+ dev_dbg(bdc->dev, "%s: add=%d\n", __func__, add);
+ cmd_sc |= BDC_SUB_CMD_ADD|BDC_CMD_DVC;
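+ /* USB device addresses are 7 bits wide, hence the 0x7f mask */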
+ param2 = add & 0x7f;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, param2);
+}
+
+/* Send a Function Wake notification packet using FH command */
+int bdc_function_wake_fh(struct bdc *bdc, u8 intf)
+{
+ u32 param0, param1;
+ u32 cmd_sc = 0;
+
+ param0 = param1 = 0;
+ dev_dbg(bdc->dev, "%s intf=%d\n", __func__, intf);
+ cmd_sc |= BDC_CMD_FH;
+ param0 |= TRA_PACKET;
+ param0 |= (bdc->dev_addr << 25);
+ param1 |= DEV_NOTF_TYPE;
+ param1 |= (FWK_SUBTYPE<<4);
+ dev_dbg(bdc->dev, "param0=%08x param1=%08x\n", param0, param1);
+
+ return bdc_submit_cmd(bdc, cmd_sc, param0, param1, 0);
+}
+
+/* Send a Function Wake notification packet using DNC command */
+int bdc_function_wake(struct bdc *bdc, u8 intf)
+{
+ u32 cmd_sc = 0;
+ u32 param2 = 0;
+
+ dev_dbg(bdc->dev, "%s intf=%d", __func__, intf);
+ param2 |= intf;
+ cmd_sc |= BDC_SUB_CMD_FWK|BDC_CMD_DNC;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, param2);
+}
+
+/* Stall the endpoint */
+int bdc_ep_set_stall(struct bdc *bdc, int epnum)
+{
+ u32 cmd_sc = 0;
+
+ dev_dbg(bdc->dev, "%s epnum=%d\n", __func__, epnum);
+ /* issue a stall endpoint command */
+ cmd_sc |= BDC_SUB_CMD_EP_STL | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+}
+
+/* resets the endpoint, called when host sends CLEAR_FEATURE(HALT) */
+int bdc_ep_clear_stall(struct bdc *bdc, int epnum)
+{
+ struct bdc_ep *ep;
+ u32 cmd_sc = 0;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s: epnum=%d\n", __func__, epnum);
+ ep = bdc->bdc_ep_array[epnum];
+ /*
+ * If the endpoint is not already stalled, stall it first and then issue
+ * the clear stall; this resets the seq number for non-EP0 eps.
+ */
+ if (epnum != 1) {
+ /* if the endpoint is not stalled */
+ if (!(ep->flags & BDC_EP_STALL)) {
+ ret = bdc_ep_set_stall(bdc, epnum);
+ if (ret)
+ return ret;
+ }
+ }
+ /* Reset the seq number for all but ep0 (preserve it for ep0 only) */
+ if (epnum != 1)
+ cmd_sc |= BDC_CMD_EPO_RST_SN;
+
+ /* issue a reset endpoint command */
+ cmd_sc |= BDC_SUB_CMD_EP_RST | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+ if (ret) {
+ dev_err(bdc->dev, "command failed:%x\n", ret);
+ return ret;
+ }
+ bdc_notify_xfr(bdc, epnum);
+
+ return ret;
+}
+
+/* Stop the endpoint, called when software wants to dequeue some request */
+int bdc_stop_ep(struct bdc *bdc, int epnum)
+{
+ struct bdc_ep *ep;
+ u32 cmd_sc = 0;
+ int ret;
+
+ ep = bdc->bdc_ep_array[epnum];
+ dev_dbg(bdc->dev, "%s: ep:%s ep->flags:%08x\n", __func__,
+ ep->name, ep->flags);
+ /* Endpoint has to be in running state to execute stop ep command */
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+ dev_err(bdc->dev, "stop endpoint called for disabled ep\n");
+ return -EINVAL;
+ }
+ if ((ep->flags & BDC_EP_STALL) || (ep->flags & BDC_EP_STOP))
+ return 0;
+
+ /* issue a stop endpoint command */
+ cmd_sc |= BDC_CMD_EP0_XSD | BDC_SUB_CMD_EP_STP
+ | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+ if (ret) {
+ dev_err(bdc->dev,
+ "stop endpoint command didn't complete:%d ep:%s\n",
+ ret, ep->name);
+ return ret;
+ }
+ ep->flags |= BDC_EP_STOP;
+ bdc_dump_epsts(bdc);
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
new file mode 100644
index 000000000..533ad52fa
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * bdc_cmd.h - header for the BDC command operations
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+#ifndef __LINUX_BDC_CMD_H__
+#define __LINUX_BDC_CMD_H__
+
+/* Command operations */
+int bdc_address_device(struct bdc *bdc, u32 add);
+int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep);
+int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep);
+int bdc_stop_ep(struct bdc *bdc, int epnum);
+int bdc_ep_set_stall(struct bdc *bdc, int epnum);
+int bdc_ep_clear_stall(struct bdc *bdc, int epnum);
+int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr);
+int bdc_function_wake(struct bdc *bdc, u8 intf);
+int bdc_function_wake_fh(struct bdc *bdc, u8 intf);
+
+#endif /* __LINUX_BDC_CMD_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
new file mode 100644
index 000000000..9849e0c86
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * bdc_core.c - BRCM BDC USB3.0 device controller core operations
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/moduleparam.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/clk.h>
+
+#include "bdc.h"
+#include "bdc_dbg.h"
+
+/* Poll till controller status is not OIP */
+static int poll_oip(struct bdc *bdc, u32 usec)
+{
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(bdc->regs + BDC_BDCSC, status,
+ (BDC_CSTS(status) != BDC_OIP), 10, usec);
+ if (ret)
+ dev_err(bdc->dev, "operation timedout BDCSC: 0x%08x\n", status);
+ else
+ dev_dbg(bdc->dev, "%s complete status=%d", __func__, BDC_CSTS(status));
+
+ return ret;
+}
+
+/* Stop the BDC controller */
+int bdc_stop(struct bdc *bdc)
+{
+ int ret;
+ u32 temp;
+
+ dev_dbg(bdc->dev, "%s ()\n\n", __func__);
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* Check if BDC is already halted */
+ if (BDC_CSTS(temp) == BDC_HLT) {
+ dev_vdbg(bdc->dev, "BDC already halted\n");
+ return 0;
+ }
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COS|BDC_COP_STP;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret)
+ dev_err(bdc->dev, "bdc stop operation failed");
+
+ return ret;
+}
+
+/* Issue a reset to BDC controller */
+int bdc_reset(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ /* First halt the controller */
+ ret = bdc_stop(bdc);
+ if (ret)
+ return ret;
+
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COS|BDC_COP_RST;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret)
+ dev_err(bdc->dev, "bdc reset operation failed");
+
+ return ret;
+}
+
+/* Run the BDC controller */
+int bdc_run(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* if BDC is already in running state then do not do anything */
+ if (BDC_CSTS(temp) == BDC_NOR) {
+ dev_warn(bdc->dev, "bdc is already in running state\n");
+ return 0;
+ }
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COP_RUN;
+ temp |= BDC_COS;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret) {
+ dev_err(bdc->dev, "bdc run operation failed:%d", ret);
+ return ret;
+ }
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (BDC_CSTS(temp) != BDC_NOR) {
+ dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
+ BDC_CSTS(temp));
+ return -ESHUTDOWN;
+ }
+
+ return 0;
+}
+
+/*
+ * Present the termination to the host, typically called from upstream port
+ * event with Vbus present = 1
+ */
+void bdc_softconn(struct bdc *bdc)
+{
+ u32 uspc;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc &= ~BDC_PST_MASK;
+ uspc |= BDC_LINK_STATE_RX_DET;
+ uspc |= BDC_SWS;
+ dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+}
+
+/* Remove the termination */
+void bdc_softdisconn(struct bdc *bdc)
+{
+ u32 uspc;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc |= BDC_SDC;
+ uspc &= ~BDC_SCN;
+ dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+}
+
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_setup(struct bdc *bdc)
+{
+ int sp_buff_size;
+ u32 low32;
+ u32 upp32;
+
+ sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
+ dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
+ if (!sp_buff_size) {
+ dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
+ return 0;
+ }
+ /* Refer to BDC spec, Table 4 for description of SPB */
+ sp_buff_size = 1 << (sp_buff_size + 5);
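+ /* e.g. an SPB field of 3 yields 1 << (3 + 5) = 256 bytes */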
+ dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
+ bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
+ &bdc->scratchpad.sp_dma,
+ GFP_KERNEL);
+
+ if (!bdc->scratchpad.buff)
+ goto fail;
+
+ bdc->sp_buff_size = sp_buff_size;
+ bdc->scratchpad.size = sp_buff_size;
+ low32 = lower_32_bits(bdc->scratchpad.sp_dma);
+ upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
+ cpu_to_le32s(&low32);
+ cpu_to_le32s(&upp32);
+ bdc_writel(bdc->regs, BDC_SPBBAL, low32);
+ bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
+ return 0;
+
+fail:
+ bdc->scratchpad.buff = NULL;
+
+ return -ENOMEM;
+}
+
+/* Allocate the status report ring */
+static int setup_srr(struct bdc *bdc, int interrupter)
+{
+ dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
+ /* Reset the SRR */
+ bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
+ bdc->srr.dqp_index = 0;
+ /* allocate the status report descriptors */
+ bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ &bdc->srr.dma_addr, GFP_KERNEL);
+ if (!bdc->srr.sr_bds)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Initialize the HW regs and internal data structures */
+static void bdc_mem_init(struct bdc *bdc, bool reinit)
+{
+ u8 size = 0;
+ u32 usb2_pm;
+ u32 low32;
+ u32 upp32;
+ u32 temp;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc->ep0_state = WAIT_FOR_SETUP;
+ bdc->dev_addr = 0;
+ bdc->srr.eqp_index = 0;
+ bdc->srr.dqp_index = 0;
+ bdc->zlp_needed = false;
+ bdc->delayed_status = false;
+
+ bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
+ /* Init the SRR */
+ temp = BDC_SRR_RWS | BDC_SRR_RST;
+ /* Reset the SRR */
+ bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
+ dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
+ temp = lower_32_bits(bdc->srr.dma_addr);
+ size = fls(NUM_SR_ENTRIES) - 2;
+ temp |= size;
+ dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
+ temp, NUM_SR_ENTRIES, size);
+
+ low32 = lower_32_bits(temp);
+ upp32 = upper_32_bits(bdc->srr.dma_addr);
+ cpu_to_le32s(&low32);
+ cpu_to_le32s(&upp32);
+
+ /* Write the dma addresses into regs*/
+ bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
+ bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);
+
+ temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ temp |= BDC_SRR_IE;
+ temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
+ bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
+
+ /* Set the Interrupt Coalescence ~500 usec */
+ temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
+ temp &= ~0xffff;
+ temp |= INT_CLS;
+ bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);
+
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
+ /* Enable hardware LPM Enable */
+ usb2_pm |= BDC_HLE;
+ bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
+
+ /* readback for debug */
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);
+
+ /* Disable any unwanted SR's on SRR */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* We don't want Microframe counter wrap SR */
+ temp |= BDC_MASK_MCW;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+
+ /*
+ * In some error cases, the driver has to reset the entire BDC
+ * controller; in that case, reinit is passed as true
+ */
+ if (reinit) {
+ int i;
+ /* Enable interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ /* Init scratchpad to 0 */
+ memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
+ /* Initialize SRR to 0 */
+ memset(bdc->srr.sr_bds, 0,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd));
+ /*
+ * clear ep flags to avoid post disconnect stops/deconfigs but
+ * not during S2 exit
+ */
+ if (!bdc->gadget.speed)
+ for (i = 1; i < bdc->num_eps; ++i)
+ bdc->bdc_ep_array[i]->flags = 0;
+ } else {
+ /* One-time initialization only */
+ /* Enable status report function pointers */
+ bdc->sr_handler[0] = bdc_sr_xsf;
+ bdc->sr_handler[1] = bdc_sr_uspc;
+
+ /* EP0 status report function pointers */
+ bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
+ bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
+ bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
+ }
+}
+
+/* Free the dynamic memory */
+static void bdc_mem_free(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ /* Free SRR */
+ if (bdc->srr.sr_bds)
+ dma_free_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ bdc->srr.sr_bds, bdc->srr.dma_addr);
+
+ /* Free scratchpad */
+ if (bdc->scratchpad.buff)
+ dma_free_coherent(bdc->dev, bdc->sp_buff_size,
+ bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
+
+ /* Destroy the dma pools */
+ dma_pool_destroy(bdc->bd_table_pool);
+
+ /* Free the bdc_ep array */
+ kfree(bdc->bdc_ep_array);
+
+ bdc->srr.sr_bds = NULL;
+ bdc->scratchpad.buff = NULL;
+ bdc->bd_table_pool = NULL;
+ bdc->bdc_ep_array = NULL;
+}
+
+/*
+ * bdc_reinit resets the controller and reinitializes the registers; it is
+ * called from disconnect/bus reset scenarios to ensure proper HW cleanup
+ */
+int bdc_reinit(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ret = bdc_stop(bdc);
+ if (ret)
+ goto out;
+
+ ret = bdc_reset(bdc);
+ if (ret)
+ goto out;
+
+ /* the reinit flag is true */
+ bdc_mem_init(bdc, true);
+ ret = bdc_run(bdc);
+out:
+ bdc->reinit = false;
+
+ return ret;
+}
+
+/* Allocate all the dynamic memory */
+static int bdc_mem_alloc(struct bdc *bdc)
+{
+ u32 page_size;
+ unsigned int num_ieps, num_oeps;
+
+ dev_dbg(bdc->dev,
+ "%s() NUM_BDS_PER_TABLE:%d\n", __func__,
+ NUM_BDS_PER_TABLE);
+ page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
+ /* page size is 2^pgs KB */
+ page_size = 1 << page_size;
+ /* KB */
+ page_size <<= 10;
+ dev_dbg(bdc->dev, "page_size=%d\n", page_size);
+
+ /* Create a pool of bd tables */
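+ /*
+ * Each bdc_bd is four __le32 words (16 bytes), so a table occupies
+ * NUM_BDS_PER_TABLE * 16 bytes, 16-byte aligned and, per the boundary
+ * argument, never crossing a controller page.
+ */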
+ bdc->bd_table_pool =
+ dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
+ 16, page_size);
+
+ if (!bdc->bd_table_pool)
+ goto fail;
+
+ if (scratchpad_setup(bdc))
+ goto fail;
+
+ /* read from regs */
+ num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
+ num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
+ /* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
+ bdc->num_eps = num_ieps + num_oeps + 2;
+ dev_dbg(bdc->dev,
+ "ieps:%d eops:%d num_eps:%d\n",
+ num_ieps, num_oeps, bdc->num_eps);
+ /* allocate array of ep pointers */
+ bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
+ GFP_KERNEL);
+ if (!bdc->bdc_ep_array)
+ goto fail;
+
+ dev_dbg(bdc->dev, "Allocating sr report0\n");
+ if (setup_srr(bdc, 0))
+ goto fail;
+
+ return 0;
+fail:
+ dev_warn(bdc->dev, "Couldn't initialize memory\n");
+ bdc_mem_free(bdc);
+
+ return -ENOMEM;
+}
+
+/* opposite to bdc_hw_init */
+static void bdc_hw_exit(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc_mem_free(bdc);
+}
+
+/* Initialize the bdc HW and memory */
+static int bdc_hw_init(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ ret = bdc_reset(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "err resetting bdc abort bdc init%d\n", ret);
+ return ret;
+ }
+ ret = bdc_mem_alloc(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "Mem alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+ bdc_mem_init(bdc, false);
+ bdc_dbg_regs(bdc);
+ dev_dbg(bdc->dev, "HW Init done\n");
+
+ return 0;
+}
+
+static int bdc_phy_init(struct bdc *bdc)
+{
+ int phy_num;
+ int ret;
+
+ for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
+ ret = phy_init(bdc->phys[phy_num]);
+ if (ret)
+ goto err_exit_phy;
+ ret = phy_power_on(bdc->phys[phy_num]);
+ if (ret) {
+ phy_exit(bdc->phys[phy_num]);
+ goto err_exit_phy;
+ }
+ }
+
+ return 0;
+
+err_exit_phy:
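+ /* unwind only the phys that were brought up before the failure */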
+ while (--phy_num >= 0) {
+ phy_power_off(bdc->phys[phy_num]);
+ phy_exit(bdc->phys[phy_num]);
+ }
+
+ return ret;
+}
+
+static void bdc_phy_exit(struct bdc *bdc)
+{
+ int phy_num;
+
+ for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
+ phy_power_off(bdc->phys[phy_num]);
+ phy_exit(bdc->phys[phy_num]);
+ }
+}
+
+static int bdc_probe(struct platform_device *pdev)
+{
+ struct bdc *bdc;
+ int ret;
+ int irq;
+ u32 temp;
+ struct device *dev = &pdev->dev;
+ int phy_num;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
+ if (!bdc)
+ return -ENOMEM;
+
+ bdc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bdc->regs))
+ return PTR_ERR(bdc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ spin_lock_init(&bdc->lock);
+ platform_set_drvdata(pdev, bdc);
+ bdc->irq = irq;
+ bdc->dev = dev;
+ dev_dbg(dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
+
+ bdc->num_phys = of_count_phandle_with_args(dev->of_node,
+ "phys", "#phy-cells");
+ if (bdc->num_phys > 0) {
+ bdc->phys = devm_kcalloc(dev, bdc->num_phys,
+ sizeof(struct phy *), GFP_KERNEL);
+ if (!bdc->phys)
+ return -ENOMEM;
+ } else {
+ bdc->num_phys = 0;
+ }
+ dev_info(dev, "Using %d phy(s)\n", bdc->num_phys);
+
+ for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
+ bdc->phys[phy_num] = devm_of_phy_get_by_index(
+ dev, dev->of_node, phy_num);
+ if (IS_ERR(bdc->phys[phy_num])) {
+ ret = PTR_ERR(bdc->phys[phy_num]);
+ dev_err(bdc->dev,
+ "BDC phy specified but not found:%d\n", ret);
+ return ret;
+ }
+ }
+
+ bdc->clk = devm_clk_get_optional(dev, "sw_usbd");
+ if (IS_ERR(bdc->clk))
+ return PTR_ERR(bdc->clk);
+
+ ret = clk_prepare_enable(bdc->clk);
+ if (ret) {
+ dev_err(dev, "could not enable clock\n");
+ return ret;
+ }
+
+ ret = bdc_phy_init(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
+ goto disable_clk;
+ }
+
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
+ if ((temp & BDC_P64) &&
+ !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ dev_dbg(dev, "Using 64-bit address\n");
+ } else {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev,
+ "No suitable DMA config available, abort\n");
+ ret = -ENOTSUPP;
+ goto phycleanup;
+ }
+ dev_dbg(dev, "Using 32-bit address\n");
+ }
+ ret = bdc_hw_init(bdc);
+ if (ret) {
+ dev_err(dev, "BDC init failure:%d\n", ret);
+ goto phycleanup;
+ }
+ ret = bdc_udc_init(bdc);
+ if (ret) {
+ dev_err(dev, "BDC Gadget init failure:%d\n", ret);
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ bdc_hw_exit(bdc);
+phycleanup:
+ bdc_phy_exit(bdc);
+disable_clk:
+ clk_disable_unprepare(bdc->clk);
+ return ret;
+}
+
+static int bdc_remove(struct platform_device *pdev)
+{
+ struct bdc *bdc;
+
+ bdc = platform_get_drvdata(pdev);
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc_udc_exit(bdc);
+ bdc_hw_exit(bdc);
+ bdc_phy_exit(bdc);
+ clk_disable_unprepare(bdc->clk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bdc_suspend(struct device *dev)
+{
+ struct bdc *bdc = dev_get_drvdata(dev);
+ int ret;
+
+ /* Halt the controller */
+ ret = bdc_stop(bdc);
+ if (!ret)
+ clk_disable_unprepare(bdc->clk);
+
+ return ret;
+}
+
+static int bdc_resume(struct device *dev)
+{
+ struct bdc *bdc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(bdc->clk);
+ if (ret) {
+ dev_err(bdc->dev, "err enabling the clock\n");
+ return ret;
+ }
+ ret = bdc_reinit(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "err in bdc reinit\n");
+ clk_disable_unprepare(bdc->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(bdc_pm_ops, bdc_suspend,
+ bdc_resume);
+
+static const struct of_device_id bdc_of_match[] = {
+ { .compatible = "brcm,bdc-udc-v2" },
+ { .compatible = "brcm,bdc" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bdc_driver = {
+ .driver = {
+ .name = BRCM_BDC_NAME,
+ .pm = &bdc_pm_ops,
+ .of_match_table = bdc_of_match,
+ },
+ .probe = bdc_probe,
+ .remove = bdc_remove,
+};
+
+module_platform_driver(bdc_driver);
+MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(BRCM_BDC_DESC);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
new file mode 100644
index 000000000..9c03e1330
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * bdc_dbg.c - BRCM BDC USB3.0 device controller debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+
+#include "bdc.h"
+#include "bdc_dbg.h"
+
+void bdc_dbg_regs(struct bdc *bdc)
+{
+ u32 temp;
+
+ dev_vdbg(bdc->dev, "bdc->regs:%p\n", bdc->regs);
+ temp = bdc_readl(bdc->regs, BDC_BDCCFG0);
+ dev_vdbg(bdc->dev, "bdccfg0:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCFG1);
+ dev_vdbg(bdc->dev, "bdccfg1:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP0);
+ dev_vdbg(bdc->dev, "bdccap0:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
+ dev_vdbg(bdc->dev, "bdccap1:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_USPC);
+ dev_vdbg(bdc->dev, "uspc:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_DVCSA);
+ dev_vdbg(bdc->dev, "dvcsa:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_DVCSB);
+ dev_vdbg(bdc->dev, "dvcsb:0x%x08\n", temp);
+}
+
+void bdc_dump_epsts(struct bdc *bdc)
+{
+ u32 temp;
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS0);
+ dev_vdbg(bdc->dev, "BDC_EPSTS0:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS1);
+ dev_vdbg(bdc->dev, "BDC_EPSTS1:0x%x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS2);
+ dev_vdbg(bdc->dev, "BDC_EPSTS2:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS3);
+ dev_vdbg(bdc->dev, "BDC_EPSTS3:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS4);
+ dev_vdbg(bdc->dev, "BDC_EPSTS4:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS5);
+ dev_vdbg(bdc->dev, "BDC_EPSTS5:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS6);
+ dev_vdbg(bdc->dev, "BDC_EPSTS6:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS7);
+ dev_vdbg(bdc->dev, "BDC_EPSTS7:0x%08x\n", temp);
+}
+
+void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
+{
+ struct bdc_sr *sr;
+ dma_addr_t addr;
+ int i;
+
+ sr = bdc->srr.sr_bds;
+ addr = bdc->srr.dma_addr;
+ dev_vdbg(bdc->dev, "%s sr:%p dqp_index:%d\n", __func__,
+ sr, bdc->srr.dqp_index);
+ for (i = 0; i < NUM_SR_ENTRIES; i++) {
+ sr = &bdc->srr.sr_bds[i];
+ dev_vdbg(bdc->dev, "%llx %08x %08x %08x %08x\n",
+ (unsigned long long)addr,
+ le32_to_cpu(sr->offset[0]),
+ le32_to_cpu(sr->offset[1]),
+ le32_to_cpu(sr->offset[2]),
+ le32_to_cpu(sr->offset[3]));
+ addr += sizeof(*sr);
+ }
+}
+
+void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ struct bd_table *bd_table;
+ struct bdc_bd *bd;
+ int tbi, bdi, gbdi;
+ dma_addr_t dma;
+
+ gbdi = 0;
+ dev_vdbg(bdc->dev,
+ "Dump bd list for %s epnum:%d\n",
+ ep->name, ep->ep_num);
+
+ dev_vdbg(bdc->dev,
+ "tabs:%d max_bdi:%d eqp_bdi:%d hwd_bdi:%d num_bds_table:%d\n",
+ bd_list->num_tabs, bd_list->max_bdi, bd_list->eqp_bdi,
+ bd_list->hwd_bdi, bd_list->num_bds_table);
+
+ for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
+ bd_table = bd_list->bd_table_array[tbi];
+ for (bdi = 0; bdi < bd_list->num_bds_table; bdi++) {
+ bd = bd_table->start_bd + bdi;
+ dma = bd_table->dma + (sizeof(struct bdc_bd) * bdi);
+ dev_vdbg(bdc->dev,
+ "tbi:%2d bdi:%2d gbdi:%2d virt:%p phys:%llx %08x %08x %08x %08x\n",
+ tbi, bdi, gbdi++, bd, (unsigned long long)dma,
+ le32_to_cpu(bd->offset[0]),
+ le32_to_cpu(bd->offset[1]),
+ le32_to_cpu(bd->offset[2]),
+ le32_to_cpu(bd->offset[3]));
+ }
+ dev_vdbg(bdc->dev, "\n\n");
+ }
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
new file mode 100644
index 000000000..acd8332f8
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * bdc_dbg.h - header for the BDC debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+#ifndef __LINUX_BDC_DBG_H__
+#define __LINUX_BDC_DBG_H__
+
+#include "bdc.h"
+
+#ifdef CONFIG_USB_GADGET_VERBOSE
+void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep);
+void bdc_dbg_srr(struct bdc *bdc, u32 srr_num);
+void bdc_dbg_regs(struct bdc *bdc);
+void bdc_dump_epsts(struct bdc *bdc);
+#else
+static inline void bdc_dbg_regs(struct bdc *bdc)
+{ }
+
+static inline void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
+{ }
+
+static inline void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
+{ }
+
+static inline void bdc_dump_epsts(struct bdc *bdc)
+{ }
+#endif /* CONFIG_USB_GADGET_VERBOSE */
+#endif /* __LINUX_BDC_DBG_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
new file mode 100644
index 000000000..fa88f210e
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -0,0 +1,2035 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+#include <linux/platform_device.h>
+#include <linux/usb/composite.h>
+
+#include "bdc.h"
+#include "bdc_ep.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+static const char * const ep0_state_string[] = {
+ "WAIT_FOR_SETUP",
+ "WAIT_FOR_DATA_START",
+ "WAIT_FOR_DATA_XMIT",
+ "WAIT_FOR_STATUS_START",
+ "WAIT_FOR_STATUS_XMIT",
+ "STATUS_PENDING"
+};
+
+/* Free the bdl during ep disable */
+static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ int index;
+
+ dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
+ __func__, ep->name, num_tabs);
+
+ if (!bd_list->bd_table_array) {
+ dev_dbg(bdc->dev, "%s already freed\n", ep->name);
+ return;
+ }
+ for (index = 0; index < num_tabs; index++) {
+ /*
+ * Check if the bd_table struct is allocated; if yes, then check if
+ * its bd memory has been allocated, and if so free the dma_pool
+ * entry and the bd_table struct memory
+ */
+ bd_table = bd_list->bd_table_array[index];
+ dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
+ if (!bd_table) {
+ dev_dbg(bdc->dev, "bd_table not allocated\n");
+ continue;
+ }
+ if (!bd_table->start_bd) {
+ dev_dbg(bdc->dev, "bd dma pool not allocated\n");
+ continue;
+ }
+
+ dev_dbg(bdc->dev,
+ "Free dma pool start_bd:%p dma:%llx\n",
+ bd_table->start_bd,
+ (unsigned long long)bd_table->dma);
+
+ dma_pool_free(bdc->bd_table_pool,
+ bd_table->start_bd,
+ bd_table->dma);
+ /* Free the bd_table structure */
+ kfree(bd_table);
+ }
+ /* Free the bd table array */
+ kfree(ep->bd_list.bd_table_array);
+}
+
+/*
+ * Chain the tables by inserting a chain bd at the end of prev_table, pointing
+ * to next_table
+ */
+static inline void chain_table(struct bd_table *prev_table,
+ struct bd_table *next_table,
+ u32 bd_p_tab)
+{
+ /* Chain the prev table to next table */
+ prev_table->start_bd[bd_p_tab-1].offset[0] =
+ cpu_to_le32(lower_32_bits(next_table->dma));
+
+ prev_table->start_bd[bd_p_tab-1].offset[1] =
+ cpu_to_le32(upper_32_bits(next_table->dma));
+
+ prev_table->start_bd[bd_p_tab-1].offset[2] =
+ 0x0;
+
+ prev_table->start_bd[bd_p_tab-1].offset[3] =
+ cpu_to_le32(MARK_CHAIN_BD);
+}
+
+/* Allocate the bdl for ep, during config ep */
+static int ep_bd_list_alloc(struct bdc_ep *ep)
+{
+ struct bd_table *prev_table = NULL;
+ int index, num_tabs, bd_p_tab;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ dma_addr_t dma;
+
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ num_tabs = NUM_TABLES_ISOCH;
+ else
+ num_tabs = NUM_TABLES;
+
+ bd_p_tab = NUM_BDS_PER_TABLE;
+ /* if there is only 1 table in bd list then loop chain to self */
+ dev_dbg(bdc->dev,
+ "%s ep:%p num_tabs:%d\n",
+ __func__, ep, num_tabs);
+
+ /* Allocate memory for table array */
+ ep->bd_list.bd_table_array = kcalloc(num_tabs,
+ sizeof(struct bd_table *),
+ GFP_ATOMIC);
+ if (!ep->bd_list.bd_table_array)
+ return -ENOMEM;
+
+ /* Allocate memory for each table */
+ for (index = 0; index < num_tabs; index++) {
+ /* Allocate memory for bd_table structure */
+ bd_table = kzalloc(sizeof(*bd_table), GFP_ATOMIC);
+ if (!bd_table)
+ goto fail;
+
+ bd_table->start_bd = dma_pool_zalloc(bdc->bd_table_pool,
+ GFP_ATOMIC,
+ &dma);
+ if (!bd_table->start_bd) {
+ kfree(bd_table);
+ goto fail;
+ }
+
+ bd_table->dma = dma;
+
+ dev_dbg(bdc->dev,
+ "index:%d start_bd:%p dma=%08llx prev_table:%p\n",
+ index, bd_table->start_bd,
+ (unsigned long long)bd_table->dma, prev_table);
+
+ ep->bd_list.bd_table_array[index] = bd_table;
+ if (prev_table)
+ chain_table(prev_table, bd_table, bd_p_tab);
+
+ prev_table = bd_table;
+ }
+ chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
+ /* Memory allocation is successful, now init the internal fields */
+ ep->bd_list.num_tabs = num_tabs;
+ ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
+ ep->bd_list.num_bds_table = bd_p_tab;
+ ep->bd_list.eqp_bdi = 0;
+ ep->bd_list.hwd_bdi = 0;
+
+ return 0;
+fail:
+ /* Free the bd_table_array, bd_table struct, bd's */
+ ep_bd_list_free(ep, num_tabs);
+
+ return -ENOMEM;
+}
+
+/* returns how many bds are needed for this transfer */
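+/*
+ * e.g. a 100000 byte request with BD_MAX_BUFF_SIZE = 65536 needs one full
+ * 65536 byte bd plus one bd for the remaining 34464 bytes, i.e. 2 bds.
+ */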
+static inline int bd_needed_req(struct bdc_req *req)
+{
+ int bd_needed = 0;
+ int remaining;
+
+ /* 1 bd needed for 0 byte transfer */
+ if (req->usb_req.length == 0)
+ return 1;
+
+ /* remaining bytes after transferring all max BD size BDs */
+ remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
+ if (remaining)
+ bd_needed++;
+
+ /* How many maximum BUFF size BD's ? */
+ remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
+ bd_needed += remaining;
+
+ return bd_needed;
+}
+
+/* returns the bd index(bdi) corresponding to bd dma address */
+static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ dma_addr_t dma_first_bd, dma_last_bd;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ bool found = false;
+ int tbi, bdi;
+
+ dma_first_bd = dma_last_bd = 0;
+ dev_dbg(bdc->dev, "%s %llx\n",
+ __func__, (unsigned long long)bd_dma_addr);
+ /*
+ * Find which table this bd_dma_addr belongs to by going through the
+ * table array and comparing it against the addresses of the first and
+ * last bd of each table
+ */
+ for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
+ bd_table = bd_list->bd_table_array[tbi];
+ dma_first_bd = bd_table->dma;
+ dma_last_bd = bd_table->dma +
+ (sizeof(struct bdc_bd) *
+ (bd_list->num_bds_table - 1));
+ dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
+ (unsigned long long)dma_first_bd,
+ (unsigned long long)dma_last_bd);
+ if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
+ found = true;
+ break;
+ }
+ }
+ if (unlikely(!found)) {
+ dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
+ return -EINVAL;
+ }
+ /* Now we know the table, find the bdi */
+ bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);
+
+ /* return the global bdi, to compare with ep eqp_bdi */
+ return (bdi + (tbi * bd_list->num_bds_table));
+}
+
+/* returns the table index(tbi) of the given bdi */
+static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
+{
+ int tbi;
+
+ tbi = bdi / ep->bd_list.num_bds_table;
+ dev_vdbg(ep->bdc->dev,
+ "bdi:%d num_bds_table:%d tbi:%d\n",
+ bdi, ep->bd_list.num_bds_table, tbi);
+
+ return tbi;
+}
+
+/* Find the bdi last bd in the transfer */
+static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
+{
+ int end_bdi;
+
+ end_bdi = next_hwd_bdi - 1;
+ if (end_bdi < 0)
+ end_bdi = ep->bd_list.max_bdi - 1;
+ else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
+ end_bdi--;
+
+ return end_bdi;
+}
+
+/*
+ * How many transfer bd's are available on this ep bdl, chain bds are not
+ * counted in available bds
+ */
+static int bd_available_ep(struct bdc_ep *ep)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ int available1, available2;
+ struct bdc *bdc = ep->bdc;
+ int chain_bd1, chain_bd2;
+ int available_bd = 0;
+
+ available1 = available2 = chain_bd1 = chain_bd2 = 0;
+ /* if empty then we have all bd's available - number of chain bd's */
+ if (bd_list->eqp_bdi == bd_list->hwd_bdi)
+ return bd_list->max_bdi - bd_list->num_tabs;
+
+ /*
+ * Depending upon where the eqp and dqp pointers are, calculate the
+ * number of available bds
+ */
+ if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
+ /* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
+ available1 = bd_list->max_bdi - bd_list->eqp_bdi;
+ available2 = bd_list->hwd_bdi;
+ chain_bd1 = available1 / bd_list->num_bds_table;
+ chain_bd2 = available2 / bd_list->num_bds_table;
+ dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
+ chain_bd1, chain_bd2);
+ available_bd = available1 + available2 - chain_bd1 - chain_bd2;
+ } else {
+ /* available bd's are from eqp..dqp - number of chain bd's */
+ available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
+ /* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
+ if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
+ <= bd_list->num_bds_table) {
+ /* if there is any chain bd in between */
+ if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
+ == bdi_to_tbi(ep, bd_list->eqp_bdi))) {
+ available_bd = available1 - 1;
+ }
+ } else {
+ chain_bd1 = available1 / bd_list->num_bds_table;
+ available_bd = available1 - chain_bd1;
+ }
+ }
+ /*
+ * we need to keep one extra bd to check if ring is full or empty so
+ * reduce by 1
+ */
+ available_bd--;
+ dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);
+
+ return available_bd;
+}
+
+/* Notify the hardware after queueing the bd to bdl */
+void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
+{
+ struct bdc_ep *ep = bdc->bdc_ep_array[epnum];
+
+ dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
+ /*
+ * We don't have any way to check whether the ep state is running,
+ * except via the software flags.
+ */
+ if (unlikely(ep->flags & BDC_EP_STOP))
+ ep->flags &= ~BDC_EP_STOP;
+
+ bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
+}
+
+/* returns the bd corresponding to bdi */
+static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
+{
+ int tbi = bdi_to_tbi(ep, bdi);
+ int local_bdi = 0;
+
+ local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
+ dev_vdbg(ep->bdc->dev,
+ "%s bdi:%d local_bdi:%d\n",
+ __func__, bdi, local_bdi);
+
+ return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
+}
+
+/* Advance the enqueue pointer */
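+/*
+ * e.g. with num_bds_table = 64 and eqp_bdi = 62, the increment lands on
+ * bdi 63, the chain bd slot of the table, so it is skipped and eqp_bdi
+ * becomes 64.
+ */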
+static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
+{
+ ep->bd_list.eqp_bdi++;
+ /* if it's chain bd, then move to next */
+ if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
+ ep->bd_list.eqp_bdi++;
+
+ /* if the eqp is pointing to last + 1 then move back to 0 */
+ if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
+ ep->bd_list.eqp_bdi = 0;
+}
+
+/* Setup the first bd for ep0 transfer */
+static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
+{
+ u16 wValue;
+ u32 req_len;
+
+ req->ep->dir = 0;
+ req_len = req->usb_req.length;
+ switch (bdc->ep0_state) {
+ case WAIT_FOR_DATA_START:
+ *dword3 |= BD_TYPE_DS;
+ if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
+ *dword3 |= BD_DIR_IN;
+
+ /* check if zlp will be needed */
+ wValue = le16_to_cpu(bdc->setup_pkt.wValue);
+ if ((wValue > req_len) &&
+ (req_len % bdc->gadget.ep0->maxpacket == 0)) {
+ dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
+ wValue, req_len,
+ bdc->gadget.ep0->maxpacket);
+ bdc->zlp_needed = true;
+ }
+ break;
+
+ case WAIT_FOR_STATUS_START:
+ *dword3 |= BD_TYPE_SS;
+ if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
+ !(bdc->setup_pkt.bRequestType & USB_DIR_IN))
+ *dword3 |= BD_DIR_IN;
+ break;
+ default:
+ dev_err(bdc->dev,
+ "Unknown ep0 state for queueing bd ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Setup the bd dma descriptor for a given request */
+static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
+{
+ dma_addr_t buf_add = req->usb_req.dma;
+ u32 maxp, tfs, dword2, dword3;
+ struct bd_transfer *bd_xfr;
+ struct bd_list *bd_list;
+ struct bdc_ep *ep;
+ struct bdc_bd *bd;
+ int ret, bdnum;
+ u32 req_len;
+
+ ep = req->ep;
+ bd_list = &ep->bd_list;
+ bd_xfr = &req->bd_xfr;
+ bd_xfr->req = req;
+ bd_xfr->start_bdi = bd_list->eqp_bdi;
+ bd = bdi_to_bd(ep, bd_list->eqp_bdi);
+ req_len = req->usb_req.length;
+ maxp = usb_endpoint_maxp(ep->desc);
+ tfs = roundup(req->usb_req.length, maxp);
+ tfs = tfs/maxp;
+ dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
+ __func__, ep->name, num_bds, tfs, req_len, bd);
+
+ for (bdnum = 0; bdnum < num_bds; bdnum++) {
+ dword2 = dword3 = 0;
+ /* First bd */
+ if (!bdnum) {
+ dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
+ dword2 |= BD_LTF;
+ /* the format of the first bd for ep0 differs from the others */
+ if (ep->ep_num == 1) {
+ ret = setup_first_bd_ep0(bdc, req, &dword3);
+ if (ret)
+ return ret;
+ }
+ }
+ if (!req->ep->dir)
+ dword3 |= BD_ISP;
+
+ if (req_len > BD_MAX_BUFF_SIZE) {
+ dword2 |= BD_MAX_BUFF_SIZE;
+ req_len -= BD_MAX_BUFF_SIZE;
+ } else {
+ /* this should be the last bd */
+ dword2 |= req_len;
+ dword3 |= BD_IOC;
+ dword3 |= BD_EOT;
+ }
+ /* Currently only 1 INT target is supported */
+ dword2 |= BD_INTR_TARGET(0);
+ bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
+ if (unlikely(!bd)) {
+ dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
+ return -EINVAL;
+ }
+ /* write bd */
+ bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
+ bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
+ bd->offset[2] = cpu_to_le32(dword2);
+ bd->offset[3] = cpu_to_le32(dword3);
+ /* advance eqp pointer */
+ ep_bdlist_eqp_adv(ep);
+ /* advance the buff pointer */
+ buf_add += BD_MAX_BUFF_SIZE;
+ dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
+ (unsigned long long)buf_add, req_len, bd,
+ ep->bd_list.eqp_bdi);
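+ /*
+ * mark the bd at the new eqp with STOP BD fetch so the HW
+ * does not run past this transfer's bds
+ */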
+ bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
+ bd->offset[3] = cpu_to_le32(BD_SBF);
+ }
+ /* clear the STOP BD fetch bit from the first bd of this xfr */
+ bd = bdi_to_bd(ep, bd_xfr->start_bdi);
+ bd->offset[3] &= cpu_to_le32(~BD_SBF);
+ /* the new eqp will be next hw dqp */
+ bd_xfr->num_bds = num_bds;
+ bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
+ /* make sure everything is written out before notifying the HW */
+ wmb();
+
+ return 0;
+}
+
+/* Queue the xfr */
+static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
+{
+ int num_bds, bd_available;
+ struct bdc_ep *ep;
+ int ret;
+
+ ep = req->ep;
+ dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
+ dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
+ ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);
+
+ num_bds = bd_needed_req(req);
+ bd_available = bd_available_ep(ep);
+
+ /* check how many bds are available on the ep */
+ if (num_bds > bd_available)
+ return -ENOMEM;
+
+ ret = setup_bd_list_xfr(bdc, req, num_bds);
+ if (ret)
+ return ret;
+ list_add_tail(&req->queue, &ep->queue);
+ bdc_dbg_bd_list(bdc, ep);
+ bdc_notify_xfr(bdc, ep->ep_num);
+
+ return 0;
+}
+
+/* callback to gadget layer when xfr completes */
+static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
+ int status)
+{
+ struct bdc *bdc = ep->bdc;
+
+ if (req == NULL)
+ return;
+
+ dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
+ list_del(&req->queue);
+ req->usb_req.status = status;
+ usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (req->usb_req.complete) {
+ spin_unlock(&bdc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&bdc->lock);
+ }
+}
+
+/* Disable the endpoint */
+int bdc_ep_disable(struct bdc_ep *ep)
+{
+ struct bdc_req *req;
+ struct bdc *bdc;
+ int ret;
+
+ ret = 0;
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
+ /* Stop the endpoint */
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+
+ /*
+ * Intentionally don't check the ret value of stop; it can fail
+ * in disconnect scenarios, so continue with dconfig
+ */
+ /* de-queue any pending requests */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+ bdc_req_complete(ep, req, -ESHUTDOWN);
+ }
+ /* deconfigure the endpoint */
+ ret = bdc_dconfig_ep(bdc, ep);
+ if (ret)
+ dev_warn(bdc->dev,
+ "dconfig failed, continuing with memory free\n");
+
+ ep->flags = 0;
+ /* ep0 memory is not freed, but reused on next connect sr */
+ if (ep->ep_num == 1)
+ return 0;
+
+ /* Free the bdl memory */
+ ep_bd_list_free(ep, ep->bd_list.num_tabs);
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+ ep->usb_ep.desc = NULL;
+ ep->ep_type = 0;
+
+ return ret;
+}
+
+/* Enable the ep */
+int bdc_ep_enable(struct bdc_ep *ep)
+{
+ struct bdc *bdc;
+ int ret = 0;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
+ __func__, NUM_TABLES, NUM_TABLES_ISOCH);
+
+ ret = ep_bd_list_alloc(ep);
+ if (ret) {
+ dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
+ return -ENOMEM;
+ }
+ bdc_dbg_bd_list(bdc, ep);
+ /* only for ep0: config ep is called for ep0 from connect event */
+ if (ep->ep_num == 1)
+ return ret;
+
+ /* Issue a configure endpoint command */
+ ret = bdc_config_ep(bdc, ep);
+ if (ret)
+ return ret;
+
+ ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
+ ep->usb_ep.desc = ep->desc;
+ ep->usb_ep.comp_desc = ep->comp_desc;
+ ep->ep_type = usb_endpoint_type(ep->desc);
+ ep->flags |= BDC_EP_ENABLED;
+
+ return 0;
+}
+
+/* EP0 related code */
+
+/* Queue a status stage BD */
+static int ep0_queue_status_stage(struct bdc *bdc)
+{
+ struct bdc_req *status_req;
+ struct bdc_ep *ep;
+
+ status_req = &bdc->status_req;
+ ep = bdc->bdc_ep_array[1];
+ status_req->ep = ep;
+ status_req->usb_req.length = 0;
+ status_req->usb_req.status = -EINPROGRESS;
+ status_req->usb_req.actual = 0;
+ status_req->usb_req.complete = NULL;
+ bdc_queue_xfr(bdc, status_req);
+
+ return 0;
+}
+
+/* Queue xfr on ep0 */
+static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ struct bdc *bdc;
+ int ret;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ req->usb_req.actual = 0;
+ req->usb_req.status = -EINPROGRESS;
+ req->epnum = ep->ep_num;
+
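+ /*
+ * A gadget driver that returned USB_GADGET_DELAYED_STATUS from
+ * setup() releases the deferred status stage with this call.
+ */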
+ if (bdc->delayed_status) {
+ bdc->delayed_status = false;
+ /* was the status stage delayed? */
+ if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
+ /* Queue a status stage BD */
+ ep0_queue_status_stage(bdc);
+ bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
+ return 0;
+ }
+ } else {
+ /*
+ * if delayed status is false and a 0 length transfer is
+ * requested, i.e. for the status stage of some setup request,
+ * then just return from here; the status stage is queued
+ * independently
+ */
+ if (req->usb_req.length == 0)
+ return 0;
+
+ }
+ ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (ret) {
+ dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
+ return ret;
+ }
+
+ return bdc_queue_xfr(bdc, req);
+}
+
+/* Queue data stage */
+static int ep0_queue_data_stage(struct bdc *bdc)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.ep = ep;
+ bdc->ep0_req.usb_req.complete = NULL;
+
+ return ep0_queue(ep, &bdc->ep0_req);
+}
+
+/* Queue req on ep */
+static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ struct bdc *bdc;
+ int ret = 0;
+
+ if (!req || !ep->usb_ep.desc)
+ return -EINVAL;
+
+ bdc = ep->bdc;
+
+ req->usb_req.actual = 0;
+ req->usb_req.status = -EINPROGRESS;
+ req->epnum = ep->ep_num;
+
+ ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (ret) {
+ dev_err(bdc->dev, "dma mapping failed\n");
+ return ret;
+ }
+
+ return bdc_queue_xfr(bdc, req);
+}
+
+/* Dequeue a request from ep */
+static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
+ bool start_pending, end_pending;
+ bool first_remove = false;
+ struct bdc_req *first_req;
+ struct bdc_bd *bd_start;
+ struct bd_table *table;
+ dma_addr_t next_bd_dma;
+ u64 deq_ptr_64 = 0;
+ struct bdc *bdc;
+ u32 tmp_32;
+ int ret;
+
+ bdc = ep->bdc;
+ start_pending = end_pending = false;
+ eqp_bdi = ep->bd_list.eqp_bdi - 1;
+
+ if (eqp_bdi < 0)
+ eqp_bdi = ep->bd_list.max_bdi;
+
+ start_bdi = req->bd_xfr.start_bdi;
+ end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);
+
+ dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
+ __func__, ep->name, start_bdi, end_bdi);
+ dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__,
+ ep, (void *)ep->usb_ep.desc);
+ /* if still connected, stop the ep to see where the HW is */
+ if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+ /* if there is an issue, then no need to go further */
+ if (ret)
+ return 0;
+ } else
+ return 0;
+
+ /*
+ * After the endpoint is stopped, there can be 3 cases: the
+ * request has been processed, is still pending, or is in the
+ * middle of processing
+ */
+
+ /* The current hw dequeue pointer */
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0);
+ deq_ptr_64 = tmp_32;
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1);
+ deq_ptr_64 |= ((u64)tmp_32 << 32);
+
+ /* we have the dma addr of next bd that will be fetched by hardware */
+ curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
+ if (curr_hw_dqpi < 0)
+ return curr_hw_dqpi;
+
+ /*
+ * curr_hw_dqpi points to the actual dqp of the HW, and the HW
+ * owns the bds from curr_hw_dqpi..eqp_bdi.
+ */
+
+ /* Check if start_bdi and end_bdi are in range of HW owned BD's */
+ if (curr_hw_dqpi > eqp_bdi) {
+ /* there is a wrap from last to 0 */
+ if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
+ start_pending = true;
+ end_pending = true;
+ } else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
+ end_pending = true;
+ }
+ } else {
+ if (start_bdi >= curr_hw_dqpi) {
+ start_pending = true;
+ end_pending = true;
+ } else if (end_bdi >= curr_hw_dqpi) {
+ end_pending = true;
+ }
+ }
+ dev_dbg(bdc->dev,
+ "start_pending:%d end_pending:%d speed:%d\n",
+ start_pending, end_pending, bdc->gadget.speed);
+
+ /* if everything from start till end has been processed, we cannot deq the req */
+ if (!start_pending && !end_pending)
+ return -EINVAL;
+
+ /*
+ * if ep_dequeue is called after disconnect then just return
+ * success from here
+ */
+ if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
+ return 0;
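+ /*
+ * compute the dma address of the bd following this request;
+ * this is where the HW should continue after the dequeue
+ */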
+ tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
+ table = ep->bd_list.bd_table_array[tbi];
+ next_bd_dma = table->dma +
+ sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
+ tbi * ep->bd_list.num_bds_table);
+
+ first_req = list_first_entry(&ep->queue, struct bdc_req,
+ queue);
+
+ if (req == first_req)
+ first_remove = true;
+
+ /*
+ * Due to a HW limitation we need to bypass chain bds and issue
+ * ep_bla; in case start is pending and this is the first
+ * request in the list, then issue ep_bla instead of marking it
+ * as a chain bd
+ */
+ if (start_pending && !first_remove) {
+ /*
+ * Mark the start bd as Chain bd, and point the chain
+ * bd to next_bd_dma
+ */
+ bd_start = bdi_to_bd(ep, start_bdi);
+ bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
+ bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
+ bd_start->offset[2] = 0x0;
+ bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
+ bdc_dbg_bd_list(bdc, ep);
+ } else if (end_pending) {
+ /*
+ * The transfer is stopped in the middle, move the
+ * HW deq pointer to next_bd_dma
+ */
+ ret = bdc_ep_bla(bdc, ep, next_bd_dma);
+ if (ret) {
+ dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Halt/Clear the ep based on value */
+static int ep_set_halt(struct bdc_ep *ep, u32 value)
+{
+ struct bdc *bdc;
+ int ret;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
+
+ if (value) {
+ dev_dbg(bdc->dev, "Halt\n");
+ if (ep->ep_num == 1)
+ bdc->ep0_state = WAIT_FOR_SETUP;
+
+ ret = bdc_ep_set_stall(bdc, ep->ep_num);
+ if (ret)
+ dev_err(bdc->dev, "failed to set STALL on %s\n",
+ ep->name);
+ else
+ ep->flags |= BDC_EP_STALL;
+ } else {
+ /* Clear */
+ dev_dbg(bdc->dev, "Before Clear\n");
+ ret = bdc_ep_clear_stall(bdc, ep->ep_num);
+ if (ret)
+ dev_err(bdc->dev, "failed to clear STALL on %s\n",
+ ep->name);
+ else
+ ep->flags &= ~BDC_EP_STALL;
+ dev_dbg(bdc->dev, "After Clear\n");
+ }
+
+ return ret;
+}
+
+/* Free all the endpoints */
+void bdc_free_ep(struct bdc *bdc)
+{
+ struct bdc_ep *ep;
+ u8 epnum;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ for (epnum = 1; epnum < bdc->num_eps; epnum++) {
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep)
+ continue;
+
+ if (ep->flags & BDC_EP_ENABLED)
+ ep_bd_list_free(ep, ep->bd_list.num_tabs);
+
+ /* ep0 is not in this gadget list */
+ if (epnum != 1)
+ list_del(&ep->usb_ep.ep_list);
+
+ kfree(ep);
+ }
+}
+
+/* USB2 spec, section 7.1.20 */
+static int bdc_set_test_mode(struct bdc *bdc)
+{
+ u32 usb2_pm;
+
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ usb2_pm &= ~BDC_PTC_MASK;
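+ /* the test selector is written to the PTC field of USPPM2 */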
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ switch (bdc->test_mode) {
+ case USB_TEST_J:
+ case USB_TEST_K:
+ case USB_TEST_SE0_NAK:
+ case USB_TEST_PACKET:
+ case USB_TEST_FORCE_ENABLE:
+ usb2_pm |= bdc->test_mode << 28;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
+ bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
+
+ return 0;
+}
+
+/*
+ * Helper function to handle Transfer status report with status as either
+ * success or short
+ */
+static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
+ struct bdc_sr *sreport)
+{
+ int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
+ struct bd_list *bd_list = &ep->bd_list;
+ int actual_length, length_short;
+ struct bd_transfer *bd_xfr;
+ struct bdc_bd *short_bd;
+ struct bdc_req *req;
+ u64 deq_ptr_64 = 0;
+ int status = 0;
+ int sr_status;
+ u32 tmp_32;
+
+ dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
+ bdc_dbg_srr(bdc, 0);
+ /* do not process this sr if the ignore flag is set */
+ if (ep->ignore_next_sr) {
+ ep->ignore_next_sr = false;
+ return;
+ }
+
+ if (unlikely(list_empty(&ep->queue))) {
+ dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
+ return;
+ }
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+
+ bd_xfr = &req->bd_xfr;
+ sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
+
+ /*
+ * If sr_status is short and this transfer has more than 1 bd
+ * then it needs special handling; this is only applicable to
+ * bulk and ctrl
+ */
+ if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
+ /*
+ * This is a multi-bd xfr; let's see which bd
+ * caused the short transfer and how many bytes
+ * have been transferred so far.
+ */
+ tmp_32 = le32_to_cpu(sreport->offset[0]);
+ deq_ptr_64 = tmp_32;
+ tmp_32 = le32_to_cpu(sreport->offset[1]);
+ deq_ptr_64 |= ((u64)tmp_32 << 32);
+ short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
+ if (unlikely(short_bdi < 0))
+ dev_warn(bdc->dev, "bd doesn't exist?\n");
+
+ start_bdi = bd_xfr->start_bdi;
+ /*
+ * We know start_bdi and short_bdi; count the full-length
+ * xfr bds in between, excluding the chain bds, which
+ * carry no data
+ */
+ if (start_bdi <= short_bdi) {
+ max_len_bds = short_bdi - start_bdi;
+ if (max_len_bds <= bd_list->num_bds_table) {
+ if (bdi_to_tbi(ep, start_bdi) !=
+ bdi_to_tbi(ep, short_bdi))
+ max_len_bds--;
+ } else {
+ chain_bds = max_len_bds/bd_list->num_bds_table;
+ max_len_bds -= chain_bds;
+ }
+ } else {
+ /* there is a wrap in the ring within an xfr */
+ chain_bds = (bd_list->max_bdi - start_bdi)/
+ bd_list->num_bds_table;
+ chain_bds += short_bdi/bd_list->num_bds_table;
+ max_len_bds = bd_list->max_bdi - start_bdi;
+ max_len_bds += short_bdi;
+ max_len_bds -= chain_bds;
+ }
+ /* max_len_bds is the number of full length bds */
+ end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
+ if (end_bdi != short_bdi)
+ ep->ignore_next_sr = true;
+
+ actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
+ short_bd = bdi_to_bd(ep, short_bdi);
+ /* length queued */
+ length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
+ /* actual length transferred */
+ length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
+ actual_length += length_short;
+ req->usb_req.actual = actual_length;
+ } else {
+ req->usb_req.actual = req->usb_req.length -
+ SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
+ dev_dbg(bdc->dev,
+ "len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
+ req->usb_req.length, req->usb_req.actual,
+ bd_xfr->next_hwd_bdi);
+ }
+
+ /* Update the dequeue pointer */
+ ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
+ if (req->usb_req.actual < req->usb_req.length) {
+ dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
+ if (req->usb_req.short_not_ok)
+ status = -EREMOTEIO;
+ }
+ bdc_req_complete(ep, bd_xfr->req, status);
+}
+
+/* EP0 setup related packet handlers */
+
+/*
+ * Setup packet received, just store the packet and process on next DS or SS
+ * started SR
+ */
+void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct usb_ctrlrequest *setup_pkt;
+ u32 len;
+
+ dev_dbg(bdc->dev,
+ "%s ep0_state:%s\n",
+ __func__, ep0_state_string[bdc->ep0_state]);
+ /* Store received setup packet */
+ setup_pkt = &bdc->setup_pkt;
+ memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
+ len = le16_to_cpu(setup_pkt->wLength);
+ if (!len)
+ bdc->ep0_state = WAIT_FOR_STATUS_START;
+ else
+ bdc->ep0_state = WAIT_FOR_DATA_START;
+
+ dev_dbg(bdc->dev,
+ "%s exit ep0_state:%s\n",
+ __func__, ep0_state_string[bdc->ep0_state]);
+}
+
+/* Stall ep0 */
+static void ep0_stall(struct bdc *bdc)
+{
+ struct bdc_ep *ep = bdc->bdc_ep_array[1];
+ struct bdc_req *req;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ bdc->delayed_status = false;
+ ep_set_halt(ep, 1);
+
+ /* de-queue any pending requests */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+ bdc_req_complete(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* SET_ADDRESS handler */
+static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ int ret = 0;
+ u32 addr;
+
+ addr = le16_to_cpu(ctrl->wValue);
+ dev_dbg(bdc->dev,
+ "%s addr:%d dev state:%d\n",
+ __func__, addr, state);
+
+ if (addr > 127)
+ return -EINVAL;
+
+ switch (state) {
+ case USB_STATE_DEFAULT:
+ case USB_STATE_ADDRESS:
+ /* Issue Address device command */
+ ret = bdc_address_device(bdc, addr);
+ if (ret)
+ return ret;
+
+ if (addr)
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
+
+ bdc->dev_addr = addr;
+ break;
+ default:
+ dev_warn(bdc->dev,
+ "SET Address in wrong device state %d\n",
+ state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Handler for SET/CLEAR FEATURE requests for device */
+static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
+ u16 wIndex, bool set)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ u32 usppms = 0;
+
+ dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
+ __func__, set, state);
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
+ if (set)
+ bdc->devstatus |= REMOTE_WAKE_ENABLE;
+ else
+ bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
+ break;
+
+ case USB_DEVICE_TEST_MODE:
+ dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
+ if ((wIndex & 0xFF) ||
+ (bdc->gadget.speed != USB_SPEED_HIGH) || !set)
+ return -EINVAL;
+
+ bdc->test_mode = wIndex >> 8;
+ break;
+
+ case USB_DEVICE_U1_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");
+
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ if (set) {
+ /* clear previous u1t */
+ usppms &= ~BDC_U1T(BDC_U1T_MASK);
+ usppms |= BDC_U1T(U1_TIMEOUT);
+ usppms |= BDC_U1E | BDC_PORT_W1S;
+ bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
+ } else {
+ usppms &= ~BDC_U1E;
+ usppms |= BDC_PORT_W1S;
+ bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
+ }
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case USB_DEVICE_U2_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");
+
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ if (set) {
+ usppms |= BDC_U2E;
+ usppms |= BDC_U2A;
+ bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
+ } else {
+ usppms &= ~BDC_U2E;
+ usppms &= ~BDC_U2A;
+ bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
+ }
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case USB_DEVICE_LTM_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
+ return -EOPNOTSUPP;
+ } /* USB_RECIP_DEVICE end */
+
+ return 0;
+}
+
+/* SET/CLEAR FEATURE handler */
+static int ep0_handle_feature(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt, bool set)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct bdc_ep *ep;
+ u16 wValue;
+ u16 wIndex;
+ int epnum;
+
+ wValue = le16_to_cpu(setup_pkt->wValue);
+ wIndex = le16_to_cpu(setup_pkt->wIndex);
+
+ dev_dbg(bdc->dev,
+ "%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
+ __func__, wValue, wIndex, state,
+ bdc->gadget.speed, set);
+
+ switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
+ case USB_RECIP_INTERFACE:
+ dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
+ /* USB3 spec, sec 9.4.9 */
+ if (wValue != USB_INTRF_FUNC_SUSPEND)
+ return -EINVAL;
+ /* USB3 spec, Table 9-8 */
+ if (set) {
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
+ dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
+ bdc->devstatus |= REMOTE_WAKE_ENABLE;
+ } else {
+ dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
+ bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
+ }
+ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
+ if (wValue != USB_ENDPOINT_HALT)
+ return -EINVAL;
+
+ epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
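+ /*
+ * map the USB ep address to the bdc_ep_array index: OUT ep n
+ * is at 2n, IN ep n is at 2n + 1, ep0 is at index 1
+ */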
+ if (epnum) {
+ if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum = epnum * 2 + 1;
+ else
+ epnum *= 2;
+ } else {
+ epnum = 1; /*EP0*/
+ }
+ /*
+ * If CLEAR_FEATURE on ep0 then don't do anything as the stall
+ * condition on ep0 has already been cleared when SETUP packet
+ * was received.
+ */
+ if (epnum == 1 && !set) {
+ dev_dbg(bdc->dev, "ep0 stall already cleared\n");
+ return 0;
+ }
+ dev_dbg(bdc->dev, "epnum=%d\n", epnum);
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep)
+ return -EINVAL;
+
+ return ep_set_halt(ep, set);
+ default:
+ dev_err(bdc->dev, "Unknown recipient\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* GET_STATUS request handler */
+static int ep0_handle_status(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct bdc_ep *ep;
+ u16 usb_status = 0;
+ u32 epnum;
+ u16 wIndex;
+
+ /* USB2.0 spec sec 9.4.5 */
+ if (state == USB_STATE_DEFAULT)
+ return -EINVAL;
+ wIndex = le16_to_cpu(setup_pkt->wIndex);
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ usb_status = bdc->devstatus;
+ switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ dev_dbg(bdc->dev,
+ "USB_RECIP_DEVICE devstatus:%08x\n",
+ bdc->devstatus);
+ /* USB3 spec, sec 9.4.5 */
+ if (bdc->gadget.speed == USB_SPEED_SUPER)
+ usb_status &= ~REMOTE_WAKE_ENABLE;
+ break;
+
+ case USB_RECIP_INTERFACE:
+ dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
+ if (bdc->gadget.speed == USB_SPEED_SUPER) {
+ /*
+ * Function remote wakeup status should come from
+ * the function driver: usb_status |= 1;
+ */
+ if (bdc->devstatus & REMOTE_WAKE_ENABLE)
+ usb_status |= REMOTE_WAKE_ENABLE;
+ } else {
+ usb_status = 0;
+ }
+
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
+ epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum) {
+ if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum = epnum*2 + 1;
+ else
+ epnum *= 2;
+ } else {
+ epnum = 1; /* EP0 */
+ }
+
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep) {
+ dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
+ return -EINVAL;
+ }
+ if (ep->flags & BDC_EP_STALL)
+ usb_status |= 1 << USB_ENDPOINT_HALT;
+
+ break;
+ default:
+ dev_err(bdc->dev, "Unknown recipient for get_status\n");
+ return -EINVAL;
+ }
+ /* prepare a data stage for GET_STATUS */
+ dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
+ *(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
+ bdc->ep0_req.usb_req.length = 2;
+ bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
+ ep0_queue_data_stage(bdc);
+
+ return 0;
+}
+
+static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
+{
+ /* ep0_set_sel_cmpl */
+}
+
+/* Queue data stage to handle 6 byte SET_SEL request */
+static int ep0_set_sel(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt)
+{
+ struct bdc_ep *ep;
+ u16 wLength;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ wLength = le16_to_cpu(setup_pkt->wLength);
+ if (unlikely(wLength != 6)) {
+ dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
+ return -EINVAL;
+ }
+ ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.ep = ep;
+ bdc->ep0_req.usb_req.length = 6;
+ bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
+ bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
+ ep0_queue_data_stage(bdc);
+
+ return 0;
+}
+
+/*
+ * Queue a 0 byte BD; this is needed only if wLength is more than the
+ * data length and the length is a multiple of MaxPacket
+ */
+static int ep0_queue_zlp(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ bdc->ep0_req.ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.usb_req.length = 0;
+ bdc->ep0_req.usb_req.complete = NULL;
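+ /*
+ * temporarily enter WAIT_FOR_DATA_START so that the first bd
+ * is formatted as a data stage bd
+ */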
+ bdc->ep0_state = WAIT_FOR_DATA_START;
+ ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
+ if (ret) {
+ dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
+ return ret;
+ }
+ bdc->ep0_state = WAIT_FOR_DATA_XMIT;
+
+ return 0;
+}
+
+/* Control request handler */
+static int handle_control_request(struct bdc *bdc)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct usb_ctrlrequest *setup_pkt;
+ int delegate_setup = 0;
+ int ret = 0;
+ int config = 0;
+
+ setup_pkt = &bdc->setup_pkt;
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup_pkt->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = ep0_set_address(bdc, setup_pkt);
+ bdc->devstatus &= DEVSTATUS_CLEAR;
+ break;
+
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ if (state == USB_STATE_ADDRESS) {
+ usb_gadget_set_state(&bdc->gadget,
+ USB_STATE_CONFIGURED);
+ } else if (state == USB_STATE_CONFIGURED) {
+ /*
+ * USB2 spec sec 9.4.7, if wValue is 0 then dev
+ * is moved to addressed state
+ */
+ config = le16_to_cpu(setup_pkt->wValue);
+ if (!config)
+ usb_gadget_set_state(
+ &bdc->gadget,
+ USB_STATE_ADDRESS);
+ }
+ delegate_setup = 1;
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
+ ret = ep0_handle_feature(bdc, setup_pkt, 1);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
+ ret = ep0_handle_feature(bdc, setup_pkt, 0);
+ break;
+
+ case USB_REQ_GET_STATUS:
+ dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
+ ret = ep0_handle_status(bdc, setup_pkt);
+ break;
+
+ case USB_REQ_SET_SEL:
+ dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
+ ret = ep0_set_sel(bdc, setup_pkt);
+ break;
+
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_warn(bdc->dev,
+ "USB_REQ_SET_ISOCH_DELAY not handled\n");
+ ret = 0;
+ break;
+ default:
+ delegate_setup = 1;
+ }
+ } else {
+ delegate_setup = 1;
+ }
+
+ if (delegate_setup) {
+ spin_unlock(&bdc->lock);
+ ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
+ spin_lock(&bdc->lock);
+ }
+
+ return ret;
+}
+
+/* EP0: Data stage started */
+void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct bdc_ep *ep;
+ int ret = 0;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ep = bdc->bdc_ep_array[1];
+ /* If ep0 was stalled, then clear it first */
+ if (ep->flags & BDC_EP_STALL) {
+ ret = ep_set_halt(ep, 0);
+ if (ret)
+ goto err;
+ }
+ if (bdc->ep0_state != WAIT_FOR_DATA_START)
+ dev_warn(bdc->dev,
+ "Data stage not expected ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ ret = handle_control_request(bdc);
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ /*
+ * The ep0 state will remain WAIT_FOR_DATA_START till
+ * we receive an ep_queue on ep0
+ */
+ bdc->delayed_status = true;
+ return;
+ }
+ if (!ret) {
+ bdc->ep0_state = WAIT_FOR_DATA_XMIT;
+ dev_dbg(bdc->dev,
+ "ep0_state:%s", ep0_state_string[bdc->ep0_state]);
+ return;
+ }
+err:
+ ep0_stall(bdc);
+}
+
+/* EP0: status stage started */
+void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct usb_ctrlrequest *setup_pkt;
+ struct bdc_ep *ep;
+ int ret = 0;
+
+ dev_dbg(bdc->dev,
+ "%s ep0_state:%s",
+ __func__, ep0_state_string[bdc->ep0_state]);
+ ep = bdc->bdc_ep_array[1];
+
+ /* if a ZLP was pending, it is no longer needed */
+ if (bdc->zlp_needed)
+ bdc->zlp_needed = false;
+
+ if (ep->flags & BDC_EP_STALL) {
+ ret = ep_set_halt(ep, 0);
+ if (ret)
+ goto err;
+ }
+
+ if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
+ (bdc->ep0_state != WAIT_FOR_DATA_XMIT))
+ dev_err(bdc->dev,
+ "Status stage recv but ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ /* is the data stage still in progress? */
+ if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
+ bdc->ep0_state = STATUS_PENDING;
+ /* Status stage will be queued upon Data stage transmit event */
+ dev_dbg(bdc->dev,
+ "status started but data not transmitted yet\n");
+ return;
+ }
+ setup_pkt = &bdc->setup_pkt;
+
+ /*
+ * For a 2 stage setup, the setup request is processed only now;
+ * for a 3 stage setup the data stage has already been handled
+ */
+ if (!le16_to_cpu(setup_pkt->wLength)) {
+ ret = handle_control_request(bdc);
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ bdc->delayed_status = true;
+ /* ep0_state will remain WAIT_FOR_STATUS_START */
+ return;
+ }
+ }
+ if (!ret) {
+ /* Queue a status stage BD */
+ ep0_queue_status_stage(bdc);
+ bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
+ dev_dbg(bdc->dev,
+ "ep0_state:%s", ep0_state_string[bdc->ep0_state]);
+ return;
+ }
+err:
+ ep0_stall(bdc);
+}
+
+/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
+static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ switch (bdc->ep0_state) {
+ case WAIT_FOR_DATA_XMIT:
+ bdc->ep0_state = WAIT_FOR_STATUS_START;
+ break;
+ case WAIT_FOR_STATUS_XMIT:
+ bdc->ep0_state = WAIT_FOR_SETUP;
+ if (bdc->test_mode) {
+ int ret;
+
+ dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
+ ret = bdc_set_test_mode(bdc);
+ if (ret < 0) {
+ dev_err(bdc->dev, "Err in setting Test mode\n");
+ return;
+ }
+ bdc->test_mode = 0;
+ }
+ break;
+ case STATUS_PENDING:
+ bdc_xsf_ep0_status_start(bdc, sreport);
+ break;
+
+ default:
+ dev_err(bdc->dev,
+ "Unknown ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ }
+}
+
+/* xfr completion status report handler */
+void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct bdc_ep *ep;
+ u32 sr_status;
+ u8 ep_num;
+
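+ /* bits 8:4 of the SR's dword3 identify the endpoint */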
+ ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
+ ep = bdc->bdc_ep_array[ep_num];
+ if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
+ dev_err(bdc->dev, "xsf for ep not enabled\n");
+ return;
+ }
+ /*
+ * check if this transfer is after link went from U3->U0 due
+ * to remote wakeup
+ */
+ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
+ bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
+ dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
+ __func__);
+ }
+ sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
+ dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
+ __func__, sr_status, ep->name);
+
+ switch (sr_status) {
+ case XSF_SUCC:
+ case XSF_SHORT:
+ handle_xsr_succ_status(bdc, ep, sreport);
+ if (ep_num == 1)
+ ep0_xsf_complete(bdc, sreport);
+ break;
+
+ case XSF_SETUP_RECV:
+ case XSF_DATA_START:
+ case XSF_STATUS_START:
+ if (ep_num != 1) {
+ dev_err(bdc->dev,
+ "ep0 related packets on non ep0 endpoint\n");
+ return;
+ }
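+ /* dispatch to the ep0 setup/data/status start handlers */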
+ bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
+ break;
+
+ case XSF_BABB:
+ if (ep_num == 1) {
+ dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
+ bdc->zlp_needed);
+ /*
+ * If the last completed transfer had wLength > Data Len,
+ * and Len is a multiple of MaxPacket, then queue a ZLP
+ */
+ if (bdc->zlp_needed) {
+ /* queue 0 length bd */
+ ep0_queue_zlp(bdc);
+ return;
+ }
+ }
+ dev_warn(bdc->dev, "Babble on ep not handled\n");
+ break;
+ default:
+ dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
+ break;
+ }
+}
+
+static int bdc_gadget_ep_queue(struct usb_ep *_ep,
+ struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct bdc_req *req;
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !_ep->desc)
+ return -ESHUTDOWN;
+
+ if (!_req || !_req->complete || !_req->buf)
+ return -EINVAL;
+
+ ep = to_bdc_ep(_ep);
+ req = to_bdc_req(_req);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
+ dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
+ _req, ep->name, _req->length, _req->zero);
+
+ if (!ep->usb_ep.desc) {
+ dev_warn(bdc->dev,
+ "trying to queue req %p to disabled %s\n",
+ _req, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ if (_req->length > MAX_XFR_LEN) {
+ dev_warn(bdc->dev,
+ "req length > supported MAX:%d requested:%d\n",
+ MAX_XFR_LEN, _req->length);
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (ep == bdc->bdc_ep_array[1])
+ ret = ep0_queue(ep, req);
+ else
+ ret = ep_queue(ep, req);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct bdc_req *req;
+ struct bdc_req *iter;
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ ep = to_bdc_ep(_ep);
+ req = to_bdc_req(_req);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
+ bdc_dbg_bd_list(bdc, ep);
+ spin_lock_irqsave(&bdc->lock, flags);
+
+ req = NULL;
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->usb_req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&bdc->lock, flags);
+ dev_err(bdc->dev, "usb_req !=req n");
+ return -EINVAL;
+ }
+ ret = ep_dequeue(ep, req);
+ if (ret) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ bdc_req_complete(ep, req, -ECONNRESET);
+
+err:
+ bdc_dbg_bd_list(bdc, ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
+ ret = -EINVAL;
+ else if (!list_empty(&ep->queue))
+ ret = -EAGAIN;
+ else
+ ret = ep_set_halt(ep, value);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct bdc_req *req;
+ struct bdc_ep *ep;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ ep = to_bdc_ep(_ep);
+ req->ep = ep;
+ req->epnum = ep->ep_num;
+ req->usb_req.dma = DMA_ADDR_INVALID;
+ dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
+
+ return &req->usb_req;
+}
+
+static void bdc_gadget_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct bdc_req *req;
+
+ req = to_bdc_req(_req);
+ kfree(req);
+}
+
+/* endpoint operations */
+
+/* configure endpoint and also allocate resources */
+static int bdc_gadget_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("%s invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("%s missing wMaxPacketSize\n", __func__);
+ return -EINVAL;
+ }
+
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+
+ /* Sanity check, upper layer will not send enable for ep0 */
+ if (ep == bdc->bdc_ep_array[1])
+ return -EINVAL;
+
+ if (!bdc->gadget_driver
+ || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
+ return -ESHUTDOWN;
+ }
+
+ dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
+ spin_lock_irqsave(&bdc->lock, flags);
+ ep->desc = desc;
+ ep->comp_desc = _ep->comp_desc;
+ ret = bdc_ep_enable(ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_disable(struct usb_ep *_ep)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep) {
+ pr_debug("bdc: invalid parameters\n");
+ return -EINVAL;
+ }
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+
+ /* Upper layer will not call this for ep0, but do a sanity check */
+ if (ep == bdc->bdc_ep_array[1]) {
+ dev_warn(bdc->dev, "%s called for ep0\n", __func__);
+ return -EINVAL;
+ }
+ dev_dbg(bdc->dev,
+ "%s() ep:%s ep->flags:%08x\n",
+ __func__, ep->name, ep->flags);
+
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+ if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
+ dev_warn(bdc->dev, "%s is already disabled\n",
+ ep->name);
+ return 0;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+ ret = bdc_ep_disable(ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_ep_ops bdc_gadget_ep_ops = {
+ .enable = bdc_gadget_ep_enable,
+ .disable = bdc_gadget_ep_disable,
+ .alloc_request = bdc_gadget_alloc_request,
+ .free_request = bdc_gadget_free_request,
+ .queue = bdc_gadget_ep_queue,
+ .dequeue = bdc_gadget_ep_dequeue,
+ .set_halt = bdc_gadget_ep_set_halt
+};
+
+/* dir = 1 is IN */
+static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->bdc = bdc;
+ ep->dir = dir;
+
+ if (dir)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+
+ /* ep->ep_num is the index inside bdc_ep_array: ep0 is at 1, USB ep n is at 2n (OUT) and 2n + 1 (IN) */
+ if (epnum == 1) {
+ ep->ep_num = 1;
+ bdc->bdc_ep_array[ep->ep_num] = ep;
+ snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
+ ep->usb_ep.caps.type_control = true;
+ ep->comp_desc = NULL;
+ bdc->gadget.ep0 = &ep->usb_ep;
+ } else {
+ if (dir)
+ ep->ep_num = epnum * 2 - 1;
+ else
+ ep->ep_num = epnum * 2 - 2;
+
+ bdc->bdc_ep_array[ep->ep_num] = ep;
+ snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
+ dir & 1 ? "in" : "out");
+
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.caps.type_iso = true;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
+ ep->usb_ep.max_streams = 0;
+ list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
+ }
+ ep->usb_ep.ops = &bdc_gadget_ep_ops;
+ ep->usb_ep.name = ep->name;
+ ep->flags = 0;
+ ep->ignore_next_sr = false;
+ dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
+ ep, ep->usb_ep.name, epnum, ep->ep_num);
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ return 0;
+}
+
+/* Init all ep */
+int bdc_init_ep(struct bdc *bdc)
+{
+ u8 epnum;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ INIT_LIST_HEAD(&bdc->gadget.ep_list);
+ /* init ep0 */
+ ret = init_ep(bdc, 1, 0);
+ if (ret) {
+ dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
+ return ret;
+ }
+
+ for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
+ /* OUT */
+ ret = init_ep(bdc, epnum, 0);
+ if (ret) {
+ dev_err(bdc->dev,
+ "init ep failed for:%d error: %d\n",
+ epnum, ret);
+ return ret;
+ }
+
+ /* IN */
+ ret = init_ep(bdc, epnum, 1);
+ if (ret) {
+ dev_err(bdc->dev,
+ "init ep failed for:%d error: %d\n",
+ epnum, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
new file mode 100644
index 000000000..4d3affd07
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * bdc_ep.h - header for the BDC debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ */
+#ifndef __LINUX_BDC_EP_H__
+#define __LINUX_BDC_EP_H__
+
+int bdc_init_ep(struct bdc *bdc);
+int bdc_ep_disable(struct bdc_ep *ep);
+int bdc_ep_enable(struct bdc_ep *ep);
+void bdc_free_ep(struct bdc *bdc);
+
+#endif /* __LINUX_BDC_EP_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
new file mode 100644
index 000000000..53ffaf4e2
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * bdc_udc.c - BRCM BDC USB3.0 device controller gadget ops
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/gadget/udc/
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+#include <linux/platform_device.h>
+
+#include "bdc.h"
+#include "bdc_ep.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+static const struct usb_gadget_ops bdc_gadget_ops;
+
+static const char * const conn_speed_str[] = {
+ "Not connected",
+ "Full Speed",
+ "Low Speed",
+ "High Speed",
+ "Super Speed",
+};
+
+/* EP0 initial descriptor */
+static struct usb_endpoint_descriptor bdc_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .bEndpointAddress = 0,
+ .wMaxPacketSize = cpu_to_le16(EP0_MAX_PKT_SIZE),
+};
+
+/* Advance the srr dqp maintained by SW */
+static void srr_dqp_index_advc(struct bdc *bdc, u32 srr_num)
+{
+ struct srr *srr;
+
+ srr = &bdc->srr;
+ dev_dbg_ratelimited(bdc->dev, "srr->dqp_index:%d\n", srr->dqp_index);
+ srr->dqp_index++;
+ /* roll back to 0 if we are past the last entry */
+ if (srr->dqp_index == NUM_SR_ENTRIES)
+ srr->dqp_index = 0;
+}
+
+/* connect sr */
+static void bdc_uspc_connected(struct bdc *bdc)
+{
+ u32 speed, temp;
+ u32 usppms;
+ int ret;
+
+ temp = bdc_readl(bdc->regs, BDC_USPC);
+ speed = BDC_PSP(temp);
+ dev_dbg(bdc->dev, "%s speed=%x\n", __func__, speed);
+ switch (speed) {
+ case BDC_SPEED_SS:
+ bdc_gadget_ep0_desc.wMaxPacketSize =
+ cpu_to_le16(EP0_MAX_PKT_SIZE);
+ bdc->gadget.ep0->maxpacket = EP0_MAX_PKT_SIZE;
+ bdc->gadget.speed = USB_SPEED_SUPER;
+ /* Enable U1T in SS mode */
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ usppms &= ~BDC_U1T(0xff);
+ usppms |= BDC_U1T(U1_TIMEOUT);
+ usppms |= BDC_PORT_W1S;
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case BDC_SPEED_HS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ bdc->gadget.ep0->maxpacket = 64;
+ bdc->gadget.speed = USB_SPEED_HIGH;
+ break;
+
+ case BDC_SPEED_FS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ bdc->gadget.ep0->maxpacket = 64;
+ bdc->gadget.speed = USB_SPEED_FULL;
+ break;
+
+ case BDC_SPEED_LS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
+ bdc->gadget.ep0->maxpacket = 8;
+ bdc->gadget.speed = USB_SPEED_LOW;
+ break;
+ default:
+ dev_err(bdc->dev, "UNDEFINED SPEED\n");
+ return;
+ }
+ dev_dbg(bdc->dev, "connected at %s\n", conn_speed_str[speed]);
+ /* Now we know the speed, configure ep0 */
+ bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
+ ret = bdc_config_ep(bdc, bdc->bdc_ep_array[1]);
+ if (ret)
+ dev_err(bdc->dev, "EP0 config failed\n");
+ bdc->bdc_ep_array[1]->usb_ep.desc = &bdc_gadget_ep0_desc;
+ bdc->bdc_ep_array[1]->flags |= BDC_EP_ENABLED;
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
+}
+
+/* device got disconnected */
+static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ /*
+ * Only stop ep0 from here, rest of the endpoints will be disabled
+ * from gadget_disconnect
+ */
+ ep = bdc->bdc_ep_array[1];
+ if (ep && (ep->flags & BDC_EP_ENABLED))
+ /* if enabled then stop and remove requests */
+ bdc_ep_disable(ep);
+
+ if (bdc->gadget_driver && bdc->gadget_driver->disconnect) {
+ spin_unlock(&bdc->lock);
+ bdc->gadget_driver->disconnect(&bdc->gadget);
+ spin_lock(&bdc->lock);
+ }
+ /* Set Unknown speed */
+ bdc->gadget.speed = USB_SPEED_UNKNOWN;
+ bdc->devstatus &= DEVSTATUS_CLEAR;
+ bdc->delayed_status = false;
+ bdc->reinit = reinit;
+ bdc->test_mode = false;
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
+}
+
+/* TNotify wakeup timer handler */
+static void bdc_func_wake_timer(struct work_struct *work)
+{
+ struct bdc *bdc = container_of(work, struct bdc, func_wake_notify.work);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ /*
+ * Check if the host has started transferring on the endpoints;
+ * FUNC_WAKE_ISSUED is cleared when a transfer has started after
+ * resume
+ */
+ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
+ dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n");
+ /* flag is still set, so again send func wake */
+ bdc_function_wake_fh(bdc, 0);
+ schedule_delayed_work(&bdc->func_wake_notify,
+ msecs_to_jiffies(BDC_TNOTIFY));
+ }
+ spin_unlock_irqrestore(&bdc->lock, flags);
+}
+
+/* handler for Link state change condition */
+static void handle_link_state_change(struct bdc *bdc, u32 uspc)
+{
+ u32 link_state;
+
+ dev_dbg(bdc->dev, "Link state change");
+ link_state = BDC_PST(uspc);
+ switch (link_state) {
+ case BDC_LINK_STATE_U3:
+ if ((bdc->gadget.speed != USB_SPEED_UNKNOWN) &&
+ bdc->gadget_driver->suspend) {
+ dev_dbg(bdc->dev, "Entered Suspend mode\n");
+ spin_unlock(&bdc->lock);
+ bdc->devstatus |= DEVICE_SUSPENDED;
+ bdc->gadget_driver->suspend(&bdc->gadget);
+ spin_lock(&bdc->lock);
+ }
+ break;
+ case BDC_LINK_STATE_U0:
+ if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ if (bdc->gadget.speed == USB_SPEED_SUPER) {
+ bdc_function_wake_fh(bdc, 0);
+ bdc->devstatus |= FUNC_WAKE_ISSUED;
+ /*
+ * Start a Notification timer and check if the
+ * Host transferred anything on any of the EPs,
+ * if not then send function wake again every
+ * TNotification secs until host initiates
+ * transfer to BDC, USB3 spec Table 8.13
+ */
+ schedule_delayed_work(
+ &bdc->func_wake_notify,
+ msecs_to_jiffies(BDC_TNOTIFY));
+ dev_dbg(bdc->dev, "sched func_wake_notify\n");
+ }
+ }
+ break;
+
+ case BDC_LINK_STATE_RESUME:
+ dev_dbg(bdc->dev, "Resumed from Suspend\n");
+ if (bdc->devstatus & DEVICE_SUSPENDED) {
+ bdc->gadget_driver->resume(&bdc->gadget);
+ bdc->devstatus &= ~DEVICE_SUSPENDED;
+ }
+ break;
+ default:
+ dev_dbg(bdc->dev, "link state:%d\n", link_state);
+ }
+}
+
+/* something changes on upstream port, handle it here */
+void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ u32 clear_flags = 0;
+ u32 uspc;
+ bool connected = false;
+ bool disconn = false;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ dev_dbg(bdc->dev, "%s uspc=0x%08x\n", __func__, uspc);
+
+ /* Port connect changed */
+ if (uspc & BDC_PCC) {
+ /* Vbus not present, and not connected to Downstream port */
+ if ((uspc & BDC_VBC) && !(uspc & BDC_VBS) && !(uspc & BDC_PCS))
+ disconn = true;
+ else if ((uspc & BDC_PCS) && !BDC_PST(uspc))
+ connected = true;
+ clear_flags |= BDC_PCC;
+ }
+
+ /* Change in VBus and VBus is present */
+ if ((uspc & BDC_VBC) && (uspc & BDC_VBS)) {
+ if (bdc->pullup) {
+ dev_dbg(bdc->dev, "Do a softconnect\n");
+ /* Attached state, do a softconnect */
+ bdc_softconn(bdc);
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_POWERED);
+ }
+ clear_flags |= BDC_VBC;
+ } else if ((uspc & BDC_PRS) || (uspc & BDC_PRC) || disconn) {
+ /* Hot reset, warm reset, 2.0 bus reset or disconn */
+ dev_dbg(bdc->dev, "Port reset or disconn\n");
+ bdc_uspc_disconnected(bdc, disconn);
+ clear_flags |= BDC_PRC;
+ } else if ((uspc & BDC_PSC) && (uspc & BDC_PCS)) {
+ /* Change in Link state */
+ handle_link_state_change(bdc, uspc);
+ clear_flags |= BDC_PSC;
+ }
+
+ /*
+ * In SS we might not have PRC bit set before connection, but in 2.0
+ * the PRC bit is set before connection, so moving this condition out
+ * of bus reset to handle both SS/2.0 speeds.
+ */
+ if (connected) {
+ /* This is the connect event for U0/L0 */
+ dev_dbg(bdc->dev, "Connected\n");
+ bdc_uspc_connected(bdc);
+ bdc->devstatus &= ~(DEVICE_SUSPENDED);
+ }
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc &= (~BDC_USPSC_RW);
+ dev_dbg(bdc->dev, "uspc=%x\n", uspc);
+ bdc_writel(bdc->regs, BDC_USPC, clear_flags);
+}
+
+/* Main interrupt handler for bdc */
+static irqreturn_t bdc_udc_interrupt(int irq, void *_bdc)
+{
+ u32 eqp_index, dqp_index, sr_type, srr_int;
+ struct bdc_sr *sreport;
+ struct bdc *bdc = _bdc;
+ u32 status;
+ int ret;
+
+ spin_lock(&bdc->lock);
+ status = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (!(status & BDC_GIP)) {
+ spin_unlock(&bdc->lock);
+ return IRQ_NONE;
+ }
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ /* Check if the SRR IP bit is set */
+ if (!(srr_int & BDC_SRR_IP)) {
+ dev_warn(bdc->dev, "Global irq pending but SRR IP is 0\n");
+ spin_unlock(&bdc->lock);
+ return IRQ_NONE;
+ }
+ eqp_index = BDC_SRR_EPI(srr_int);
+ dqp_index = BDC_SRR_DPI(srr_int);
+ dev_dbg(bdc->dev,
+ "%s eqp_index=%d dqp_index=%d srr.dqp_index=%d\n\n",
+ __func__, eqp_index, dqp_index, bdc->srr.dqp_index);
+
+ /* check for ring empty condition */
+ if (eqp_index == dqp_index) {
+ dev_dbg(bdc->dev, "SRR empty?\n");
+ spin_unlock(&bdc->lock);
+ return IRQ_HANDLED;
+ }
+
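+ /*
+ * process status reports until the SW dequeue index catches
+ * up with the HW enqueue index
+ */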
+ while (bdc->srr.dqp_index != eqp_index) {
+ sreport = &bdc->srr.sr_bds[bdc->srr.dqp_index];
+ /* make sure the sreport is fully read before using it */
+ rmb();
+ sr_type = le32_to_cpu(sreport->offset[3]) & BD_TYPE_BITMASK;
+ dev_dbg_ratelimited(bdc->dev, "sr_type=%d\n", sr_type);
+ switch (sr_type) {
+ case SR_XSF:
+ bdc->sr_handler[0](bdc, sreport);
+ break;
+
+ case SR_USPC:
+ bdc->sr_handler[1](bdc, sreport);
+ break;
+ default:
+ dev_warn(bdc->dev, "SR:%d not handled\n", sr_type);
+ }
+ /* Advance the srr dqp index */
+ srr_dqp_index_advc(bdc, 0);
+ }
+ /* update the hw dequeue pointer */
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ srr_int &= ~BDC_SRR_DPI_MASK;
+ srr_int &= ~(BDC_SRR_RWS|BDC_SRR_RST|BDC_SRR_ISR);
+ srr_int |= ((bdc->srr.dqp_index) << 16);
+ srr_int |= BDC_SRR_IP;
+ bdc_writel(bdc->regs, BDC_SRRINT(0), srr_int);
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ if (bdc->reinit) {
+ ret = bdc_reinit(bdc);
+ if (ret)
+ dev_err(bdc->dev, "err in bdc reinit\n");
+ }
+
+ spin_unlock(&bdc->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* Gadget ops */
+static int bdc_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (bdc->gadget_driver) {
+ dev_err(bdc->dev, "%s is already bound to %s\n",
+ bdc->gadget.name,
+ bdc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err;
+ }
+ /*
+ * Run the controller from here; when the BDC is connected to
+ * the host, the driver will receive a USPC SR with VBUS present
+ * and will then do a softconnect.
+ */
+ ret = bdc_run(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "%s bdc run fail\n", __func__);
+ goto err;
+ }
+ bdc->gadget_driver = driver;
+ bdc->gadget.dev.driver = &driver->driver;
+err:
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_udc_stop(struct usb_gadget *gadget)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ bdc_stop(bdc);
+ bdc->gadget_driver = NULL;
+ bdc->gadget.dev.driver = NULL;
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ u32 uspc;
+
+ dev_dbg(bdc->dev, "%s() is_on:%d\n", __func__, is_on);
+ if (!gadget)
+ return -EINVAL;
+
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (!is_on) {
+ bdc_softdisconn(bdc);
+ bdc->pullup = false;
+ } else {
+ /*
+ * For a self powered device, we need to wait till we receive
+ * a VBUS change and VBUS present event; only then, if the
+ * pullup flag is set, do we present the Termination.
+ */
+ bdc->pullup = true;
+ /*
+ * Check if the BDC is already connected to the host, i.e.
+ * VBUS=1; if yes, then present TERM now. This is typical
+ * for bus powered devices.
+ */
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ if (uspc & BDC_VBS)
+ bdc_softconn(bdc);
+ }
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_set_selfpowered(struct usb_gadget *gadget,
+ int is_self)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ gadget->is_selfpowered = (is_self != 0);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (is_self)
+ bdc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
+ else
+ bdc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ u8 link_state;
+ u32 uspc;
+ int ret = 0;
+
+ dev_dbg(bdc->dev,
+ "%s() bdc->devstatus=%08x\n",
+ __func__, bdc->devstatus);
+
+ if (!(bdc->devstatus & REMOTE_WAKE_ENABLE))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&bdc->lock, flags);
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ link_state = BDC_PST(uspc);
+ dev_dbg(bdc->dev, "link_state =%d portsc=%x", link_state, uspc);
+ if (link_state != BDC_LINK_STATE_U3) {
+ dev_warn(bdc->dev,
+ "can't wakeup from link state %d\n",
+ link_state);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (bdc->gadget.speed == USB_SPEED_SUPER)
+ bdc->devstatus |= REMOTE_WAKEUP_ISSUED;
+
+ uspc &= ~BDC_PST_MASK;
+ uspc &= (~BDC_USPSC_RW);
+ uspc |= BDC_PST(BDC_LINK_STATE_U0);
+ uspc |= BDC_SWS;
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ link_state = BDC_PST(uspc);
+ dev_dbg(bdc->dev, "link_state =%d portsc=%x", link_state, uspc);
+out:
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_gadget_ops bdc_gadget_ops = {
+ .wakeup = bdc_udc_wakeup,
+ .set_selfpowered = bdc_udc_set_selfpowered,
+ .pullup = bdc_udc_pullup,
+ .udc_start = bdc_udc_start,
+ .udc_stop = bdc_udc_stop,
+};
+
+/* Init the gadget interface and register the udc */
+int bdc_udc_init(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ bdc->gadget.ops = &bdc_gadget_ops;
+ bdc->gadget.max_speed = USB_SPEED_SUPER;
+ bdc->gadget.speed = USB_SPEED_UNKNOWN;
+ bdc->gadget.dev.parent = bdc->dev;
+
+ bdc->gadget.sg_supported = false;
+
+ bdc->gadget.name = BRCM_BDC_NAME;
+ ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt,
+ IRQF_SHARED, BRCM_BDC_NAME, bdc);
+ if (ret) {
+ dev_err(bdc->dev,
+ "failed to request irq #%d %d\n",
+ bdc->irq, ret);
+ return ret;
+ }
+
+ ret = bdc_init_ep(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "bdc init ep fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = usb_add_gadget_udc(bdc->dev, &bdc->gadget);
+ if (ret) {
+ dev_err(bdc->dev, "failed to register udc\n");
+ goto err0;
+ }
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
+ bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
+ /*
+ * Allocate bd list for ep0 only, ep0 will be enabled on connect
+ * status report when the speed is known
+ */
+ ret = bdc_ep_enable(bdc->bdc_ep_array[1]);
+ if (ret) {
+ dev_err(bdc->dev, "fail to enable %s\n",
+ bdc->bdc_ep_array[1]->name);
+ goto err1;
+ }
+ INIT_DELAYED_WORK(&bdc->func_wake_notify, bdc_func_wake_timer);
+ /* Enable Interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ return 0;
+err1:
+ usb_del_gadget_udc(&bdc->gadget);
+err0:
+ bdc_free_ep(bdc);
+
+ return ret;
+}
+
+void bdc_udc_exit(struct bdc *bdc)
+{
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ bdc_ep_disable(bdc->bdc_ep_array[1]);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ usb_del_gadget_udc(&bdc->gadget);
+ bdc_free_ep(bdc);
+}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
new file mode 100644
index 000000000..c40f2ecbe
--- /dev/null
+++ b/drivers/usb/gadget/udc/core.c
@@ -0,0 +1,1877 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * udc.c - Core UDC Framework
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Felipe Balbi <balbi@ti.com>
+ */
+
+#define pr_fmt(fmt) "UDC core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+
+#include "trace.h"
+
+static DEFINE_IDA(gadget_id_numbers);
+
+static struct bus_type gadget_bus_type;
+
+/**
+ * struct usb_udc - describes one usb device controller
+ * @driver: the gadget driver pointer. For use by the class code
+ * @dev: the child device to the actual controller
+ * @gadget: the gadget. For use by the class code
+ * @list: for use by the udc class driver
+ * @vbus: for udcs who care about vbus status, this value is real vbus status;
+ * for udcs who do not care about vbus status, this value is always true
+ * @started: the UDC's started state. True if the UDC has been started.
+ * @allow_connect: Indicates whether UDC is allowed to be pulled up.
+ * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
+ * unbound.
+ * @vbus_work: work routine to handle VBUS status change notifications.
+ * @connect_lock: protects udc->started, gadget->connect,
+ * gadget->allow_connect and gadget->deactivate. The routines
+ * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
+ * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
+ * usb_gadget_udc_stop_locked() are called with this lock held.
+ *
+ * This represents the internal data structure which is used by the UDC-class
+ * to hold information about udc driver and gadget together.
+ */
+struct usb_udc {
+ struct usb_gadget_driver *driver;
+ struct usb_gadget *gadget;
+ struct device dev;
+ struct list_head list;
+ bool vbus;
+ bool started;
+ bool allow_connect;
+ struct work_struct vbus_work;
+ struct mutex connect_lock;
+};
+
+static struct class *udc_class;
+static LIST_HEAD(udc_list);
+
+/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
+static DEFINE_MUTEX(udc_lock);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
+ * @ep:the endpoint being configured
+ * @maxpacket_limit:value of maximum packet size limit
+ *
+ * This function should be used only in UDC drivers to initialize endpoint
+ * (usually in probe function).
+ */
+void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
+ unsigned maxpacket_limit)
+{
+ ep->maxpacket_limit = maxpacket_limit;
+ ep->maxpacket = maxpacket_limit;
+
+ trace_usb_ep_set_maxpacket_limit(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
+
+/**
+ * usb_ep_enable - configure endpoint, making it usable
+ * @ep:the endpoint being configured. may not be the endpoint named "ep0".
+ * drivers discover endpoints through the ep_list of a usb_gadget.
+ *
+ * When configurations are set, or when interface settings change, the driver
+ * will enable or disable the relevant endpoints. while it is enabled, an
+ * endpoint may be used for i/o until the driver receives a disconnect() from
+ * the host or until the endpoint is disabled.
+ *
+ * the ep0 implementation (which calls this routine) must ensure that the
+ * hardware capabilities of each endpoint match the descriptor provided
+ * for it. for example, an endpoint named "ep2in-bulk" would be usable
+ * for interrupt transfers as well as bulk, but it likely couldn't be used
+ * for iso transfers or for endpoint 14. some endpoints are fully
+ * configurable, with more generic names like "ep-a". (remember that for
+ * USB, "in" means "towards the USB host".)
+ *
+ * This routine may be called in an atomic (interrupt) context.
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_enable(struct usb_ep *ep)
+{
+ int ret = 0;
+
+ if (ep->enabled)
+ goto out;
+
+ /* UDC drivers can't handle endpoints with maxpacket size 0 */
+ if (usb_endpoint_maxp(ep->desc) == 0) {
+ /*
+ * We should log an error message here, but we can't call
+ * dev_err() because there's no way to find the gadget
+ * given only ep.
+ */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ep->ops->enable(ep, ep->desc);
+ if (ret)
+ goto out;
+
+ ep->enabled = true;
+
+out:
+ trace_usb_ep_enable(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_enable);
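+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * function driver's set_alt() handler typically stores the descriptor
+ * chosen for the current speed and then enables the endpoint.  The names
+ * f_ep and f_desc below are hypothetical.
+ *
+ *	f_ep->desc = f_desc;
+ *	status = usb_ep_enable(f_ep);
+ *	if (status < 0)
+ *		return status;
+ */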
+
+/**
+ * usb_ep_disable - endpoint is no longer usable
+ * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
+ *
+ * no other task may be using this endpoint when this is called.
+ * any pending and uncompleted requests will complete with status
+ * indicating disconnect (-ESHUTDOWN) before this call returns.
+ * gadget drivers must call usb_ep_enable() again before queueing
+ * requests to the endpoint.
+ *
+ * This routine may be called in an atomic (interrupt) context.
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_disable(struct usb_ep *ep)
+{
+ int ret = 0;
+
+ if (!ep->enabled)
+ goto out;
+
+ ret = ep->ops->disable(ep);
+ if (ret)
+ goto out;
+
+ ep->enabled = false;
+
+out:
+ trace_usb_ep_disable(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_disable);
+
+/**
+ * usb_ep_alloc_request - allocate a request object to use with this endpoint
+ * @ep:the endpoint to be used with the request
+ * @gfp_flags:GFP_* flags to use
+ *
+ * Request objects must be allocated with this call, since they normally
+ * need controller-specific setup and may even need endpoint-specific
+ * resources such as allocation of DMA descriptors.
+ * Requests may be submitted with usb_ep_queue(), and receive a single
+ * completion callback. Free requests with usb_ep_free_request(), when
+ * they are no longer needed.
+ *
+ * Returns the request, or null if one could not be allocated.
+ */
+struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct usb_request *req = NULL;
+
+ req = ep->ops->alloc_request(ep, gfp_flags);
+
+ trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
+
+/**
+ * usb_ep_free_request - frees a request object
+ * @ep:the endpoint associated with the request
+ * @req:the request being freed
+ *
+ * Reverses the effect of usb_ep_alloc_request().
+ * Caller guarantees the request is not queued, and that it will
+ * no longer be requeued (or otherwise used).
+ */
+void usb_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ trace_usb_ep_free_request(ep, req, 0);
+ ep->ops->free_request(ep, req);
+}
+EXPORT_SYMBOL_GPL(usb_ep_free_request);
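+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): the
+ * typical request lifecycle in a gadget driver; my_complete and the
+ * 512-byte length are hypothetical.
+ *
+ *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->buf = kmalloc(512, GFP_KERNEL);
+ *	req->length = 512;
+ *	req->complete = my_complete;
+ *	...
+ *	kfree(req->buf);
+ *	usb_ep_free_request(ep, req);
+ */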
+
+/**
+ * usb_ep_queue - queues (submits) an I/O request to an endpoint.
+ * @ep:the endpoint associated with the request
+ * @req:the request being submitted
+ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
+ * pre-allocate all necessary memory with the request.
+ *
+ * This tells the device controller to perform the specified request through
+ * that endpoint (reading or writing a buffer). When the request completes,
+ * including being canceled by usb_ep_dequeue(), the request's completion
+ * routine is called to return the request to the driver. Any endpoint
+ * (except control endpoints like ep0) may have more than one transfer
+ * request queued; they complete in FIFO order. Once a gadget driver
+ * submits a request, that request may not be examined or modified until it
+ * is given back to that driver through the completion callback.
+ *
+ * Each request is turned into one or more packets. The controller driver
+ * never merges adjacent requests into the same packet. OUT transfers
+ * will sometimes use data that's already buffered in the hardware.
+ * Drivers can rely on the fact that the first byte of the request's buffer
+ * always corresponds to the first byte of some USB packet, for both
+ * IN and OUT transfers.
+ *
+ * Bulk endpoints can queue any amount of data; the transfer is packetized
+ * automatically. The last packet will be short if the request doesn't fill it
+ * out completely. Zero length packets (ZLPs) should be avoided in portable
+ * protocols since not all usb hardware can successfully handle zero length
+ * packets. (ZLPs may be explicitly written, and may be implicitly written if
+ * the request 'zero' flag is set.) Bulk endpoints may also be used
+ * for interrupt transfers; but the reverse is not true, and some endpoints
+ * won't support every interrupt transfer. (Such as 768 byte packets.)
+ *
+ * Interrupt-only endpoints are less functional than bulk endpoints, for
+ * example by not supporting queueing or not handling buffers that are
+ * larger than the endpoint's maxpacket size. They may also treat data
+ * toggle differently.
+ *
+ * Control endpoints ... after getting a setup() callback, the driver queues
+ * one response (even if it would be zero length). That enables the
+ * status ack, after transferring data as specified in the response. Setup
+ * functions may return negative error codes to generate protocol stalls.
+ * (Note that some USB device controllers disallow protocol stall responses
+ * in some cases.) When control responses are deferred (the response is
+ * written after the setup callback returns), then usb_ep_set_halt() may be
+ * used on ep0 to trigger protocol stalls. Depending on the controller,
+ * it may not be possible to trigger a status-stage protocol stall when the
+ * data stage is over, that is, from within the response's completion
+ * routine.
+ *
+ * For periodic endpoints, like interrupt or isochronous ones, the usb host
+ * arranges to poll once per interval, and the gadget driver usually will
+ * have queued some data to transfer at that time.
+ *
+ * Note that @req's ->complete() callback must never be called from
+ * within usb_ep_queue() as that can create deadlock situations.
+ *
+ * This routine may be called in interrupt context.
+ *
+ * Returns zero, or a negative error code. Endpoints that are not enabled
+ * report errors; errors will also be
+ * reported when the usb peripheral is disconnected.
+ *
+ * If and only if @req is successfully queued (the return value is zero),
+ * @req->complete() will be called exactly once, when the Gadget core and
+ * UDC are finished with the request. When the completion function is called,
+ * control of the request is returned to the device driver which submitted it.
+ * The completion handler may then immediately free or reuse @req.
+ */
+int usb_ep_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ int ret = 0;
+
+ if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ ret = ep->ops->queue(ep, req, gfp_flags);
+
+out:
+ trace_usb_ep_queue(ep, req, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_queue);
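+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch):
+ * requests are commonly resubmitted from the completion handler to keep
+ * an OUT endpoint busy.  my_complete() and process_data() are
+ * hypothetical.
+ *
+ *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
+ *	{
+ *		if (req->status == 0) {
+ *			process_data(req->buf, req->actual);
+ *			usb_ep_queue(ep, req, GFP_ATOMIC);
+ *		}
+ *	}
+ */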
+
+/**
+ * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
+ * @ep:the endpoint associated with the request
+ * @req:the request being canceled
+ *
+ * If the request is still active on the endpoint, it is dequeued and
+ * eventually its completion routine is called (with status -ECONNRESET);
+ * else a negative error code is returned. This routine is asynchronous,
+ * that is, it may return before the completion routine runs.
+ *
+ * Note that some hardware can't clear out write fifos (to unlink the request
+ * at the head of the queue) except as part of disconnecting from usb. Such
+ * restrictions prevent drivers from supporting configuration changes,
+ * even to configuration zero (a "chapter 9" requirement).
+ *
+ * This routine may be called in interrupt context.
+ */
+int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ int ret;
+
+ ret = ep->ops->dequeue(ep, req);
+ trace_usb_ep_dequeue(ep, req, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_dequeue);
+
+/**
+ * usb_ep_set_halt - sets the endpoint halt feature.
+ * @ep: the non-isochronous endpoint being stalled
+ *
+ * Use this to stall an endpoint, perhaps as an error report.
+ * Except for control endpoints,
+ * the endpoint stays halted (will not stream any data) until the host
+ * clears this feature; drivers may need to empty the endpoint's request
+ * queue first, to make sure no inappropriate transfers happen.
+ *
+ * Note that while an endpoint CLEAR_FEATURE will be invisible to the
+ * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
+ * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
+ * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
+ *
+ * This routine may be called in interrupt context.
+ *
+ * Returns zero, or a negative error code. On success, this call sets
+ * underlying hardware state that blocks data transfers.
+ * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
+ * transfer requests are still queued, or if the controller hardware
+ * (usually a FIFO) still holds bytes that the host hasn't collected.
+ */
+int usb_ep_set_halt(struct usb_ep *ep)
+{
+ int ret;
+
+ ret = ep->ops->set_halt(ep, 1);
+ trace_usb_ep_set_halt(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_halt);
+
+/**
+ * usb_ep_clear_halt - clears endpoint halt, and resets toggle
+ * @ep:the bulk or interrupt endpoint being reset
+ *
+ * Use this when responding to the standard usb "set interface" request,
+ * for endpoints that aren't reconfigured, after clearing any other state
+ * in the endpoint's i/o queue.
+ *
+ * This routine may be called in interrupt context.
+ *
+ * Returns zero, or a negative error code. On success, this call clears
+ * the underlying hardware state reflecting endpoint halt and data toggle.
+ * Note that some hardware can't support this request (like pxa2xx_udc),
+ * and accordingly can't correctly implement interface altsettings.
+ */
+int usb_ep_clear_halt(struct usb_ep *ep)
+{
+ int ret;
+
+ ret = ep->ops->set_halt(ep, 0);
+ trace_usb_ep_clear_halt(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_clear_halt);
+
+/**
+ * usb_ep_set_wedge - sets the halt feature and ignores clear requests
+ * @ep: the endpoint being wedged
+ *
+ * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
+ * requests. If the gadget driver clears the halt status, it will
+ * automatically unwedge the endpoint.
+ *
+ * This routine may be called in interrupt context.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_ep_set_wedge(struct usb_ep *ep)
+{
+ int ret;
+
+ if (ep->ops->set_wedge)
+ ret = ep->ops->set_wedge(ep);
+ else
+ ret = ep->ops->set_halt(ep, 1);
+
+ trace_usb_ep_set_wedge(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_wedge);
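+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * mass-storage style driver halts a bulk endpoint on a protocol error
+ * (the host recovers it with CLEAR_FEATURE), or wedges it so that only
+ * a reset recovers it.  bulk_in is hypothetical.
+ *
+ *	usb_ep_set_halt(bulk_in);
+ *	...
+ *	usb_ep_set_wedge(bulk_in);
+ */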
+
+/**
+ * usb_ep_fifo_status - returns number of bytes in fifo, or error
+ * @ep: the endpoint whose fifo status is being checked.
+ *
+ * FIFO endpoints may have "unclaimed data" in them in certain cases,
+ * such as after aborted transfers. Hosts may not have collected all
+ * the IN data written by the gadget driver (and reported by a request
+ * completion). The gadget driver may not have collected all the data
+ * written OUT to it by the host. Drivers that need precise handling for
+ * fault reporting or recovery may need to use this call.
+ *
+ * This routine may be called in interrupt context.
+ *
+ * This returns the number of such bytes in the fifo, or a negative
+ * errno if the endpoint doesn't use a FIFO or doesn't support such
+ * precise handling.
+ */
+int usb_ep_fifo_status(struct usb_ep *ep)
+{
+ int ret;
+
+ if (ep->ops->fifo_status)
+ ret = ep->ops->fifo_status(ep);
+ else
+ ret = -EOPNOTSUPP;
+
+ trace_usb_ep_fifo_status(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_status);
+
+/**
+ * usb_ep_fifo_flush - flushes contents of a fifo
+ * @ep: the endpoint whose fifo is being flushed.
+ *
+ * This call may be used to flush the "unclaimed data" that may exist in
+ * an endpoint fifo after abnormal transaction terminations. The call
+ * must never be used except when endpoint is not being used for any
+ * protocol translation.
+ *
+ * This routine may be called in interrupt context.
+ */
+void usb_ep_fifo_flush(struct usb_ep *ep)
+{
+ if (ep->ops->fifo_flush)
+ ep->ops->fifo_flush(ep);
+
+ trace_usb_ep_fifo_flush(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_flush);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_frame_number - returns the current frame number
+ * @gadget: controller that reports the frame number
+ *
+ * Returns the usb frame number, normally eleven bits from a SOF packet,
+ * or negative errno if this device doesn't support this capability.
+ */
+int usb_gadget_frame_number(struct usb_gadget *gadget)
+{
+ int ret;
+
+ ret = gadget->ops->get_frame(gadget);
+
+ trace_usb_gadget_frame_number(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_frame_number);
+
+/**
+ * usb_gadget_wakeup - tries to wake up the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ *
+ * Returns zero on success, else negative error code if the hardware
+ * doesn't support such attempts, or its support has not been enabled
+ * by the usb host. Drivers must return device descriptors that report
+ * their ability to support this, or hosts won't enable it.
+ *
+ * This may also try to use SRP to wake the host and start enumeration,
+ * even if OTG isn't otherwise in use. OTG devices may also start
+ * remote wakeup even when hosts don't explicitly enable it.
+ */
+int usb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->wakeup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->wakeup(gadget);
+
+out:
+ trace_usb_gadget_wakeup(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
+
+/**
+ * usb_gadget_set_selfpowered - sets the device selfpowered feature.
+ * @gadget:the device being declared as self-powered
+ *
+ * this affects the device status reported by the hardware driver
+ * to reflect that it now has a local power supply.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->set_selfpowered) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->set_selfpowered(gadget, 1);
+
+out:
+ trace_usb_gadget_set_selfpowered(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered);
+
+/**
+ * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
+ * @gadget:the device being declared as bus-powered
+ *
+ * this affects the device status reported by the hardware driver.
+ * some hardware may not support bus-powered operation, in which
+ * case this feature's value can never change.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->set_selfpowered) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->set_selfpowered(gadget, 0);
+
+out:
+ trace_usb_gadget_clear_selfpowered(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered);
+
+/**
+ * usb_gadget_vbus_connect - Notify controller that VBUS is powered
+ * @gadget:The device which now has VBUS power.
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session starting. Common responses include
+ * resuming the controller, activating the D+ (or D-) pullup to let the
+ * host detect that a USB device is attached, and starting to draw power
+ * (8mA or possibly more, especially after SET_CONFIGURATION).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_connect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_session) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_session(gadget, 1);
+
+out:
+ trace_usb_gadget_vbus_connect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect);
+
+/**
+ * usb_gadget_vbus_draw - constrain controller's VBUS power usage
+ * @gadget:The device whose VBUS usage is being described
+ * @mA:How much current to draw, in milliAmperes. This should be twice
+ * the value listed in the configuration descriptor bMaxPower field.
+ *
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume. For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_draw) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_draw(gadget, mA);
+ if (!ret)
+ gadget->mA = mA;
+
+out:
+ trace_usb_gadget_vbus_draw(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw);
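+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * composite gadget reports its configured power budget from its
+ * SET_CONFIGURATION handling; bMaxPower here stands for the field of
+ * the chosen configuration descriptor (2 mA units at full/high speed).
+ *
+ *	usb_gadget_vbus_draw(gadget, 2 * bMaxPower);
+ */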
+
+/**
+ * usb_gadget_vbus_disconnect - notify controller about VBUS session end
+ * @gadget:the device whose VBUS supply is being described
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session ending. Common responses include
+ * reversing everything done in usb_gadget_vbus_connect().
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_session) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_session(gadget, 0);
+
+out:
+ trace_usb_gadget_vbus_disconnect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+
+static int usb_gadget_connect_locked(struct usb_gadget *gadget)
+ __must_hold(&gadget->udc->connect_lock)
+{
+ int ret = 0;
+
+ if (!gadget->ops->pullup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) {
+ /*
+ * If the gadget isn't usable (because it is deactivated,
+ * unbound, or not yet started), we only save the new state.
+ * The gadget will be connected automatically when it is
+ * activated/bound/started.
+ */
+ gadget->connected = true;
+ goto out;
+ }
+
+ ret = gadget->ops->pullup(gadget, 1);
+ if (!ret)
+ gadget->connected = 1;
+
+out:
+ trace_usb_gadget_connect(gadget, ret);
+
+ return ret;
+}
+
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget:the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup. The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_connect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ mutex_lock(&gadget->udc->connect_lock);
+ ret = usb_gadget_connect_locked(gadget);
+ mutex_unlock(&gadget->udc->connect_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
+
+static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
+ __must_hold(&gadget->udc->connect_lock)
+{
+ int ret = 0;
+
+ if (!gadget->ops->pullup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!gadget->connected)
+ goto out;
+
+ if (gadget->deactivated || !gadget->udc->started) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will stay disconnected after activation.
+ */
+ gadget->connected = false;
+ goto out;
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+ if (!ret)
+ gadget->connected = 0;
+
+ mutex_lock(&udc_lock);
+ if (gadget->udc->driver)
+ gadget->udc->driver->disconnect(gadget);
+ mutex_unlock(&udc_lock);
+
+out:
+ trace_usb_gadget_disconnect(gadget, ret);
+
+ return ret;
+}
+
+/**
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget:the peripheral being disconnected
+ *
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active). Not all systems
+ * support software pullup controls.
+ *
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret;
+
+ mutex_lock(&gadget->udc->connect_lock);
+ ret = usb_gadget_disconnect_locked(gadget);
+ mutex_unlock(&gadget->udc->connect_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
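+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * soft disconnect/connect pair forces the host to re-enumerate, e.g.
+ * after descriptors have been rebuilt; the delay length is arbitrary.
+ *
+ *	usb_gadget_disconnect(gadget);
+ *	msleep(100);
+ *	usb_gadget_connect(gadget);
+ */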
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
+ * This routine may be used during the gadget driver bind() call to prevent
+ * the peripheral from ever being visible to the USB host, unless later
+ * usb_gadget_activate() is called. For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
+ * This routine may sleep; it must not be called in interrupt context
+ * (such as from within a gadget driver's disconnect() callback).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_deactivate(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ mutex_lock(&gadget->udc->connect_lock);
+ if (gadget->deactivated)
+ goto unlock;
+
+ if (gadget->connected) {
+ ret = usb_gadget_disconnect_locked(gadget);
+ if (ret)
+ goto unlock;
+
+ /*
+ * If gadget was being connected before deactivation, we want
+ * to reconnect it in usb_gadget_activate().
+ */
+ gadget->connected = true;
+ }
+ gadget->deactivated = true;
+
+unlock:
+ mutex_unlock(&gadget->udc->connect_lock);
+ trace_usb_gadget_deactivate(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
+
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates a gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * This routine may sleep; it must not be called in interrupt context.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_activate(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ mutex_lock(&gadget->udc->connect_lock);
+ if (!gadget->deactivated)
+ goto unlock;
+
+ gadget->deactivated = false;
+
+ /*
+ * If gadget has been connected before deactivation, or became connected
+ * while it was being deactivated, we call usb_gadget_connect().
+ */
+ if (gadget->connected)
+ ret = usb_gadget_connect_locked(gadget);
+
+unlock:
+ mutex_unlock(&gadget->udc->connect_lock);
+ trace_usb_gadget_activate(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_activate);
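+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * gadget driver whose userspace backend attaches later can keep the
+ * device invisible until the backend is ready.
+ *
+ *	usb_gadget_deactivate(gadget);	(in bind(), before backend attaches)
+ *	...
+ *	usb_gadget_activate(gadget);	(once the backend is ready)
+ */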
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef CONFIG_HAS_DMA
+
+int usb_gadget_map_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in)
+{
+ if (req->length == 0)
+ return 0;
+
+ if (req->num_sgs) {
+ int mapped;
+
+ mapped = dma_map_sg(dev, req->sg, req->num_sgs,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (mapped == 0) {
+ dev_err(dev, "failed to map SGs\n");
+ return -EFAULT;
+ }
+
+ req->num_mapped_sgs = mapped;
+ } else {
+ if (is_vmalloc_addr(req->buf)) {
+ dev_err(dev, "buffer is not dma capable\n");
+ return -EFAULT;
+ } else if (object_is_on_stack(req->buf)) {
+ dev_err(dev, "buffer is on stack\n");
+ return -EFAULT;
+ }
+
+ req->dma = dma_map_single(dev, req->buf, req->length,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, req->dma)) {
+ dev_err(dev, "failed to map buffer\n");
+ return -EFAULT;
+ }
+
+ req->dma_mapped = 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
+
+int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request);
+
+void usb_gadget_unmap_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in)
+{
+ if (req->length == 0)
+ return;
+
+ if (req->num_mapped_sgs) {
+ dma_unmap_sg(dev, req->sg, req->num_sgs,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ req->num_mapped_sgs = 0;
+ } else if (req->dma_mapped) {
+ dma_unmap_single(dev, req->dma, req->length,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->dma_mapped = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
+
+void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
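+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * DMA-capable UDC driver brackets hardware ownership of the buffer with
+ * these helpers; ep_is_in is hypothetical.
+ *
+ *	ret = usb_gadget_map_request(gadget, req, ep_is_in);
+ *	if (ret)
+ *		return ret;
+ *	... program the controller with req->dma, and on completion ...
+ *	usb_gadget_unmap_request(gadget, req, ep_is_in);
+ */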
+
+#endif /* CONFIG_HAS_DMA */
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_giveback_request - give the request back to the gadget layer
+ * @ep: the endpoint to be used with the request
+ * @req: the request being given back
+ *
+ * This is called by device controller drivers in order to return the
+ * completed request back to the gadget layer.
+ */
+void usb_gadget_giveback_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ if (likely(req->status == 0))
+ usb_led_activity(USB_LED_EVENT_GADGET);
+
+ trace_usb_gadget_giveback_request(ep, req, 0);
+
+ req->complete(ep, req);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * gadget_find_ep_by_name - returns the ep whose name matches the string
+ *	passed in the second parameter, or NULL if no such endpoint is found
+ * @g: the gadget whose endpoints are searched
+ * @name: name of the endpoint being searched for
+ */
+struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
+{
+ struct usb_ep *ep;
+
+ gadget_for_each_ep(ep, g) {
+ if (!strcmp(ep->name, name))
+ return ep;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ u8 type;
+ u16 max;
+ int num_req_streams = 0;
+
+ /* endpoint already claimed? */
+ if (ep->claimed)
+ return 0;
+
+ type = usb_endpoint_type(desc);
+ max = usb_endpoint_maxp(desc);
+
+ if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
+ return 0;
+ if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
+ return 0;
+
+ if (max > ep->maxpacket_limit)
+ return 0;
+
+ /* "high bandwidth" works only at high speed */
+ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
+ return 0;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* only support ep0 for portable CONTROL traffic */
+ return 0;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!ep->caps.type_iso)
+ return 0;
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
+ return 0;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (!ep->caps.type_bulk)
+ return 0;
+ if (ep_comp && gadget_is_superspeed(gadget)) {
+ /* Get the number of required streams from the
+ * EP companion descriptor and see if the EP
+ * matches it
+ */
+ num_req_streams = ep_comp->bmAttributes & 0x1f;
+ if (num_req_streams > ep->max_streams)
+ return 0;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ /* Bulk endpoints handle interrupt transfers,
+ * except the toggle-quirky iso-synch kind
+ */
+ if (!ep->caps.type_int && !ep->caps.type_bulk)
+ return 0;
+ /* INT: limit 64 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 64)
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
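+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a
+ * UDC's ->match_ep() callback can apply hardware-specific preferences
+ * first and then fall back on this generic check.
+ *
+ *	gadget_for_each_ep(ep, gadget) {
+ *		if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp))
+ *			return ep;
+ *	}
+ *	return NULL;
+ */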
+
+/**
+ * usb_gadget_check_config - checks if the UDC can support the bound
+ * configuration
+ * @gadget: controller to check the USB configuration
+ *
+ * Ensure that a UDC is able to support the requested resources by a
+ * configuration, and that there are no resource limitations, such as
+ * internal memory allocated to all requested endpoints.
+ *
+ * Returns zero on success, else a negative errno.
+ */
+int usb_gadget_check_config(struct usb_gadget *gadget)
+{
+ if (gadget->ops->check_config)
+ return gadget->ops->check_config(gadget);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_check_config);
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_gadget_state_work(struct work_struct *work)
+{
+ struct usb_gadget *gadget = work_to_gadget(work);
+ struct usb_udc *udc = gadget->udc;
+
+ if (udc)
+ sysfs_notify(&udc->dev.kobj, NULL, "state");
+}
+
+void usb_gadget_set_state(struct usb_gadget *gadget,
+ enum usb_device_state state)
+{
+ gadget->state = state;
+ schedule_work(&gadget->work);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+
+/* ------------------------------------------------------------------------- */
+
+/* Acquire connect_lock before calling this function. */
+static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+{
+ if (udc->vbus)
+ usb_gadget_connect_locked(udc->gadget);
+ else
+ usb_gadget_disconnect_locked(udc->gadget);
+}
+
+static void vbus_event_work(struct work_struct *work)
+{
+ struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);
+
+ mutex_lock(&udc->connect_lock);
+ usb_udc_connect_control_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+}
+
+/**
+ * usb_udc_vbus_handler - updates the udc core vbus status, and tries to
+ * connect or disconnect the gadget
+ * @gadget: The gadget on which the vbus change occurs
+ * @status: The vbus status
+ *
+ * The udc driver calls it when it wants to connect or disconnect the gadget
+ * according to the vbus status.
+ *
+ * This function can be invoked from interrupt context by irq handlers of
+ * the gadget drivers, however, usb_udc_connect_control() has to run in
+ * non-atomic context due to the following:
+ * a. Some of the gadget driver implementations expect the ->pullup
+ * callback to be invoked in non-atomic context.
+ * b. usb_gadget_disconnect() acquires udc_lock which is a mutex.
+ * Hence offload invocation of usb_udc_connect_control() to workqueue.
+ */
+void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+{
+ struct usb_udc *udc = gadget->udc;
+
+ if (udc) {
+ udc->vbus = status;
+ schedule_work(&udc->vbus_work);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
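+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): a UDC
+ * driver can forward a VBUS GPIO interrupt directly, since the actual
+ * connect/disconnect is deferred to a workqueue.  my_udc, vbus_irq() and
+ * read_vbus_gpio() are hypothetical.
+ *
+ *	static irqreturn_t vbus_irq(int irq, void *data)
+ *	{
+ *		struct my_udc *udc = data;
+ *
+ *		usb_udc_vbus_handler(&udc->gadget, read_vbus_gpio(udc));
+ *		return IRQ_HANDLED;
+ *	}
+ */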
+
+/**
+ * usb_gadget_udc_reset - notifies the udc core that a bus reset occurred
+ * @gadget: The gadget on which the bus reset occurs
+ * @driver: The gadget driver we want to notify
+ *
+ * If the udc driver has a bus reset handler, it needs to call this when the
+ * bus reset occurs. It notifies the gadget driver that the bus reset occurred
+ * and updates the gadget state.
+ */
+void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ driver->reset(gadget);
+ usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+
+/**
+ * usb_gadget_udc_start_locked - tells usb device controller to start up
+ * @udc: The UDC to be started
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver to the device controller, before
+ * calling gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller should acquire connect_lock before invoking this function.
+ */
+static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
+ __must_hold(&udc->connect_lock)
+{
+ int ret;
+
+ if (udc->started) {
+ dev_err(&udc->dev, "UDC had already started\n");
+ return -EBUSY;
+ }
+
+ ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+ if (!ret)
+ udc->started = true;
+
+ return ret;
+}
+
+/**
+ * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
+ * @udc: The UDC to be stopped
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off the UDC completely and disabling its data
+ * line pullups.
+ *
+ * Caller should acquire connect lock before invoking this function.
+ */
+static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
+ __must_hold(&udc->connect_lock)
+{
+ if (!udc->started) {
+ dev_err(&udc->dev, "UDC had already stopped\n");
+ return;
+ }
+
+ udc->gadget->ops->udc_stop(udc->gadget);
+ udc->started = false;
+}
+
+/**
+ * usb_gadget_udc_set_speed - tells usb device controller speed supported by
+ * current driver
+ * @udc: The device whose maximum speed we want to set
+ * @speed: The maximum speed the gadget is allowed to run at
+ *
+ * This call is issued by the UDC Class driver before calling
+ * usb_gadget_udc_start() in order to make sure that we don't try to
+ * connect on speeds the gadget driver doesn't support.
+ */
+static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
+ enum usb_device_speed speed)
+{
+ struct usb_gadget *gadget = udc->gadget;
+ enum usb_device_speed s;
+
+ if (speed == USB_SPEED_UNKNOWN)
+ s = gadget->max_speed;
+ else
+ s = min(speed, gadget->max_speed);
+
+ if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate)
+ gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate);
+ else if (gadget->ops->udc_set_speed)
+ gadget->ops->udc_set_speed(gadget, s);
+}
+
+/**
+ * usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks
+ * @udc: The UDC which should enable async callbacks
+ *
+ * This routine is used when binding gadget drivers. It undoes the effect
+ * of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs
+ * (if necessary) and resume issuing callbacks.
+ *
+ * This routine will always be called in process context.
+ */
+static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc)
+{
+ struct usb_gadget *gadget = udc->gadget;
+
+ if (gadget->ops->udc_async_callbacks)
+ gadget->ops->udc_async_callbacks(gadget, true);
+}
+
+/**
+ * usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks
+ * @udc: The UDC which should disable async callbacks
+ *
+ * This routine is used when unbinding gadget drivers. It prevents a race:
+ * The UDC driver doesn't know when the gadget driver's ->unbind callback
+ * runs, so unless it is told to disable asynchronous callbacks, it might
+ * issue a callback (such as ->disconnect) after the unbind has completed.
+ *
+ * After this function runs, the UDC driver must suppress all ->suspend,
+ * ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver
+ * until async callbacks are again enabled. A simple-minded but effective
+ * way to accomplish this is to tell the UDC hardware not to generate any
+ * more IRQs.
+ *
+ * Request completion callbacks must still be issued. However, it's okay
+ * to defer them until the request is cancelled, since the pull-up will be
+ * turned off during the time period when async callbacks are disabled.
+ *
+ * This routine will always be called in process context.
+ */
+static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc)
+{
+ struct usb_gadget *gadget = udc->gadget;
+
+ if (gadget->ops->udc_async_callbacks)
+ gadget->ops->udc_async_callbacks(gadget, false);
+}
+
+/**
+ * usb_udc_release - release the usb_udc struct
+ * @dev: the dev member within usb_udc
+ *
+ * This is called by driver's core in order to free memory once the last
+ * reference is released.
+ */
+static void usb_udc_release(struct device *dev)
+{
+ struct usb_udc *udc;
+
+ udc = container_of(dev, struct usb_udc, dev);
+ dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
+ kfree(udc);
+}
+
+static const struct attribute_group *usb_udc_attr_groups[];
+
+static void usb_udc_nop_release(struct device *dev)
+{
+ dev_vdbg(dev, "%s\n", __func__);
+}
+
+/**
+ * usb_initialize_gadget - initialize a gadget and its embedded struct device
+ * @parent: the parent device to this udc. Usually the controller driver's
+ * device.
+ * @gadget: the gadget to be initialized.
+ * @release: a gadget release function.
+ */
+void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
+ void (*release)(struct device *dev))
+{
+ INIT_WORK(&gadget->work, usb_gadget_state_work);
+ gadget->dev.parent = parent;
+
+ if (release)
+ gadget->dev.release = release;
+ else
+ gadget->dev.release = usb_udc_nop_release;
+
+ device_initialize(&gadget->dev);
+ gadget->dev.bus = &gadget_bus_type;
+}
+EXPORT_SYMBOL_GPL(usb_initialize_gadget);
+
+/**
+ * usb_add_gadget - adds a new gadget to the udc class driver list
+ * @gadget: the gadget to be added to the list.
+ *
+ * Returns zero on success, negative errno otherwise.
+ * Does not do a final usb_put_gadget() if an error occurs.
+ */
+int usb_add_gadget(struct usb_gadget *gadget)
+{
+ struct usb_udc *udc;
+ int ret = -ENOMEM;
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ goto error;
+
+ device_initialize(&udc->dev);
+ udc->dev.release = usb_udc_release;
+ udc->dev.class = udc_class;
+ udc->dev.groups = usb_udc_attr_groups;
+ udc->dev.parent = gadget->dev.parent;
+ ret = dev_set_name(&udc->dev, "%s",
+ kobject_name(&gadget->dev.parent->kobj));
+ if (ret)
+ goto err_put_udc;
+
+ udc->gadget = gadget;
+ gadget->udc = udc;
+ mutex_init(&udc->connect_lock);
+
+ udc->started = false;
+
+ mutex_lock(&udc_lock);
+ list_add_tail(&udc->list, &udc_list);
+ mutex_unlock(&udc_lock);
+ INIT_WORK(&udc->vbus_work, vbus_event_work);
+
+ ret = device_add(&udc->dev);
+ if (ret)
+ goto err_unlist_udc;
+
+ usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
+ udc->vbus = true;
+
+ ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL);
+ if (ret < 0)
+ goto err_del_udc;
+ gadget->id_number = ret;
+ dev_set_name(&gadget->dev, "gadget.%d", ret);
+
+ ret = device_add(&gadget->dev);
+ if (ret)
+ goto err_free_id;
+
+ return 0;
+
+ err_free_id:
+ ida_free(&gadget_id_numbers, gadget->id_number);
+
+ err_del_udc:
+ flush_work(&gadget->work);
+ device_del(&udc->dev);
+
+ err_unlist_udc:
+ mutex_lock(&udc_lock);
+ list_del(&udc->list);
+ mutex_unlock(&udc_lock);
+
+ err_put_udc:
+ put_device(&udc->dev);
+
+ error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget);
+
+/**
+ * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller driver's
+ * device.
+ * @gadget: the gadget to be added to the list.
+ * @release: a gadget release function.
+ *
+ * Returns zero on success, negative errno otherwise.
+ * Calls the gadget release function in the latter case.
+ */
+int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
+ void (*release)(struct device *dev))
+{
+ int ret;
+
+ usb_initialize_gadget(parent, gadget, release);
+ ret = usb_add_gadget(gadget);
+ if (ret)
+ usb_put_gadget(gadget);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
+
+/**
+ * usb_get_gadget_udc_name - get the name of the first UDC controller
+ * This function returns the name of the first UDC controller in the system.
+ * Please note that this interface is useful only for legacy drivers which
+ * assume that there is only one UDC controller in the system and they need to
+ * get its name before initialization. There is no guarantee that the UDC
+ * with the returned name will still be available when the gadget driver
+ * registers itself.
+ *
+ * Returns pointer to string with UDC controller name on success, NULL
+ * otherwise. Caller should kfree() returned string.
+ */
+char *usb_get_gadget_udc_name(void)
+{
+ struct usb_udc *udc;
+ char *name = NULL;
+
+ /* For now we take the first available UDC */
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list) {
+ if (!udc->driver) {
+ name = kstrdup(udc->gadget->name, GFP_KERNEL);
+ break;
+ }
+ }
+ mutex_unlock(&udc_lock);
+ return name;
+}
+EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
+
+/**
+ * usb_add_gadget_udc - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller
+ * driver's device.
+ * @gadget: the gadget to be added to the list
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
+{
+ return usb_add_gadget_udc_release(parent, gadget, NULL);
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
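+
+/*
+ * Illustrative sketch (editor's addition, not part of this patch): the
+ * tail of a typical UDC probe() routine, assuming a platform device and
+ * a hypothetical my_udc_ops table, after endpoints have been set up.
+ *
+ *	udc->gadget.ops = &my_udc_ops;
+ *	udc->gadget.max_speed = USB_SPEED_HIGH;
+ *	udc->gadget.name = "my_udc";
+ *	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ *	if (ret)
+ *		return ret;
+ */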
+
+/**
+ * usb_del_gadget - deletes a gadget and unregisters its udc
+ * @gadget: the gadget to be deleted.
+ *
+ * This will unbind @gadget, if it is bound.
+ * It will not do a final usb_put_gadget().
+ */
+void usb_del_gadget(struct usb_gadget *gadget)
+{
+ struct usb_udc *udc = gadget->udc;
+
+ if (!udc)
+ return;
+
+ dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
+
+ mutex_lock(&udc_lock);
+ list_del(&udc->list);
+ mutex_unlock(&udc_lock);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ flush_work(&gadget->work);
+ device_del(&gadget->dev);
+ ida_free(&gadget_id_numbers, gadget->id_number);
+ cancel_work_sync(&udc->vbus_work);
+ device_unregister(&udc->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget);
+
+/**
+ * usb_del_gadget_udc - unregisters a gadget
+ * @gadget: the gadget to be unregistered.
+ *
+ * Calls usb_del_gadget() and does a final usb_put_gadget().
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+ usb_del_gadget(gadget);
+ usb_put_gadget(gadget);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
+
+/* ------------------------------------------------------------------------- */
+
+static int gadget_match_driver(struct device *dev, struct device_driver *drv)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = container_of(drv,
+ struct usb_gadget_driver, driver);
+
+ /* If the driver specifies a udc_name, it must match the UDC's name */
+ if (driver->udc_name &&
+ strcmp(driver->udc_name, dev_name(&udc->dev)) != 0)
+ return 0;
+
+ /* If the driver is already bound to a gadget, it doesn't match */
+ if (driver->is_bound)
+ return 0;
+
+ /* Otherwise any gadget driver matches any UDC */
+ return 1;
+}
+
+static int gadget_bind_driver(struct device *dev)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = container_of(dev->driver,
+ struct usb_gadget_driver, driver);
+ int ret = 0;
+
+ mutex_lock(&udc_lock);
+ if (driver->is_bound) {
+ mutex_unlock(&udc_lock);
+ return -ENXIO; /* Driver binds to only one gadget */
+ }
+ driver->is_bound = true;
+ udc->driver = driver;
+ mutex_unlock(&udc_lock);
+
+ dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function);
+
+ usb_gadget_udc_set_speed(udc, driver->max_speed);
+
+ ret = driver->bind(udc->gadget, driver);
+ if (ret)
+ goto err_bind;
+
+ mutex_lock(&udc->connect_lock);
+ ret = usb_gadget_udc_start_locked(udc);
+ if (ret) {
+ mutex_unlock(&udc->connect_lock);
+ goto err_start;
+ }
+ usb_gadget_enable_async_callbacks(udc);
+ udc->allow_connect = true;
+ usb_udc_connect_control_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ return 0;
+
+ err_start:
+ driver->unbind(udc->gadget);
+
+ err_bind:
+ if (ret != -EISNAM)
+ dev_err(&udc->dev, "failed to start %s: %d\n",
+ driver->function, ret);
+
+ mutex_lock(&udc_lock);
+ udc->driver = NULL;
+ driver->is_bound = false;
+ mutex_unlock(&udc_lock);
+
+ return ret;
+}
+
+static void gadget_unbind_driver(struct device *dev)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_udc *udc = gadget->udc;
+ struct usb_gadget_driver *driver = udc->driver;
+
+ dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+
+ udc->allow_connect = false;
+ cancel_work_sync(&udc->vbus_work);
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_disconnect_locked(gadget);
+ usb_gadget_disable_async_callbacks(udc);
+ if (gadget->irq)
+ synchronize_irq(gadget->irq);
+ mutex_unlock(&udc->connect_lock);
+
+ udc->driver->unbind(gadget);
+
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_udc_stop_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+
+ mutex_lock(&udc_lock);
+ driver->is_bound = false;
+ udc->driver = NULL;
+ mutex_unlock(&udc_lock);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+}
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ struct module *owner, const char *mod_name)
+{
+ int ret;
+
+ if (!driver || !driver->bind || !driver->setup)
+ return -EINVAL;
+
+ driver->driver.bus = &gadget_bus_type;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+ ret = driver_register(&driver->driver);
+ if (ret) {
+ pr_warn("%s: driver registration failed: %d\n",
+ driver->function, ret);
+ return ret;
+ }
+
+ mutex_lock(&udc_lock);
+ if (!driver->is_bound) {
+ if (driver->match_existing_only) {
+ pr_warn("%s: couldn't find an available UDC or it's busy\n",
+ driver->function);
+ ret = -EBUSY;
+ } else {
+ pr_info("%s: couldn't find an available UDC\n",
+ driver->function);
+ ret = 0;
+ }
+ }
+ mutex_unlock(&udc_lock);
+
+ if (ret)
+ driver_unregister(&driver->driver);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ if (!driver || !driver->unbind)
+ return -EINVAL;
+
+ driver_unregister(&driver->driver);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t srp_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
+ if (sysfs_streq(buf, "1"))
+ usb_gadget_wakeup(udc->gadget);
+
+ return n;
+}
+static DEVICE_ATTR_WO(srp);
+
+static ssize_t soft_connect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+
+ device_lock(&udc->gadget->dev);
+ if (!udc->driver) {
+ dev_err(dev, "soft-connect without a gadget driver\n");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (sysfs_streq(buf, "connect")) {
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_udc_start_locked(udc);
+ usb_gadget_connect_locked(udc->gadget);
+ mutex_unlock(&udc->connect_lock);
+ } else if (sysfs_streq(buf, "disconnect")) {
+ mutex_lock(&udc->connect_lock);
+ usb_gadget_disconnect_locked(udc->gadget);
+ usb_gadget_udc_stop_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+ } else {
+ dev_err(dev, "unsupported command '%s'\n", buf);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = n;
+out:
+ device_unlock(&udc->gadget->dev);
+ return ret;
+}
+static DEVICE_ATTR_WO(soft_connect);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ struct usb_gadget *gadget = udc->gadget;
+
+ return sprintf(buf, "%s\n", usb_state_string(gadget->state));
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t function_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ struct usb_gadget_driver *drv;
+ int rc = 0;
+
+ mutex_lock(&udc_lock);
+ drv = udc->driver;
+ if (drv && drv->function)
+ rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_unlock(&udc_lock);
+ return rc;
+}
+static DEVICE_ATTR_RO(function);
+
+#define USB_UDC_SPEED_ATTR(name, param) \
+ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ return scnprintf(buf, PAGE_SIZE, "%s\n", \
+ usb_speed_string(udc->gadget->param)); \
+} \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+#define USB_UDC_ATTR(name) \
+ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ struct usb_gadget *gadget = udc->gadget; \
+ \
+ return scnprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
+} \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_ATTR(is_otg);
+static USB_UDC_ATTR(is_a_peripheral);
+static USB_UDC_ATTR(b_hnp_enable);
+static USB_UDC_ATTR(a_hnp_support);
+static USB_UDC_ATTR(a_alt_hnp_support);
+static USB_UDC_ATTR(is_selfpowered);
+
+static struct attribute *usb_udc_attrs[] = {
+ &dev_attr_srp.attr,
+ &dev_attr_soft_connect.attr,
+ &dev_attr_state.attr,
+ &dev_attr_function.attr,
+ &dev_attr_current_speed.attr,
+ &dev_attr_maximum_speed.attr,
+
+ &dev_attr_is_otg.attr,
+ &dev_attr_is_a_peripheral.attr,
+ &dev_attr_b_hnp_enable.attr,
+ &dev_attr_a_hnp_support.attr,
+ &dev_attr_a_alt_hnp_support.attr,
+ &dev_attr_is_selfpowered.attr,
+ NULL,
+};
+
+static const struct attribute_group usb_udc_attr_group = {
+ .attrs = usb_udc_attrs,
+};
+
+static const struct attribute_group *usb_udc_attr_groups[] = {
+ &usb_udc_attr_group,
+ NULL,
+};
+
+static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ int ret;
+
+ ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
+ return ret;
+ }
+
+ mutex_lock(&udc_lock);
+ if (udc->driver)
+ ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
+ udc->driver->function);
+ mutex_unlock(&udc_lock);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct bus_type gadget_bus_type = {
+ .name = "gadget",
+ .probe = gadget_bind_driver,
+ .remove = gadget_unbind_driver,
+ .match = gadget_match_driver,
+};
+
+static int __init usb_udc_init(void)
+{
+ int rc;
+
+ udc_class = class_create(THIS_MODULE, "udc");
+ if (IS_ERR(udc_class)) {
+ pr_err("failed to create udc class --> %ld\n",
+ PTR_ERR(udc_class));
+ return PTR_ERR(udc_class);
+ }
+
+ udc_class->dev_uevent = usb_udc_uevent;
+
+ rc = bus_register(&gadget_bus_type);
+ if (rc)
+ class_destroy(udc_class);
+ return rc;
+}
+subsys_initcall(usb_udc_init);
+
+static void __exit usb_udc_exit(void)
+{
+ bus_unregister(&gadget_bus_type);
+ class_destroy(udc_class);
+}
+module_exit(usb_udc_exit);
+
+MODULE_DESCRIPTION("UDC Framework");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
new file mode 100644
index 000000000..899ac9f9c
--- /dev/null
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -0,0 +1,2909 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 Alan Stern
+ */
+
+
+/*
+ * This exposes a device side "USB gadget" API, driven by requests to a
+ * Linux-USB host controller driver. USB traffic is simulated; there's
+ * no need for USB hardware. Use this with two other drivers:
+ *
+ * - Gadget driver, responding to requests (device);
+ * - Host-side device driver, as already familiar in Linux.
+ *
+ * Having this all in one kernel can help some stages of development,
+ * bypassing some hardware (and driver) issues. UML could help too.
+ *
+ * Note: The emulation does not include isochronous transfers!
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/scatterlist.h>
+
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_DESC "USB Host+Gadget Emulator"
+#define DRIVER_VERSION "02 May 2005"
+
+#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900 /* in mA */
+
+static const char driver_name[] = "dummy_hcd";
+static const char driver_desc[] = "USB Host+Gadget Emulator";
+
+static const char gadget_name[] = "dummy_udc";
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+
+struct dummy_hcd_module_parameters {
+ bool is_super_speed;
+ bool is_high_speed;
+ unsigned int num;
+};
+
+static struct dummy_hcd_module_parameters mod_data = {
+ .is_super_speed = false,
+ .is_high_speed = true,
+ .num = 1,
+};
+module_param_named(is_super_speed, mod_data.is_super_speed, bool, S_IRUGO);
+MODULE_PARM_DESC(is_super_speed, "true to simulate SuperSpeed connection");
+module_param_named(is_high_speed, mod_data.is_high_speed, bool, S_IRUGO);
+MODULE_PARM_DESC(is_high_speed, "true to simulate HighSpeed connection");
+module_param_named(num, mod_data.num, uint, S_IRUGO);
+MODULE_PARM_DESC(num, "number of emulated controllers");
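+
+/*
+ * Example usage (editor's addition, not part of this patch): load two
+ * emulated controllers that enumerate at SuperSpeed:
+ *
+ *	modprobe dummy_hcd num=2 is_super_speed=1
+ */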
+/*-------------------------------------------------------------------------*/
+
+/* gadget side driver data structures */
+struct dummy_ep {
+ struct list_head queue;
+ unsigned long last_io; /* jiffies timestamp */
+ struct usb_gadget *gadget;
+ const struct usb_endpoint_descriptor *desc;
+ struct usb_ep ep;
+ unsigned halted:1;
+ unsigned wedged:1;
+ unsigned already_seen:1;
+ unsigned setup_stage:1;
+ unsigned stream_en:1;
+};
+
+struct dummy_request {
+ struct list_head queue; /* ep's requests */
+ struct usb_request req;
+};
+
+static inline struct dummy_ep *usb_ep_to_dummy_ep(struct usb_ep *_ep)
+{
+ return container_of(_ep, struct dummy_ep, ep);
+}
+
+static inline struct dummy_request *usb_request_to_dummy_request
+ (struct usb_request *_req)
+{
+ return container_of(_req, struct dummy_request, req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Every device has ep0 for control requests, plus up to 30 more endpoints,
+ * in one of two types:
+ *
+ * - Configurable: direction (in/out), type (bulk, iso, etc), and endpoint
+ * number can be changed. Names like "ep-a" are used for this type.
+ *
+ * - Fixed Function: in other cases. some characteristics may be mutable;
+ * that'd be hardware-specific. Names like "ep12out-bulk" are used.
+ *
+ * Gadget drivers are responsible for not setting up conflicting endpoint
+ * configurations, illegal or unsupported packet lengths, and so on.
+ */
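+
+/*
+ * For example (a sketch, not part of this driver), a gadget driver would
+ * normally pick one of these endpoints by capability rather than by name:
+ *
+ *	struct usb_ep *ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
+ *	if (!ep)
+ *		goto autoconf_fail;
+ *
+ * usb_ep_autoconfig() matches the descriptor against the caps recorded
+ * in ep_info[] below.
+ */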
+
+static const char ep0name[] = "ep0";
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+/* we don't provide isochronous endpoints since we don't support them */
+#define TYPE_BULK_OR_INT (USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+
+ /* everyone has ep0 */
+ EP_INFO(ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ /* act like a pxa250: fifteen fixed function endpoints */
+ EP_INFO("ep1in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
+ EP_INFO("ep3in-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4out-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
+ EP_INFO("ep5in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep6in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep7out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
+ EP_INFO("ep8in-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep9out-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
+ EP_INFO("ep10in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep11in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep12out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
+ EP_INFO("ep13in-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep14out-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
+ EP_INFO("ep15in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+
+ /* or like sa1100: two fixed function endpoints */
+ EP_INFO("ep1out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep2in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+
+ /* and now some generic EPs so we have enough in multi config */
+ EP_INFO("ep-aout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-bin",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep-cout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-dout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-ein",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep-fout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-gin",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep-hout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-iout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-jin",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep-kout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep-lin",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep-mout",
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
+};
+
+#define DUMMY_ENDPOINTS ARRAY_SIZE(ep_info)
+
+/*-------------------------------------------------------------------------*/
+
+#define FIFO_SIZE 64
+
+struct urbp {
+ struct urb *urb;
+ struct list_head urbp_list;
+ struct sg_mapping_iter miter;
+ u32 miter_started;
+};
+
+
+enum dummy_rh_state {
+ DUMMY_RH_RESET,
+ DUMMY_RH_SUSPENDED,
+ DUMMY_RH_RUNNING
+};
+
+struct dummy_hcd {
+ struct dummy *dum;
+ enum dummy_rh_state rh_state;
+ struct timer_list timer;
+ u32 port_status;
+ u32 old_status;
+ unsigned long re_timeout;
+
+ struct usb_device *udev;
+ struct list_head urbp_list;
+ struct urbp *next_frame_urbp;
+
+ u32 stream_en_ep;
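+ /* one byte per endpoint number: OUT stream count in the high nibble,
+  * IN in the low; see {get,set}_max_streams_for_pipe()
+  */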
+ u8 num_stream[30 / 2];
+
+ unsigned active:1;
+ unsigned old_active:1;
+ unsigned resuming:1;
+};
+
+struct dummy {
+ spinlock_t lock;
+
+ /*
+ * DEVICE/GADGET side support
+ */
+ struct dummy_ep ep[DUMMY_ENDPOINTS];
+ int address;
+ int callback_usage;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct dummy_request fifo_req;
+ u8 fifo_buf[FIFO_SIZE];
+ u16 devstatus;
+ unsigned ints_enabled:1;
+ unsigned udc_suspended:1;
+ unsigned pullup:1;
+
+ /*
+ * HOST side support
+ */
+ struct dummy_hcd *hs_hcd;
+ struct dummy_hcd *ss_hcd;
+};
+
+static inline struct dummy_hcd *hcd_to_dummy_hcd(struct usb_hcd *hcd)
+{
+ return (struct dummy_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *dummy_hcd_to_hcd(struct dummy_hcd *dum)
+{
+ return container_of((void *) dum, struct usb_hcd, hcd_priv);
+}
+
+static inline struct device *dummy_dev(struct dummy_hcd *dum)
+{
+ return dummy_hcd_to_hcd(dum)->self.controller;
+}
+
+static inline struct device *udc_dev(struct dummy *dum)
+{
+ return dum->gadget.dev.parent;
+}
+
+static inline struct dummy *ep_to_dummy(struct dummy_ep *ep)
+{
+ return container_of(ep->gadget, struct dummy, gadget);
+}
+
+static inline struct dummy_hcd *gadget_to_dummy_hcd(struct usb_gadget *gadget)
+{
+ struct dummy *dum = container_of(gadget, struct dummy, gadget);
+ if (dum->gadget.speed == USB_SPEED_SUPER)
+ return dum->ss_hcd;
+ else
+ return dum->hs_hcd;
+}
+
+static inline struct dummy *gadget_dev_to_dummy(struct device *dev)
+{
+ return container_of(dev, struct dummy, gadget.dev);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* DEVICE/GADGET SIDE UTILITY ROUTINES */
+
+/* called with spinlock held */
+static void nuke(struct dummy *dum, struct dummy_ep *ep)
+{
+ while (!list_empty(&ep->queue)) {
+ struct dummy_request *req;
+
+ req = list_entry(ep->queue.next, struct dummy_request, queue);
+ list_del_init(&req->queue);
+ req->req.status = -ESHUTDOWN;
+
+ spin_unlock(&dum->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dum->lock);
+ }
+}
+
+/* caller must hold lock */
+static void stop_activity(struct dummy *dum)
+{
+ int i;
+
+ /* prevent any more requests */
+ dum->address = 0;
+
+ /* The timer is left running so that outstanding URBs can fail */
+
+ /* nuke any pending requests first, so driver i/o is quiesced */
+ for (i = 0; i < DUMMY_ENDPOINTS; ++i)
+ nuke(dum, &dum->ep[i]);
+
+ /* driver now does any non-usb quiescing necessary */
+}
+
+/**
+ * set_link_state_by_speed() - Sets the current state of the link according to
+ * the hcd speed
+ * @dum_hcd: pointer to the dummy_hcd structure to update the link state for
+ *
+ * This function updates the port_status according to the link state and the
+ * speed of the hcd.
+ */
+static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
+{
+ struct dummy *dum = dum_hcd->dum;
+
+ if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) {
+ if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) == 0) {
+ dum_hcd->port_status = 0;
+ } else if (!dum->pullup || dum->udc_suspended) {
+ /* UDC suspend must cause a disconnect */
+ dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE);
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) != 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ } else {
+ /* device is connected and not suspended */
+ dum_hcd->port_status |= (USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_SPEED_5GBPS);
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) == 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
+ (dum_hcd->port_status &
+ USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ dum_hcd->active = 1;
+ }
+ } else {
+ if ((dum_hcd->port_status & USB_PORT_STAT_POWER) == 0) {
+ dum_hcd->port_status = 0;
+ } else if (!dum->pullup || dum->udc_suspended) {
+ /* UDC suspend must cause a disconnect */
+ dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_SUSPEND);
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) != 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ } else {
+ dum_hcd->port_status |= USB_PORT_STAT_CONNECTION;
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) == 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0)
+ dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+ else if ((dum_hcd->port_status &
+ USB_PORT_STAT_SUSPEND) == 0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ dum_hcd->active = 1;
+ }
+ }
+}
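+
+/*
+ * Note: the upper 16 bits of port_status hold the "change" bits (the hub
+ * spec's wPortChange), which is why connection changes are ORed in as
+ * (USB_PORT_STAT_C_CONNECTION << 16) above.
+ */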
+
+/* caller must hold lock */
+static void set_link_state(struct dummy_hcd *dum_hcd)
+ __must_hold(&dum->lock)
+{
+ struct dummy *dum = dum_hcd->dum;
+ unsigned int power_bit;
+
+ dum_hcd->active = 0;
+ if (dum->pullup)
+ if ((dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 &&
+ dum->gadget.speed != USB_SPEED_SUPER) ||
+ (dummy_hcd_to_hcd(dum_hcd)->speed != HCD_USB3 &&
+ dum->gadget.speed == USB_SPEED_SUPER))
+ return;
+
+ set_link_state_by_speed(dum_hcd);
+ power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+ USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
+
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
+ dum_hcd->active)
+ dum_hcd->resuming = 0;
+
+ /* Currently !connected or in reset */
+ if ((dum_hcd->port_status & power_bit) == 0 ||
+ (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
+ unsigned int disconnect = power_bit &
+ dum_hcd->old_status & (~dum_hcd->port_status);
+ unsigned int reset = USB_PORT_STAT_RESET &
+ (~dum_hcd->old_status) & dum_hcd->port_status;
+
+ /* Report reset and disconnect events to the driver */
+ if (dum->ints_enabled && (disconnect || reset)) {
+ stop_activity(dum);
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
+ if (reset)
+ usb_gadget_udc_reset(&dum->gadget, dum->driver);
+ else
+ dum->driver->disconnect(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
+ }
+ } else if (dum_hcd->active != dum_hcd->old_active &&
+ dum->ints_enabled) {
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
+ if (dum_hcd->old_active && dum->driver->suspend)
+ dum->driver->suspend(&dum->gadget);
+ else if (!dum_hcd->old_active && dum->driver->resume)
+ dum->driver->resume(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
+ }
+
+ dum_hcd->old_status = dum_hcd->port_status;
+ dum_hcd->old_active = dum_hcd->active;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* DEVICE/GADGET SIDE DRIVER
+ *
+ * This only tracks gadget state. All the work is done when the host
+ * side tries some (emulated) i/o operation. Real device controller
+ * drivers would do real i/o using dma, fifos, irqs, timers, etc.
+ */
+
+#define is_enabled(dum) \
+ (dum->port_status & USB_PORT_STAT_ENABLE)
+
+static int dummy_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
+ struct dummy_ep *ep;
+ unsigned max;
+ int retval;
+
+ ep = usb_ep_to_dummy_ep(_ep);
+ if (!_ep || !desc || ep->desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+ dum = ep_to_dummy(ep);
+ if (!dum->driver)
+ return -ESHUTDOWN;
+
+ dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+ if (!is_enabled(dum_hcd))
+ return -ESHUTDOWN;
+
+ /*
+ * For HS/FS devices only bits 0..10 of the wMaxPacketSize represent the
+ * maximum packet size.
+ * For SS devices the wMaxPacketSize is limited to 1024.
+ */
+ max = usb_endpoint_maxp(desc);
+
+ /* drivers must not request bad settings, since lower levels
+ * (hardware or its drivers) may not check. some endpoints
+ * can't do iso, many have maxpacket limitations, etc.
+ *
+ * since this "hardware" driver is here to help debugging, we
+ * have some extra sanity checks. (there could be more though,
+ * especially for "ep9out" style fixed function ones.)
+ */
+ retval = -EINVAL;
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (strstr(ep->ep.name, "-iso")
+ || strstr(ep->ep.name, "-int")) {
+ goto done;
+ }
+ switch (dum->gadget.speed) {
+ case USB_SPEED_SUPER:
+ if (max == 1024)
+ break;
+ goto done;
+ case USB_SPEED_HIGH:
+ if (max == 512)
+ break;
+ goto done;
+ case USB_SPEED_FULL:
+ if (max == 8 || max == 16 || max == 32 || max == 64)
+ /* we'll fake any legal size */
+ break;
+ /* save a return statement */
+ fallthrough;
+ default:
+ goto done;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
+ goto done;
+ /* real hardware might not handle all packet sizes */
+ switch (dum->gadget.speed) {
+ case USB_SPEED_SUPER:
+ case USB_SPEED_HIGH:
+ if (max <= 1024)
+ break;
+ /* save a return statement */
+ fallthrough;
+ case USB_SPEED_FULL:
+ if (max <= 64)
+ break;
+ /* save a return statement */
+ fallthrough;
+ default:
+ if (max <= 8)
+ break;
+ goto done;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (strstr(ep->ep.name, "-bulk")
+ || strstr(ep->ep.name, "-int"))
+ goto done;
+ /* real hardware might not handle all packet sizes */
+ switch (dum->gadget.speed) {
+ case USB_SPEED_SUPER:
+ case USB_SPEED_HIGH:
+ if (max <= 1024)
+ break;
+ /* save a return statement */
+ fallthrough;
+ case USB_SPEED_FULL:
+ if (max <= 1023)
+ break;
+ /* save a return statement */
+ fallthrough;
+ default:
+ goto done;
+ }
+ break;
+ default:
+ /* few chips support control except on ep0 */
+ goto done;
+ }
+
+ _ep->maxpacket = max;
+ if (usb_ss_max_streams(_ep->comp_desc)) {
+ if (!usb_endpoint_xfer_bulk(desc)) {
+ dev_err(udc_dev(dum), "Can't enable stream support on "
+ "non-bulk ep %s\n", _ep->name);
+ return -EINVAL;
+ }
+ ep->stream_en = 1;
+ }
+ ep->desc = desc;
+
+ dev_dbg(udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d stream %s\n",
+ _ep->name,
+ desc->bEndpointAddress & 0x0f,
+ (desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ usb_ep_type_string(usb_endpoint_type(desc)),
+ max, ep->stream_en ? "enabled" : "disabled");
+
+ /* at this point real hardware should be NAKing transfers
+ * to that endpoint, until a buffer is queued to it.
+ */
+ ep->halted = ep->wedged = 0;
+ retval = 0;
+done:
+ return retval;
+}
+
+static int dummy_disable(struct usb_ep *_ep)
+{
+ struct dummy_ep *ep;
+ struct dummy *dum;
+ unsigned long flags;
+
+ ep = usb_ep_to_dummy_ep(_ep);
+ if (!_ep || !ep->desc || _ep->name == ep0name)
+ return -EINVAL;
+ dum = ep_to_dummy(ep);
+
+ spin_lock_irqsave(&dum->lock, flags);
+ ep->desc = NULL;
+ ep->stream_en = 0;
+ nuke(dum, ep);
+ spin_unlock_irqrestore(&dum->lock, flags);
+
+ dev_dbg(udc_dev(dum), "disabled %s\n", _ep->name);
+ return 0;
+}
+
+static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
+ gfp_t mem_flags)
+{
+ struct dummy_request *req;
+
+ if (!_ep)
+ return NULL;
+
+ req = kzalloc(sizeof(*req), mem_flags);
+ if (!req)
+ return NULL;
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+static void dummy_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct dummy_request *req;
+
+ if (!_ep || !_req) {
+ WARN_ON(1);
+ return;
+ }
+
+ req = usb_request_to_dummy_request(_req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+static void fifo_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
+static int dummy_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t mem_flags)
+{
+ struct dummy_ep *ep;
+ struct dummy_request *req;
+ struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
+ unsigned long flags;
+
+ req = usb_request_to_dummy_request(_req);
+ if (!_req || !list_empty(&req->queue) || !_req->complete)
+ return -EINVAL;
+
+ ep = usb_ep_to_dummy_ep(_ep);
+ if (!_ep || (!ep->desc && _ep->name != ep0name))
+ return -EINVAL;
+
+ dum = ep_to_dummy(ep);
+ dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+ if (!dum->driver || !is_enabled(dum_hcd))
+ return -ESHUTDOWN;
+
+#if 0
+ dev_dbg(udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
+ ep, _req, _ep->name, _req->length, _req->buf);
+#endif
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+ spin_lock_irqsave(&dum->lock, flags);
+
+ /* implement an emulated single-request FIFO */
+ if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+ list_empty(&dum->fifo_req.queue) &&
+ list_empty(&ep->queue) &&
+ _req->length <= FIFO_SIZE) {
+ req = &dum->fifo_req;
+ req->req = *_req;
+ req->req.buf = dum->fifo_buf;
+ memcpy(dum->fifo_buf, _req->buf, _req->length);
+ req->req.context = dum;
+ req->req.complete = fifo_complete;
+
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock(&dum->lock);
+ _req->actual = _req->length;
+ _req->status = 0;
+ usb_gadget_giveback_request(_ep, _req);
+ spin_lock(&dum->lock);
+ } else
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock_irqrestore(&dum->lock, flags);
+
+ /* real hardware would likely enable transfers here, in case
+ * it'd been left NAKing.
+ */
+ return 0;
+}
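+
+/*
+ * Note: the single-request FIFO above means a short IN request (at most
+ * FIFO_SIZE bytes) is copied into fifo_buf and completed immediately;
+ * transfer() later drains the queued fifo_req on the host's behalf.
+ */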
+
+static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct dummy_ep *ep;
+ struct dummy *dum;
+ int retval = -EINVAL;
+ unsigned long flags;
+ struct dummy_request *req = NULL, *iter;
+
+ if (!_ep || !_req)
+ return retval;
+ ep = usb_ep_to_dummy_ep(_ep);
+ dum = ep_to_dummy(ep);
+
+ if (!dum->driver)
+ return -ESHUTDOWN;
+
+ local_irq_save(flags);
+ spin_lock(&dum->lock);
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ list_del_init(&iter->queue);
+ _req->status = -ECONNRESET;
+ req = iter;
+ retval = 0;
+ break;
+ }
+ spin_unlock(&dum->lock);
+
+ if (retval == 0) {
+ dev_dbg(udc_dev(dum),
+ "dequeued req %p from %s, len %d buf %p\n",
+ req, _ep->name, _req->length, _req->buf);
+ usb_gadget_giveback_request(_ep, _req);
+ }
+ local_irq_restore(flags);
+ return retval;
+}
+
+static int
+dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+ struct dummy_ep *ep;
+ struct dummy *dum;
+
+ if (!_ep)
+ return -EINVAL;
+ ep = usb_ep_to_dummy_ep(_ep);
+ dum = ep_to_dummy(ep);
+ if (!dum->driver)
+ return -ESHUTDOWN;
+ if (!value)
+ ep->halted = ep->wedged = 0;
+ else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+ !list_empty(&ep->queue))
+ return -EAGAIN;
+ else {
+ ep->halted = 1;
+ if (wedged)
+ ep->wedged = 1;
+ }
+ /* FIXME clear emulated data toggle too */
+ return 0;
+}
+
+static int
+dummy_set_halt(struct usb_ep *_ep, int value)
+{
+ return dummy_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int dummy_set_wedge(struct usb_ep *_ep)
+{
+ if (!_ep || _ep->name == ep0name)
+ return -EINVAL;
+ return dummy_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static const struct usb_ep_ops dummy_ep_ops = {
+ .enable = dummy_enable,
+ .disable = dummy_disable,
+
+ .alloc_request = dummy_alloc_request,
+ .free_request = dummy_free_request,
+
+ .queue = dummy_queue,
+ .dequeue = dummy_dequeue,
+
+ .set_halt = dummy_set_halt,
+ .set_wedge = dummy_set_wedge,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* there are both host and device side versions of this call ... */
+static int dummy_g_get_frame(struct usb_gadget *_gadget)
+{
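+ /* fake a frame number from the millisecond within the current second */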
+ struct timespec64 ts64;
+
+ ktime_get_ts64(&ts64);
+ return ts64.tv_nsec / NSEC_PER_MSEC;
+}
+
+static int dummy_wakeup(struct usb_gadget *_gadget)
+{
+ struct dummy_hcd *dum_hcd;
+
+ dum_hcd = gadget_to_dummy_hcd(_gadget);
+ if (!(dum_hcd->dum->devstatus & ((1 << USB_DEVICE_B_HNP_ENABLE)
+ | (1 << USB_DEVICE_REMOTE_WAKEUP))))
+ return -EINVAL;
+ if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0)
+ return -ENOLINK;
+ if ((dum_hcd->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ return -EIO;
+
+ /* FIXME: What if the root hub is suspended but the port isn't? */
+
+ /* hub notices our request, issues downstream resume, etc */
+ dum_hcd->resuming = 1;
+ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
+ mod_timer(&dummy_hcd_to_hcd(dum_hcd)->rh_timer, dum_hcd->re_timeout);
+ return 0;
+}
+
+static int dummy_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+ struct dummy *dum;
+
+ _gadget->is_selfpowered = (value != 0);
+ dum = gadget_to_dummy_hcd(_gadget)->dum;
+ if (value)
+ dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+ else
+ dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+ return 0;
+}
+
+static void dummy_udc_update_ep0(struct dummy *dum)
+{
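+ /*
+ * SuperSpeed ep0 encodes bMaxPacketSize0 as a power of two
+ * (2^9 = 512 bytes); other speeds report the byte count itself.
+ */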
+ if (dum->gadget.speed == USB_SPEED_SUPER)
+ dum->ep[0].ep.maxpacket = 9;
+ else
+ dum->ep[0].ep.maxpacket = 64;
+}
+
+static int dummy_pullup(struct usb_gadget *_gadget, int value)
+{
+ struct dummy_hcd *dum_hcd;
+ struct dummy *dum;
+ unsigned long flags;
+
+ dum = gadget_dev_to_dummy(&_gadget->dev);
+ dum_hcd = gadget_to_dummy_hcd(_gadget);
+
+ spin_lock_irqsave(&dum->lock, flags);
+ dum->pullup = (value != 0);
+ set_link_state(dum_hcd);
+ if (value == 0) {
+ /*
+ * Emulate synchronize_irq(): wait for callbacks to finish.
+ * This seems to be the best place to emulate the call to
+ * synchronize_irq() that's in usb_gadget_remove_driver().
+ * Doing it in dummy_udc_stop() would be too late since it
+ * is called after the unbind callback and unbind shouldn't
+ * be invoked until all the other callbacks are finished.
+ */
+ while (dum->callback_usage > 0) {
+ spin_unlock_irqrestore(&dum->lock, flags);
+ usleep_range(1000, 2000);
+ spin_lock_irqsave(&dum->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&dum->lock, flags);
+
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+ return 0;
+}
+
+static void dummy_udc_set_speed(struct usb_gadget *_gadget,
+ enum usb_device_speed speed)
+{
+ struct dummy *dum;
+
+ dum = gadget_dev_to_dummy(&_gadget->dev);
+ dum->gadget.speed = speed;
+ dummy_udc_update_ep0(dum);
+}
+
+static void dummy_udc_async_callbacks(struct usb_gadget *_gadget, bool enable)
+{
+ struct dummy *dum = gadget_dev_to_dummy(&_gadget->dev);
+
+ spin_lock_irq(&dum->lock);
+ dum->ints_enabled = enable;
+ spin_unlock_irq(&dum->lock);
+}
+
+static int dummy_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int dummy_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops dummy_ops = {
+ .get_frame = dummy_g_get_frame,
+ .wakeup = dummy_wakeup,
+ .set_selfpowered = dummy_set_selfpowered,
+ .pullup = dummy_pullup,
+ .udc_start = dummy_udc_start,
+ .udc_stop = dummy_udc_stop,
+ .udc_set_speed = dummy_udc_set_speed,
+ .udc_async_callbacks = dummy_udc_async_callbacks,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* "function" sysfs attribute */
+static ssize_t function_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dummy *dum = gadget_dev_to_dummy(dev);
+
+ if (!dum->driver || !dum->driver->function)
+ return 0;
+ return scnprintf(buf, PAGE_SIZE, "%s\n", dum->driver->function);
+}
+static DEVICE_ATTR_RO(function);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Driver registration/unregistration.
+ *
+ * This is basically hardware-specific; there's usually only one real USB
+ * device (not host) controller since that's how USB devices are intended
+ * to work. So most implementations of these api calls will rely on the
+ * fact that only one driver will ever bind to the hardware. But curious
+ * hardware can be built with discrete components, so the gadget API doesn't
+ * require that assumption.
+ *
+ * For this emulator, it might be convenient to create a usb device
+ * for each driver that registers: just add to a big root hub.
+ */
+
+static int dummy_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
+ struct dummy *dum = dum_hcd->dum;
+
+ switch (g->speed) {
+ /* All the speeds we support */
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+ driver->max_speed);
+ return -EINVAL;
+ }
+
+ /*
+ * DEVICE side init ... the layer above hardware, which
+ * can't enumerate without help from the driver we're binding.
+ */
+
+ spin_lock_irq(&dum->lock);
+ dum->devstatus = 0;
+ dum->driver = driver;
+ spin_unlock_irq(&dum->lock);
+
+ return 0;
+}
+
+static int dummy_udc_stop(struct usb_gadget *g)
+{
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
+ struct dummy *dum = dum_hcd->dum;
+
+ spin_lock_irq(&dum->lock);
+ dum->ints_enabled = 0;
+ stop_activity(dum);
+ dum->driver = NULL;
+ spin_unlock_irq(&dum->lock);
+
+ return 0;
+}
+
+#undef is_enabled
+
+/* The gadget structure is stored inside the hcd structure and will be
+ * released along with it. */
+static void init_dummy_udc_hw(struct dummy *dum)
+{
+ int i;
+
+ INIT_LIST_HEAD(&dum->gadget.ep_list);
+ for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+ struct dummy_ep *ep = &dum->ep[i];
+
+ if (!ep_info[i].name)
+ break;
+ ep->ep.name = ep_info[i].name;
+ ep->ep.caps = ep_info[i].caps;
+ ep->ep.ops = &dummy_ep_ops;
+ list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
+ ep->halted = ep->wedged = ep->already_seen =
+ ep->setup_stage = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.max_streams = 16;
+ ep->last_io = jiffies;
+ ep->gadget = &dum->gadget;
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+ }
+
+ dum->gadget.ep0 = &dum->ep[0].ep;
+ list_del_init(&dum->ep[0].ep.ep_list);
+ INIT_LIST_HEAD(&dum->fifo_req.queue);
+
+#ifdef CONFIG_USB_OTG
+ dum->gadget.is_otg = 1;
+#endif
+}
+
+static int dummy_udc_probe(struct platform_device *pdev)
+{
+ struct dummy *dum;
+ int rc;
+
+ dum = *((void **)dev_get_platdata(&pdev->dev));
+ /* Clear usb_gadget region for new registration to udc-core */
+ memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
+ dum->gadget.name = gadget_name;
+ dum->gadget.ops = &dummy_ops;
+ if (mod_data.is_super_speed)
+ dum->gadget.max_speed = USB_SPEED_SUPER;
+ else if (mod_data.is_high_speed)
+ dum->gadget.max_speed = USB_SPEED_HIGH;
+ else
+ dum->gadget.max_speed = USB_SPEED_FULL;
+
+ dum->gadget.dev.parent = &pdev->dev;
+ init_dummy_udc_hw(dum);
+
+ rc = usb_add_gadget_udc(&pdev->dev, &dum->gadget);
+ if (rc < 0)
+ goto err_udc;
+
+ rc = device_create_file(&dum->gadget.dev, &dev_attr_function);
+ if (rc < 0)
+ goto err_dev;
+ platform_set_drvdata(pdev, dum);
+ return rc;
+
+err_dev:
+ usb_del_gadget_udc(&dum->gadget);
+err_udc:
+ return rc;
+}
+
+static int dummy_udc_remove(struct platform_device *pdev)
+{
+ struct dummy *dum = platform_get_drvdata(pdev);
+
+ device_remove_file(&dum->gadget.dev, &dev_attr_function);
+ usb_del_gadget_udc(&dum->gadget);
+ return 0;
+}
+
+static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
+ int suspend)
+{
+ spin_lock_irq(&dum->lock);
+ dum->udc_suspended = suspend;
+ set_link_state(dum_hcd);
+ spin_unlock_irq(&dum->lock);
+}
+
+static int dummy_udc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct dummy *dum = platform_get_drvdata(pdev);
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ dummy_udc_pm(dum, dum_hcd, 1);
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+ return 0;
+}
+
+static int dummy_udc_resume(struct platform_device *pdev)
+{
+ struct dummy *dum = platform_get_drvdata(pdev);
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ dummy_udc_pm(dum, dum_hcd, 0);
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+ return 0;
+}
+
+static struct platform_driver dummy_udc_driver = {
+ .probe = dummy_udc_probe,
+ .remove = dummy_udc_remove,
+ .suspend = dummy_udc_suspend,
+ .resume = dummy_udc_resume,
+ .driver = {
+ .name = gadget_name,
+ },
+};
+
+/*-------------------------------------------------------------------------*/
+
+static unsigned int dummy_get_ep_idx(const struct usb_endpoint_descriptor *desc)
+{
+ unsigned int index;
+
+ index = usb_endpoint_num(desc) << 1;
+ if (usb_endpoint_dir_in(desc))
+ index |= 1;
+ return index;
+}
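+
+/* e.g. ep 2 IN maps to index (2 << 1) | 1 == 5, a bit number in stream_en_ep */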
+
+/* HOST SIDE DRIVER
+ *
+ * this uses the hcd framework to hook up to host side drivers.
+ * its root hub will only have one device, otherwise it acts like
+ * a normal host controller.
+ *
+ * when urbs are queued, they're just stuck on a list that we
+ * scan in a timer callback. that callback connects writes from
+ * the host with reads from the device, and so on, based on the
+ * usb 2.0 rules.
+ */
+
+static int dummy_ep_stream_en(struct dummy_hcd *dum_hcd, struct urb *urb)
+{
+ const struct usb_endpoint_descriptor *desc = &urb->ep->desc;
+ u32 index;
+
+ if (!usb_endpoint_xfer_bulk(desc))
+ return 0;
+
+ index = dummy_get_ep_idx(desc);
+ return (1 << index) & dum_hcd->stream_en_ep;
+}
+
+/*
+ * The max stream number is saved as a nibble, so for the 30 possible
+ * endpoints we need only 15 bytes of memory. Therefore we are limited to a
+ * max of 16 streams (0 means only 1 stream is used). The spec allows a
+ * 16-bit value, so if the 16-stream limit ever has to go, the array should
+ * grow to 30 elements of type u16.
+ */
+static int get_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
+ unsigned int pipe)
+{
+ int max_streams;
+
+ max_streams = dum_hcd->num_stream[usb_pipeendpoint(pipe)];
+ if (usb_pipeout(pipe))
+ max_streams >>= 4;
+ else
+ max_streams &= 0xf;
+ max_streams++;
+ return max_streams;
+}
+
+static void set_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
+ unsigned int pipe, unsigned int streams)
+{
+ int max_streams;
+
+ streams--;
+ max_streams = dum_hcd->num_stream[usb_pipeendpoint(pipe)];
+ if (usb_pipeout(pipe)) {
+ streams <<= 4;
+ max_streams &= 0xf;
+ } else {
+ max_streams &= 0xf0;
+ }
+ max_streams |= streams;
+ dum_hcd->num_stream[usb_pipeendpoint(pipe)] = max_streams;
+}
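+
+/*
+ * Worked example: storing 8 streams for ep 3 OUT writes (8 - 1) << 4 into
+ * the high nibble of num_stream[3]; reading it back shifts down to 7 and
+ * adds 1, yielding 8 again.
+ */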
+
+static int dummy_validate_stream(struct dummy_hcd *dum_hcd, struct urb *urb)
+{
+ unsigned int max_streams;
+ int enabled;
+
+ enabled = dummy_ep_stream_en(dum_hcd, urb);
+ if (!urb->stream_id) {
+ if (enabled)
+ return -EINVAL;
+ return 0;
+ }
+ if (!enabled)
+ return -EINVAL;
+
+ max_streams = get_max_streams_for_pipe(dum_hcd,
+ usb_pipeendpoint(urb->pipe));
+ if (urb->stream_id > max_streams) {
+ dev_err(dummy_dev(dum_hcd), "Stream id %d is out of range.\n",
+ urb->stream_id);
+ BUG();
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dummy_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags
+) {
+ struct dummy_hcd *dum_hcd;
+ struct urbp *urbp;
+ unsigned long flags;
+ int rc;
+
+ urbp = kmalloc(sizeof *urbp, mem_flags);
+ if (!urbp)
+ return -ENOMEM;
+ urbp->urb = urb;
+ urbp->miter_started = 0;
+
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+
+ rc = dummy_validate_stream(dum_hcd, urb);
+ if (rc) {
+ kfree(urbp);
+ goto done;
+ }
+
+ rc = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (rc) {
+ kfree(urbp);
+ goto done;
+ }
+
+ if (!dum_hcd->udev) {
+ dum_hcd->udev = urb->dev;
+ usb_get_dev(dum_hcd->udev);
+ } else if (unlikely(dum_hcd->udev != urb->dev))
+ dev_err(dummy_dev(dum_hcd), "usb_device address has changed!\n");
+
+ list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
+ urb->hcpriv = urbp;
+ if (!dum_hcd->next_frame_urbp)
+ dum_hcd->next_frame_urbp = urbp;
+ if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
+ urb->error_count = 1; /* mark as a new urb */
+
+ /* kick the scheduler, it'll do the rest */
+ if (!timer_pending(&dum_hcd->timer))
+ mod_timer(&dum_hcd->timer, jiffies + 1);
+
+ done:
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+ return rc;
+}
+
+static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct dummy_hcd *dum_hcd;
+ unsigned long flags;
+ int rc;
+
+ /* giveback happens automatically in timer callback,
+ * so make sure the callback happens */
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+ !list_empty(&dum_hcd->urbp_list))
+ mod_timer(&dum_hcd->timer, jiffies);
+
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+ return rc;
+}
+
+static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
+ u32 len)
+{
+ void *ubuf, *rbuf;
+ struct urbp *urbp = urb->hcpriv;
+ int to_host;
+ struct sg_mapping_iter *miter = &urbp->miter;
+ u32 trans = 0;
+ u32 this_sg;
+ bool next_sg;
+
+ to_host = usb_urb_dir_in(urb);
+ rbuf = req->req.buf + req->req.actual;
+
+ if (!urb->num_sgs) {
+ ubuf = urb->transfer_buffer + urb->actual_length;
+ if (to_host)
+ memcpy(ubuf, rbuf, len);
+ else
+ memcpy(rbuf, ubuf, len);
+ return len;
+ }
+
+ if (!urbp->miter_started) {
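+ /* atomic kmaps: this path runs from the frame timer, in softirq context */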
+ u32 flags = SG_MITER_ATOMIC;
+
+ if (to_host)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(miter, urb->sg, urb->num_sgs, flags);
+ urbp->miter_started = 1;
+ }
+ next_sg = sg_miter_next(miter);
+ if (next_sg == false) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ do {
+ ubuf = miter->addr;
+ this_sg = min_t(u32, len, miter->length);
+ miter->consumed = this_sg;
+ trans += this_sg;
+
+ if (to_host)
+ memcpy(ubuf, rbuf, this_sg);
+ else
+ memcpy(rbuf, ubuf, this_sg);
+ len -= this_sg;
+
+ if (!len)
+ break;
+ next_sg = sg_miter_next(miter);
+ if (next_sg == false) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ rbuf += this_sg;
+ } while (1);
+
+ sg_miter_stop(miter);
+ return trans;
+}
+
+/* transfer up to a frame's worth; caller must own lock */
+static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
+ struct dummy_ep *ep, int limit, int *status)
+{
+ struct dummy *dum = dum_hcd->dum;
+ struct dummy_request *req;
+ int sent = 0;
+
+top:
+ /* if there's no request queued, the device is NAKing; return */
+ list_for_each_entry(req, &ep->queue, queue) {
+ unsigned host_len, dev_len, len;
+ int is_short, to_host;
+ int rescan = 0;
+
+ if (dummy_ep_stream_en(dum_hcd, urb)) {
+ if (urb->stream_id != req->req.stream_id)
+ continue;
+ }
+
+ /* 1..N packets of ep->ep.maxpacket each ... the last one
+ * may be short (including zero length).
+ *
+ * writer can send a zlp explicitly (length 0) or implicitly
+ * (length mod maxpacket zero, and 'zero' flag); they always
+ * terminate reads.
+ */
+ host_len = urb->transfer_buffer_length - urb->actual_length;
+ dev_len = req->req.length - req->req.actual;
+ len = min(host_len, dev_len);
+
+ /* FIXME update emulated data toggle too */
+
+ to_host = usb_urb_dir_in(urb);
+ if (unlikely(len == 0))
+ is_short = 1;
+ else {
+ /* not enough bandwidth left? */
+ if (limit < ep->ep.maxpacket && limit < len)
+ break;
+ len = min_t(unsigned, len, limit);
+ if (len == 0)
+ break;
+
+ /* send multiple of maxpacket first, then remainder */
+ if (len >= ep->ep.maxpacket) {
+ is_short = 0;
+ if (len % ep->ep.maxpacket)
+ rescan = 1;
+ len -= len % ep->ep.maxpacket;
+ } else {
+ is_short = 1;
+ }
+
+ len = dummy_perform_transfer(urb, req, len);
+
+ ep->last_io = jiffies;
+ if ((int)len < 0) {
+ req->req.status = len;
+ } else {
+ limit -= len;
+ sent += len;
+ urb->actual_length += len;
+ req->req.actual += len;
+ }
+ }
+
+ /* short packets terminate, maybe with overflow/underflow.
+ * it's only really an error to write too much.
+ *
+ * partially filling a buffer optionally blocks queue advances
+ * (so completion handlers can clean up the queue) but we don't
+ * need to emulate such data-in-flight.
+ */
+ if (is_short) {
+ if (host_len == dev_len) {
+ req->req.status = 0;
+ *status = 0;
+ } else if (to_host) {
+ req->req.status = 0;
+ if (dev_len > host_len)
+ *status = -EOVERFLOW;
+ else
+ *status = 0;
+ } else {
+ *status = 0;
+ if (host_len > dev_len)
+ req->req.status = -EOVERFLOW;
+ else
+ req->req.status = 0;
+ }
+
+ /*
+ * many requests terminate without a short packet.
+ * send a zlp if demanded by flags.
+ */
+ } else {
+ if (req->req.length == req->req.actual) {
+ if (req->req.zero && to_host)
+ rescan = 1;
+ else
+ req->req.status = 0;
+ }
+ if (urb->transfer_buffer_length == urb->actual_length) {
+ if (urb->transfer_flags & URB_ZERO_PACKET &&
+ !to_host)
+ rescan = 1;
+ else
+ *status = 0;
+ }
+ }
+
+ /* device side completion --> continuable */
+ if (req->req.status != -EINPROGRESS) {
+ list_del_init(&req->queue);
+
+ spin_unlock(&dum->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dum->lock);
+
+ /* requests might have been unlinked... */
+ rescan = 1;
+ }
+
+ /* host side completion --> terminate */
+ if (*status != -EINPROGRESS)
+ break;
+
+ /* rescan to continue with any other queued i/o */
+ if (rescan)
+ goto top;
+ }
+ return sent;
+}
+
+static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
+{
+ int limit = ep->ep.maxpacket;
+
+ if (dum->gadget.speed == USB_SPEED_HIGH) {
+ int tmp;
+
+ /* high bandwidth mode */
+ tmp = usb_endpoint_maxp_mult(ep->desc);
+ tmp *= 8 /* applies to entire frame */;
+ limit += limit * tmp;
+ }
+ if (dum->gadget.speed == USB_SPEED_SUPER) {
+ switch (usb_endpoint_type(ep->desc)) {
+ case USB_ENDPOINT_XFER_ISOC:
+ /* Sec. 4.4.8.2 USB3.0 Spec */
+ limit = 3 * 16 * 1024 * 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ /* Sec. 4.4.7.2 USB3.0 Spec */
+ limit = 3 * 1024 * 8;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ default:
+ break;
+ }
+ }
+ return limit;
+}
+
+#define is_active(dum_hcd) ((dum_hcd->port_status & \
+ (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
+ USB_PORT_STAT_SUSPEND)) \
+ == (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
+
+static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
+{
+ int i;
+
+ if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
+ dum->ss_hcd : dum->hs_hcd)))
+ return NULL;
+ if (!dum->ints_enabled)
+ return NULL;
+ if ((address & ~USB_DIR_IN) == 0)
+ return &dum->ep[0];
+ for (i = 1; i < DUMMY_ENDPOINTS; i++) {
+ struct dummy_ep *ep = &dum->ep[i];
+
+ if (!ep->desc)
+ continue;
+ if (ep->desc->bEndpointAddress == address)
+ return ep;
+ }
+ return NULL;
+}
+
+#undef is_active
+
+#define Dev_Request (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+#define Dev_InRequest (Dev_Request | USB_DIR_IN)
+#define Intf_Request (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
+#define Intf_InRequest (Intf_Request | USB_DIR_IN)
+#define Ep_Request (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
+#define Ep_InRequest (Ep_Request | USB_DIR_IN)
+
+
+/**
+ * handle_control_request() - handles all control transfers
+ * @dum_hcd: pointer to the dummy_hcd handling this controller
+ * @urb: the urb request to handle
+ * @setup: pointer to the setup data for a USB device control
+ * request
+ * @status: pointer to request handling status
+ *
+ * Return 0 - if the request was handled
+ * 1 - if the request wasn't handled
+ * error code on error
+ */
+static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
+ struct usb_ctrlrequest *setup,
+ int *status)
+{
+ struct dummy_ep *ep2;
+ struct dummy *dum = dum_hcd->dum;
+ int ret_val = 1;
+ unsigned w_index;
+ unsigned w_value;
+
+ w_index = le16_to_cpu(setup->wIndex);
+ w_value = le16_to_cpu(setup->wValue);
+ switch (setup->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ if (setup->bRequestType != Dev_Request)
+ break;
+ dum->address = w_value;
+ *status = 0;
+ dev_dbg(udc_dev(dum), "set_address = %d\n",
+ w_value);
+ ret_val = 0;
+ break;
+ case USB_REQ_SET_FEATURE:
+ if (setup->bRequestType == Dev_Request) {
+ ret_val = 0;
+ switch (w_value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ dum->gadget.b_hnp_enable = 1;
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ dum->gadget.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ dum->gadget.a_alt_hnp_support = 1;
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U1_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U2_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_LTM_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ default:
+ ret_val = -EOPNOTSUPP;
+ }
+ if (ret_val == 0) {
+ dum->devstatus |= (1 << w_value);
+ *status = 0;
+ }
+ } else if (setup->bRequestType == Ep_Request) {
+ /* endpoint halt */
+ ep2 = find_endpoint(dum, w_index);
+ if (!ep2 || ep2->ep.name == ep0name) {
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ ep2->halted = 1;
+ ret_val = 0;
+ *status = 0;
+ }
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ if (setup->bRequestType == Dev_Request) {
+ ret_val = 0;
+ switch (w_value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ w_value = USB_DEVICE_REMOTE_WAKEUP;
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U1_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U2_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_LTM_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ default:
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ if (ret_val == 0) {
+ dum->devstatus &= ~(1 << w_value);
+ *status = 0;
+ }
+ } else if (setup->bRequestType == Ep_Request) {
+ /* endpoint halt */
+ ep2 = find_endpoint(dum, w_index);
+ if (!ep2) {
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ if (!ep2->wedged)
+ ep2->halted = 0;
+ ret_val = 0;
+ *status = 0;
+ }
+ break;
+ case USB_REQ_GET_STATUS:
+ if (setup->bRequestType == Dev_InRequest
+ || setup->bRequestType == Intf_InRequest
+ || setup->bRequestType == Ep_InRequest) {
+ char *buf;
+ /*
+ * device: remote wakeup, selfpowered
+ * interface: nothing
+ * endpoint: halt
+ */
+ buf = (char *)urb->transfer_buffer;
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType == Ep_InRequest) {
+ ep2 = find_endpoint(dum, w_index);
+ if (!ep2) {
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ buf[0] = ep2->halted;
+ } else if (setup->bRequestType ==
+ Dev_InRequest) {
+ buf[0] = (u8)dum->devstatus;
+ } else
+ buf[0] = 0;
+ }
+ if (urb->transfer_buffer_length > 1)
+ buf[1] = 0;
+ urb->actual_length = min_t(u32, 2,
+ urb->transfer_buffer_length);
+ ret_val = 0;
+ *status = 0;
+ }
+ break;
+ }
+ return ret_val;
+}
+
+/*
+ * Drive both sides of the transfers; looks like irq handlers to both
+ * drivers except that the callbacks are invoked from soft interrupt
+ * context.
+ */
+static void dummy_timer(struct timer_list *t)
+{
+ struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
+ struct dummy *dum = dum_hcd->dum;
+ struct urbp *urbp, *tmp;
+ unsigned long flags;
+ int limit, total;
+ int i;
+
+ /* simplistic model for one frame's bandwidth */
+ /* FIXME: account for transaction and packet overhead */
+ switch (dum->gadget.speed) {
+ case USB_SPEED_LOW:
+ total = 8/*bytes*/ * 12/*packets*/;
+ break;
+ case USB_SPEED_FULL:
+ total = 64/*bytes*/ * 19/*packets*/;
+ break;
+ case USB_SPEED_HIGH:
+ total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
+ break;
+ case USB_SPEED_SUPER:
+ /* Bus speed is 500000 bytes/ms, so use a little less */
+ total = 490000;
+ break;
+ default: /* Can't happen */
+ dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
+ total = 0;
+ break;
+ }
+
+ /* FIXME if HZ != 1000 this will probably misbehave ... */
+
+ /* look at each urb queued by the host side driver */
+ spin_lock_irqsave(&dum->lock, flags);
+
+ if (!dum_hcd->udev) {
+ dev_err(dummy_dev(dum_hcd),
+ "timer fired with no URBs pending?\n");
+ spin_unlock_irqrestore(&dum->lock, flags);
+ return;
+ }
+ dum_hcd->next_frame_urbp = NULL;
+
+ for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+ if (!ep_info[i].name)
+ break;
+ dum->ep[i].already_seen = 0;
+ }
+
+restart:
+ list_for_each_entry_safe(urbp, tmp, &dum_hcd->urbp_list, urbp_list) {
+ struct urb *urb;
+ struct dummy_request *req;
+ u8 address;
+ struct dummy_ep *ep = NULL;
+ int status = -EINPROGRESS;
+
+ /* stop when we reach URBs queued after the timer interrupt */
+ if (urbp == dum_hcd->next_frame_urbp)
+ break;
+
+ urb = urbp->urb;
+ if (urb->unlinked)
+ goto return_urb;
+ else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
+ continue;
+
+ /* Used up this frame's bandwidth? */
+ if (total <= 0)
+ continue;
+
+ /* find the gadget's ep for this request (if configured) */
+ address = usb_pipeendpoint (urb->pipe);
+ if (usb_urb_dir_in(urb))
+ address |= USB_DIR_IN;
+ ep = find_endpoint(dum, address);
+ if (!ep) {
+ /* set_configuration() disagreement */
+ dev_dbg(dummy_dev(dum_hcd),
+ "no ep configured for urb %p\n",
+ urb);
+ status = -EPROTO;
+ goto return_urb;
+ }
+
+ if (ep->already_seen)
+ continue;
+ ep->already_seen = 1;
+ if (ep == &dum->ep[0] && urb->error_count) {
+ ep->setup_stage = 1; /* a new urb */
+ urb->error_count = 0;
+ }
+ if (ep->halted && !ep->setup_stage) {
+ /* NOTE: must not be iso! */
+ dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n",
+ ep->ep.name, urb);
+ status = -EPIPE;
+ goto return_urb;
+ }
+ /* FIXME make sure both ends agree on maxpacket */
+
+ /* handle control requests */
+ if (ep == &dum->ep[0] && ep->setup_stage) {
+ struct usb_ctrlrequest setup;
+ int value;
+
+ setup = *(struct usb_ctrlrequest *) urb->setup_packet;
+ /* paranoia, in case of stale queued data */
+ list_for_each_entry(req, &ep->queue, queue) {
+ list_del_init(&req->queue);
+ req->req.status = -EOVERFLOW;
+ dev_dbg(udc_dev(dum), "stale req = %p\n",
+ req);
+
+ spin_unlock(&dum->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dum->lock);
+ ep->already_seen = 0;
+ goto restart;
+ }
+
+ /* gadget driver never sees set_address or operations
+ * on standard feature flags. some hardware doesn't
+ * even expose them.
+ */
+ ep->last_io = jiffies;
+ ep->setup_stage = 0;
+ ep->halted = 0;
+
+ value = handle_control_request(dum_hcd, urb, &setup,
+ &status);
+
+ /* gadget driver handles all other requests. block
+ * until setup() returns; no reentrancy issues etc.
+ */
+ if (value > 0) {
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
+ value = dum->driver->setup(&dum->gadget,
+ &setup);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
+
+ if (value >= 0) {
+ /* no delays (max 64KB data stage) */
+ limit = 64*1024;
+ goto treat_control_like_bulk;
+ }
+ /* error, see below */
+ }
+
+ if (value < 0) {
+ if (value != -EOPNOTSUPP)
+ dev_dbg(udc_dev(dum),
+ "setup --> %d\n",
+ value);
+ status = -EPIPE;
+ urb->actual_length = 0;
+ }
+
+ goto return_urb;
+ }
+
+ /* non-control requests */
+ limit = total;
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ /*
+ * We don't support isochronous. But if we did,
+ * here are some of the issues we'd have to face:
+ *
+ * Is it urb->interval since the last xfer?
+ * Use urb->iso_frame_desc[i].
+ * Complete whether or not ep has requests queued.
+ * Report random errors, to debug drivers.
+ */
+ limit = max(limit, periodic_bytes(dum, ep));
+ status = -EINVAL; /* fail all xfers */
+ break;
+
+ case PIPE_INTERRUPT:
+ /* FIXME is it urb->interval since the last xfer?
+ * this almost certainly polls too fast.
+ */
+ limit = max(limit, periodic_bytes(dum, ep));
+ fallthrough;
+
+ default:
+treat_control_like_bulk:
+ ep->last_io = jiffies;
+ total -= transfer(dum_hcd, urb, ep, limit, &status);
+ break;
+ }
+
+ /* incomplete transfer? */
+ if (status == -EINPROGRESS)
+ continue;
+
+return_urb:
+ list_del(&urbp->urbp_list);
+ kfree(urbp);
+ if (ep)
+ ep->already_seen = ep->setup_stage = 0;
+
+ usb_hcd_unlink_urb_from_ep(dummy_hcd_to_hcd(dum_hcd), urb);
+ spin_unlock(&dum->lock);
+ usb_hcd_giveback_urb(dummy_hcd_to_hcd(dum_hcd), urb, status);
+ spin_lock(&dum->lock);
+
+ goto restart;
+ }
+
+ if (list_empty(&dum_hcd->urbp_list)) {
+ usb_put_dev(dum_hcd->udev);
+ dum_hcd->udev = NULL;
+ } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+ /* want a 1 msec delay here */
+ mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1));
+ }
+
+ spin_unlock_irqrestore(&dum->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define PORT_C_MASK \
+ ((USB_PORT_STAT_C_CONNECTION \
+ | USB_PORT_STAT_C_ENABLE \
+ | USB_PORT_STAT_C_SUSPEND \
+ | USB_PORT_STAT_C_OVERCURRENT \
+ | USB_PORT_STAT_C_RESET) << 16)
+
+static int dummy_hub_status(struct usb_hcd *hcd, char *buf)
+{
+ struct dummy_hcd *dum_hcd;
+ unsigned long flags;
+ int retval = 0;
+
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ goto done;
+
+ if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) {
+ dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
+ dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+ set_link_state(dum_hcd);
+ }
+
+ if ((dum_hcd->port_status & PORT_C_MASK) != 0) {
+ *buf = (1 << 1);
+ dev_dbg(dummy_dev(dum_hcd), "port status 0x%08x has changes\n",
+ dum_hcd->port_status);
+ retval = 1;
+ if (dum_hcd->rh_state == DUMMY_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+ }
+done:
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+ return retval;
+}
+
+/* usb 3.0 root hub device descriptor */
+static struct {
+ struct usb_bos_descriptor bos;
+ struct usb_ss_cap_descriptor ss_cap;
+} __packed usb3_bos_desc = {
+
+ .bos = {
+ .bLength = USB_DT_BOS_SIZE,
+ .bDescriptorType = USB_DT_BOS,
+ .wTotalLength = cpu_to_le16(sizeof(usb3_bos_desc)),
+ .bNumDeviceCaps = 1,
+ },
+ .ss_cap = {
+ .bLength = USB_DT_USB_SS_CAP_SIZE,
+ .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
+ .bDevCapabilityType = USB_SS_CAP_TYPE,
+ .wSpeedSupported = cpu_to_le16(USB_5GBPS_OPERATION),
+ .bFunctionalitySupport = ilog2(USB_5GBPS_OPERATION),
+ },
+};
+
+static inline void
+ss_hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ memset(desc, 0, sizeof *desc);
+ desc->bDescriptorType = USB_DT_SS_HUB;
+ desc->bDescLength = 12;
+ desc->wHubCharacteristics = cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM |
+ HUB_CHAR_COMMON_OCPM);
+ desc->bNbrPorts = 1;
+ desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec */
+ desc->u.ss.DeviceRemovable = 0;
+}
+
+static inline void hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ memset(desc, 0, sizeof *desc);
+ desc->bDescriptorType = USB_DT_HUB;
+ desc->bDescLength = 9;
+ desc->wHubCharacteristics = cpu_to_le16(
+ HUB_CHAR_INDV_PORT_LPSM |
+ HUB_CHAR_COMMON_OCPM);
+ desc->bNbrPorts = 1;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
+}
+
+static int dummy_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+) {
+ struct dummy_hcd *dum_hcd;
+ int retval = 0;
+ unsigned long flags;
+
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ return -ETIMEDOUT;
+
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ break;
+ case ClearPortFeature:
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if (hcd->speed == HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_SUSPEND req not "
+ "supported for USB 3.0 roothub\n");
+ goto error;
+ }
+ if (dum_hcd->port_status & USB_PORT_STAT_SUSPEND) {
+ /* 20msec resume signaling */
+ dum_hcd->resuming = 1;
+ dum_hcd->re_timeout = jiffies +
+ msecs_to_jiffies(20);
+ }
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dummy_dev(dum_hcd), "power-off\n");
+ if (hcd->speed == HCD_USB3)
+ dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
+ else
+ dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
+ set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ dum_hcd->port_status &= ~(1 << wValue);
+ set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
+ }
+ break;
+ case GetHubDescriptor:
+ if (hcd->speed == HCD_USB3 &&
+ (wLength < USB_DT_SS_HUB_SIZE ||
+ wValue != (USB_DT_SS_HUB << 8))) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "Wrong hub descriptor type for "
+ "USB 3.0 roothub.\n");
+ goto error;
+ }
+ if (hcd->speed == HCD_USB3)
+ ss_hub_descriptor((struct usb_hub_descriptor *) buf);
+ else
+ hub_descriptor((struct usb_hub_descriptor *) buf);
+ break;
+
+ case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+ if (hcd->speed != HCD_USB3)
+ goto error;
+
+ if ((wValue >> 8) != USB_DT_BOS)
+ goto error;
+
+ memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc));
+ retval = sizeof(usb3_bos_desc);
+ break;
+
+ case GetHubStatus:
+ *(__le32 *) buf = cpu_to_le32(0);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ retval = -EPIPE;
+
+ /* whoever resets or resumes must GetPortStatus to
+ * complete it!!
+ */
+ if (dum_hcd->resuming &&
+ time_after_eq(jiffies, dum_hcd->re_timeout)) {
+ dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
+ dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+ }
+ if ((dum_hcd->port_status & USB_PORT_STAT_RESET) != 0 &&
+ time_after_eq(jiffies, dum_hcd->re_timeout)) {
+ dum_hcd->port_status |= (USB_PORT_STAT_C_RESET << 16);
+ dum_hcd->port_status &= ~USB_PORT_STAT_RESET;
+ if (dum_hcd->dum->pullup) {
+ dum_hcd->port_status |= USB_PORT_STAT_ENABLE;
+
+ if (hcd->speed < HCD_USB3) {
+ switch (dum_hcd->dum->gadget.speed) {
+ case USB_SPEED_HIGH:
+ dum_hcd->port_status |=
+ USB_PORT_STAT_HIGH_SPEED;
+ break;
+ case USB_SPEED_LOW:
+ dum_hcd->dum->gadget.ep0->
+ maxpacket = 8;
+ dum_hcd->port_status |=
+ USB_PORT_STAT_LOW_SPEED;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+ set_link_state(dum_hcd);
+ ((__le16 *) buf)[0] = cpu_to_le16(dum_hcd->port_status);
+ ((__le16 *) buf)[1] = cpu_to_le16(dum_hcd->port_status >> 16);
+ break;
+ case SetHubFeature:
+ retval = -EPIPE;
+ break;
+ case SetPortFeature:
+ switch (wValue) {
+ case USB_PORT_FEAT_LINK_STATE:
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_LINK_STATE req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ /*
+ * Since this is dummy we don't have an actual link so
+ * there is nothing to do for the SET_LINK_STATE cmd
+ */
+ break;
+ case USB_PORT_FEAT_U1_TIMEOUT:
+ case USB_PORT_FEAT_U2_TIMEOUT:
+ /* TODO: add suspend/resume support! */
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_U1/2_TIMEOUT req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ /* Applicable only for USB2.0 hub */
+ if (hcd->speed == HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_SUSPEND req not "
+ "supported for USB 3.0 roothub\n");
+ goto error;
+ }
+ if (dum_hcd->active) {
+ dum_hcd->port_status |= USB_PORT_STAT_SUSPEND;
+
+ /* HNP would happen here; for now we
+ * assume b_bus_req is always true.
+ */
+ set_link_state(dum_hcd);
+ if (((1 << USB_DEVICE_B_HNP_ENABLE)
+ & dum_hcd->dum->devstatus) != 0)
+ dev_dbg(dummy_dev(dum_hcd),
+ "no HNP yet!\n");
+ }
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (hcd->speed == HCD_USB3)
+ dum_hcd->port_status |= USB_SS_PORT_STAT_POWER;
+ else
+ dum_hcd->port_status |= USB_PORT_STAT_POWER;
+ set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_BH_PORT_RESET:
+ /* Applicable only for USB3.0 hub */
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_BH_PORT_RESET req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ fallthrough;
+ case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
+ /* if it's already enabled, disable */
+ if (hcd->speed == HCD_USB3) {
+ dum_hcd->port_status =
+ (USB_SS_PORT_STAT_POWER |
+ USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_RESET);
+ } else {
+ dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
+ /*
+ * We want to reset device status. All but the
+ * Self powered feature
+ */
+ dum_hcd->dum->devstatus &=
+ (1 << USB_DEVICE_SELF_POWERED);
+ /*
+ * FIXME USB3.0: what is the correct reset signaling
+ * interval? Is it still 50msec as for HS?
+ */
+ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
+ set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
+ default:
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
+ }
+ break;
+ case GetPortErrorCount:
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "GetPortErrorCount req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ /* We'll always return 0 since this is a dummy hub */
+ *(__le32 *) buf = cpu_to_le32(0);
+ break;
+ case SetHubDepth:
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "SetHubDepth req not supported for "
+ "USB 2.0 roothub\n");
+ goto error;
+ }
+ break;
+ default:
+ dev_dbg(dummy_dev(dum_hcd),
+ "hub control req%04x v%04x i%04x l%d\n",
+ typeReq, wValue, wIndex, wLength);
+error:
+ /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+
+ if ((dum_hcd->port_status & PORT_C_MASK) != 0)
+ usb_hcd_poll_rh_status(hcd);
+ return retval;
+}
+
+static int dummy_bus_suspend(struct usb_hcd *hcd)
+{
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+
+ dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
+
+ spin_lock_irq(&dum_hcd->dum->lock);
+ dum_hcd->rh_state = DUMMY_RH_SUSPENDED;
+ set_link_state(dum_hcd);
+ hcd->state = HC_STATE_SUSPENDED;
+ spin_unlock_irq(&dum_hcd->dum->lock);
+ return 0;
+}
+
+static int dummy_bus_resume(struct usb_hcd *hcd)
+{
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+ int rc = 0;
+
+ dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
+
+ spin_lock_irq(&dum_hcd->dum->lock);
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ rc = -ESHUTDOWN;
+ } else {
+ dum_hcd->rh_state = DUMMY_RH_RUNNING;
+ set_link_state(dum_hcd);
+ if (!list_empty(&dum_hcd->urbp_list))
+ mod_timer(&dum_hcd->timer, jiffies);
+ hcd->state = HC_STATE_RUNNING;
+ }
+ spin_unlock_irq(&dum_hcd->dum->lock);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
+{
+ int ep = usb_pipeendpoint(urb->pipe);
+
+ return scnprintf(buf, size,
+ "urb/%p %s ep%d%s%s len %d/%d\n",
+ urb,
+ ({ char *s;
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ s = "ls";
+ break;
+ case USB_SPEED_FULL:
+ s = "fs";
+ break;
+ case USB_SPEED_HIGH:
+ s = "hs";
+ break;
+ case USB_SPEED_SUPER:
+ s = "ss";
+ break;
+ default:
+ s = "?";
+ break;
+ } s; }),
+ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
+		({ char *s;
+		 switch (usb_pipetype(urb->pipe)) {
+		 case PIPE_CONTROL:
+			s = "";
+			break;
+		 case PIPE_BULK:
+			s = "-bulk";
+			break;
+		 case PIPE_INTERRUPT:
+			s = "-int";
+			break;
+		 default:
+			s = "-iso";
+			break;
+ } s; }),
+ urb->actual_length, urb->transfer_buffer_length);
+}
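+
+/*
+ * Example of the line produced above (hypothetical pointer value): a
+ * high-speed bulk IN urb on ep1 that has transferred 512 of 1024 bytes
+ * shows up as
+ *
+ *	urb/ffff8880012ab000 hs ep1in-bulk len 512/1024
+ */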
+
+static ssize_t urbs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+ struct urbp *urbp;
+ size_t size = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ list_for_each_entry(urbp, &dum_hcd->urbp_list, urbp_list) {
+ size_t temp;
+
+ temp = show_urb(buf, PAGE_SIZE - size, urbp->urb);
+ buf += temp;
+ size += temp;
+ }
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+
+ return size;
+}
+static DEVICE_ATTR_RO(urbs);
+
+static int dummy_start_ss(struct dummy_hcd *dum_hcd)
+{
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
+ dum_hcd->rh_state = DUMMY_RH_RUNNING;
+ dum_hcd->stream_en_ep = 0;
+ INIT_LIST_HEAD(&dum_hcd->urbp_list);
+ dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
+ dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
+ dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
+#ifdef CONFIG_USB_OTG
+ dummy_hcd_to_hcd(dum_hcd)->self.otg_port = 1;
+#endif
+ return 0;
+
+	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore;
+	 * only the primary hcd registers the attribute, in dummy_start()
+	 */
+}
+
+static int dummy_start(struct usb_hcd *hcd)
+{
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+
+ /*
+ * HOST side init ... we emulate a root hub that'll only ever
+ * talk to one device (the gadget side). Also appears in sysfs,
+ * just like more familiar pci-based HCDs.
+ */
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return dummy_start_ss(dum_hcd);
+
+ spin_lock_init(&dum_hcd->dum->lock);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
+ dum_hcd->rh_state = DUMMY_RH_RUNNING;
+
+ INIT_LIST_HEAD(&dum_hcd->urbp_list);
+
+ hcd->power_budget = POWER_BUDGET;
+ hcd->state = HC_STATE_RUNNING;
+ hcd->uses_new_polling = 1;
+
+#ifdef CONFIG_USB_OTG
+ hcd->self.otg_port = 1;
+#endif
+
+ /* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
+ return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
+}
+
+static void dummy_stop(struct usb_hcd *hcd)
+{
+ device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
+ dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int dummy_h_get_frame(struct usb_hcd *hcd)
+{
+ return dummy_g_get_frame(NULL);
+}
+
+static int dummy_setup(struct usb_hcd *hcd)
+{
+ struct dummy *dum;
+
+ dum = *((void **)dev_get_platdata(hcd->self.controller));
+ hcd->self.sg_tablesize = ~0;
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ dum->hs_hcd = hcd_to_dummy_hcd(hcd);
+ dum->hs_hcd->dum = dum;
+ /*
+ * Mark the first roothub as being USB 2.0.
+ * The USB 3.0 roothub will be registered later by
+ * dummy_hcd_probe()
+ */
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ } else {
+ dum->ss_hcd = hcd_to_dummy_hcd(hcd);
+ dum->ss_hcd->dum = dum;
+ hcd->speed = HCD_USB3;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER;
+ }
+ return 0;
+}
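+
+/*
+ * Note: dummy_hcd_probe() registers the USB 2.0 hcd first with
+ * usb_create_hcd() and, when super-speed is enabled, attaches the
+ * USB 3.0 one with usb_create_shared_hcd(); the primary/shared test
+ * above is what tells the two roothubs apart.
+ */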
+
+/* Change a group of bulk endpoints to support multiple stream IDs */
+static int dummy_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+ unsigned long flags;
+ int max_stream;
+ int ret_streams = num_streams;
+ unsigned int index;
+ unsigned int i;
+
+ if (!num_eps)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ index = dummy_get_ep_idx(&eps[i]->desc);
+ if ((1 << index) & dum_hcd->stream_en_ep) {
+ ret_streams = -EINVAL;
+ goto out;
+ }
+ max_stream = usb_ss_max_streams(&eps[i]->ss_ep_comp);
+ if (!max_stream) {
+ ret_streams = -EINVAL;
+ goto out;
+ }
+ if (max_stream < ret_streams) {
+			dev_dbg(dummy_dev(dum_hcd),
+				"Ep 0x%x only supports %u stream IDs.\n",
+				eps[i]->desc.bEndpointAddress, max_stream);
+ ret_streams = max_stream;
+ }
+ }
+
+ for (i = 0; i < num_eps; i++) {
+ index = dummy_get_ep_idx(&eps[i]->desc);
+ dum_hcd->stream_en_ep |= 1 << index;
+ set_max_streams_for_pipe(dum_hcd,
+ usb_endpoint_num(&eps[i]->desc), ret_streams);
+ }
+out:
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+ return ret_streams;
+}
+
+/* Reverts a group of bulk endpoints back to not using stream IDs. */
+static int dummy_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags)
+{
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
+ unsigned long flags;
+ int ret;
+ unsigned int index;
+ unsigned int i;
+
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ index = dummy_get_ep_idx(&eps[i]->desc);
+ if (!((1 << index) & dum_hcd->stream_en_ep)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < num_eps; i++) {
+ index = dummy_get_ep_idx(&eps[i]->desc);
+ dum_hcd->stream_en_ep &= ~(1 << index);
+ set_max_streams_for_pipe(dum_hcd,
+ usb_endpoint_num(&eps[i]->desc), 0);
+ }
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+ return ret;
+}
+
+static struct hc_driver dummy_hcd = {
+ .description = (char *) driver_name,
+ .product_desc = "Dummy host controller",
+ .hcd_priv_size = sizeof(struct dummy_hcd),
+
+ .reset = dummy_setup,
+ .start = dummy_start,
+ .stop = dummy_stop,
+
+ .urb_enqueue = dummy_urb_enqueue,
+ .urb_dequeue = dummy_urb_dequeue,
+
+ .get_frame_number = dummy_h_get_frame,
+
+ .hub_status_data = dummy_hub_status,
+ .hub_control = dummy_hub_control,
+ .bus_suspend = dummy_bus_suspend,
+ .bus_resume = dummy_bus_resume,
+
+ .alloc_streams = dummy_alloc_streams,
+ .free_streams = dummy_free_streams,
+};
+
+static int dummy_hcd_probe(struct platform_device *pdev)
+{
+ struct dummy *dum;
+ struct usb_hcd *hs_hcd;
+ struct usb_hcd *ss_hcd;
+ int retval;
+
+ dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
+ dum = *((void **)dev_get_platdata(&pdev->dev));
+
+ if (mod_data.is_super_speed)
+ dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
+ else if (mod_data.is_high_speed)
+ dummy_hcd.flags = HCD_USB2;
+ else
+ dummy_hcd.flags = HCD_USB11;
+ hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
+ if (!hs_hcd)
+ return -ENOMEM;
+ hs_hcd->has_tt = 1;
+
+ retval = usb_add_hcd(hs_hcd, 0, 0);
+ if (retval)
+ goto put_usb2_hcd;
+
+ if (mod_data.is_super_speed) {
+ ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
+ dev_name(&pdev->dev), hs_hcd);
+ if (!ss_hcd) {
+ retval = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
+
+ retval = usb_add_hcd(ss_hcd, 0, 0);
+ if (retval)
+ goto put_usb3_hcd;
+ }
+ return 0;
+
+put_usb3_hcd:
+ usb_put_hcd(ss_hcd);
+dealloc_usb2_hcd:
+ usb_remove_hcd(hs_hcd);
+put_usb2_hcd:
+ usb_put_hcd(hs_hcd);
+ dum->hs_hcd = dum->ss_hcd = NULL;
+ return retval;
+}
+
+static int dummy_hcd_remove(struct platform_device *pdev)
+{
+ struct dummy *dum;
+
+ dum = hcd_to_dummy_hcd(platform_get_drvdata(pdev))->dum;
+
+ if (dum->ss_hcd) {
+ usb_remove_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
+ usb_put_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
+ }
+
+ usb_remove_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
+ usb_put_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
+
+ dum->hs_hcd = NULL;
+ dum->ss_hcd = NULL;
+
+ return 0;
+}
+
+static int dummy_hcd_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct usb_hcd *hcd;
+ struct dummy_hcd *dum_hcd;
+ int rc = 0;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ hcd = platform_get_drvdata(pdev);
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+ if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+ dev_warn(&pdev->dev, "Root hub isn't suspended!\n");
+ rc = -EBUSY;
+ } else
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ return rc;
+}
+
+static int dummy_hcd_resume(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ hcd = platform_get_drvdata(pdev);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
+ return 0;
+}
+
+static struct platform_driver dummy_hcd_driver = {
+ .probe = dummy_hcd_probe,
+ .remove = dummy_hcd_remove,
+ .suspend = dummy_hcd_suspend,
+ .resume = dummy_hcd_resume,
+ .driver = {
+ .name = driver_name,
+ },
+};
+
+/*-------------------------------------------------------------------------*/
+#define MAX_NUM_UDC 32
+static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
+static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
+
+static int __init dummy_hcd_init(void)
+{
+ int retval = -ENOMEM;
+ int i;
+ struct dummy *dum[MAX_NUM_UDC] = {};
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ if (!mod_data.is_high_speed && mod_data.is_super_speed)
+ return -EINVAL;
+
+ if (mod_data.num < 1 || mod_data.num > MAX_NUM_UDC) {
+ pr_err("Number of emulated UDC must be in range of 1...%d\n",
+ MAX_NUM_UDC);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mod_data.num; i++) {
+ the_hcd_pdev[i] = platform_device_alloc(driver_name, i);
+ if (!the_hcd_pdev[i]) {
+ i--;
+ while (i >= 0)
+ platform_device_put(the_hcd_pdev[i--]);
+ return retval;
+ }
+ }
+ for (i = 0; i < mod_data.num; i++) {
+ the_udc_pdev[i] = platform_device_alloc(gadget_name, i);
+ if (!the_udc_pdev[i]) {
+ i--;
+ while (i >= 0)
+ platform_device_put(the_udc_pdev[i--]);
+ goto err_alloc_udc;
+ }
+ }
+ for (i = 0; i < mod_data.num; i++) {
+ dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL);
+ if (!dum[i]) {
+ retval = -ENOMEM;
+ goto err_add_pdata;
+ }
+ retval = platform_device_add_data(the_hcd_pdev[i], &dum[i],
+ sizeof(void *));
+ if (retval)
+ goto err_add_pdata;
+ retval = platform_device_add_data(the_udc_pdev[i], &dum[i],
+ sizeof(void *));
+ if (retval)
+ goto err_add_pdata;
+ }
+
+ retval = platform_driver_register(&dummy_hcd_driver);
+ if (retval < 0)
+ goto err_add_pdata;
+ retval = platform_driver_register(&dummy_udc_driver);
+ if (retval < 0)
+ goto err_register_udc_driver;
+
+ for (i = 0; i < mod_data.num; i++) {
+ retval = platform_device_add(the_hcd_pdev[i]);
+ if (retval < 0) {
+ i--;
+ while (i >= 0)
+ platform_device_del(the_hcd_pdev[i--]);
+ goto err_add_hcd;
+ }
+ }
+ for (i = 0; i < mod_data.num; i++) {
+ if (!dum[i]->hs_hcd ||
+ (!dum[i]->ss_hcd && mod_data.is_super_speed)) {
+ /*
+ * The hcd was added successfully but its probe
+ * function failed for some reason.
+ */
+ retval = -EINVAL;
+ goto err_add_udc;
+ }
+ }
+
+ for (i = 0; i < mod_data.num; i++) {
+ retval = platform_device_add(the_udc_pdev[i]);
+ if (retval < 0) {
+ i--;
+ while (i >= 0)
+ platform_device_del(the_udc_pdev[i--]);
+ goto err_add_udc;
+ }
+ }
+
+ for (i = 0; i < mod_data.num; i++) {
+ if (!platform_get_drvdata(the_udc_pdev[i])) {
+ /*
+ * The udc was added successfully but its probe
+ * function failed for some reason.
+ */
+ retval = -EINVAL;
+ goto err_probe_udc;
+ }
+ }
+ return retval;
+
+err_probe_udc:
+ for (i = 0; i < mod_data.num; i++)
+ platform_device_del(the_udc_pdev[i]);
+err_add_udc:
+ for (i = 0; i < mod_data.num; i++)
+ platform_device_del(the_hcd_pdev[i]);
+err_add_hcd:
+ platform_driver_unregister(&dummy_udc_driver);
+err_register_udc_driver:
+ platform_driver_unregister(&dummy_hcd_driver);
+err_add_pdata:
+ for (i = 0; i < mod_data.num; i++)
+ kfree(dum[i]);
+ for (i = 0; i < mod_data.num; i++)
+ platform_device_put(the_udc_pdev[i]);
+err_alloc_udc:
+ for (i = 0; i < mod_data.num; i++)
+ platform_device_put(the_hcd_pdev[i]);
+ return retval;
+}
+module_init(dummy_hcd_init);
+
+static void __exit dummy_hcd_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < mod_data.num; i++) {
+ struct dummy *dum;
+
+ dum = *((void **)dev_get_platdata(&the_udc_pdev[i]->dev));
+
+ platform_device_unregister(the_udc_pdev[i]);
+ platform_device_unregister(the_hcd_pdev[i]);
+ kfree(dum);
+ }
+ platform_driver_unregister(&dummy_udc_driver);
+ platform_driver_unregister(&dummy_hcd_driver);
+}
+module_exit(dummy_hcd_cleanup);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
new file mode 100644
index 000000000..3350b7776
--- /dev/null
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -0,0 +1,1239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FOTG210 UDC driver; only bulk transfers are supported so far.
+ *
+ * Copyright (C) 2013 Faraday Technology Corporation
+ *
+ * Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "fotg210.h"
+
+#define DRIVER_DESC "FOTG210 USB Device Controller Driver"
+#define DRIVER_VERSION "30-April-2013"
+
+static const char udc_name[] = "fotg210_udc";
+static const char * const fotg210_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4"};
+
+static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
+{
+ u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
+
+ if (ep->dir_in)
+ value |= DMISGR1_MF_IN_INT(ep->epnum - 1);
+ else
+ value |= DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
+ iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
+}
+
+static void fotg210_enable_fifo_int(struct fotg210_ep *ep)
+{
+ u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
+
+ if (ep->dir_in)
+ value &= ~DMISGR1_MF_IN_INT(ep->epnum - 1);
+ else
+ value &= ~DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
+ iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
+}
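+
+/*
+ * Mask-register encoding recap (see fotg210.h): for ep1 (FIFO 0) the
+ * IN interrupt is controlled by bit 16, DMISGR1_MF_IN_INT(0), and the
+ * OUT/short-packet pair by bits 1:0, DMISGR1_MF_OUTSPK_INT(0); a set
+ * bit masks the interrupt, a cleared bit enables it.
+ */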
+
+static void fotg210_set_cxdone(struct fotg210_udc *fotg210)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
+
+ value |= DCFESR_CX_DONE;
+ iowrite32(value, fotg210->reg + FOTG210_DCFESR);
+}
+
+static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req,
+ int status)
+{
+ list_del_init(&req->queue);
+
+ /* don't modify queue heads during completion callback */
+ if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
+ req->req.status = -ESHUTDOWN;
+ else
+ req->req.status = status;
+
+ spin_unlock(&ep->fotg210->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->fotg210->lock);
+
+ if (ep->epnum) {
+ if (list_empty(&ep->queue))
+ fotg210_disable_fifo_int(ep);
+ } else {
+ fotg210_set_cxdone(ep->fotg210);
+ }
+}
+
+static void fotg210_fifo_ep_mapping(struct fotg210_ep *ep, u32 epnum,
+ u32 dir_in)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 val;
+
+	/* The hardware requires a two-way mapping: first assign a FIFO
+	 * number to the ep in EPMAP, then assign the ep number to that
+	 * FIFO in FIFOMAP; both registers must agree.
+	 */
+
+ /* map a fifo to an ep */
+ val = ioread32(fotg210->reg + FOTG210_EPMAP);
+ val &= ~EPMAP_FIFONOMSK(epnum, dir_in);
+ val |= EPMAP_FIFONO(epnum, dir_in);
+ iowrite32(val, fotg210->reg + FOTG210_EPMAP);
+
+ /* map the ep to the fifo */
+ val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
+ val &= ~FIFOMAP_EPNOMSK(epnum);
+ val |= FIFOMAP_EPNO(epnum);
+ iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
+
+ /* enable fifo */
+ val = ioread32(fotg210->reg + FOTG210_FIFOCF);
+ val |= FIFOCF_FIFO_EN(epnum - 1);
+ iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
+}
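+
+/*
+ * Worked example, assuming the encodings in fotg210.h: for ep2 IN,
+ * EPMAP_FIFONO(2, 1) = 0x100 puts FIFO number 1 into EPMAP bits 9:8,
+ * FIFOMAP_EPNO(2) = 0x200 puts endpoint number 2 into FIFOMAP bits
+ * 11:8, and FIFOCF_FIFO_EN(1) = BIT(13) enables that FIFO.
+ */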
+
+static void fotg210_set_fifo_dir(struct fotg210_ep *ep, u32 epnum, u32 dir_in)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 val;
+
+ val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
+ val |= (dir_in ? FIFOMAP_DIRIN(epnum - 1) : FIFOMAP_DIROUT(epnum - 1));
+ iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
+}
+
+static void fotg210_set_tfrtype(struct fotg210_ep *ep, u32 epnum, u32 type)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 val;
+
+ val = ioread32(fotg210->reg + FOTG210_FIFOCF);
+ val |= FIFOCF_TYPE(type, epnum - 1);
+ iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
+}
+
+static void fotg210_set_mps(struct fotg210_ep *ep, u32 epnum, u32 mps,
+ u32 dir_in)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 val;
+ u32 offset = dir_in ? FOTG210_INEPMPSR(epnum) :
+ FOTG210_OUTEPMPSR(epnum);
+
+ val = ioread32(fotg210->reg + offset);
+ val |= INOUTEPMPSR_MPS(mps);
+ iowrite32(val, fotg210->reg + offset);
+}
+
+static int fotg210_config_ep(struct fotg210_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+
+ fotg210_set_fifo_dir(ep, ep->epnum, ep->dir_in);
+ fotg210_set_tfrtype(ep, ep->epnum, ep->type);
+ fotg210_set_mps(ep, ep->epnum, ep->ep.maxpacket, ep->dir_in);
+ fotg210_fifo_ep_mapping(ep, ep->epnum, ep->dir_in);
+
+ fotg210->ep[ep->epnum] = ep;
+
+ return 0;
+}
+
+static int fotg210_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fotg210_ep *ep;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+
+ ep->desc = desc;
+ ep->epnum = usb_endpoint_num(desc);
+ ep->type = usb_endpoint_type(desc);
+ ep->dir_in = usb_endpoint_dir_in(desc);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
+
+ return fotg210_config_ep(ep, desc);
+}
+
+static void fotg210_reset_tseq(struct fotg210_udc *fotg210, u8 epnum)
+{
+ struct fotg210_ep *ep = fotg210->ep[epnum];
+ u32 value;
+ void __iomem *reg;
+
+ reg = (ep->dir_in) ?
+ fotg210->reg + FOTG210_INEPMPSR(epnum) :
+ fotg210->reg + FOTG210_OUTEPMPSR(epnum);
+
+	/* Note: the controller never clears INOUTEPMPSR_RESET_TSEQ by
+	 * itself, so the driver must set the bit and then explicitly
+	 * clear it again.
+	 */
+
+ value = ioread32(reg);
+ value |= INOUTEPMPSR_RESET_TSEQ;
+ iowrite32(value, reg);
+
+ value = ioread32(reg);
+ value &= ~INOUTEPMPSR_RESET_TSEQ;
+ iowrite32(value, reg);
+}
+
+static int fotg210_ep_release(struct fotg210_ep *ep)
+{
+	if (!ep->epnum)
+		return 0;
+
+	/* reset the transfer sequence before the ep number is cleared */
+	fotg210_reset_tseq(ep->fotg210, ep->epnum);
+
+	ep->epnum = 0;
+	ep->stall = 0;
+	ep->wedged = 0;
+
+	return 0;
+}
+
+static int fotg210_ep_disable(struct usb_ep *_ep)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_request *req;
+ unsigned long flags;
+
+ BUG_ON(!_ep);
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct fotg210_request, queue);
+ spin_lock_irqsave(&ep->fotg210->lock, flags);
+ fotg210_done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+ }
+
+ return fotg210_ep_release(ep);
+}
+
+static struct usb_request *fotg210_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct fotg210_request *req;
+
+ req = kzalloc(sizeof(struct fotg210_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void fotg210_ep_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct fotg210_request *req;
+
+ req = container_of(_req, struct fotg210_request, req);
+ kfree(req);
+}
+
+static void fotg210_enable_dma(struct fotg210_ep *ep,
+ dma_addr_t d, u32 len)
+{
+ u32 value;
+ struct fotg210_udc *fotg210 = ep->fotg210;
+
+ /* set transfer length and direction */
+ value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
+ value &= ~(DMACPSR1_DMA_LEN(0xFFFF) | DMACPSR1_DMA_TYPE(1));
+ value |= DMACPSR1_DMA_LEN(len) | DMACPSR1_DMA_TYPE(ep->dir_in);
+ iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
+
+ /* set device DMA target FIFO number */
+ value = ioread32(fotg210->reg + FOTG210_DMATFNR);
+ if (ep->epnum)
+ value |= DMATFNR_ACC_FN(ep->epnum - 1);
+ else
+ value |= DMATFNR_ACC_CXF;
+ iowrite32(value, fotg210->reg + FOTG210_DMATFNR);
+
+ /* set DMA memory address */
+ iowrite32(d, fotg210->reg + FOTG210_DMACPSR2);
+
+ /* enable MDMA_EROR and MDMA_CMPLT interrupt */
+ value = ioread32(fotg210->reg + FOTG210_DMISGR2);
+ value &= ~(DMISGR2_MDMA_CMPLT | DMISGR2_MDMA_ERROR);
+ iowrite32(value, fotg210->reg + FOTG210_DMISGR2);
+
+ /* start DMA */
+ value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
+ value |= DMACPSR1_DMA_START;
+ iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
+}
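+
+/*
+ * DMACPSR1 layout recap (see fotg210.h): a 64-byte IN transfer places
+ * DMACPSR1_DMA_LEN(64) = 64 << 8 into bits 23:8 and sets
+ * DMACPSR1_DMA_TYPE(1) = BIT(1); DMACPSR1_DMA_START (bit 0) then kicks
+ * off the transfer.
+ */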
+
+static void fotg210_disable_dma(struct fotg210_ep *ep)
+{
+ iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR);
+}
+
+static void fotg210_wait_dma_done(struct fotg210_ep *ep)
+{
+ u32 value;
+
+ do {
+ value = ioread32(ep->fotg210->reg + FOTG210_DISGR2);
+ if ((value & DISGR2_USBRST_INT) ||
+ (value & DISGR2_DMA_ERROR))
+ goto dma_reset;
+ } while (!(value & DISGR2_DMA_CMPLT));
+
+ value &= ~DISGR2_DMA_CMPLT;
+ iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2);
+ return;
+
+dma_reset:
+ value = ioread32(ep->fotg210->reg + FOTG210_DMACPSR1);
+ value |= DMACPSR1_DMA_ABORT;
+ iowrite32(value, ep->fotg210->reg + FOTG210_DMACPSR1);
+
+ /* reset fifo */
+ if (ep->epnum) {
+ value = ioread32(ep->fotg210->reg +
+ FOTG210_FIBCR(ep->epnum - 1));
+ value |= FIBCR_FFRST;
+ iowrite32(value, ep->fotg210->reg +
+ FOTG210_FIBCR(ep->epnum - 1));
+ } else {
+ value = ioread32(ep->fotg210->reg + FOTG210_DCFESR);
+ value |= DCFESR_CX_CLR;
+ iowrite32(value, ep->fotg210->reg + FOTG210_DCFESR);
+ }
+}
+
+static void fotg210_start_dma(struct fotg210_ep *ep,
+ struct fotg210_request *req)
+{
+ struct device *dev = &ep->fotg210->gadget.dev;
+ dma_addr_t d;
+ u8 *buffer;
+ u32 length;
+
+ if (ep->epnum) {
+ if (ep->dir_in) {
+ buffer = req->req.buf;
+ length = req->req.length;
+ } else {
+ buffer = req->req.buf + req->req.actual;
+ length = ioread32(ep->fotg210->reg +
+ FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
+ if (length > req->req.length - req->req.actual)
+ length = req->req.length - req->req.actual;
+ }
+ } else {
+ buffer = req->req.buf + req->req.actual;
+ if (req->req.length - req->req.actual > ep->ep.maxpacket)
+ length = ep->ep.maxpacket;
+ else
+ length = req->req.length - req->req.actual;
+ }
+
+ d = dma_map_single(dev, buffer, length,
+ ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, d)) {
+ pr_err("dma_mapping_error\n");
+ return;
+ }
+
+ fotg210_enable_dma(ep, d, length);
+
+ /* check if dma is done */
+ fotg210_wait_dma_done(ep);
+
+ fotg210_disable_dma(ep);
+
+ /* update actual transfer length */
+ req->req.actual += length;
+
+	dma_unmap_single(dev, d, length,
+			 ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+}
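+
+/*
+ * Note: the DMA path above is fully synchronous: the buffer is mapped,
+ * the transfer started, completion busy-waited in
+ * fotg210_wait_dma_done(), and the buffer unmapped again before the
+ * function returns, so no completion interrupt is needed here.
+ */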
+
+static void fotg210_ep0_queue(struct fotg210_ep *ep,
+ struct fotg210_request *req)
+{
+ if (!req->req.length) {
+ fotg210_done(ep, req, 0);
+ return;
+ }
+ if (ep->dir_in) { /* if IN */
+ fotg210_start_dma(ep, req);
+ if (req->req.length == req->req.actual)
+ fotg210_done(ep, req, 0);
+ } else { /* OUT */
+ u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
+
+ value &= ~DMISGR0_MCX_OUT_INT;
+ iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
+ }
+}
+
+static int fotg210_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_request *req;
+ unsigned long flags;
+ int request = 0;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+ req = container_of(_req, struct fotg210_request, req);
+
+ if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&ep->fotg210->lock, flags);
+
+ if (list_empty(&ep->queue))
+ request = 1;
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+
+ if (!ep->epnum) /* ep0 */
+ fotg210_ep0_queue(ep, req);
+ else if (request && !ep->stall)
+ fotg210_enable_fifo_int(ep);
+
+ spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+
+ return 0;
+}
+
+static int fotg210_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+ req = container_of(_req, struct fotg210_request, req);
+
+ spin_lock_irqsave(&ep->fotg210->lock, flags);
+ if (!list_empty(&ep->queue))
+ fotg210_done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+
+ return 0;
+}
+
+static void fotg210_set_epnstall(struct fotg210_ep *ep)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 value;
+ void __iomem *reg;
+
+ /* check if IN FIFO is empty before stall */
+ if (ep->dir_in) {
+ do {
+ value = ioread32(fotg210->reg + FOTG210_DCFESR);
+ } while (!(value & DCFESR_FIFO_EMPTY(ep->epnum - 1)));
+ }
+
+ reg = (ep->dir_in) ?
+ fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
+ fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
+ value = ioread32(reg);
+ value |= INOUTEPMPSR_STL_EP;
+ iowrite32(value, reg);
+}
+
+static void fotg210_clear_epnstall(struct fotg210_ep *ep)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 value;
+ void __iomem *reg;
+
+ reg = (ep->dir_in) ?
+ fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
+ fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
+ value = ioread32(reg);
+ value &= ~INOUTEPMPSR_STL_EP;
+ iowrite32(value, reg);
+}
+
+static int fotg210_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_udc *fotg210;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+
+ fotg210 = ep->fotg210;
+
+ spin_lock_irqsave(&ep->fotg210->lock, flags);
+
+ if (value) {
+ fotg210_set_epnstall(ep);
+ ep->stall = 1;
+ if (wedge)
+ ep->wedged = 1;
+ } else {
+ fotg210_reset_tseq(fotg210, ep->epnum);
+ fotg210_clear_epnstall(ep);
+ ep->stall = 0;
+ ep->wedged = 0;
+ if (!list_empty(&ep->queue))
+ fotg210_enable_fifo_int(ep);
+ }
+
+ spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+ return 0;
+}
+
+static int fotg210_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ return fotg210_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int fotg210_ep_set_wedge(struct usb_ep *_ep)
+{
+ return fotg210_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static void fotg210_ep_fifo_flush(struct usb_ep *_ep)
+{
+}
+
+static const struct usb_ep_ops fotg210_ep_ops = {
+ .enable = fotg210_ep_enable,
+ .disable = fotg210_ep_disable,
+
+ .alloc_request = fotg210_ep_alloc_request,
+ .free_request = fotg210_ep_free_request,
+
+ .queue = fotg210_ep_queue,
+ .dequeue = fotg210_ep_dequeue,
+
+ .set_halt = fotg210_ep_set_halt,
+ .fifo_flush = fotg210_ep_fifo_flush,
+ .set_wedge = fotg210_ep_set_wedge,
+};
+
+static void fotg210_clear_tx0byte(struct fotg210_udc *fotg210)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_TX0BYTE);
+
+ value &= ~(TX0BYTE_EP1 | TX0BYTE_EP2 | TX0BYTE_EP3
+ | TX0BYTE_EP4);
+ iowrite32(value, fotg210->reg + FOTG210_TX0BYTE);
+}
+
+static void fotg210_clear_rx0byte(struct fotg210_udc *fotg210)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_RX0BYTE);
+
+ value &= ~(RX0BYTE_EP1 | RX0BYTE_EP2 | RX0BYTE_EP3
+ | RX0BYTE_EP4);
+ iowrite32(value, fotg210->reg + FOTG210_RX0BYTE);
+}
+
+/* read 8-byte setup packet only */
+static void fotg210_rdsetupp(struct fotg210_udc *fotg210,
+ u8 *buffer)
+{
+ int i = 0;
+ u8 *tmp = buffer;
+ u32 data;
+ u32 length = 8;
+
+ iowrite32(DMATFNR_ACC_CXF, fotg210->reg + FOTG210_DMATFNR);
+
+ for (i = (length >> 2); i > 0; i--) {
+ data = ioread32(fotg210->reg + FOTG210_CXPORT);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ *(tmp + 3) = (data >> 24) & 0xFF;
+ tmp = tmp + 4;
+ }
+
+ switch (length % 4) {
+ case 1:
+ data = ioread32(fotg210->reg + FOTG210_CXPORT);
+ *tmp = data & 0xFF;
+ break;
+ case 2:
+ data = ioread32(fotg210->reg + FOTG210_CXPORT);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ break;
+ case 3:
+ data = ioread32(fotg210->reg + FOTG210_CXPORT);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ break;
+ default:
+ break;
+ }
+
+ iowrite32(DMATFNR_DISDMA, fotg210->reg + FOTG210_DMATFNR);
+}
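+
+/*
+ * The CX port delivers the setup packet as little-endian 32-bit words;
+ * e.g. a word that reads back as 0x01000680 unpacks to the bytes
+ * 0x80 0x06 0x00 0x01, the start of a standard GET_DESCRIPTOR request
+ * (bmRequestType 0x80, bRequest 0x06).
+ */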
+
+static void fotg210_set_configuration(struct fotg210_udc *fotg210)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_DAR);
+
+ value |= DAR_AFT_CONF;
+ iowrite32(value, fotg210->reg + FOTG210_DAR);
+}
+
+static void fotg210_set_dev_addr(struct fotg210_udc *fotg210, u32 addr)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_DAR);
+
+ value |= (addr & 0x7F);
+ iowrite32(value, fotg210->reg + FOTG210_DAR);
+}
+
+static void fotg210_set_cxstall(struct fotg210_udc *fotg210)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
+
+ value |= DCFESR_CX_STL;
+ iowrite32(value, fotg210->reg + FOTG210_DCFESR);
+}
+
+static void fotg210_request_error(struct fotg210_udc *fotg210)
+{
+ fotg210_set_cxstall(fotg210);
+ pr_err("request error!!\n");
+}
+
+static void fotg210_set_address(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
+ fotg210_request_error(fotg210);
+ } else {
+ fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
+ fotg210_set_cxdone(fotg210);
+ }
+}
+
+static void fotg210_set_feature(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fotg210_set_cxdone(fotg210);
+ break;
+ case USB_RECIP_INTERFACE:
+ fotg210_set_cxdone(fotg210);
+ break;
+ case USB_RECIP_ENDPOINT: {
+		u8 epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+
+ if (epnum)
+ fotg210_set_epnstall(fotg210->ep[epnum]);
+ else
+ fotg210_set_cxstall(fotg210);
+ fotg210_set_cxdone(fotg210);
+ }
+ break;
+ default:
+ fotg210_request_error(fotg210);
+ break;
+ }
+}
+
+static void fotg210_clear_feature(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct fotg210_ep *ep =
+		fotg210->ep[le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK];
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fotg210_set_cxdone(fotg210);
+ break;
+ case USB_RECIP_INTERFACE:
+ fotg210_set_cxdone(fotg210);
+ break;
+ case USB_RECIP_ENDPOINT:
+		if (le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK) {
+ if (ep->wedged) {
+ fotg210_set_cxdone(fotg210);
+ break;
+ }
+ if (ep->stall)
+ fotg210_set_halt_and_wedge(&ep->ep, 0, 0);
+ }
+ fotg210_set_cxdone(fotg210);
+ break;
+ default:
+ fotg210_request_error(fotg210);
+ break;
+ }
+}
+
+static int fotg210_is_epnstall(struct fotg210_ep *ep)
+{
+ struct fotg210_udc *fotg210 = ep->fotg210;
+ u32 value;
+ void __iomem *reg;
+
+ reg = (ep->dir_in) ?
+ fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
+ fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
+ value = ioread32(reg);
+ return value & INOUTEPMPSR_STL_EP ? 1 : 0;
+}
+
+/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
+static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_udc *fotg210;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+ fotg210 = ep->fotg210;
+
+	if (req->status || req->actual != req->length)
+		dev_warn(&fotg210->gadget.dev,
+			 "EP0 request failed: %d\n", req->status);
+}
+
+static void fotg210_get_status(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+{
+ u8 epnum;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
+ break;
+ case USB_RECIP_INTERFACE:
+ fotg210->ep0_data = cpu_to_le16(0);
+ break;
+ case USB_RECIP_ENDPOINT:
+		epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum)
+ fotg210->ep0_data =
+ cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
+ << USB_ENDPOINT_HALT);
+ else
+ fotg210_request_error(fotg210);
+ break;
+
+ default:
+ fotg210_request_error(fotg210);
+		return;
+ }
+
+ fotg210->ep0_req->buf = &fotg210->ep0_data;
+ fotg210->ep0_req->length = 2;
+
+ spin_unlock(&fotg210->lock);
+ fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
+ spin_lock(&fotg210->lock);
+}
+
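+/*
+ * Decode a setup packet. A return value of 0 means the request was
+ * fully handled here; a non-zero return tells the caller
+ * (fotg210_irq()) to forward the packet to the gadget driver's
+ * ->setup() callback.
+ */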
+static int fotg210_setup_packet(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+{
+ u8 *p = (u8 *)ctrl;
+ u8 ret = 0;
+
+ fotg210_rdsetupp(fotg210, p);
+
+ fotg210->ep[0]->dir_in = ctrl->bRequestType & USB_DIR_IN;
+
+ if (fotg210->gadget.speed == USB_SPEED_UNKNOWN) {
+ u32 value = ioread32(fotg210->reg + FOTG210_DMCR);
+ fotg210->gadget.speed = value & DMCR_HS_EN ?
+ USB_SPEED_HIGH : USB_SPEED_FULL;
+ }
+
+ /* check request */
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ fotg210_get_status(fotg210, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ fotg210_clear_feature(fotg210, ctrl);
+ break;
+ case USB_REQ_SET_FEATURE:
+ fotg210_set_feature(fotg210, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ fotg210_set_address(fotg210, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ fotg210_set_configuration(fotg210);
+ ret = 1;
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ } else {
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void fotg210_ep0out(struct fotg210_udc *fotg210)
+{
+ struct fotg210_ep *ep = fotg210->ep[0];
+
+ if (!list_empty(&ep->queue) && !ep->dir_in) {
+ struct fotg210_request *req;
+
+ req = list_first_entry(&ep->queue,
+ struct fotg210_request, queue);
+
+ if (req->req.length)
+ fotg210_start_dma(ep, req);
+
+ if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+ fotg210_done(ep, req, 0);
+ } else {
+ pr_err("%s : empty queue\n", __func__);
+ }
+}
+
+static void fotg210_ep0in(struct fotg210_udc *fotg210)
+{
+ struct fotg210_ep *ep = fotg210->ep[0];
+
+ if ((!list_empty(&ep->queue)) && (ep->dir_in)) {
+ struct fotg210_request *req;
+
+ req = list_entry(ep->queue.next,
+ struct fotg210_request, queue);
+
+ if (req->req.length)
+ fotg210_start_dma(ep, req);
+
+ if (req->req.actual == req->req.length)
+ fotg210_done(ep, req, 0);
+ } else {
+ fotg210_set_cxdone(fotg210);
+ }
+}
+
+static void fotg210_clear_comabt_int(struct fotg210_udc *fotg210)
+{
+ u32 value = ioread32(fotg210->reg + FOTG210_DISGR0);
+
+ value &= ~DISGR0_CX_COMABT_INT;
+ iowrite32(value, fotg210->reg + FOTG210_DISGR0);
+}
+
+static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
+{
+ struct fotg210_request *req = list_entry(ep->queue.next,
+ struct fotg210_request, queue);
+
+ if (req->req.length)
+ fotg210_start_dma(ep, req);
+ fotg210_done(ep, req, 0);
+}
+
+static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
+{
+ struct fotg210_request *req = list_entry(ep->queue.next,
+ struct fotg210_request, queue);
+ int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
+
+ fotg210_start_dma(ep, req);
+
+ /* Complete the request when it's full or a short packet arrived.
+ * Like other drivers, short_not_ok isn't handled.
+ */
+
+ if (req->req.length == req->req.actual ||
+ (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
+ fotg210_done(ep, req, 0);
+}
+
+static irqreturn_t fotg210_irq(int irq, void *_fotg210)
+{
+ struct fotg210_udc *fotg210 = _fotg210;
+ u32 int_grp = ioread32(fotg210->reg + FOTG210_DIGR);
+ u32 int_msk = ioread32(fotg210->reg + FOTG210_DMIGR);
+
+ int_grp &= ~int_msk;
+
+ spin_lock(&fotg210->lock);
+
+ if (int_grp & DIGR_INT_G2) {
+ void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
+ u32 int_grp2 = ioread32(reg);
+ u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);
+ u32 value;
+
+ int_grp2 &= ~int_msk2;
+
+ if (int_grp2 & DISGR2_USBRST_INT) {
+ usb_gadget_udc_reset(&fotg210->gadget,
+ fotg210->driver);
+ value = ioread32(reg);
+ value &= ~DISGR2_USBRST_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 udc reset\n");
+ }
+ if (int_grp2 & DISGR2_SUSP_INT) {
+ value = ioread32(reg);
+ value &= ~DISGR2_SUSP_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 udc suspend\n");
+ }
+ if (int_grp2 & DISGR2_RESM_INT) {
+ value = ioread32(reg);
+ value &= ~DISGR2_RESM_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 udc resume\n");
+ }
+ if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
+ value = ioread32(reg);
+ value &= ~DISGR2_ISO_SEQ_ERR_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 iso sequence error\n");
+ }
+ if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
+ value = ioread32(reg);
+ value &= ~DISGR2_ISO_SEQ_ABORT_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 iso sequence abort\n");
+ }
+ if (int_grp2 & DISGR2_TX0BYTE_INT) {
+ fotg210_clear_tx0byte(fotg210);
+ value = ioread32(reg);
+ value &= ~DISGR2_TX0BYTE_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 transferred 0 byte\n");
+ }
+ if (int_grp2 & DISGR2_RX0BYTE_INT) {
+ fotg210_clear_rx0byte(fotg210);
+ value = ioread32(reg);
+ value &= ~DISGR2_RX0BYTE_INT;
+ iowrite32(value, reg);
+ pr_info("fotg210 received 0 byte\n");
+ }
+ if (int_grp2 & DISGR2_DMA_ERROR) {
+ value = ioread32(reg);
+ value &= ~DISGR2_DMA_ERROR;
+ iowrite32(value, reg);
+ }
+ }
+
+ if (int_grp & DIGR_INT_G0) {
+ void __iomem *reg = fotg210->reg + FOTG210_DISGR0;
+ u32 int_grp0 = ioread32(reg);
+ u32 int_msk0 = ioread32(fotg210->reg + FOTG210_DMISGR0);
+ struct usb_ctrlrequest ctrl;
+
+ int_grp0 &= ~int_msk0;
+
+ /* the highest priority in this source register */
+ if (int_grp0 & DISGR0_CX_COMABT_INT) {
+ fotg210_clear_comabt_int(fotg210);
+ pr_info("fotg210 CX command abort\n");
+ }
+
+ if (int_grp0 & DISGR0_CX_SETUP_INT) {
+ if (fotg210_setup_packet(fotg210, &ctrl)) {
+ spin_unlock(&fotg210->lock);
+ if (fotg210->driver->setup(&fotg210->gadget,
+ &ctrl) < 0)
+ fotg210_set_cxstall(fotg210);
+ spin_lock(&fotg210->lock);
+ }
+ }
+ if (int_grp0 & DISGR0_CX_COMEND_INT)
+ pr_info("fotg210 cmd end\n");
+
+ if (int_grp0 & DISGR0_CX_IN_INT)
+ fotg210_ep0in(fotg210);
+
+ if (int_grp0 & DISGR0_CX_OUT_INT)
+ fotg210_ep0out(fotg210);
+
+ if (int_grp0 & DISGR0_CX_COMFAIL_INT) {
+ fotg210_set_cxstall(fotg210);
+ pr_info("fotg210 ep0 fail\n");
+ }
+ }
+
+ if (int_grp & DIGR_INT_G1) {
+ void __iomem *reg = fotg210->reg + FOTG210_DISGR1;
+ u32 int_grp1 = ioread32(reg);
+ u32 int_msk1 = ioread32(fotg210->reg + FOTG210_DMISGR1);
+ int fifo;
+
+ int_grp1 &= ~int_msk1;
+
+ for (fifo = 0; fifo < FOTG210_MAX_FIFO_NUM; fifo++) {
+ if (int_grp1 & DISGR1_IN_INT(fifo))
+ fotg210_in_fifo_handler(fotg210->ep[fifo + 1]);
+
+ if ((int_grp1 & DISGR1_OUT_INT(fifo)) ||
+ (int_grp1 & DISGR1_SPK_INT(fifo)))
+ fotg210_out_fifo_handler(fotg210->ep[fifo + 1]);
+ }
+ }
+
+ spin_unlock(&fotg210->lock);
+
+ return IRQ_HANDLED;
+}
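+
+/*
+ * Interrupt fan-out recap: DIGR groups the sources into G2 (bus events
+ * such as reset/suspend/resume, handled first above), G0 (control
+ * endpoint traffic) and G1 (per-FIFO IN/OUT/short-packet events); each
+ * group has a matching DMISGRx mask register.
+ */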
+
+static void fotg210_disable_unplug(struct fotg210_udc *fotg210)
+{
+ u32 reg = ioread32(fotg210->reg + FOTG210_PHYTMSR);
+
+ reg &= ~PHYTMSR_UNPLUG;
+ iowrite32(reg, fotg210->reg + FOTG210_PHYTMSR);
+}
+
+static int fotg210_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
+ u32 value;
+
+ /* hook up the driver */
+ fotg210->driver = driver;
+
+ /* enable device global interrupt */
+ value = ioread32(fotg210->reg + FOTG210_DMCR);
+ value |= DMCR_GLINT_EN;
+ iowrite32(value, fotg210->reg + FOTG210_DMCR);
+
+ return 0;
+}
+
+static void fotg210_init(struct fotg210_udc *fotg210)
+{
+ u32 value;
+
+ /* disable global interrupt and set int polarity to active high */
+ iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
+ fotg210->reg + FOTG210_GMIR);
+
+ /* disable device global interrupt */
+ value = ioread32(fotg210->reg + FOTG210_DMCR);
+ value &= ~DMCR_GLINT_EN;
+ iowrite32(value, fotg210->reg + FOTG210_DMCR);
+
+ /* enable only grp2 irqs we handle */
+ iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
+ | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
+ | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
+ fotg210->reg + FOTG210_DMISGR2);
+
+ /* disable all fifo interrupt */
+ iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
+
+ /* disable cmd end */
+ value = ioread32(fotg210->reg + FOTG210_DMISGR0);
+ value |= DMISGR0_MCX_COMEND;
+ iowrite32(value, fotg210->reg + FOTG210_DMISGR0);
+}
+
+static int fotg210_udc_stop(struct usb_gadget *g)
+{
+ struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ fotg210_init(fotg210);
+ fotg210->driver = NULL;
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops fotg210_gadget_ops = {
+ .udc_start = fotg210_udc_start,
+ .udc_stop = fotg210_udc_stop,
+};
+
+static int fotg210_udc_remove(struct platform_device *pdev)
+{
+ struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+ int i;
+
+ usb_del_gadget_udc(&fotg210->gadget);
+ iounmap(fotg210->reg);
+ free_irq(platform_get_irq(pdev, 0), fotg210);
+
+ fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
+ kfree(fotg210);
+
+ return 0;
+}
+
+static int fotg210_udc_probe(struct platform_device *pdev)
+{
+ struct resource *res, *ires;
+ struct fotg210_udc *fotg210 = NULL;
+ struct fotg210_ep *_ep[FOTG210_MAX_NUM_EP];
+ int ret = 0;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("platform_get_resource error.\n");
+ return -ENODEV;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ pr_err("platform_get_resource IORESOURCE_IRQ error.\n");
+ return -ENODEV;
+ }
+
+ ret = -ENOMEM;
+
+ /* initialize udc */
+ fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
+ if (fotg210 == NULL)
+ goto err;
+
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
+ _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
+ if (_ep[i] == NULL)
+ goto err_alloc;
+ fotg210->ep[i] = _ep[i];
+ }
+
+ fotg210->reg = ioremap(res->start, resource_size(res));
+ if (fotg210->reg == NULL) {
+ pr_err("ioremap error.\n");
+ goto err_alloc;
+ }
+
+ spin_lock_init(&fotg210->lock);
+
+ platform_set_drvdata(pdev, fotg210);
+
+ fotg210->gadget.ops = &fotg210_gadget_ops;
+
+ fotg210->gadget.max_speed = USB_SPEED_HIGH;
+ fotg210->gadget.dev.parent = &pdev->dev;
+ fotg210->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ fotg210->gadget.name = udc_name;
+
+ INIT_LIST_HEAD(&fotg210->gadget.ep_list);
+
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
+ struct fotg210_ep *ep = fotg210->ep[i];
+
+ if (i) {
+ INIT_LIST_HEAD(&fotg210->ep[i]->ep.ep_list);
+ list_add_tail(&fotg210->ep[i]->ep.ep_list,
+ &fotg210->gadget.ep_list);
+ }
+ ep->fotg210 = fotg210;
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.name = fotg210_ep_name[i];
+ ep->ep.ops = &fotg210_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ }
+ usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
+ fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
+ INIT_LIST_HEAD(&fotg210->gadget.ep0->ep_list);
+
+ fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
+ GFP_KERNEL);
+ if (fotg210->ep0_req == NULL)
+ goto err_map;
+
+ fotg210->ep0_req->complete = fotg210_ep0_complete;
+
+ fotg210_init(fotg210);
+
+ fotg210_disable_unplug(fotg210);
+
+ ret = request_irq(ires->start, fotg210_irq, IRQF_SHARED,
+ udc_name, fotg210);
+ if (ret < 0) {
+ pr_err("request_irq error (%d)\n", ret);
+ goto err_req;
+ }
+
+ ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
+ if (ret)
+ goto err_add_udc;
+
+ dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+
+ return 0;
+
+err_add_udc:
+ free_irq(ires->start, fotg210);
+
+err_req:
+ fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+
+err_map:
+ iounmap(fotg210->reg);
+
+err_alloc:
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
+ kfree(fotg210);
+
+err:
+ return ret;
+}
+
+static struct platform_driver fotg210_driver = {
+ .driver = {
+ .name = udc_name,
+ },
+ .probe = fotg210_udc_probe,
+ .remove = fotg210_udc_remove,
+};
+
+module_platform_driver(fotg210_driver);
+
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/gadget/udc/fotg210.h b/drivers/usb/gadget/udc/fotg210.h
new file mode 100644
index 000000000..08c329575
--- /dev/null
+++ b/drivers/usb/gadget/udc/fotg210.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Faraday FOTG210 USB OTG controller
+ *
+ * Copyright (C) 2013 Faraday Technology Corporation
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ */
+
+#include <linux/kernel.h>
+
+#define FOTG210_MAX_NUM_EP 5 /* ep0...ep4 */
+#define FOTG210_MAX_FIFO_NUM	4	/* fifo0...fifo3 */
+
+/* Global Mask of HC/OTG/DEV interrupt Register(0xC4) */
+#define FOTG210_GMIR 0xC4
+#define GMIR_INT_POLARITY	0x8	/* active high */
+#define GMIR_MHC_INT 0x4
+#define GMIR_MOTG_INT 0x2
+#define GMIR_MDEV_INT 0x1
+
+/* Device Main Control Register(0x100) */
+#define FOTG210_DMCR 0x100
+#define DMCR_HS_EN (1 << 6)
+#define DMCR_CHIP_EN (1 << 5)
+#define DMCR_SFRST (1 << 4)
+#define DMCR_GOSUSP (1 << 3)
+#define DMCR_GLINT_EN (1 << 2)
+#define DMCR_HALF_SPEED (1 << 1)
+#define DMCR_CAP_RMWAKUP (1 << 0)
+
+/* Device Address Register(0x104) */
+#define FOTG210_DAR 0x104
+#define DAR_AFT_CONF (1 << 7)
+
+/* Device Test Register(0x108) */
+#define FOTG210_DTR 0x108
+#define DTR_TST_CLRFF (1 << 0)
+
+/* PHY Test Mode Selector register(0x114) */
+#define FOTG210_PHYTMSR 0x114
+#define PHYTMSR_TST_PKT (1 << 4)
+#define PHYTMSR_TST_SE0NAK (1 << 3)
+#define PHYTMSR_TST_KSTA (1 << 2)
+#define PHYTMSR_TST_JSTA (1 << 1)
+#define PHYTMSR_UNPLUG (1 << 0)
+
+/* Cx configuration and FIFO Empty Status register(0x120) */
+#define FOTG210_DCFESR 0x120
+#define DCFESR_FIFO_EMPTY(fifo) (1 << 8 << (fifo))
+#define DCFESR_CX_EMP (1 << 5)
+#define DCFESR_CX_CLR (1 << 3)
+#define DCFESR_CX_STL (1 << 2)
+#define DCFESR_TST_PKDONE (1 << 1)
+#define DCFESR_CX_DONE (1 << 0)
+
+/* Device IDLE Counter Register(0x124) */
+#define FOTG210_DICR 0x124
+
+/* Device Mask of Interrupt Group Register (0x130) */
+#define FOTG210_DMIGR 0x130
+#define DMIGR_MINT_G0 (1 << 0)
+
+/* Device Mask of Interrupt Source Group 0(0x134) */
+#define FOTG210_DMISGR0 0x134
+#define DMISGR0_MCX_COMEND (1 << 3)
+#define DMISGR0_MCX_OUT_INT (1 << 2)
+#define DMISGR0_MCX_IN_INT (1 << 1)
+#define DMISGR0_MCX_SETUP_INT (1 << 0)
+
+/* Device Mask of Interrupt Source Group 1 Register(0x138)*/
+#define FOTG210_DMISGR1 0x138
+#define DMISGR1_MF3_IN_INT (1 << 19)
+#define DMISGR1_MF2_IN_INT (1 << 18)
+#define DMISGR1_MF1_IN_INT (1 << 17)
+#define DMISGR1_MF0_IN_INT (1 << 16)
+#define DMISGR1_MF_IN_INT(fifo) (1 << (16 + (fifo)))
+#define DMISGR1_MF3_SPK_INT (1 << 7)
+#define DMISGR1_MF3_OUT_INT (1 << 6)
+#define DMISGR1_MF2_SPK_INT (1 << 5)
+#define DMISGR1_MF2_OUT_INT (1 << 4)
+#define DMISGR1_MF1_SPK_INT (1 << 3)
+#define DMISGR1_MF1_OUT_INT (1 << 2)
+#define DMISGR1_MF0_SPK_INT (1 << 1)
+#define DMISGR1_MF0_OUT_INT (1 << 0)
+#define DMISGR1_MF_OUTSPK_INT(fifo) (0x3 << (fifo) * 2)
+
+/* Device Mask of Interrupt Source Group 2 Register (0x13C) */
+#define FOTG210_DMISGR2 0x13C
+#define DMISGR2_MDMA_ERROR (1 << 8)
+#define DMISGR2_MDMA_CMPLT (1 << 7)
+
+/* Device Interrupt group Register (0x140) */
+#define FOTG210_DIGR 0x140
+#define DIGR_INT_G2 (1 << 2)
+#define DIGR_INT_G1 (1 << 1)
+#define DIGR_INT_G0 (1 << 0)
+
+/* Device Interrupt Source Group 0 Register (0x144) */
+#define FOTG210_DISGR0 0x144
+#define DISGR0_CX_COMABT_INT (1 << 5)
+#define DISGR0_CX_COMFAIL_INT (1 << 4)
+#define DISGR0_CX_COMEND_INT (1 << 3)
+#define DISGR0_CX_OUT_INT (1 << 2)
+#define DISGR0_CX_IN_INT (1 << 1)
+#define DISGR0_CX_SETUP_INT (1 << 0)
+
+/* Device Interrupt Source Group 1 Register (0x148) */
+#define FOTG210_DISGR1 0x148
+#define DISGR1_OUT_INT(fifo) (1 << ((fifo) * 2))
+#define DISGR1_SPK_INT(fifo) (1 << 1 << ((fifo) * 2))
+#define DISGR1_IN_INT(fifo) (1 << 16 << (fifo))
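+/* e.g. for FIFO 2: OUT event is bit 4, short-packet bit 5, IN bit 18 */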
+
+/* Device Interrupt Source Group 2 Register (0x14C) */
+#define FOTG210_DISGR2 0x14C
+#define DISGR2_DMA_ERROR (1 << 8)
+#define DISGR2_DMA_CMPLT (1 << 7)
+#define DISGR2_RX0BYTE_INT (1 << 6)
+#define DISGR2_TX0BYTE_INT (1 << 5)
+#define DISGR2_ISO_SEQ_ABORT_INT (1 << 4)
+#define DISGR2_ISO_SEQ_ERR_INT (1 << 3)
+#define DISGR2_RESM_INT (1 << 2)
+#define DISGR2_SUSP_INT (1 << 1)
+#define DISGR2_USBRST_INT (1 << 0)
+
+/* Device Receive Zero-Length Data Packet Register (0x150)*/
+#define FOTG210_RX0BYTE 0x150
+#define RX0BYTE_EP8 (1 << 7)
+#define RX0BYTE_EP7 (1 << 6)
+#define RX0BYTE_EP6 (1 << 5)
+#define RX0BYTE_EP5 (1 << 4)
+#define RX0BYTE_EP4 (1 << 3)
+#define RX0BYTE_EP3 (1 << 2)
+#define RX0BYTE_EP2 (1 << 1)
+#define RX0BYTE_EP1 (1 << 0)
+
+/* Device Transfer Zero-Length Data Packet Register (0x154)*/
+#define FOTG210_TX0BYTE 0x154
+#define TX0BYTE_EP8 (1 << 7)
+#define TX0BYTE_EP7 (1 << 6)
+#define TX0BYTE_EP6 (1 << 5)
+#define TX0BYTE_EP5 (1 << 4)
+#define TX0BYTE_EP4 (1 << 3)
+#define TX0BYTE_EP3 (1 << 2)
+#define TX0BYTE_EP2 (1 << 1)
+#define TX0BYTE_EP1 (1 << 0)
+
+/* Device IN Endpoint x MaxPacketSize Register(0x160+4*(x-1)) */
+#define FOTG210_INEPMPSR(ep) (0x160 + 4 * ((ep) - 1))
+#define INOUTEPMPSR_MPS(mps) ((mps) & 0x2FF)
+#define INOUTEPMPSR_STL_EP (1 << 11)
+#define INOUTEPMPSR_RESET_TSEQ (1 << 12)
+
+/* Device OUT Endpoint x MaxPacketSize Register(0x180+4*(x-1)) */
+#define FOTG210_OUTEPMPSR(ep) (0x180 + 4 * ((ep) - 1))
+
+/* Device Endpoint 1~4 Map Register (0x1A0) */
+#define FOTG210_EPMAP 0x1A0
+#define EPMAP_FIFONO(ep, dir) \
+ ((((ep) - 1) << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
+#define EPMAP_FIFONOMSK(ep, dir) \
+ ((3 << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
+
+/* Device FIFO Map Register (0x1A8) */
+#define FOTG210_FIFOMAP 0x1A8
+#define FIFOMAP_DIROUT(fifo) (0x0 << 4 << (fifo) * 8)
+#define FIFOMAP_DIRIN(fifo) (0x1 << 4 << (fifo) * 8)
+#define FIFOMAP_BIDIR(fifo) (0x2 << 4 << (fifo) * 8)
+#define FIFOMAP_NA(fifo) (0x3 << 4 << (fifo) * 8)
+#define FIFOMAP_EPNO(ep) ((ep) << ((ep) - 1) * 8)
+#define FIFOMAP_EPNOMSK(ep) (0xF << ((ep) - 1) * 8)
+
+/* Device FIFO Configuration Register (0x1AC) */
+#define FOTG210_FIFOCF 0x1AC
+#define FIFOCF_TYPE(type, fifo) ((type) << (fifo) * 8)
+#define FIFOCF_BLK_SIN(fifo) (0x0 << (fifo) * 8 << 2)
+#define FIFOCF_BLK_DUB(fifo) (0x1 << (fifo) * 8 << 2)
+#define FIFOCF_BLK_TRI(fifo) (0x2 << (fifo) * 8 << 2)
+#define FIFOCF_BLKSZ_512(fifo) (0x0 << (fifo) * 8 << 4)
+#define FIFOCF_BLKSZ_1024(fifo) (0x1 << (fifo) * 8 << 4)
+#define FIFOCF_FIFO_EN(fifo) (0x1 << (fifo) * 8 << 5)
+
+/* Device FIFO n Instruction and Byte Count Register (0x1B0+4*n) */
+#define FOTG210_FIBCR(fifo) (0x1B0 + (fifo) * 4)
+#define FIBCR_BCFX 0x7FF
+#define FIBCR_FFRST (1 << 12)
+
+/* Device DMA Target FIFO Number Register (0x1C0) */
+#define FOTG210_DMATFNR 0x1C0
+#define DMATFNR_ACC_CXF (1 << 4)
+#define DMATFNR_ACC_F3 (1 << 3)
+#define DMATFNR_ACC_F2 (1 << 2)
+#define DMATFNR_ACC_F1 (1 << 1)
+#define DMATFNR_ACC_F0 (1 << 0)
+#define DMATFNR_ACC_FN(fifo) (1 << (fifo))
+#define DMATFNR_DISDMA 0
+
+/* Device DMA Controller Parameter setting 1 Register (0x1C8) */
+#define FOTG210_DMACPSR1 0x1C8
+#define DMACPSR1_DMA_LEN(len) (((len) & 0xFFFF) << 8)
+#define DMACPSR1_DMA_ABORT (1 << 3)
+#define DMACPSR1_DMA_TYPE(dir_in) (((dir_in) ? 1 : 0) << 1)
+#define DMACPSR1_DMA_START (1 << 0)
+
+/* Device DMA Controller Parameter setting 2 Register (0x1CC) */
+#define FOTG210_DMACPSR2 0x1CC
+
+/* Device DMA Controller Parameter setting 3 Register (0x1D0) */
+#define FOTG210_CXPORT 0x1D0
+
+struct fotg210_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct fotg210_ep {
+ struct usb_ep ep;
+ struct fotg210_udc *fotg210;
+
+ struct list_head queue;
+ unsigned stall:1;
+ unsigned wedged:1;
+ unsigned use_dma:1;
+
+ unsigned char epnum;
+ unsigned char type;
+ unsigned char dir_in;
+ unsigned int maxp;
+ const struct usb_endpoint_descriptor *desc;
+};
+
+struct fotg210_udc {
+ spinlock_t lock; /* protect the struct */
+ void __iomem *reg;
+
+ unsigned long irq_trigger;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct fotg210_ep *ep[FOTG210_MAX_NUM_EP];
+
+ struct usb_request *ep0_req; /* for internal request */
+ __le16 ep0_data;
+ u8 ep0_dir; /* 0/0x80 out/in */
+
+ u8 reenum; /* if re-enumeration */
+};
+
+#define gadget_to_fotg210(g) container_of((g), struct fotg210_udc, gadget)
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
new file mode 100644
index 000000000..72b6d74b3
--- /dev/null
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -0,0 +1,2726 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/usb/gadget/udc/fsl_qe_udc.c
+ *
+ * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Xie Xiaobo <X.Xie@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ * Based on bareboard code from Shlomi Gridish.
+ *
+ * Description:
+ * Freescale QE/CPM USB Peripheral Controller Driver
+ * The controller can be found on MPC8360, MPC8272, and similar SoCs.
+ * MPC8360 Rev 1.1 may need a QE microcode update.
+ */
+
+#undef USB_TRACE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/cpm.h>
+#include <asm/dma.h>
+#include <asm/reg.h>
+#include "fsl_qe_udc.h"
+
+#define DRIVER_DESC "Freescale QE/CPM USB Device Controller driver"
+#define DRIVER_AUTHOR "Xie XiaoBo"
+#define DRIVER_VERSION "1.0"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char driver_name[] = "fsl_qe_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+/*ep name is important in gadget, it should obey the convention of ep_match()*/
+static const char *const ep_name[] = {
+ "ep0-control", /* everyone has ep0 */
+ /* 3 configurable endpoints */
+ "ep1",
+ "ep2",
+ "ep3",
+};
+
+static const struct usb_endpoint_descriptor qe_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = USB_MAX_CTRL_PAYLOAD,
+};
+
+/********************************************************************
+ *	Internal helper functions
+ ********************************************************************/
+/*-----------------------------------------------------------------
+ * done() - retire a request; caller blocked irqs
+ *--------------------------------------------------------------*/
+static void done(struct qe_ep *ep, struct qe_req *req, int status)
+{
+ struct qe_udc *udc = ep->udc;
+ unsigned char stopped = ep->stopped;
+
+	/* ep_queue() adds the request to the tail of ep->queue,
+	 * so drop it from that queue here before completing it
+	 */
+ list_del_init(&req->queue);
+
+ /* req.status should be set as -EINPROGRESS in ep_queue() */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (req->mapped) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ } else
+ dma_sync_single_for_cpu(udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ if (status && (status != -ESHUTDOWN))
+ dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&udc->lock);
+
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+
+ spin_lock(&udc->lock);
+
+ ep->stopped = stopped;
+}
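+
+/*
+ * Buffers that this driver mapped itself (req->mapped) are fully
+ * unmapped in done() above; buffers that arrived already mapped by the
+ * gadget layer are only synced back for CPU access.
+ */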
+
+/*-----------------------------------------------------------------
+ * nuke(): delete all requests related to this ep
+ *--------------------------------------------------------------*/
+static void nuke(struct qe_ep *ep, int status)
+{
+	/* Retire any requests still linked to this ep */
+ while (!list_empty(&ep->queue)) {
+ struct qe_req *req = NULL;
+ req = list_entry(ep->queue.next, struct qe_req, queue);
+
+ done(ep, req, status);
+ }
+}
+
+/*---------------------------------------------------------------------------*
+ * USB and Endpoint manipulate process, include parameter and register *
+ *---------------------------------------------------------------------------*/
+/* @value: 1--set stall 0--clear stall */
+static int qe_eprx_stall_change(struct qe_ep *ep, int value)
+{
+ u16 tem_usep;
+ u8 epnum = ep->epnum;
+ struct qe_udc *udc = ep->udc;
+
+ tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
+ tem_usep = tem_usep & ~USB_RHS_MASK;
+ if (value == 1)
+ tem_usep |= USB_RHS_STALL;
+ else if (ep->dir == USB_DIR_IN)
+ tem_usep |= USB_RHS_IGNORE_OUT;
+
+ out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
+ return 0;
+}
+
+static int qe_eptx_stall_change(struct qe_ep *ep, int value)
+{
+ u16 tem_usep;
+ u8 epnum = ep->epnum;
+ struct qe_udc *udc = ep->udc;
+
+ tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
+ tem_usep = tem_usep & ~USB_THS_MASK;
+ if (value == 1)
+ tem_usep |= USB_THS_STALL;
+ else if (ep->dir == USB_DIR_OUT)
+ tem_usep |= USB_THS_IGNORE_IN;
+
+ out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
+
+ return 0;
+}
+
+static int qe_ep0_stall(struct qe_udc *udc)
+{
+ qe_eptx_stall_change(&udc->eps[0], 1);
+ qe_eprx_stall_change(&udc->eps[0], 1);
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ return 0;
+}
+
+static int qe_eprx_nack(struct qe_ep *ep)
+{
+ u8 epnum = ep->epnum;
+ struct qe_udc *udc = ep->udc;
+
+ if (ep->state == EP_STATE_IDLE) {
+ /* Set the ep's nack */
+ clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
+ USB_RHS_MASK, USB_RHS_NACK);
+
+ /* Mask Rx and Busy interrupts */
+ clrbits16(&udc->usb_regs->usb_usbmr,
+ (USB_E_RXB_MASK | USB_E_BSY_MASK));
+
+ ep->state = EP_STATE_NACK;
+ }
+ return 0;
+}
+
+static int qe_eprx_normal(struct qe_ep *ep)
+{
+ struct qe_udc *udc = ep->udc;
+
+ if (ep->state == EP_STATE_NACK) {
+ clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
+ USB_RTHS_MASK, USB_THS_IGNORE_IN);
+
+ /* Unmask RX interrupts */
+ out_be16(&udc->usb_regs->usb_usber,
+ USB_E_BSY_MASK | USB_E_RXB_MASK);
+ setbits16(&udc->usb_regs->usb_usbmr,
+ (USB_E_RXB_MASK | USB_E_BSY_MASK));
+
+ ep->state = EP_STATE_IDLE;
+ ep->has_data = 0;
+ }
+
+ return 0;
+}
+
+static int qe_ep_cmd_stoptx(struct qe_ep *ep)
+{
+ if (ep->udc->soc_type == PORT_CPM)
+ cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
+ CPM_USB_STOP_TX_OPCODE);
+ else
+ qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
+ ep->epnum, 0);
+
+ return 0;
+}
+
+static int qe_ep_cmd_restarttx(struct qe_ep *ep)
+{
+ if (ep->udc->soc_type == PORT_CPM)
+ cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
+ CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
+ else
+ qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
+ ep->epnum, 0);
+
+ return 0;
+}
+
+static int qe_ep_flushtxfifo(struct qe_ep *ep)
+{
+ struct qe_udc *udc = ep->udc;
+ int i;
+
+ i = (int)ep->epnum;
+
+ qe_ep_cmd_stoptx(ep);
+ out_8(&udc->usb_regs->usb_uscom,
+ USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
+ out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
+ out_be32(&udc->ep_param[i]->tstate, 0);
+ out_be16(&udc->ep_param[i]->tbcnt, 0);
+
+ ep->c_txbd = ep->txbase;
+ ep->n_txbd = ep->txbase;
+ qe_ep_cmd_restarttx(ep);
+ return 0;
+}
+
+static int qe_ep_filltxfifo(struct qe_ep *ep)
+{
+ struct qe_udc *udc = ep->udc;
+
+ out_8(&udc->usb_regs->usb_uscom,
+ USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
+ return 0;
+}
+
+static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
+{
+ struct qe_ep *ep;
+ u32 bdring_len;
+ struct qe_bd __iomem *bd;
+ int i;
+
+ ep = &udc->eps[pipe_num];
+
+ if (ep->dir == USB_DIR_OUT)
+ bdring_len = USB_BDRING_LEN_RX;
+ else
+ bdring_len = USB_BDRING_LEN;
+
+ bd = ep->rxbase;
+ for (i = 0; i < (bdring_len - 1); i++) {
+ out_be32((u32 __iomem *)bd, R_E | R_I);
+ bd++;
+ }
+ out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
+
+ bd = ep->txbase;
+ for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
+ out_be32(&bd->buf, 0);
+ out_be32((u32 __iomem *)bd, 0);
+ bd++;
+ }
+ out_be32((u32 __iomem *)bd, T_W);
+
+ return 0;
+}
+
+static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
+{
+ struct qe_ep *ep;
+ u16 tmpusep;
+
+ ep = &udc->eps[pipe_num];
+ tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
+ tmpusep &= ~USB_RTHS_MASK;
+
+ switch (ep->dir) {
+ case USB_DIR_BOTH:
+ qe_ep_flushtxfifo(ep);
+ break;
+ case USB_DIR_OUT:
+ tmpusep |= USB_THS_IGNORE_IN;
+ break;
+ case USB_DIR_IN:
+ qe_ep_flushtxfifo(ep);
+ tmpusep |= USB_RHS_IGNORE_OUT;
+ break;
+ default:
+ break;
+ }
+ out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
+
+ qe_epbds_reset(udc, pipe_num);
+
+ return 0;
+}
+
+static int qe_ep_toggledata01(struct qe_ep *ep)
+{
+ ep->data01 ^= 0x1;
+ return 0;
+}
+
+static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
+{
+ struct qe_ep *ep = &udc->eps[pipe_num];
+ unsigned long tmp_addr = 0;
+ struct usb_ep_para __iomem *epparam;
+ int i;
+ struct qe_bd __iomem *bd;
+ int bdring_len;
+
+ if (ep->dir == USB_DIR_OUT)
+ bdring_len = USB_BDRING_LEN_RX;
+ else
+ bdring_len = USB_BDRING_LEN;
+
+ epparam = udc->ep_param[pipe_num];
+	/* alloc MURAM for the RX+TX BD rings and set the ep parameters */
+ tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
+ USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
+ if (IS_ERR_VALUE(tmp_addr))
+ return -ENOMEM;
+
+ out_be16(&epparam->rbase, (u16)tmp_addr);
+ out_be16(&epparam->tbase, (u16)(tmp_addr +
+ (sizeof(struct qe_bd) * bdring_len)));
+
+ out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
+ out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
+
+ ep->rxbase = cpm_muram_addr(tmp_addr);
+ ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
+ * bdring_len));
+ ep->n_rxbd = ep->rxbase;
+ ep->e_rxbd = ep->rxbase;
+ ep->n_txbd = ep->txbase;
+ ep->c_txbd = ep->txbase;
+ ep->data01 = 0; /* data0 */
+
+ /* Init TX and RX bds */
+ bd = ep->rxbase;
+ for (i = 0; i < bdring_len - 1; i++) {
+ out_be32(&bd->buf, 0);
+ out_be32((u32 __iomem *)bd, 0);
+ bd++;
+ }
+ out_be32(&bd->buf, 0);
+ out_be32((u32 __iomem *)bd, R_W);
+
+ bd = ep->txbase;
+ for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
+ out_be32(&bd->buf, 0);
+ out_be32((u32 __iomem *)bd, 0);
+ bd++;
+ }
+ out_be32(&bd->buf, 0);
+ out_be32((u32 __iomem *)bd, T_W);
+
+ return 0;
+}
+
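+/*
+ * Allocate the software frame tracker and one linear receive buffer,
+ * then point every RX BD at its own slot inside that buffer.  Called
+ * for OUT and control endpoints from qe_ep_init().
+ */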
+static int qe_ep_rxbd_update(struct qe_ep *ep)
+{
+ unsigned int size;
+ int i;
+ unsigned int tmp;
+ struct qe_bd __iomem *bd;
+ unsigned int bdring_len;
+
+ if (ep->rxbase == NULL)
+ return -EINVAL;
+
+ bd = ep->rxbase;
+
+ ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
+ if (!ep->rxframe)
+ return -ENOMEM;
+
+ qe_frame_init(ep->rxframe);
+
+ if (ep->dir == USB_DIR_OUT)
+ bdring_len = USB_BDRING_LEN_RX;
+ else
+ bdring_len = USB_BDRING_LEN;
+
+ size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
+ ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
+ if (!ep->rxbuffer) {
+ kfree(ep->rxframe);
+ return -ENOMEM;
+ }
+
+ ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
+ if (ep->rxbuf_d == DMA_ADDR_INVALID) {
+ ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
+ ep->rxbuffer,
+ size,
+ DMA_FROM_DEVICE);
+ ep->rxbufmap = 1;
+ } else {
+ dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+ ep->rxbuf_d, size,
+ DMA_FROM_DEVICE);
+ ep->rxbufmap = 0;
+ }
+
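+	/*
+	 * Carve rxbuffer into per-BD slots of maxpacket + CRC + 2 bytes.
+	 * The base address is first advanced to the next 4-byte boundary
+	 * (e.g. 0x1006 becomes 0x1008).
+	 */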
+ size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
+ tmp = ep->rxbuf_d;
+ tmp = (u32)(((tmp >> 2) << 2) + 4);
+
+ for (i = 0; i < bdring_len - 1; i++) {
+ out_be32(&bd->buf, tmp);
+ out_be32((u32 __iomem *)bd, (R_E | R_I));
+ tmp = tmp + size;
+ bd++;
+ }
+ out_be32(&bd->buf, tmp);
+ out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
+
+ return 0;
+}
+
+static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
+{
+ struct qe_ep *ep = &udc->eps[pipe_num];
+ struct usb_ep_para __iomem *epparam;
+ u16 usep, logepnum;
+ u16 tmp;
+ u8 rtfcr = 0;
+
+ epparam = udc->ep_param[pipe_num];
+
+ usep = 0;
+ logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ usep |= (logepnum << USB_EPNUM_SHIFT);
+
+ switch (ep->ep.desc->bmAttributes & 0x03) {
+ case USB_ENDPOINT_XFER_BULK:
+ usep |= USB_TRANS_BULK;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ usep |= USB_TRANS_ISO;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ usep |= USB_TRANS_INT;
+ break;
+ default:
+ usep |= USB_TRANS_CTR;
+ break;
+ }
+
+ switch (ep->dir) {
+ case USB_DIR_OUT:
+ usep |= USB_THS_IGNORE_IN;
+ break;
+ case USB_DIR_IN:
+ usep |= USB_RHS_IGNORE_OUT;
+ break;
+ default:
+ break;
+ }
+ out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
+
+ rtfcr = 0x30;
+ out_8(&epparam->rbmr, rtfcr);
+ out_8(&epparam->tbmr, rtfcr);
+
+ tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
+	/* MRBLR must be divisible by 4 */
+ tmp = (u16)(((tmp >> 2) << 2) + 4);
+ out_be16(&epparam->mrblr, tmp);
+
+ return 0;
+}
+
+static int qe_ep_init(struct qe_udc *udc,
+ unsigned char pipe_num,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct qe_ep *ep = &udc->eps[pipe_num];
+ unsigned long flags;
+ int reval = 0;
+ u16 max = 0;
+
+ max = usb_endpoint_maxp(desc);
+
+	/* Validate the max packet size for this endpoint type and
+	 * speed; refer to USB 2.0 spec, table 9-13.
+	 */
+ if (pipe_num != 0) {
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (strstr(ep->ep.name, "-iso")
+ || strstr(ep->ep.name, "-int"))
+ goto en_done;
+ switch (udc->gadget.speed) {
+ case USB_SPEED_HIGH:
+ if ((max == 128) || (max == 256) || (max == 512))
+ break;
+ fallthrough;
+ default:
+ switch (max) {
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+ goto en_done;
+ }
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
+ goto en_done;
+ switch (udc->gadget.speed) {
+ case USB_SPEED_HIGH:
+ if (max <= 1024)
+ break;
+ fallthrough;
+ case USB_SPEED_FULL:
+ if (max <= 64)
+ break;
+ fallthrough;
+ default:
+ if (max <= 8)
+ break;
+ goto en_done;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (strstr(ep->ep.name, "-bulk")
+ || strstr(ep->ep.name, "-int"))
+ goto en_done;
+ switch (udc->gadget.speed) {
+ case USB_SPEED_HIGH:
+ if (max <= 1024)
+ break;
+ fallthrough;
+ case USB_SPEED_FULL:
+ if (max <= 1023)
+ break;
+ fallthrough;
+ default:
+ goto en_done;
+ }
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ if (strstr(ep->ep.name, "-iso")
+ || strstr(ep->ep.name, "-int"))
+ goto en_done;
+ switch (udc->gadget.speed) {
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ switch (max) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+ goto en_done;
+				}
+				break;
+			case USB_SPEED_LOW:
+				switch (max) {
+				case 1:
+				case 2:
+				case 4:
+				case 8:
+					break;
+				default:
+					goto en_done;
+				}
+				break;
+			default:
+				goto en_done;
+			}
+ break;
+
+ default:
+ goto en_done;
+ }
+ } /* if ep0*/
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* initialize ep structure */
+ ep->ep.maxpacket = max;
+ ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+ ep->ep.desc = desc;
+ ep->stopped = 0;
+ ep->init = 1;
+
+ if (pipe_num == 0) {
+ ep->dir = USB_DIR_BOTH;
+ udc->ep0_dir = USB_DIR_OUT;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ } else {
+ switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
+ case USB_DIR_OUT:
+ ep->dir = USB_DIR_OUT;
+ break;
+		case USB_DIR_IN:
+			ep->dir = USB_DIR_IN;
+			break;
+		default:
+			break;
+ }
+ }
+
+	/* hardware-specific initialization */
+ qe_ep_bd_init(udc, pipe_num);
+ if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
+ reval = qe_ep_rxbd_update(ep);
+ if (reval)
+ goto en_done1;
+ }
+
+ if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
+ ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
+ if (!ep->txframe)
+ goto en_done2;
+ qe_frame_init(ep->txframe);
+ }
+
+ qe_ep_register_init(udc, pipe_num);
+
+ /* Now HW will be NAKing transfers to that EP,
+ * until a buffer is queued to it. */
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+en_done2:
+ kfree(ep->rxbuffer);
+ kfree(ep->rxframe);
+en_done1:
+ spin_unlock_irqrestore(&udc->lock, flags);
+en_done:
+ dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
+ return -ENODEV;
+}
+
+static inline void qe_usb_enable(struct qe_udc *udc)
+{
+ setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
+}
+
+static inline void qe_usb_disable(struct qe_udc *udc)
+{
+ clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
+}
+
+/*----------------------------------------------------------------------------*
+ * USB and EP basic manipulate function end *
+ *----------------------------------------------------------------------------*/
+
+/******************************************************************************
+ UDC transmit and receive process
+ ******************************************************************************/
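+/*
+ * An RX BD is handed back to the hardware by setting its Empty (R_E)
+ * and Interrupt (R_I) bits again while preserving the Wrap (R_W) bit
+ * that marks the last descriptor of the ring.
+ */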
+static void recycle_one_rxbd(struct qe_ep *ep)
+{
+ u32 bdstatus;
+
+ bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
+ bdstatus = R_I | R_E | (bdstatus & R_W);
+ out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
+
+ if (bdstatus & R_W)
+ ep->e_rxbd = ep->rxbase;
+ else
+ ep->e_rxbd++;
+}
+
+static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
+{
+ u32 bdstatus;
+ struct qe_bd __iomem *bd, *nextbd;
+ unsigned char stop = 0;
+
+ nextbd = ep->n_rxbd;
+ bd = ep->e_rxbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+
+ while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
+ bdstatus = R_E | R_I | (bdstatus & R_W);
+ out_be32((u32 __iomem *)bd, bdstatus);
+
+ if (bdstatus & R_W)
+ bd = ep->rxbase;
+ else
+ bd++;
+
+ bdstatus = in_be32((u32 __iomem *)bd);
+ if (stopatnext && (bd == nextbd))
+ stop = 1;
+ }
+
+ ep->e_rxbd = bd;
+}
+
+static void ep_recycle_rxbds(struct qe_ep *ep)
+{
+ struct qe_bd __iomem *bd = ep->n_rxbd;
+ u32 bdstatus;
+ u8 epnum = ep->epnum;
+ struct qe_udc *udc = ep->udc;
+
+ bdstatus = in_be32((u32 __iomem *)bd);
+ if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
+ bd = ep->rxbase +
+ ((in_be16(&udc->ep_param[epnum]->rbptr) -
+ in_be16(&udc->ep_param[epnum]->rbase))
+ >> 3);
+ bdstatus = in_be32((u32 __iomem *)bd);
+
+ if (bdstatus & R_W)
+ bd = ep->rxbase;
+ else
+ bd++;
+
+ ep->e_rxbd = bd;
+ recycle_rxbds(ep, 0);
+ ep->e_rxbd = ep->n_rxbd;
+ } else
+ recycle_rxbds(ep, 1);
+
+ if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
+ out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
+
+ if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
+ qe_eprx_normal(ep);
+
+ ep->localnack = 0;
+}
+
+static void setup_received_handle(struct qe_udc *udc,
+ struct usb_ctrlrequest *setup);
+static int qe_ep_rxframe_handle(struct qe_ep *ep);
+static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
+/* when the BD PID is SETUP, handle the packet */
+static int ep0_setup_handle(struct qe_udc *udc)
+{
+ struct qe_ep *ep = &udc->eps[0];
+ struct qe_frame *pframe;
+ unsigned int fsize;
+ u8 *cp;
+
+ pframe = ep->rxframe;
+ if ((frame_get_info(pframe) & PID_SETUP)
+ && (udc->ep0_state == WAIT_FOR_SETUP)) {
+ fsize = frame_get_length(pframe);
+ if (unlikely(fsize != 8))
+ return -EINVAL;
+ cp = (u8 *)&udc->local_setup_buff;
+ memcpy(cp, pframe->data, fsize);
+ ep->data01 = 1;
+
+		/* handle the usb command based on the usb_ctrlrequest */
+ setup_received_handle(udc, &udc->local_setup_buff);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int qe_ep0_rx(struct qe_udc *udc)
+{
+ struct qe_ep *ep = &udc->eps[0];
+ struct qe_frame *pframe;
+ struct qe_bd __iomem *bd;
+ u32 bdstatus, length;
+ u32 vaddr;
+
+ pframe = ep->rxframe;
+
+ if (ep->dir == USB_DIR_IN) {
+ dev_err(udc->dev, "ep0 not a control endpoint\n");
+ return -EINVAL;
+ }
+
+ bd = ep->n_rxbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ length = bdstatus & BD_LENGTH_MASK;
+
+ while (!(bdstatus & R_E) && length) {
+ if ((bdstatus & R_F) && (bdstatus & R_L)
+ && !(bdstatus & R_ERROR)) {
+ if (length == USB_CRC_SIZE) {
+ udc->ep0_state = WAIT_FOR_SETUP;
+ dev_vdbg(udc->dev,
+ "receive a ZLP in status phase\n");
+ } else {
+ qe_frame_clean(pframe);
+ vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+ frame_set_data(pframe, (u8 *)vaddr);
+ frame_set_length(pframe,
+ (length - USB_CRC_SIZE));
+ frame_set_status(pframe, FRAME_OK);
+ switch (bdstatus & R_PID) {
+ case R_PID_SETUP:
+ frame_set_info(pframe, PID_SETUP);
+ break;
+ case R_PID_DATA1:
+ frame_set_info(pframe, PID_DATA1);
+ break;
+ default:
+ frame_set_info(pframe, PID_DATA0);
+ break;
+ }
+
+ if ((bdstatus & R_PID) == R_PID_SETUP)
+ ep0_setup_handle(udc);
+ else
+ qe_ep_rxframe_handle(ep);
+ }
+ } else {
+			dev_err(udc->dev, "received frame has an error!\n");
+ }
+
+ /* note: don't clear the rxbd's buffer address */
+ recycle_one_rxbd(ep);
+
+ /* Get next BD */
+ if (bdstatus & R_W)
+ bd = ep->rxbase;
+ else
+ bd++;
+
+ bdstatus = in_be32((u32 __iomem *)bd);
+ length = bdstatus & BD_LENGTH_MASK;
+
+ }
+
+ ep->n_rxbd = bd;
+
+ return 0;
+}
+
+static int qe_ep_rxframe_handle(struct qe_ep *ep)
+{
+ struct qe_frame *pframe;
+ u8 framepid = 0;
+ unsigned int fsize;
+ u8 *cp;
+ struct qe_req *req;
+
+ pframe = ep->rxframe;
+
+ if (frame_get_info(pframe) & PID_DATA1)
+ framepid = 0x1;
+
+ if (framepid != ep->data01) {
+		dev_err(ep->udc->dev, "data toggle (DATA0/1) mismatch!\n");
+ return -EIO;
+ }
+
+ fsize = frame_get_length(pframe);
+ if (list_empty(&ep->queue)) {
+		dev_err(ep->udc->dev, "%s has no request queued!\n", ep->name);
+ } else {
+ req = list_entry(ep->queue.next, struct qe_req, queue);
+
+ cp = (u8 *)(req->req.buf) + req->req.actual;
+ if (cp) {
+ memcpy(cp, pframe->data, fsize);
+ req->req.actual += fsize;
+ if ((fsize < ep->ep.maxpacket) ||
+ (req->req.actual >= req->req.length)) {
+ if (ep->epnum == 0)
+ ep0_req_complete(ep->udc, req);
+ else
+ done(ep, req, 0);
+ if (list_empty(&ep->queue) && ep->epnum != 0)
+ qe_eprx_nack(ep);
+ }
+ }
+ }
+
+ qe_ep_toggledata01(ep);
+
+ return 0;
+}
+
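+/*
+ * Bulk/interrupt OUT data is drained in a tasklet rather than in the
+ * hard IRQ handler: qe_ep_rx() only accounts for filled BDs and
+ * schedules this tasklet, which copies the data into the queued
+ * requests with the udc lock held.
+ */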
+static void ep_rx_tasklet(struct tasklet_struct *t)
+{
+ struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
+ struct qe_ep *ep;
+ struct qe_frame *pframe;
+ struct qe_bd __iomem *bd;
+ unsigned long flags;
+ u32 bdstatus, length;
+ u32 vaddr, i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
+ ep = &udc->eps[i];
+
+ if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
+ dev_dbg(udc->dev,
+				"transmit ep or tasklet disabled, skipping\n");
+ continue;
+ }
+
+ pframe = ep->rxframe;
+ bd = ep->n_rxbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ length = bdstatus & BD_LENGTH_MASK;
+
+ while (!(bdstatus & R_E) && length) {
+ if (list_empty(&ep->queue)) {
+ qe_eprx_nack(ep);
+ dev_dbg(udc->dev,
+					"rx ep has no request queued, has_data %d\n",
+ ep->has_data);
+ break;
+ }
+
+ if ((bdstatus & R_F) && (bdstatus & R_L)
+ && !(bdstatus & R_ERROR)) {
+ qe_frame_clean(pframe);
+ vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+ frame_set_data(pframe, (u8 *)vaddr);
+ frame_set_length(pframe,
+ (length - USB_CRC_SIZE));
+ frame_set_status(pframe, FRAME_OK);
+ switch (bdstatus & R_PID) {
+ case R_PID_DATA1:
+ frame_set_info(pframe, PID_DATA1);
+ break;
+ case R_PID_SETUP:
+ frame_set_info(pframe, PID_SETUP);
+ break;
+ default:
+ frame_set_info(pframe, PID_DATA0);
+ break;
+ }
+ /* handle the rx frame */
+ qe_ep_rxframe_handle(ep);
+ } else {
+ dev_err(udc->dev,
+ "error in received frame\n");
+ }
+			/* note: don't clear the rxbd's buffer address,
+			 * just clear the length */
+ out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
+ ep->has_data--;
+ if (!(ep->localnack))
+ recycle_one_rxbd(ep);
+
+ /* Get next BD */
+ if (bdstatus & R_W)
+ bd = ep->rxbase;
+ else
+ bd++;
+
+ bdstatus = in_be32((u32 __iomem *)bd);
+ length = bdstatus & BD_LENGTH_MASK;
+ }
+
+ ep->n_rxbd = bd;
+
+ if (ep->localnack)
+ ep_recycle_rxbds(ep);
+
+ ep->enable_tasklet = 0;
+ } /* for i=1 */
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
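+/*
+ * Work out ring occupancy from the distance between the software
+ * pointer (n_rxbd) and the hardware pointer (rbptr); each BD is
+ * 8 bytes, hence the ">> 3".  When fewer than MIN_EMPTY_BDS remain,
+ * the endpoint is NACKed until the tasklet has drained the ring.
+ */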
+static int qe_ep_rx(struct qe_ep *ep)
+{
+ struct qe_udc *udc;
+ struct qe_frame *pframe;
+ struct qe_bd __iomem *bd;
+ u16 swoffs, ucoffs, emptybds;
+
+ udc = ep->udc;
+ pframe = ep->rxframe;
+
+ if (ep->dir == USB_DIR_IN) {
+ dev_err(udc->dev, "transmit ep in rx function\n");
+ return -EINVAL;
+ }
+
+ bd = ep->n_rxbd;
+
+ swoffs = (u16)(bd - ep->rxbase);
+ ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
+ in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
+ if (swoffs < ucoffs)
+ emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
+ else
+ emptybds = swoffs - ucoffs;
+
+ if (emptybds < MIN_EMPTY_BDS) {
+ qe_eprx_nack(ep);
+ ep->localnack = 1;
+ dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
+ }
+ ep->has_data = USB_BDRING_LEN_RX - emptybds;
+
+ if (list_empty(&ep->queue)) {
+ qe_eprx_nack(ep);
+		dev_vdbg(udc->dev, "rx ep has no request queued, %d BDs hold data\n",
+ ep->has_data);
+ return 0;
+ }
+
+ tasklet_schedule(&udc->rx_tasklet);
+ ep->enable_tasklet = 1;
+
+ return 0;
+}
+
+/* transmit the given frame, independent of any tx_req bookkeeping */
+static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
+{
+ struct qe_udc *udc = ep->udc;
+ struct qe_bd __iomem *bd;
+ u16 saveusbmr;
+ u32 bdstatus, pidmask;
+ u32 paddr;
+
+ if (ep->dir == USB_DIR_OUT) {
+ dev_err(udc->dev, "receive ep passed to tx function\n");
+ return -EINVAL;
+ }
+
+ /* Disable the Tx interrupt */
+ saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
+ out_be16(&udc->usb_regs->usb_usbmr,
+ saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
+
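+	/*
+	 * A free TX BD is armed below with a single 32-bit store that
+	 * combines the frame length with the Ready/Interrupt/Last bits
+	 * (T_R | T_I | T_L), T_TC when the hardware should append a CRC,
+	 * T_CNF, and the DATA0/DATA1 PID.
+	 */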
+ bd = ep->n_txbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+
+ if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
+ if (frame_get_length(frame) == 0) {
+ frame_set_data(frame, udc->nullbuf);
+ frame_set_length(frame, 2);
+ frame->info |= (ZLP | NO_CRC);
+ dev_vdbg(udc->dev, "the frame size = 0\n");
+ }
+ paddr = virt_to_phys((void *)frame->data);
+ out_be32(&bd->buf, paddr);
+ bdstatus = (bdstatus&T_W);
+ if (!(frame_get_info(frame) & NO_CRC))
+ bdstatus |= T_R | T_I | T_L | T_TC
+ | frame_get_length(frame);
+ else
+ bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
+
+ /* if the packet is a ZLP in status phase */
+ if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
+ ep->data01 = 0x1;
+
+ if (ep->data01) {
+ pidmask = T_PID_DATA1;
+ frame->info |= PID_DATA1;
+ } else {
+ pidmask = T_PID_DATA0;
+ frame->info |= PID_DATA0;
+ }
+ bdstatus |= T_CNF;
+ bdstatus |= pidmask;
+ out_be32((u32 __iomem *)bd, bdstatus);
+ qe_ep_filltxfifo(ep);
+
+ /* enable the TX interrupt */
+ out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
+
+ qe_ep_toggledata01(ep);
+ if (bdstatus & T_W)
+ ep->n_txbd = ep->txbase;
+ else
+ ep->n_txbd++;
+
+ return 0;
+ } else {
+ out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
+ dev_vdbg(udc->dev, "The tx bd is not ready!\n");
+ return -EBUSY;
+ }
+}
+
+/* after a BD has been transmitted, update the tx_req bookkeeping;
+ * this path does not handle ep0 */
+static int txcomplete(struct qe_ep *ep, unsigned char restart)
+{
+ if (ep->tx_req != NULL) {
+ struct qe_req *req = ep->tx_req;
+ unsigned zlp = 0, last_len = 0;
+
+ last_len = min_t(unsigned, req->req.length - ep->sent,
+ ep->ep.maxpacket);
+
+ if (!restart) {
+ int asent = ep->last;
+ ep->sent += asent;
+ ep->last -= asent;
+ } else {
+ ep->last = 0;
+ }
+
+		/* a ZLP is needed when req->req.zero is set and the
+		 * length is a non-zero multiple of maxpacket */
+ if (req->req.zero) {
+ if (last_len == 0 ||
+ (req->req.length % ep->ep.maxpacket) != 0)
+ zlp = 0;
+ else
+ zlp = 1;
+ } else
+ zlp = 0;
+
+		/* the request has been transmitted completely */
+ if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
+ done(ep, ep->tx_req, 0);
+ ep->tx_req = NULL;
+ ep->last = 0;
+ ep->sent = 0;
+ }
+ }
+
+	/* we should gain a new tx_req for this endpoint */
+ if (ep->tx_req == NULL) {
+ if (!list_empty(&ep->queue)) {
+ ep->tx_req = list_entry(ep->queue.next, struct qe_req,
+ queue);
+ ep->last = 0;
+ ep->sent = 0;
+ }
+ }
+
+ return 0;
+}
+
+/* give a frame and a tx_req, send some data */
+static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
+{
+ unsigned int size;
+ u8 *buf;
+
+ qe_frame_clean(frame);
+ size = min_t(u32, (ep->tx_req->req.length - ep->sent),
+ ep->ep.maxpacket);
+ buf = (u8 *)ep->tx_req->req.buf + ep->sent;
+ if (buf && size) {
+ ep->last = size;
+ ep->tx_req->req.actual += size;
+ frame_set_data(frame, buf);
+ frame_set_length(frame, size);
+ frame_set_status(frame, FRAME_OK);
+ frame_set_info(frame, 0);
+ return qe_ep_tx(ep, frame);
+ }
+ return -EIO;
+}
+
+/* given a frame struct, send a ZLP */
+static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
+{
+ struct qe_udc *udc = ep->udc;
+
+ if (frame == NULL)
+ return -ENODEV;
+
+ qe_frame_clean(frame);
+ frame_set_data(frame, (u8 *)udc->nullbuf);
+ frame_set_length(frame, 2);
+ frame_set_status(frame, FRAME_OK);
+ frame_set_info(frame, (ZLP | NO_CRC | infor));
+
+ return qe_ep_tx(ep, frame);
+}
+
+static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
+{
+ struct qe_req *req = ep->tx_req;
+ int reval;
+
+ if (req == NULL)
+ return -ENODEV;
+
+ if ((req->req.length - ep->sent) > 0)
+ reval = qe_usb_senddata(ep, frame);
+ else
+ reval = sendnulldata(ep, frame, 0);
+
+ return reval;
+}
+
+/* if direction is DIR_IN, the status stage is Device->Host;
+ * if direction is DIR_OUT, it is Host->Device.
+ * In the status phase the udc creates the request itself */
+static int ep0_prime_status(struct qe_udc *udc, int direction)
+{
+ struct qe_ep *ep = &udc->eps[0];
+
+ if (direction == USB_DIR_IN) {
+ udc->ep0_state = DATA_STATE_NEED_ZLP;
+ udc->ep0_dir = USB_DIR_IN;
+ sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
+ } else {
+ udc->ep0_dir = USB_DIR_OUT;
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
+ }
+
+ return 0;
+}
+
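+/*
+ * ep0 state machine, as driven by ep0_req_complete() and
+ * ep0_prime_status():
+ *
+ *	WAIT_FOR_SETUP -> DATA_STATE_XMIT -> WAIT_FOR_OUT_STATUS
+ *	WAIT_FOR_SETUP -> DATA_STATE_RECV -> DATA_STATE_NEED_ZLP
+ *
+ * and back to WAIT_FOR_SETUP once the status stage completes.
+ */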
+/* a request completed on ep0, whether a gadget request or a udc-internal one */
+static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
+{
+ struct qe_ep *ep = &udc->eps[0];
+ /* because usb and ep's status already been set in ch9setaddress() */
+
+ switch (udc->ep0_state) {
+ case DATA_STATE_XMIT:
+ done(ep, req, 0);
+ /* receive status phase */
+ if (ep0_prime_status(udc, USB_DIR_OUT))
+ qe_ep0_stall(udc);
+ break;
+
+ case DATA_STATE_NEED_ZLP:
+ done(ep, req, 0);
+ udc->ep0_state = WAIT_FOR_SETUP;
+ break;
+
+ case DATA_STATE_RECV:
+ done(ep, req, 0);
+ /* send status phase */
+ if (ep0_prime_status(udc, USB_DIR_IN))
+ qe_ep0_stall(udc);
+ break;
+
+ case WAIT_FOR_OUT_STATUS:
+ done(ep, req, 0);
+ udc->ep0_state = WAIT_FOR_SETUP;
+ break;
+
+ case WAIT_FOR_SETUP:
+ dev_vdbg(udc->dev, "Unexpected interrupt\n");
+ break;
+
+ default:
+ qe_ep0_stall(udc);
+ break;
+ }
+}
+
+static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
+{
+ struct qe_req *tx_req = NULL;
+ struct qe_frame *frame = ep->txframe;
+
+ if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
+ if (!restart)
+ ep->udc->ep0_state = WAIT_FOR_SETUP;
+ else
+ sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
+ return 0;
+ }
+
+ tx_req = ep->tx_req;
+ if (tx_req != NULL) {
+ if (!restart) {
+ int asent = ep->last;
+ ep->sent += asent;
+ ep->last -= asent;
+ } else {
+ ep->last = 0;
+ }
+
+		/* the request has been transmitted completely */
+ if ((ep->tx_req->req.length - ep->sent) <= 0) {
+ ep->tx_req->req.actual = (unsigned int)ep->sent;
+ ep0_req_complete(ep->udc, ep->tx_req);
+ ep->tx_req = NULL;
+ ep->last = 0;
+ ep->sent = 0;
+ }
+ } else {
+		dev_vdbg(ep->udc->dev, "ep0 has no request to complete\n");
+ }
+
+ return 0;
+}
+
+static int ep0_txframe_handle(struct qe_ep *ep)
+{
+	/* if there was an error, transmit again */
+ if (frame_get_status(ep->txframe) & FRAME_ERROR) {
+ qe_ep_flushtxfifo(ep);
+		dev_vdbg(ep->udc->dev, "EP0 transmit error, retransmitting\n");
+ if (frame_get_info(ep->txframe) & PID_DATA0)
+ ep->data01 = 0;
+ else
+ ep->data01 = 1;
+
+ ep0_txcomplete(ep, 1);
+ } else
+ ep0_txcomplete(ep, 0);
+
+ frame_create_tx(ep, ep->txframe);
+ return 0;
+}
+
+static int qe_ep0_txconf(struct qe_ep *ep)
+{
+ struct qe_bd __iomem *bd;
+ struct qe_frame *pframe;
+ u32 bdstatus;
+
+ bd = ep->c_txbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
+ pframe = ep->txframe;
+
+ /* clear and recycle the BD */
+ out_be32((u32 __iomem *)bd, bdstatus & T_W);
+ out_be32(&bd->buf, 0);
+ if (bdstatus & T_W)
+ ep->c_txbd = ep->txbase;
+ else
+ ep->c_txbd++;
+
+ if (ep->c_txbd == ep->n_txbd) {
+ if (bdstatus & DEVICE_T_ERROR) {
+ frame_set_status(pframe, FRAME_ERROR);
+ if (bdstatus & T_TO)
+ pframe->status |= TX_ER_TIMEOUT;
+ if (bdstatus & T_UN)
+ pframe->status |= TX_ER_UNDERUN;
+ }
+ ep0_txframe_handle(ep);
+ }
+
+ bd = ep->c_txbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ }
+
+ return 0;
+}
+
+static int ep_txframe_handle(struct qe_ep *ep)
+{
+ if (frame_get_status(ep->txframe) & FRAME_ERROR) {
+ qe_ep_flushtxfifo(ep);
+		dev_vdbg(ep->udc->dev, "EP transmit error, retransmitting\n");
+ if (frame_get_info(ep->txframe) & PID_DATA0)
+ ep->data01 = 0;
+ else
+ ep->data01 = 1;
+
+ txcomplete(ep, 1);
+ } else
+ txcomplete(ep, 0);
+
+ frame_create_tx(ep, ep->txframe); /* send the data */
+ return 0;
+}
+
+/* confirm the already transmitted BDs */
+static int qe_ep_txconf(struct qe_ep *ep)
+{
+ struct qe_bd __iomem *bd;
+ struct qe_frame *pframe = NULL;
+ u32 bdstatus;
+ unsigned char breakonrxinterrupt = 0;
+
+ bd = ep->c_txbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
+ pframe = ep->txframe;
+ if (bdstatus & DEVICE_T_ERROR) {
+ frame_set_status(pframe, FRAME_ERROR);
+ if (bdstatus & T_TO)
+ pframe->status |= TX_ER_TIMEOUT;
+ if (bdstatus & T_UN)
+ pframe->status |= TX_ER_UNDERUN;
+ }
+
+ /* clear and recycle the BD */
+ out_be32((u32 __iomem *)bd, bdstatus & T_W);
+ out_be32(&bd->buf, 0);
+ if (bdstatus & T_W)
+ ep->c_txbd = ep->txbase;
+ else
+ ep->c_txbd++;
+
+ /* handle the tx frame */
+ ep_txframe_handle(ep);
+ bd = ep->c_txbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ }
+ if (breakonrxinterrupt)
+ return -EIO;
+ else
+ return 0;
+}
+
+/* called once a request is queued: try to transmit a packet */
+static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
+{
+ int reval = 0;
+
+ if (ep->tx_req == NULL) {
+ ep->sent = 0;
+ ep->last = 0;
+ txcomplete(ep, 0); /* can gain a new tx_req */
+ reval = frame_create_tx(ep, ep->txframe);
+ }
+ return reval;
+}
+
+/* copy data already sitting in the BD ring into the queued request */
+static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
+{
+ struct qe_udc *udc = ep->udc;
+ struct qe_frame *pframe = NULL;
+ struct qe_bd __iomem *bd;
+ u32 bdstatus, length;
+ u32 vaddr, fsize;
+ u8 *cp;
+ u8 finish_req = 0;
+ u8 framepid;
+
+ if (list_empty(&ep->queue)) {
+		dev_vdbg(udc->dev, "the request already finished\n");
+ return 0;
+ }
+ pframe = ep->rxframe;
+
+ bd = ep->n_rxbd;
+ bdstatus = in_be32((u32 __iomem *)bd);
+ length = bdstatus & BD_LENGTH_MASK;
+
+ while (!(bdstatus & R_E) && length) {
+ if (finish_req)
+ break;
+ if ((bdstatus & R_F) && (bdstatus & R_L)
+ && !(bdstatus & R_ERROR)) {
+ qe_frame_clean(pframe);
+ vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
+ frame_set_data(pframe, (u8 *)vaddr);
+ frame_set_length(pframe, (length - USB_CRC_SIZE));
+ frame_set_status(pframe, FRAME_OK);
+ switch (bdstatus & R_PID) {
+ case R_PID_DATA1:
+ frame_set_info(pframe, PID_DATA1); break;
+ default:
+ frame_set_info(pframe, PID_DATA0); break;
+ }
+ /* handle the rx frame */
+
+ if (frame_get_info(pframe) & PID_DATA1)
+ framepid = 0x1;
+ else
+ framepid = 0;
+
+ if (framepid != ep->data01) {
+				dev_vdbg(udc->dev, "data toggle (DATA0/1) mismatch!\n");
+ } else {
+ fsize = frame_get_length(pframe);
+
+ cp = (u8 *)(req->req.buf) + req->req.actual;
+ if (cp) {
+ memcpy(cp, pframe->data, fsize);
+ req->req.actual += fsize;
+ if ((fsize < ep->ep.maxpacket)
+ || (req->req.actual >=
+ req->req.length)) {
+ finish_req = 1;
+ done(ep, req, 0);
+ if (list_empty(&ep->queue))
+ qe_eprx_nack(ep);
+ }
+ }
+ qe_ep_toggledata01(ep);
+ }
+ } else {
+			dev_err(udc->dev, "received frame has an error!\n");
+ }
+
+		/* note: don't clear the rxbd's buffer address,
+		 * only clear the length */
+ out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
+ ep->has_data--;
+
+ /* Get next BD */
+ if (bdstatus & R_W)
+ bd = ep->rxbase;
+ else
+ bd++;
+
+ bdstatus = in_be32((u32 __iomem *)bd);
+ length = bdstatus & BD_LENGTH_MASK;
+ }
+
+ ep->n_rxbd = bd;
+ ep_recycle_rxbds(ep);
+
+ return 0;
+}
+
+/* the request is already queued; enable rx or drain pending BD data */
+static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
+{
+ if (ep->state == EP_STATE_NACK) {
+ if (ep->has_data <= 0) {
+ /* Enable rx and unmask rx interrupt */
+ qe_eprx_normal(ep);
+ } else {
+			/* Copy out the data already held in the BDs */
+ ep_req_rx(ep, req);
+ }
+ }
+
+ return 0;
+}
+
+/********************************************************************
+ Internally used functions: end
+********************************************************************/
+
+/*-----------------------------------------------------------------------
+ Endpoint Management Functions For Gadget
+ -----------------------------------------------------------------------*/
+static int qe_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct qe_udc *udc;
+ struct qe_ep *ep;
+ int retval = 0;
+ unsigned char epnum;
+
+ ep = container_of(_ep, struct qe_ep, ep);
+
+ /* catch various bogus parameters */
+ if (!_ep || !desc || _ep->name == ep_name[0] ||
+ (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ udc = ep->udc;
+ if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+
+ epnum = (u8)desc->bEndpointAddress & 0xF;
+
+ retval = qe_ep_init(udc, epnum, desc);
+ if (retval != 0) {
+ cpm_muram_free(cpm_muram_offset(ep->rxbase));
+ dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
+ return -EINVAL;
+ }
+ dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
+ return 0;
+}
+
+static int qe_ep_disable(struct usb_ep *_ep)
+{
+ struct qe_udc *udc;
+ struct qe_ep *ep;
+ unsigned long flags;
+ unsigned int size;
+
+ ep = container_of(_ep, struct qe_ep, ep);
+ udc = ep->udc;
+
+ if (!_ep || !ep->ep.desc) {
+ dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* Nuke all pending requests (does flush) */
+ nuke(ep, -ESHUTDOWN);
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+ ep->tx_req = NULL;
+ qe_ep_reset(udc, ep->epnum);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ cpm_muram_free(cpm_muram_offset(ep->rxbase));
+
+ if (ep->dir == USB_DIR_OUT)
+ size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
+ (USB_BDRING_LEN_RX + 1);
+ else
+ size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
+ (USB_BDRING_LEN + 1);
+
+ if (ep->dir != USB_DIR_IN) {
+ kfree(ep->rxframe);
+ if (ep->rxbufmap) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ ep->rxbuf_d, size,
+ DMA_FROM_DEVICE);
+ ep->rxbuf_d = DMA_ADDR_INVALID;
+ } else {
+ dma_sync_single_for_cpu(
+ udc->gadget.dev.parent,
+ ep->rxbuf_d, size,
+ DMA_FROM_DEVICE);
+ }
+ kfree(ep->rxbuffer);
+ }
+
+ if (ep->dir != USB_DIR_OUT)
+ kfree(ep->txframe);
+
+ dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
+ return 0;
+}
+
+static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct qe_req *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_ADDR_INVALID;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct qe_req *req;
+
+ req = container_of(_req, struct qe_req, req);
+
+ if (_req)
+ kfree(req);
+}
+
+static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+ struct qe_req *req = container_of(_req, struct qe_req, req);
+ struct qe_udc *udc;
+ int reval;
+
+ udc = ep->udc;
+ /* catch various bogus parameters */
+ if (!_req || !req->req.complete || !req->req.buf
+ || !list_empty(&req->queue)) {
+ dev_dbg(udc->dev, "bad params\n");
+ return -EINVAL;
+ }
+ if (!_ep || (!ep->ep.desc && ep_index(ep))) {
+ dev_dbg(udc->dev, "bad ep\n");
+ return -EINVAL;
+ }
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ req->ep = ep;
+
+ /* map virtual address to hardware */
+ if (req->req.dma == DMA_ADDR_INVALID) {
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf,
+ req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ req->mapped = 1;
+ } else {
+ dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+ req->req.dma, req->req.length,
+ ep_is_in(ep)
+ ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ req->mapped = 0;
+ }
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+
+ list_add_tail(&req->queue, &ep->queue);
+ dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
+ ep->name, req->req.length);
+
+ /* push the request to device */
+ if (ep_is_in(ep))
+ reval = ep_req_send(ep, req);
+
+ /* EP0 */
+ if (ep_index(ep) == 0 && req->req.length > 0) {
+ if (ep_is_in(ep))
+ udc->ep0_state = DATA_STATE_XMIT;
+ else
+ udc->ep0_state = DATA_STATE_RECV;
+ }
+
+ if (ep->dir == USB_DIR_OUT)
+ reval = ep_req_receive(ep, req);
+
+ return 0;
+}
+
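+/*
+ * For reference, a gadget driver reaches this entry point through the
+ * generic gadget API, roughly like this (an illustrative sketch; buf,
+ * len and my_complete are placeholders for the gadget's own data and
+ * completion callback):
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);
+ */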
+/* queues (submits) an I/O request to an endpoint */
+static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+ struct qe_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ret = __qe_ep_queue(_ep, _req);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+ struct qe_req *req = NULL;
+ struct qe_req *iter;
+ unsigned long flags;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+
+ if (!req) {
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return -EINVAL;
+ }
+
+ done(ep, req, -ECONNRESET);
+
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return 0;
+}
+
+/*-----------------------------------------------------------------
+ * modify the endpoint halt feature
+ * @ep: the non-isochronous endpoint being stalled
+ * @value: 1--set halt 0--clear halt
+ * Returns zero, or a negative error code.
+*----------------------------------------------------------------*/
+static int qe_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct qe_ep *ep;
+ unsigned long flags;
+ int status = -EOPNOTSUPP;
+ struct qe_udc *udc;
+
+ ep = container_of(_ep, struct qe_ep, ep);
+	/* udc is not initialized yet, so don't go through the out: vdbg */
+	if (!_ep || !ep->ep.desc)
+		return -EINVAL;
+
+ udc = ep->udc;
+	/* Attempts to halt an IN ep will fail if any transfer requests
+	 * are still queued */
+ if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
+ status = -EAGAIN;
+ goto out;
+ }
+
+ status = 0;
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ qe_eptx_stall_change(ep, value);
+ qe_eprx_stall_change(ep, value);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ if (ep->epnum == 0) {
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ }
+
+ /* set data toggle to DATA0 on clear halt */
+ if (value == 0)
+ ep->data01 = 0;
+out:
+ dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
+ value ? "set" : "clear", status);
+
+ return status;
+}
+
+static const struct usb_ep_ops qe_ep_ops = {
+ .enable = qe_ep_enable,
+ .disable = qe_ep_disable,
+
+ .alloc_request = qe_alloc_request,
+ .free_request = qe_free_request,
+
+ .queue = qe_ep_queue,
+ .dequeue = qe_ep_dequeue,
+
+ .set_halt = qe_ep_set_halt,
+};
+
+/*------------------------------------------------------------------------
+ Gadget Driver Layer Operations
+ ------------------------------------------------------------------------*/
+
+/* Get the current frame number */
+static int qe_get_frame(struct usb_gadget *gadget)
+{
+ struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
+ u16 tmp;
+
+ tmp = in_be16(&udc->usb_param->frame_n);
+ if (tmp & 0x8000)
+ return tmp & 0x07ff;
+ return -EINVAL;
+}
+
+static int fsl_qe_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+static int fsl_qe_stop(struct usb_gadget *gadget);
+
+/* defined in usb_gadget.h */
+static const struct usb_gadget_ops qe_gadget_ops = {
+ .get_frame = qe_get_frame,
+ .udc_start = fsl_qe_start,
+ .udc_stop = fsl_qe_stop,
+};
+
+/*-------------------------------------------------------------------------
+ USB ep0 Setup process in BUS Enumeration
+ -------------------------------------------------------------------------*/
+static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
+{
+ struct qe_ep *ep = &udc->eps[pipe];
+
+ nuke(ep, -ECONNRESET);
+ ep->tx_req = NULL;
+ return 0;
+}
+
+static int reset_queues(struct qe_udc *udc)
+{
+ u8 pipe;
+
+ for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
+ udc_reset_ep_queue(udc, pipe);
+
+	/* report reset; the driver is already quiesced */
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ spin_lock(&udc->lock);
+
+ return 0;
+}
+
+static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
+ u16 length)
+{
+ /* Save the new address to device struct */
+ udc->device_address = (u8) value;
+ /* Update usb state */
+ udc->usb_state = USB_STATE_ADDRESS;
+
+	/* Status phase: send a ZLP */
+ if (ep0_prime_status(udc, USB_DIR_IN))
+ qe_ep0_stall(udc);
+}
+
+static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct qe_req *req = container_of(_req, struct qe_req, req);
+
+ req->req.buf = NULL;
+ kfree(req);
+}
+
+static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
+ u16 index, u16 length)
+{
+ u16 usb_status = 0;
+ struct qe_req *req;
+ struct qe_ep *ep;
+ int status = 0;
+
+ ep = &udc->eps[0];
+ if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ /* Get device status */
+ usb_status = 1 << USB_DEVICE_SELF_POWERED;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+ /* Get interface status */
+		/* We don't have interface information in the udc driver */
+ usb_status = 0;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+ /* Get endpoint status */
+ int pipe = index & USB_ENDPOINT_NUMBER_MASK;
+ struct qe_ep *target_ep;
+ u16 usep;
+
+ if (pipe >= USB_MAX_ENDPOINTS)
+ goto stall;
+ target_ep = &udc->eps[pipe];
+
+ /* stall if endpoint doesn't exist */
+ if (!target_ep->ep.desc)
+ goto stall;
+
+ usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
+ if (index & USB_DIR_IN) {
+ if (target_ep->dir != USB_DIR_IN)
+ goto stall;
+ if ((usep & USB_THS_MASK) == USB_THS_STALL)
+ usb_status = 1 << USB_ENDPOINT_HALT;
+ } else {
+ if (target_ep->dir != USB_DIR_OUT)
+ goto stall;
+ if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
+ usb_status = 1 << USB_ENDPOINT_HALT;
+ }
+ }
+
+	/* may be called from interrupt context, so don't sleep here */
+	req = container_of(qe_alloc_request(&ep->ep, GFP_ATOMIC),
+					struct qe_req, req);
+ req->req.length = 2;
+ req->req.buf = udc->statusbuf;
+ *(u16 *)req->req.buf = cpu_to_le16(usb_status);
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = ownercomplete;
+
+ udc->ep0_dir = USB_DIR_IN;
+
+ /* data phase */
+ status = __qe_ep_queue(&ep->ep, &req->req);
+
+ if (status == 0)
+ return;
+stall:
+	dev_err(udc->dev, "Can't respond to getstatus request\n");
+ qe_ep0_stall(udc);
+}
+
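+/*
+ * Standard requests handled here in the udc: GET_STATUS, SET_ADDRESS
+ * and CLEAR/SET_FEATURE (endpoint halt; other feature recipients are
+ * simply acked).  Everything else, including any request with a data
+ * phase owned by the function driver, is passed to the gadget's
+ * ->setup() callback below.
+ */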
+/* handle only the setup request, assuming the device is in a normal state */
+static void setup_received_handle(struct qe_udc *udc,
+ struct usb_ctrlrequest *setup)
+{
+	/* Fix endianness (udc->local_setup_buff is CPU-endian now) */
+ u16 wValue = le16_to_cpu(setup->wValue);
+ u16 wIndex = le16_to_cpu(setup->wIndex);
+ u16 wLength = le16_to_cpu(setup->wLength);
+
+ /* clear the previous request in the ep0 */
+ udc_reset_ep_queue(udc, 0);
+
+ if (setup->bRequestType & USB_DIR_IN)
+ udc->ep0_dir = USB_DIR_IN;
+ else
+ udc->ep0_dir = USB_DIR_OUT;
+
+ switch (setup->bRequest) {
+ case USB_REQ_GET_STATUS:
+		/* Data+Status phase from udc */
+ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+ != (USB_DIR_IN | USB_TYPE_STANDARD))
+ break;
+ ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
+ wLength);
+ return;
+
+ case USB_REQ_SET_ADDRESS:
+ /* Status phase from udc */
+ if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
+ USB_RECIP_DEVICE))
+ break;
+ ch9setaddress(udc, wValue, wIndex, wLength);
+ return;
+
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ /* Requests with no data phase, status phase from udc */
+ if ((setup->bRequestType & USB_TYPE_MASK)
+ != USB_TYPE_STANDARD)
+ break;
+
+ if ((setup->bRequestType & USB_RECIP_MASK)
+ == USB_RECIP_ENDPOINT) {
+ int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ struct qe_ep *ep;
+
+ if (wValue != 0 || wLength != 0
+ || pipe >= USB_MAX_ENDPOINTS)
+ break;
+ ep = &udc->eps[pipe];
+
+ spin_unlock(&udc->lock);
+ qe_ep_set_halt(&ep->ep,
+ (setup->bRequest == USB_REQ_SET_FEATURE)
+ ? 1 : 0);
+ spin_lock(&udc->lock);
+ }
+
+ ep0_prime_status(udc, USB_DIR_IN);
+
+ return;
+
+ default:
+ break;
+ }
+
+ if (wLength) {
+ /* Data phase from gadget, status phase from udc */
+ if (setup->bRequestType & USB_DIR_IN) {
+ udc->ep0_state = DATA_STATE_XMIT;
+ udc->ep0_dir = USB_DIR_IN;
+ } else {
+ udc->ep0_state = DATA_STATE_RECV;
+ udc->ep0_dir = USB_DIR_OUT;
+ }
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget,
+ &udc->local_setup_buff) < 0)
+ qe_ep0_stall(udc);
+ spin_lock(&udc->lock);
+ } else {
+ /* No data phase, IN status from gadget */
+ udc->ep0_dir = USB_DIR_IN;
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget,
+ &udc->local_setup_buff) < 0)
+ qe_ep0_stall(udc);
+ spin_lock(&udc->lock);
+ udc->ep0_state = DATA_STATE_NEED_ZLP;
+ }
+}
+
+/*-------------------------------------------------------------------------
+ USB Interrupt handlers
+ -------------------------------------------------------------------------*/
+static void suspend_irq(struct qe_udc *udc)
+{
+ udc->resume_state = udc->usb_state;
+ udc->usb_state = USB_STATE_SUSPENDED;
+
+	/* report suspend to the driver; not every gadget (e.g. serial) supports it */
+ if (udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+}
+
+static void resume_irq(struct qe_udc *udc)
+{
+ udc->usb_state = udc->resume_state;
+ udc->resume_state = 0;
+
+	/* report resume to the driver; not every gadget (e.g. serial) supports it */
+ if (udc->driver->resume)
+ udc->driver->resume(&udc->gadget);
+}
+
+static void idle_irq(struct qe_udc *udc)
+{
+ u8 usbs;
+
+ usbs = in_8(&udc->usb_regs->usb_usbs);
+ if (usbs & USB_IDLE_STATUS_MASK) {
+ if ((udc->usb_state) != USB_STATE_SUSPENDED)
+ suspend_irq(udc);
+ } else {
+ if (udc->usb_state == USB_STATE_SUSPENDED)
+ resume_irq(udc);
+ }
+}
+
+static int reset_irq(struct qe_udc *udc)
+{
+ unsigned char i;
+
+ if (udc->usb_state == USB_STATE_DEFAULT)
+ return 0;
+
+ qe_usb_disable(udc);
+ out_8(&udc->usb_regs->usb_usadr, 0);
+
+ for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+ if (udc->eps[i].init)
+ qe_ep_reset(udc, i);
+ }
+
+ reset_queues(udc);
+ udc->usb_state = USB_STATE_DEFAULT;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = USB_DIR_OUT;
+ qe_usb_enable(udc);
+ return 0;
+}
+
+static int bsy_irq(struct qe_udc *udc)
+{
+ return 0;
+}
+
+static int txe_irq(struct qe_udc *udc)
+{
+ return 0;
+}
+
+/* ep0 tx interrupt also in here */
+static int tx_irq(struct qe_udc *udc)
+{
+ struct qe_ep *ep;
+ struct qe_bd __iomem *bd;
+ int i, res = 0;
+
+ if ((udc->usb_state == USB_STATE_ADDRESS)
+ && (in_8(&udc->usb_regs->usb_usadr) == 0))
+ out_8(&udc->usb_regs->usb_usadr, udc->device_address);
+
+ for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
+ ep = &udc->eps[i];
+ if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
+ bd = ep->c_txbd;
+ if (!(in_be32((u32 __iomem *)bd) & T_R)
+ && (in_be32(&bd->buf))) {
+ /* confirm the transmitted bd */
+ if (ep->epnum == 0)
+ res = qe_ep0_txconf(ep);
+ else
+ res = qe_ep_txconf(ep);
+ }
+ }
+ }
+ return res;
+}
+
+
+/* setup packet rx is handled in this function too */
+static void rx_irq(struct qe_udc *udc)
+{
+ struct qe_ep *ep;
+ struct qe_bd __iomem *bd;
+ int i;
+
+ for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+ ep = &udc->eps[i];
+ if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
+ bd = ep->n_rxbd;
+ if (!(in_be32((u32 __iomem *)bd) & R_E)
+ && (in_be32(&bd->buf))) {
+ if (ep->epnum == 0) {
+ qe_ep0_rx(udc);
+ } else {
+					/* non-setup packet receive */
+ qe_ep_rx(ep);
+ }
+ }
+ }
+ }
+}
+
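+/*
+ * Top-level interrupt handler: only sources that are both raised in
+ * USBER and unmasked in USBMR are serviced, and each one is acked by
+ * writing it back to USBER before dispatch.
+ */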
+static irqreturn_t qe_udc_irq(int irq, void *_udc)
+{
+ struct qe_udc *udc = (struct qe_udc *)_udc;
+ u16 irq_src;
+ irqreturn_t status = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ irq_src = in_be16(&udc->usb_regs->usb_usber) &
+ in_be16(&udc->usb_regs->usb_usbmr);
+ /* Clear notification bits */
+ out_be16(&udc->usb_regs->usb_usber, irq_src);
+ /* USB Interrupt */
+ if (irq_src & USB_E_IDLE_MASK) {
+ idle_irq(udc);
+ irq_src &= ~USB_E_IDLE_MASK;
+ status = IRQ_HANDLED;
+ }
+
+ if (irq_src & USB_E_TXB_MASK) {
+ tx_irq(udc);
+ irq_src &= ~USB_E_TXB_MASK;
+ status = IRQ_HANDLED;
+ }
+
+ if (irq_src & USB_E_RXB_MASK) {
+ rx_irq(udc);
+ irq_src &= ~USB_E_RXB_MASK;
+ status = IRQ_HANDLED;
+ }
+
+ if (irq_src & USB_E_RESET_MASK) {
+ reset_irq(udc);
+ irq_src &= ~USB_E_RESET_MASK;
+ status = IRQ_HANDLED;
+ }
+
+ if (irq_src & USB_E_BSY_MASK) {
+ bsy_irq(udc);
+ irq_src &= ~USB_E_BSY_MASK;
+ status = IRQ_HANDLED;
+ }
+
+ if (irq_src & USB_E_TXE_MASK) {
+ txe_irq(udc);
+ irq_src &= ~USB_E_TXE_MASK;
+ status = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return status;
+}
+
+/*-------------------------------------------------------------------------
+ Gadget driver probe and unregister.
+ --------------------------------------------------------------------------*/
+static int fsl_qe_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct qe_udc *udc;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct qe_udc, gadget);
+	/* take the udc lock while hooking up the driver */
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* hook up the driver */
+ udc->driver = driver;
+ udc->gadget.speed = driver->max_speed;
+
+	/* Enable the controller (set the EN bit in usmod) */
+ qe_usb_enable(udc);
+
+ out_be16(&udc->usb_regs->usb_usber, 0xffff);
+ out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = USB_DIR_OUT;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int fsl_qe_stop(struct usb_gadget *gadget)
+{
+ struct qe_udc *udc;
+ struct qe_ep *loop_ep;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct qe_udc, gadget);
+ /* stop usb controller, disable intr */
+ qe_usb_disable(udc);
+
+	/* in fact, not needed */
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+
+	/* standard operation */
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ nuke(&udc->eps[0], -ESHUTDOWN);
+ list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
+ nuke(loop_ep, -ESHUTDOWN);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ udc->driver = NULL;
+
+ return 0;
+}
+
+/* udc structure alloc and setup, including ep-param alloc */
+static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
+{
+ struct qe_udc *udc;
+ struct device_node *np = ofdev->dev.of_node;
+ unsigned long tmp_addr = 0;
+ struct usb_device_para __iomem *usbpram;
+ unsigned int i;
+ u64 size;
+ u32 offset;
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ goto cleanup;
+
+ udc->dev = &ofdev->dev;
+
+ /* get default address of usb parameter in MURAM from device tree */
+ offset = *of_get_address(np, 1, &size, NULL);
+ udc->usb_param = cpm_muram_addr(offset);
+ memset_io(udc->usb_param, 0, size);
+
+ usbpram = udc->usb_param;
+ out_be16(&usbpram->frame_n, 0);
+ out_be32(&usbpram->rstate, 0);
+
+ tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
+ sizeof(struct usb_ep_para)),
+ USB_EP_PARA_ALIGNMENT);
+ if (IS_ERR_VALUE(tmp_addr))
+ goto cleanup;
+
+ for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+ out_be16(&usbpram->epptr[i], (u16)tmp_addr);
+ udc->ep_param[i] = cpm_muram_addr(tmp_addr);
+ tmp_addr += 32;
+ }
+
+ memset_io(udc->ep_param[0], 0,
+ USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
+
+ udc->resume_state = USB_STATE_NOTATTACHED;
+ udc->usb_state = USB_STATE_POWERED;
+ udc->ep0_dir = 0;
+
+ spin_lock_init(&udc->lock);
+ return udc;
+
+cleanup:
+ kfree(udc);
+ return NULL;
+}
+
+/* USB Controller register init */
+static int qe_udc_reg_init(struct qe_udc *udc)
+{
+	struct usb_ctlr __iomem *qe_usbregs;
+
+	qe_usbregs = udc->usb_regs;
+
+ /* Spec says that we must enable the USB controller to change mode. */
+ out_8(&qe_usbregs->usb_usmod, 0x01);
+ /* Mode changed, now disable it, since muram isn't initialized yet. */
+ out_8(&qe_usbregs->usb_usmod, 0x00);
+
+ /* Initialize the rest. */
+ out_be16(&qe_usbregs->usb_usbmr, 0);
+ out_8(&qe_usbregs->usb_uscom, 0);
+ out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
+
+ return 0;
+}
+
+static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
+{
+ struct qe_ep *ep = &udc->eps[pipe_num];
+
+ ep->udc = udc;
+ strcpy(ep->name, ep_name[pipe_num]);
+ ep->ep.name = ep_name[pipe_num];
+
+ if (pipe_num == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
+ ep->ep.ops = &qe_ep_ops;
+ ep->stopped = 1;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+ ep->ep.desc = NULL;
+ ep->dir = 0xff;
+ ep->epnum = (u8)pipe_num;
+ ep->sent = 0;
+ ep->last = 0;
+ ep->init = 0;
+ ep->rxframe = NULL;
+ ep->txframe = NULL;
+ ep->tx_req = NULL;
+ ep->state = EP_STATE_IDLE;
+ ep->has_data = 0;
+
+ /* the queue lists any req for this ep */
+ INIT_LIST_HEAD(&ep->queue);
+
+	/* gadget.ep_list is used for ep_autoconfig, so skip ep0 */
+ if (pipe_num != 0)
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+ ep->gadget = &udc->gadget;
+
+ return 0;
+}
+
+/*-----------------------------------------------------------------------
+ * UDC device Driver operation functions *
+ *----------------------------------------------------------------------*/
+static void qe_udc_release(struct device *dev)
+{
+ struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
+ int i;
+
+ complete(udc->done);
+ cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
+ for (i = 0; i < USB_MAX_ENDPOINTS; i++)
+ udc->ep_param[i] = NULL;
+
+ kfree(udc);
+}
+
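+/*
+ * A device tree node bound by this driver looks roughly like the
+ * following (an illustrative sketch; reg and interrupt values are
+ * board and SoC specific):
+ *
+ *	usb@6c0 {
+ *		compatible = "fsl,mpc8360-qe-usb", "fsl,mpc8323-qe-usb";
+ *		reg = <0x6c0 0x40 0x8b00 0x100>;
+ *		interrupts = <11>;
+ *		interrupt-parent = <&qeic>;
+ *		mode = "peripheral";
+ *	};
+ */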
+/* Driver probe functions */
+static const struct of_device_id qe_udc_match[];
+static int qe_udc_probe(struct platform_device *ofdev)
+{
+ struct qe_udc *udc;
+ const struct of_device_id *match;
+ struct device_node *np = ofdev->dev.of_node;
+ struct qe_ep *ep;
+ unsigned int ret = 0;
+ unsigned int i;
+ const void *prop;
+
+ match = of_match_device(qe_udc_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ prop = of_get_property(np, "mode", NULL);
+ if (!prop || strcmp(prop, "peripheral"))
+ return -ENODEV;
+
+ /* Initialize the udc structure including QH member and other member */
+ udc = qe_udc_config(ofdev);
+ if (!udc) {
+ dev_err(&ofdev->dev, "failed to initialize\n");
+ return -ENOMEM;
+ }
+
+ udc->soc_type = (unsigned long)match->data;
+ udc->usb_regs = of_iomap(np, 0);
+ if (!udc->usb_regs) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+	/* initialize usb hw regs except the per-EP ones;
+	 * leave the usb interrupt regs untouched */
+ qe_udc_reg_init(udc);
+
+	/* standard probe operations:
+	 * set up the qe_udc->gadget fields */
+ udc->gadget.ops = &qe_gadget_ops;
+
+ /* gadget.ep0 is a pointer */
+ udc->gadget.ep0 = &udc->eps[0].ep;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ /* modify in register gadget process */
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+
+ /* name: Identifies the controller hardware type. */
+ udc->gadget.name = driver_name;
+ udc->gadget.dev.parent = &ofdev->dev;
+
+ /* initialize qe_ep struct */
+ for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
+		/* the ep type isn't decided here, so
+		 * qe_ep_init() is called from ep_enable() */
+
+ /* setup the qe_ep struct and link ep.ep.list
+ * into gadget.ep_list */
+ qe_ep_config(udc, (unsigned char)i);
+ }
+
+ /* ep0 initialization in here */
+ ret = qe_ep_init(udc, 0, &qe_ep0_desc);
+ if (ret)
+ goto err2;
+
+	/* create a buffer for ZLP transmission; it must stay zeroed */
+ udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
+ if (udc->nullbuf == NULL) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ /* buffer for data of get_status request */
+ udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
+ if (udc->statusbuf == NULL) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
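+ /* if virt_to_phys() does not yield a usable bus address for the
+ * ZLP buffer, fall back to a streaming DMA mapping; otherwise
+ * just sync the buffer out to the device */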
+ udc->nullp = virt_to_phys((void *)udc->nullbuf);
+ if (udc->nullp == DMA_ADDR_INVALID) {
+ udc->nullp = dma_map_single(
+ udc->gadget.dev.parent,
+ udc->nullbuf,
+ 256,
+ DMA_TO_DEVICE);
+ udc->nullmap = 1;
+ } else {
+ dma_sync_single_for_device(udc->gadget.dev.parent,
+ udc->nullp, 256,
+ DMA_TO_DEVICE);
+ }
+
+ tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
+ /* request irq and disable DR */
+ udc->usb_irq = irq_of_parse_and_map(np, 0);
+ if (!udc->usb_irq) {
+ ret = -EINVAL;
+ goto err_noirq;
+ }
+
+ ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
+ driver_name, udc);
+ if (ret) {
+ dev_err(udc->dev, "cannot request irq %d err %d\n",
+ udc->usb_irq, ret);
+ goto err4;
+ }
+
+ ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
+ qe_udc_release);
+ if (ret)
+ goto err5;
+
+ platform_set_drvdata(ofdev, udc);
+ dev_info(udc->dev,
+ "%s USB controller initialized as device\n",
+ (udc->soc_type == PORT_QE) ? "QE" : "CPM");
+ return 0;
+
+err5:
+ free_irq(udc->usb_irq, udc);
+err4:
+ irq_dispose_mapping(udc->usb_irq);
+err_noirq:
+ if (udc->nullmap) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ udc->nullp, 256,
+ DMA_TO_DEVICE);
+ udc->nullp = DMA_ADDR_INVALID;
+ } else {
+ dma_sync_single_for_cpu(udc->gadget.dev.parent,
+ udc->nullp, 256,
+ DMA_TO_DEVICE);
+ }
+err3:
+ ep = &udc->eps[0];
+ cpm_muram_free(cpm_muram_offset(ep->rxbase));
+ kfree(ep->rxframe);
+ kfree(ep->rxbuffer);
+ kfree(ep->txframe);
+err2:
+ iounmap(udc->usb_regs);
+err1:
+ kfree(udc);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return -ENOTSUPP;
+}
+
+static int qe_udc_resume(struct platform_device *dev)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+static int qe_udc_remove(struct platform_device *ofdev)
+{
+ struct qe_udc *udc = platform_get_drvdata(ofdev);
+ struct qe_ep *ep;
+ unsigned int size;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ udc->done = &done;
+ tasklet_disable(&udc->rx_tasklet);
+
+ if (udc->nullmap) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ udc->nullp, 256,
+ DMA_TO_DEVICE);
+ udc->nullp = DMA_ADDR_INVALID;
+ } else {
+ dma_sync_single_for_cpu(udc->gadget.dev.parent,
+ udc->nullp, 256,
+ DMA_TO_DEVICE);
+ }
+
+ ep = &udc->eps[0];
+ cpm_muram_free(cpm_muram_offset(ep->rxbase));
+ size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
+
+ kfree(ep->rxframe);
+ if (ep->rxbufmap) {
+ dma_unmap_single(udc->gadget.dev.parent,
+ ep->rxbuf_d, size,
+ DMA_FROM_DEVICE);
+ ep->rxbuf_d = DMA_ADDR_INVALID;
+ } else {
+ dma_sync_single_for_cpu(udc->gadget.dev.parent,
+ ep->rxbuf_d, size,
+ DMA_FROM_DEVICE);
+ }
+
+ kfree(ep->rxbuffer);
+ kfree(ep->txframe);
+
+ free_irq(udc->usb_irq, udc);
+ irq_dispose_mapping(udc->usb_irq);
+
+ tasklet_kill(&udc->rx_tasklet);
+
+ iounmap(udc->usb_regs);
+
+ /* wait for release() of gadget.dev to free udc */
+ wait_for_completion(&done);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static const struct of_device_id qe_udc_match[] = {
+ {
+ .compatible = "fsl,mpc8323-qe-usb",
+ .data = (void *)PORT_QE,
+ },
+ {
+ .compatible = "fsl,mpc8360-qe-usb",
+ .data = (void *)PORT_QE,
+ },
+ {
+ .compatible = "fsl,mpc8272-cpm-usb",
+ .data = (void *)PORT_CPM,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, qe_udc_match);
+
+static struct platform_driver udc_driver = {
+ .driver = {
+ .name = driver_name,
+ .of_match_table = qe_udc_match,
+ },
+ .probe = qe_udc_probe,
+ .remove = qe_udc_remove,
+#ifdef CONFIG_PM
+ .suspend = qe_udc_suspend,
+ .resume = qe_udc_resume,
+#endif
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
new file mode 100644
index 000000000..53ca0ff7c
--- /dev/null
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/usb/gadget/udc/fsl_qe_udc.h
+ *
+ * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Xiaobo Xie <X.Xie@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * Freescale USB device/endpoint management registers
+ */
+
+#ifndef __FSL_QE_UDC_H
+#define __FSL_QE_UDC_H
+
+/* SoC type */
+#define PORT_CPM 0
+#define PORT_QE 1
+
+#define USB_MAX_ENDPOINTS 4
+#define USB_MAX_PIPES USB_MAX_ENDPOINTS
+#define USB_EP0_MAX_SIZE 64
+#define USB_MAX_CTRL_PAYLOAD 0x4000
+#define USB_BDRING_LEN 16
+#define USB_BDRING_LEN_RX 256
+#define USB_BDRING_LEN_TX 16
+#define MIN_EMPTY_BDS 128
+#define MAX_DATA_BDS 8
+#define USB_CRC_SIZE 2
+#define USB_DIR_BOTH 0x88
+#define R_BUF_MAXSIZE 0x800
+#define USB_EP_PARA_ALIGNMENT 32
+
+/* USB Mode Register bit define */
+#define USB_MODE_EN 0x01
+#define USB_MODE_HOST 0x02
+#define USB_MODE_TEST 0x04
+#define USB_MODE_SFTE 0x08
+#define USB_MODE_RESUME 0x40
+#define USB_MODE_LSS 0x80
+
+/* USB Slave Address Register Mask */
+#define USB_SLVADDR_MASK 0x7F
+
+/* USB Endpoint register define */
+#define USB_EPNUM_MASK 0xF000
+#define USB_EPNUM_SHIFT 12
+
+#define USB_TRANS_MODE_SHIFT 8
+#define USB_TRANS_CTR 0x0000
+#define USB_TRANS_INT 0x0100
+#define USB_TRANS_BULK 0x0200
+#define USB_TRANS_ISO 0x0300
+
+#define USB_EP_MF 0x0020
+#define USB_EP_RTE 0x0010
+
+#define USB_THS_SHIFT 2
+#define USB_THS_MASK 0x000c
+#define USB_THS_NORMAL 0x0
+#define USB_THS_IGNORE_IN 0x0004
+#define USB_THS_NACK 0x0008
+#define USB_THS_STALL 0x000c
+
+#define USB_RHS_SHIFT 0
+#define USB_RHS_MASK 0x0003
+#define USB_RHS_NORMAL 0x0
+#define USB_RHS_IGNORE_OUT 0x0001
+#define USB_RHS_NACK 0x0002
+#define USB_RHS_STALL 0x0003
+
+#define USB_RTHS_MASK 0x000f
+
+/* USB Command Register define */
+#define USB_CMD_STR_FIFO 0x80
+#define USB_CMD_FLUSH_FIFO 0x40
+#define USB_CMD_ISFT 0x20
+#define USB_CMD_DSFT 0x10
+#define USB_CMD_EP_MASK 0x03
+
+/* USB Event and Mask Register define */
+#define USB_E_MSF_MASK 0x0800
+#define USB_E_SFT_MASK 0x0400
+#define USB_E_RESET_MASK 0x0200
+#define USB_E_IDLE_MASK 0x0100
+#define USB_E_TXE4_MASK 0x0080
+#define USB_E_TXE3_MASK 0x0040
+#define USB_E_TXE2_MASK 0x0020
+#define USB_E_TXE1_MASK 0x0010
+#define USB_E_SOF_MASK 0x0008
+#define USB_E_BSY_MASK 0x0004
+#define USB_E_TXB_MASK 0x0002
+#define USB_E_RXB_MASK 0x0001
+#define USBER_ALL_CLEAR 0x0fff
+
+#define USB_E_DEFAULT_DEVICE (USB_E_RESET_MASK | USB_E_TXE4_MASK | \
+ USB_E_TXE3_MASK | USB_E_TXE2_MASK | \
+ USB_E_TXE1_MASK | USB_E_BSY_MASK | \
+ USB_E_TXB_MASK | USB_E_RXB_MASK)
+
+#define USB_E_TXE_MASK (USB_E_TXE4_MASK | USB_E_TXE3_MASK|\
+ USB_E_TXE2_MASK | USB_E_TXE1_MASK)
+/* USB Status Register define */
+#define USB_IDLE_STATUS_MASK 0x01
+
+/* USB Start of Frame Timer */
+#define USB_USSFT_MASK 0x3FFF
+
+/* USB Frame Number Register */
+#define USB_USFRN_MASK 0xFFFF
+
+struct usb_device_para {
+ u16 epptr[4];
+ u32 rstate;
+ u32 rptr;
+ u16 frame_n;
+ u16 rbcnt;
+ u32 rtemp;
+ u32 rxusb_data;
+ u16 rxuptr;
+ u8 reso[2];
+ u32 softbl;
+ u8 sofucrctemp;
+};
+
+struct usb_ep_para {
+ u16 rbase;
+ u16 tbase;
+ u8 rbmr;
+ u8 tbmr;
+ u16 mrblr;
+ u16 rbptr;
+ u16 tbptr;
+ u32 tstate;
+ u32 tptr;
+ u16 tcrc;
+ u16 tbcnt;
+ u32 ttemp;
+ u16 txusbu_ptr;
+ u8 reserve[2];
+};
+
+#define USB_BUSMODE_GBL 0x20
+#define USB_BUSMODE_BO_MASK 0x18
+#define USB_BUSMODE_BO_SHIFT 0x3
+#define USB_BUSMODE_BE 0x2
+#define USB_BUSMODE_CETM 0x04
+#define USB_BUSMODE_DTB 0x02
+
+/* Endpoint basic handle */
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress & 0xF)
+#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
+#define ep_is_in(EP) ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
+ & USB_DIR_IN) == USB_DIR_IN)
+
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP 0
+#define DATA_STATE_XMIT 1
+#define DATA_STATE_NEED_ZLP 2
+#define WAIT_FOR_OUT_STATUS 3
+#define DATA_STATE_RECV 4
+
+/* ep transfer mode */
+#define USBP_TM_CTL 0
+#define USBP_TM_ISO 1
+#define USBP_TM_BULK 2
+#define USBP_TM_INT 3
+
+/*-----------------------------------------------------------------------------
+ USB RX And TX DATA Frame
+ -----------------------------------------------------------------------------*/
+struct qe_frame {
+ u8 *data;
+ u32 len;
+ u32 status;
+ u32 info;
+
+ void *privdata;
+ struct list_head node;
+};
+
+/* Frame structure, info field. */
+#define PID_DATA0 0x80000000 /* Data toggle zero */
+#define PID_DATA1 0x40000000 /* Data toggle one */
+#define PID_SETUP 0x20000000 /* setup bit */
+#define SETUP_STATUS 0x10000000 /* setup status bit */
+#define SETADDR_STATUS 0x08000000 /* setup address status bit */
+#define NO_REQ 0x04000000 /* Frame without request */
+#define HOST_DATA 0x02000000 /* Host data frame */
+#define FIRST_PACKET_IN_FRAME 0x01000000 /* first packet in the frame */
+#define TOKEN_FRAME 0x00800000 /* Host token frame */
+#define ZLP 0x00400000 /* Zero length packet */
+#define IN_TOKEN_FRAME 0x00200000 /* In token packet */
+#define OUT_TOKEN_FRAME 0x00100000 /* Out token packet */
+#define SETUP_TOKEN_FRAME 0x00080000 /* Setup token packet */
+#define STALL_FRAME 0x00040000 /* Stall handshake */
+#define NACK_FRAME 0x00020000 /* Nack handshake */
+#define NO_PID 0x00010000 /* Don't send PID */
+#define NO_CRC 0x00008000 /* Don't send CRC */
+#define HOST_COMMAND 0x00004000 /* Host command frame */
+
+/* Frame status field */
+/* Receive side */
+#define FRAME_OK 0x00000000 /* Frame transmitted or received OK */
+#define FRAME_ERROR 0x80000000 /* Error occurred on frame */
+#define START_FRAME_LOST 0x40000000 /* START_FRAME_LOST */
+#define END_FRAME_LOST 0x20000000 /* END_FRAME_LOST */
+#define RX_ER_NONOCT 0x10000000 /* Rx Non Octet Aligned Packet */
+#define RX_ER_BITSTUFF 0x08000000 /* Frame Aborted --Received packet
+ with bit stuff error */
+#define RX_ER_CRC 0x04000000 /* Received packet with CRC error */
+#define RX_ER_OVERUN 0x02000000 /* Over-run occurred on reception */
+#define RX_ER_PID 0x01000000 /* Wrong PID received */
+/* Transmit side */
+#define TX_ER_NAK 0x00800000 /* Received NAK handshake */
+#define TX_ER_STALL 0x00400000 /* Received STALL handshake */
+#define TX_ER_TIMEOUT 0x00200000 /* Transmit time out */
+#define TX_ER_UNDERUN 0x00100000 /* Transmit underrun */
+#define FRAME_INPROGRESS 0x00080000 /* Frame is being transmitted */
+#define ER_DATA_UNDERUN 0x00040000 /* Frame is shorter than expected */
+#define ER_DATA_OVERUN 0x00020000 /* Frame is longer than expected */
+
+/* QE USB frame operation functions */
+#define frame_get_length(frm) (frm->len)
+#define frame_set_length(frm, leng) (frm->len = leng)
+#define frame_get_data(frm) (frm->data)
+#define frame_set_data(frm, dat) (frm->data = dat)
+#define frame_get_info(frm) (frm->info)
+#define frame_set_info(frm, inf) (frm->info = inf)
+#define frame_get_status(frm) (frm->status)
+#define frame_set_status(frm, stat) (frm->status = stat)
+#define frame_get_privdata(frm) (frm->privdata)
+#define frame_set_privdata(frm, dat) (frm->privdata = dat)
+
+static inline void qe_frame_clean(struct qe_frame *frm)
+{
+ frame_set_data(frm, NULL);
+ frame_set_length(frm, 0);
+ frame_set_status(frm, FRAME_OK);
+ frame_set_info(frm, 0);
+ frame_set_privdata(frm, NULL);
+}
+
+static inline void qe_frame_init(struct qe_frame *frm)
+{
+ qe_frame_clean(frm);
+ INIT_LIST_HEAD(&(frm->node));
+}
+
+struct qe_req {
+ struct usb_request req;
+ struct list_head queue;
+ /* ep_queue() will add a request->queue
+ to the tail of a udc_ep->queue */
+ struct qe_ep *ep;
+ unsigned mapped:1;
+};
+
+struct qe_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct qe_udc *udc;
+ struct usb_gadget *gadget;
+
+ u8 state;
+
+ struct qe_bd __iomem *rxbase;
+ struct qe_bd __iomem *n_rxbd;
+ struct qe_bd __iomem *e_rxbd;
+
+ struct qe_bd __iomem *txbase;
+ struct qe_bd __iomem *n_txbd;
+ struct qe_bd __iomem *c_txbd;
+
+ struct qe_frame *rxframe;
+ u8 *rxbuffer;
+ dma_addr_t rxbuf_d;
+ u8 rxbufmap;
+ unsigned char localnack;
+ int has_data;
+
+ struct qe_frame *txframe;
+ struct qe_req *tx_req;
+ int sent; /* data already sent */
+ int last; /* data sent in the last transfer */
+
+ u8 dir;
+ u8 epnum;
+ u8 tm; /* transfer mode */
+ u8 data01;
+ u8 init;
+
+ u8 already_seen;
+ u8 enable_tasklet;
+ u8 setup_stage;
+ u32 last_io; /* timestamp */
+
+ char name[14];
+
+ unsigned double_buf:1;
+ unsigned stopped:1;
+ unsigned fnf:1;
+ unsigned has_dma:1;
+
+ u8 ackwait;
+ u8 dma_channel;
+ u16 dma_counter;
+ int lch;
+
+ struct timer_list timer;
+};
+
+struct qe_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct device *dev;
+ struct qe_ep eps[USB_MAX_ENDPOINTS];
+ struct usb_ctrlrequest local_setup_buff;
+ spinlock_t lock; /* lock for set/config qe_udc */
+ unsigned long soc_type; /* QE or CPM soc */
+
+ struct qe_req *status_req; /* ep0 status request */
+
+ /* USB and EP Parameter Block pointer */
+ struct usb_device_para __iomem *usb_param;
+ struct usb_ep_para __iomem *ep_param[4];
+
+ u32 max_pipes; /* Device max pipes */
+ u32 max_use_endpts; /* Max endpoints to be used */
+ u32 bus_reset; /* Device is bus resetting */
+ u32 resume_state; /* USB state to resume*/
+ u32 usb_state; /* USB current state */
+ u32 usb_next_state; /* USB next state */
+ u32 ep0_state; /* Endpoint zero state */
+ u32 ep0_dir; /* Endpoint zero direction: can be
+ USB_DIR_IN or USB_DIR_OUT */
+ u32 usb_sof_count; /* SOF count */
+ u32 errors; /* USB ERRORs count */
+
+ u8 *tmpbuf;
+ u32 c_start;
+ u32 c_end;
+
+ u8 *nullbuf;
+ u8 *statusbuf;
+ dma_addr_t nullp;
+ u8 nullmap;
+ u8 device_address; /* Device USB address */
+
+ unsigned int usb_clock;
+ unsigned int usb_irq;
+ struct usb_ctlr __iomem *usb_regs;
+
+ struct tasklet_struct rx_tasklet;
+
+ struct completion *done; /* to make sure release() is done */
+};
+
+#define EP_STATE_IDLE 0
+#define EP_STATE_NACK 1
+#define EP_STATE_STALL 2
+
+/*
+ * transmit BD's status
+ */
+#define T_R 0x80000000 /* ready bit */
+#define T_W 0x20000000 /* wrap bit */
+#define T_I 0x10000000 /* interrupt on completion */
+#define T_L 0x08000000 /* last */
+#define T_TC 0x04000000 /* transmit CRC */
+#define T_CNF 0x02000000 /* wait for transmit confirm */
+#define T_LSP 0x01000000 /* Low-speed transaction */
+#define T_PID 0x00c00000 /* packet id */
+#define T_NAK 0x00100000 /* No ack. */
+#define T_STAL 0x00080000 /* Stall received */
+#define T_TO 0x00040000 /* time out */
+#define T_UN 0x00020000 /* underrun */
+
+#define DEVICE_T_ERROR (T_UN | T_TO)
+#define HOST_T_ERROR (T_UN | T_TO | T_NAK | T_STAL)
+#define DEVICE_T_BD_MASK DEVICE_T_ERROR
+#define HOST_T_BD_MASK HOST_T_ERROR
+
+#define T_PID_SHIFT 6
+#define T_PID_DATA0 0x00800000 /* Data 0 toggle */
+#define T_PID_DATA1 0x00c00000 /* Data 1 toggle */
+
+/*
+ * receive BD's status
+ */
+#define R_E 0x80000000 /* buffer empty */
+#define R_W 0x20000000 /* wrap bit */
+#define R_I 0x10000000 /* interrupt on reception */
+#define R_L 0x08000000 /* last */
+#define R_F 0x04000000 /* first */
+#define R_PID 0x00c00000 /* packet id */
+#define R_NO 0x00100000 /* Rx Non Octet Aligned Packet */
+#define R_AB 0x00080000 /* Frame Aborted */
+#define R_CR 0x00040000 /* CRC Error */
+#define R_OV 0x00020000 /* Overrun */
+
+#define R_ERROR (R_NO | R_AB | R_CR | R_OV)
+#define R_BD_MASK R_ERROR
+
+#define R_PID_DATA0 0x00000000
+#define R_PID_DATA1 0x00400000
+#define R_PID_SETUP 0x00800000
+
+#define CPM_USB_STOP_TX 0x2e600000
+#define CPM_USB_RESTART_TX 0x2e600000
+#define CPM_USB_STOP_TX_OPCODE 0x0a
+#define CPM_USB_RESTART_TX_OPCODE 0x0b
+#define CPM_USB_EP_SHIFT 5
+
+#endif /* __FSL_QE_UDC_H */
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
new file mode 100644
index 000000000..a67873a07
--- /dev/null
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -0,0 +1,2688 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2004-2007,2011-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Jiang Bo <tanya.jiang@freescale.com>
+ *
+ * Description:
+ * Freescale high-speed USB SOC DR module device controller driver.
+ * This can be found on MPC8349E/MPC8313E/MPC5121E cpus.
+ * The driver was previously named mpc_udc. It is based on bare
+ * board code from Dave Liu and Shlomi Gridish.
+ */
+
+#undef VERBOSE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/dmapool.h>
+#include <linux/of_device.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <asm/dma.h>
+
+#include "fsl_usb2_udc.h"
+
+#define DRIVER_DESC "Freescale High-Speed USB SOC Device Controller driver"
+#define DRIVER_AUTHOR "Li Yang/Jiang Bo"
+#define DRIVER_VERSION "Apr 20, 2007"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char driver_name[] = "fsl-usb2-udc";
+
+static struct usb_dr_device __iomem *dr_regs;
+
+static struct usb_sys_interface __iomem *usb_sys_regs;
+
+/* it is initialized in probe() */
+static struct fsl_udc *udc_controller = NULL;
+
+static const struct usb_endpoint_descriptor
+fsl_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = USB_MAX_CTRL_PAYLOAD,
+};
+
+static void fsl_ep_fifo_flush(struct usb_ep *_ep);
+
+#ifdef CONFIG_PPC32
+/*
+ * On some SoCs, the USB controller registers can be big or little endian,
+ * depending on the version of the chip. In order to be able to run the
+ * same kernel binary on 2 different versions of an SoC, the BE/LE decision
+ * must be made at run time. _fsl_readl and fsl_writel are pointers to the
+ * BE or LE readl() and writel() functions, and fsl_readl() and fsl_writel()
+ * call through those pointers. Platform code for SoCs that have BE USB
+ * registers should set pdata->big_endian_mmio flag.
+ *
+ * This also applies to controller-to-cpu accessors for the USB descriptors,
+ * since their endianness is also SoC dependent. Platform code for SoCs that
+ * have BE USB descriptors should set pdata->big_endian_desc flag.
+ */
+static u32 _fsl_readl_be(const unsigned __iomem *p)
+{
+ return in_be32(p);
+}
+
+static u32 _fsl_readl_le(const unsigned __iomem *p)
+{
+ return in_le32(p);
+}
+
+static void _fsl_writel_be(u32 v, unsigned __iomem *p)
+{
+ out_be32(p, v);
+}
+
+static void _fsl_writel_le(u32 v, unsigned __iomem *p)
+{
+ out_le32(p, v);
+}
+
+static u32 (*_fsl_readl)(const unsigned __iomem *p);
+static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
+
+#define fsl_readl(p) (*_fsl_readl)((p))
+#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
+
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
+{
+ if (pdata->big_endian_mmio) {
+ _fsl_readl = _fsl_readl_be;
+ _fsl_writel = _fsl_writel_be;
+ } else {
+ _fsl_readl = _fsl_readl_le;
+ _fsl_writel = _fsl_writel_le;
+ }
+}
+
+static inline u32 cpu_to_hc32(const u32 x)
+{
+ return udc_controller->pdata->big_endian_desc
+ ? (__force u32)cpu_to_be32(x)
+ : (__force u32)cpu_to_le32(x);
+}
+
+static inline u32 hc32_to_cpu(const u32 x)
+{
+ return udc_controller->pdata->big_endian_desc
+ ? be32_to_cpu((__force __be32)x)
+ : le32_to_cpu((__force __le32)x);
+}
+#else /* !CONFIG_PPC32 */
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
+
+#define fsl_readl(addr) readl(addr)
+#define fsl_writel(val32, addr) writel(val32, addr)
+#define cpu_to_hc32(x) cpu_to_le32(x)
+#define hc32_to_cpu(x) le32_to_cpu(x)
+#endif /* CONFIG_PPC32 */
+
+/********************************************************************
+ * Internally Used Functions
+********************************************************************/
+/*-----------------------------------------------------------------
+ * done() - retire a request; the caller must have blocked irqs
+ * @status : request status to be set, only works when
+ * request is still in progress.
+ *--------------------------------------------------------------*/
+static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
+__releases(ep->udc->lock)
+__acquires(ep->udc->lock)
+{
+ struct fsl_udc *udc = NULL;
+ unsigned char stopped = ep->stopped;
+ struct ep_td_struct *curr_td, *next_td;
+ int j;
+
+ udc = (struct fsl_udc *)ep->udc;
+ /* Remove the req from fsl_ep->queue */
+ list_del_init(&req->queue);
+
+ /* req.status should be set as -EINPROGRESS in ep_queue() */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ /* Free dtd for the request */
+ next_td = req->head;
+ for (j = 0; j < req->dtd_count; j++) {
+ curr_td = next_td;
+ if (j != req->dtd_count - 1) {
+ next_td = curr_td->next_td_virt;
+ }
+ dma_pool_free(udc->td_pool, curr_td, curr_td->td_dma);
+ }
+
+ usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
+
+ if (status && (status != -ESHUTDOWN))
+ VDBG("complete %s req %p stat %d len %u/%u",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ ep->stopped = 1;
+
+ spin_unlock(&ep->udc->lock);
+
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+
+ spin_lock(&ep->udc->lock);
+ ep->stopped = stopped;
+}
+
+/*-----------------------------------------------------------------
+ * nuke(): delete all requests related to this ep
+ * called with spinlock held
+ *--------------------------------------------------------------*/
+static void nuke(struct fsl_ep *ep, int status)
+{
+ ep->stopped = 1;
+
+ /* Flush fifo */
+ fsl_ep_fifo_flush(&ep->ep);
+
+ /* Complete any requests still linked on this ep's queue */
+ while (!list_empty(&ep->queue)) {
+ struct fsl_req *req = NULL;
+
+ req = list_entry(ep->queue.next, struct fsl_req, queue);
+ done(ep, req, status);
+ }
+}
+
+/*------------------------------------------------------------------
+ Internal Hardware related function
+ ------------------------------------------------------------------*/
+
+static int dr_controller_setup(struct fsl_udc *udc)
+{
+ unsigned int tmp, portctrl, ep_num;
+ unsigned int max_no_of_ep;
+ unsigned int ctrl;
+ unsigned long timeout;
+
+#define FSL_UDC_RESET_TIMEOUT 1000
+
+ /* Config PHY interface */
+ portctrl = fsl_readl(&dr_regs->portsc1);
+ portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
+ switch (udc->phy_mode) {
+ case FSL_USB2_PHY_ULPI:
+ if (udc->pdata->have_sysif_regs) {
+ if (udc->pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ ctrl = __raw_readl(&usb_sys_regs->control);
+ ctrl &= ~USB_CTRL_UTMI_PHY_EN;
+ ctrl |= USB_CTRL_USB_EN;
+ __raw_writel(ctrl, &usb_sys_regs->control);
+ }
+ }
+ portctrl |= PORTSCX_PTS_ULPI;
+ break;
+ case FSL_USB2_PHY_UTMI_WIDE:
+ portctrl |= PORTSCX_PTW_16BIT;
+ fallthrough;
+ case FSL_USB2_PHY_UTMI:
+ case FSL_USB2_PHY_UTMI_DUAL:
+ if (udc->pdata->have_sysif_regs) {
+ if (udc->pdata->controller_ver) {
+ /* controller version 1.6 or above */
+ ctrl = __raw_readl(&usb_sys_regs->control);
+ ctrl |= (USB_CTRL_UTMI_PHY_EN |
+ USB_CTRL_USB_EN);
+ __raw_writel(ctrl, &usb_sys_regs->control);
+ mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI
+ PHY CLK to become stable - 10ms*/
+ }
+ }
+ portctrl |= PORTSCX_PTS_UTMI;
+ break;
+ case FSL_USB2_PHY_SERIAL:
+ portctrl |= PORTSCX_PTS_FSLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fsl_writel(portctrl, &dr_regs->portsc1);
+
+ /* Stop and reset the usb controller */
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp &= ~USB_CMD_RUN_STOP;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp |= USB_CMD_CTRL_RESET;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+
+ /* Wait for reset to complete */
+ timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+ while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+ if (time_after(jiffies, timeout)) {
+ ERR("udc reset timeout!\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+
+ /* Set the controller as device mode */
+ tmp = fsl_readl(&dr_regs->usbmode);
+ tmp &= ~USB_MODE_CTRL_MODE_MASK; /* clear mode bits */
+ tmp |= USB_MODE_CTRL_MODE_DEVICE;
+ /* Disable Setup Lockout */
+ tmp |= USB_MODE_SETUP_LOCK_OFF;
+ if (udc->pdata->es)
+ tmp |= USB_MODE_ES;
+ fsl_writel(tmp, &dr_regs->usbmode);
+
+ /* Clear the setup status */
+ fsl_writel(0, &dr_regs->usbsts);
+
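+ /* point the controller at the queue head list */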
+ tmp = udc->ep_qh_dma;
+ tmp &= USB_EP_LIST_ADDRESS_MASK;
+ fsl_writel(tmp, &dr_regs->endpointlistaddr);
+
+ VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
+ udc->ep_qh, (int)tmp,
+ fsl_readl(&dr_regs->endpointlistaddr));
+
+ max_no_of_ep = (0x0000001F & fsl_readl(&dr_regs->dccparams));
+ for (ep_num = 1; ep_num < max_no_of_ep; ep_num++) {
+ tmp = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ tmp &= ~(EPCTRL_TX_TYPE | EPCTRL_RX_TYPE);
+ tmp |= (EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT)
+ | (EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT);
+ fsl_writel(tmp, &dr_regs->endptctrl[ep_num]);
+ }
+ /* Config control enable i/o output, cpu endian register */
+ if (udc->pdata->have_sysif_regs) {
+ ctrl = __raw_readl(&usb_sys_regs->control);
+ ctrl |= USB_CTRL_IOENB;
+ __raw_writel(ctrl, &usb_sys_regs->control);
+ }
+
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ /* Turn on cache snooping hardware, since some PowerPC platforms
+ * wholly rely on hardware to deal with cache coherency. */
+
+ if (udc->pdata->have_sysif_regs) {
+ /* Setup Snooping for all the 4GB space */
+ tmp = SNOOP_SIZE_2GB; /* starts from 0x0, size 2G */
+ __raw_writel(tmp, &usb_sys_regs->snoop1);
+ tmp |= 0x80000000; /* starts from 0x8000000, size 2G */
+ __raw_writel(tmp, &usb_sys_regs->snoop2);
+ }
+#endif
+
+ return 0;
+}
+
+/* Enable DR irq and set controller to run state */
+static void dr_controller_run(struct fsl_udc *udc)
+{
+ u32 temp;
+
+ /* Enable DR irq reg */
+ temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
+ | USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
+ | USB_INTR_DEVICE_SUSPEND | USB_INTR_SYS_ERR_EN;
+
+ fsl_writel(temp, &dr_regs->usbintr);
+
+ /* Clear stopped bit */
+ udc->stopped = 0;
+
+ /* Set the controller as device mode */
+ temp = fsl_readl(&dr_regs->usbmode);
+ temp |= USB_MODE_CTRL_MODE_DEVICE;
+ fsl_writel(temp, &dr_regs->usbmode);
+
+ /* Set controller to Run */
+ temp = fsl_readl(&dr_regs->usbcmd);
+ temp |= USB_CMD_RUN_STOP;
+ fsl_writel(temp, &dr_regs->usbcmd);
+}
+
+static void dr_controller_stop(struct fsl_udc *udc)
+{
+ unsigned int tmp;
+
+ pr_debug("%s\n", __func__);
+
+ /* if we're in OTG mode, and the Host is currently using the port,
+ * stop now and don't rip the controller out from under the
+ * ehci driver
+ */
+ if (udc->gadget.is_otg) {
+ if (!(fsl_readl(&dr_regs->otgsc) & OTGSC_STS_USB_ID)) {
+ pr_debug("udc: Leaving early\n");
+ return;
+ }
+ }
+
+ /* disable all INTR */
+ fsl_writel(0, &dr_regs->usbintr);
+
+ /* Set stopped bit for isr */
+ udc->stopped = 1;
+
+ /* disable IO output */
+/* usb_sys_regs->control = 0; */
+
+ /* set controller to Stop */
+ tmp = fsl_readl(&dr_regs->usbcmd);
+ tmp &= ~USB_CMD_RUN_STOP;
+ fsl_writel(tmp, &dr_regs->usbcmd);
+}
+
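+/*
+ * Program one direction of an endpoint's ENDPTCTRL register: set the
+ * transfer type, enable the endpoint and, for non-ep0 endpoints,
+ * reset the data toggle.
+ */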
+static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
+ unsigned char ep_type)
+{
+ unsigned int tmp_epctrl = 0;
+
+ tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (dir) {
+ if (ep_num)
+ tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
+ tmp_epctrl |= EPCTRL_TX_ENABLE;
+ tmp_epctrl &= ~EPCTRL_TX_TYPE;
+ tmp_epctrl |= ((unsigned int)(ep_type)
+ << EPCTRL_TX_EP_TYPE_SHIFT);
+ } else {
+ if (ep_num)
+ tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
+ tmp_epctrl |= EPCTRL_RX_ENABLE;
+ tmp_epctrl &= ~EPCTRL_RX_TYPE;
+ tmp_epctrl |= ((unsigned int)(ep_type)
+ << EPCTRL_RX_EP_TYPE_SHIFT);
+ }
+
+ fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
+}
+
+static void
+dr_ep_change_stall(unsigned char ep_num, unsigned char dir, int value)
+{
+ u32 tmp_epctrl = 0;
+
+ tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+
+ if (value) {
+ /* set the stall bit */
+ if (dir)
+ tmp_epctrl |= EPCTRL_TX_EP_STALL;
+ else
+ tmp_epctrl |= EPCTRL_RX_EP_STALL;
+ } else {
+ /* clear the stall bit and reset data toggle */
+ if (dir) {
+ tmp_epctrl &= ~EPCTRL_TX_EP_STALL;
+ tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
+ } else {
+ tmp_epctrl &= ~EPCTRL_RX_EP_STALL;
+ tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
+ }
+ }
+ fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
+}
+
+/* Get stall status of a specific ep
+ Return: 0: not stalled; 1: stalled */
+static int dr_ep_get_stall(unsigned char ep_num, unsigned char dir)
+{
+ u32 epctrl;
+
+ epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (dir)
+ return (epctrl & EPCTRL_TX_EP_STALL) ? 1 : 0;
+ else
+ return (epctrl & EPCTRL_RX_EP_STALL) ? 1 : 0;
+}
+
+/********************************************************************
+ Internal Structure Build up functions
+********************************************************************/
+
+/*------------------------------------------------------------------
+ * struct_ep_qh_setup(): set the Endpoint Capabilities field of QH
+ * @zlt: Zero Length Termination Select (1: disable; 0: enable)
+ * @mult: Mult field
+ ------------------------------------------------------------------*/
+static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
+ unsigned char dir, unsigned char ep_type,
+ unsigned int max_pkt_len,
+ unsigned int zlt, unsigned char mult)
+{
+ struct ep_queue_head *p_QH = &udc->ep_qh[2 * ep_num + dir];
+ unsigned int tmp = 0;
+
+ /* set the Endpoint Capabilities in QH */
+ switch (ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* Interrupt On Setup (IOS). for control ep */
+ tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+ | EP_QUEUE_HEAD_IOS;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+ | (mult << EP_QUEUE_HEAD_MULT_POS);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
+ break;
+ default:
+ VDBG("error ep type is %d", ep_type);
+ return;
+ }
+ if (zlt)
+ tmp |= EP_QUEUE_HEAD_ZLT_SEL;
+
+ p_QH->max_pkt_length = cpu_to_hc32(tmp);
+ p_QH->next_dtd_ptr = 1;
+ p_QH->size_ioc_int_sts = 0;
+}
+
+/* Setup qh structure and ep register for ep0. */
+static void ep0_setup(struct fsl_udc *udc)
+{
+ /* the initialization of an ep includes: fields in QH, Regs,
+ * fsl_ep struct */
+ struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL,
+ USB_MAX_CTRL_PAYLOAD, 0, 0);
+ struct_ep_qh_setup(udc, 0, USB_SEND, USB_ENDPOINT_XFER_CONTROL,
+ USB_MAX_CTRL_PAYLOAD, 0, 0);
+ dr_ep_setup(0, USB_RECV, USB_ENDPOINT_XFER_CONTROL);
+ dr_ep_setup(0, USB_SEND, USB_ENDPOINT_XFER_CONTROL);
+}
+
+/***********************************************************************
+ Endpoint Management Functions
+***********************************************************************/
+
+/*-------------------------------------------------------------------------
+ * when configurations are set, or when interface settings change
+ * for example the do_set_interface() in gadget layer,
+ * the driver will enable or disable the relevant endpoints
+ * ep0 doesn't use this routine. It is always enabled.
+-------------------------------------------------------------------------*/
+static int fsl_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fsl_udc *udc = NULL;
+ struct fsl_ep *ep = NULL;
+ unsigned short max = 0;
+ unsigned char mult = 0, zlt;
+ int retval = -EINVAL;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fsl_ep, ep);
+
+ /* catch various bogus parameters */
+ if (!_ep || !desc
+ || (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ udc = ep->udc;
+
+ if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+
+ max = usb_endpoint_maxp(desc);
+
+ /* Disable automatic zlp generation. Driver is responsible to indicate
+ * explicitly through req->req.zero. This is needed to enable multi-td
+ * request. */
+ zlt = 1;
+
+ /* Assume the max packet size from gadget is always correct */
+ switch (desc->bmAttributes & 0x03) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ /* mult = 0. Execute N Transactions as demonstrated by
+ * the USB variable length packet protocol where N is
+ * computed using the Maximum Packet Length (dQH) and
+ * the Total Bytes field (dTD) */
+ mult = 0;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* Calculate transactions needed for high bandwidth iso */
+ mult = usb_endpoint_maxp_mult(desc);
+ /* 3 transactions at most */
+ if (mult > 3)
+ goto en_done;
+ break;
+ default:
+ goto en_done;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ep->ep.maxpacket = max;
+ ep->ep.desc = desc;
+ ep->stopped = 0;
+
+ /* Controller related setup */
+ /* Init EPx Queue Head (Ep Capabilities field in QH
+ * according to max, zlt, mult) */
+ struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
+ (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
+ ? USB_SEND : USB_RECV),
+ (unsigned char) (desc->bmAttributes
+ & USB_ENDPOINT_XFERTYPE_MASK),
+ max, zlt, mult);
+
+ /* Init endpoint ctrl register */
+ dr_ep_setup((unsigned char) ep_index(ep),
+ (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
+ ? USB_SEND : USB_RECV),
+ (unsigned char) (desc->bmAttributes
+ & USB_ENDPOINT_XFERTYPE_MASK));
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ retval = 0;
+
+ VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
+ ep->ep.desc->bEndpointAddress & 0x0f,
+ (desc->bEndpointAddress & USB_DIR_IN)
+ ? "in" : "out", max);
+en_done:
+ return retval;
+}
+
+/*---------------------------------------------------------------------
+ * @ep : the ep being unconfigured. May not be ep0
+ * Any pending and incomplete reqs will complete with status -ESHUTDOWN
+*---------------------------------------------------------------------*/
+static int fsl_ep_disable(struct usb_ep *_ep)
+{
+ struct fsl_udc *udc = NULL;
+ struct fsl_ep *ep = NULL;
+ unsigned long flags;
+ u32 epctrl;
+ int ep_num;
+
+ ep = container_of(_ep, struct fsl_ep, ep);
+ if (!_ep || !ep->ep.desc) {
+ VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
+ return -EINVAL;
+ }
+
+ /* disable ep on controller */
+ ep_num = ep_index(ep);
+ epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (ep_is_in(ep)) {
+ epctrl &= ~(EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE);
+ epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_TX_EP_TYPE_SHIFT;
+ } else {
+ epctrl &= ~(EPCTRL_RX_ENABLE | EPCTRL_TX_TYPE);
+ epctrl |= EPCTRL_EP_TYPE_BULK << EPCTRL_RX_EP_TYPE_SHIFT;
+ }
+ fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+ udc = (struct fsl_udc *)ep->udc;
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* nuke all pending requests (does flush) */
+ nuke(ep, -ESHUTDOWN);
+
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ VDBG("disabled %s OK", _ep->name);
+ return 0;
+}
+
+/*---------------------------------------------------------------------
+ * allocate a request object used by this endpoint
+ * the request's queue list head is initialized here; ep_queue()
+ * links it into the ep->queue later
+ * Returns the request, or NULL if one could not be allocated
+*---------------------------------------------------------------------*/
+static struct usb_request *
+fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct fsl_req *req = NULL;
+
+ req = kzalloc(sizeof *req, gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fsl_req *req = NULL;
+
+ req = container_of(_req, struct fsl_req, req);
+
+ if (_req)
+ kfree(req);
+}
+
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
+static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
+{
+ u32 temp, bitmask, tmp_stat;
+
+ /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
+ VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
+
+ bitmask = ep_is_in(ep)
+ ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep)));
+
+ /* check if the pipe is empty */
+ if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
+ /* Add td to the end */
+ struct fsl_req *lastreq;
+ lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
+ lastreq->tail->next_td_ptr =
+ cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
+ /* Ensure the dTD's next dtd pointer is updated */
+ wmb();
+ /* Read prime bit, if 1 goto done */
+ if (fsl_readl(&dr_regs->endpointprime) & bitmask)
+ return;
+
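+ /* The controller may still be retiring dTDs on this
+ * endpoint; use the ATDTW (Add dTD TripWire) semaphore
+ * to get a consistent snapshot of the endpoint status. */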
+ do {
+ /* Set ATDTW bit in USBCMD */
+ temp = fsl_readl(&dr_regs->usbcmd);
+ fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);
+
+ /* Read correct status bit */
+ tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;
+
+ } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));
+
+ /* Write ATDTW bit to 0 */
+ temp = fsl_readl(&dr_regs->usbcmd);
+ fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
+
+ if (tmp_stat)
+ return;
+ }
+
+ fsl_prime_ep(ep, req->head);
+}
+
+/* Fill in the dTD structure
+ * @req: request that the transfer belongs to
+ * @length: return actually data length of the dTD
+ * @dma: return dma address of the dTD
+ * @is_last: return flag if it is the last dTD of the request
+ * return: pointer to the built dTD */
+static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
+ dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
+{
+ u32 swap_temp;
+ struct ep_td_struct *dtd;
+
+ /* how big will this transfer be? */
+ *length = min(req->req.length - req->req.actual,
+ (unsigned)EP_MAX_LENGTH_TRANSFER);
+
+ dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
+ if (dtd == NULL)
+ return dtd;
+
+ dtd->td_dma = *dma;
+ /* Clear reserved field */
+ swap_temp = hc32_to_cpu(dtd->size_ioc_sts);
+ swap_temp &= ~DTD_RESERVED_FIELDS;
+ dtd->size_ioc_sts = cpu_to_hc32(swap_temp);
+
+ /* Init all of the buffer page pointers: a dTD addresses up to
+ * five contiguous 4K pages starting at the current offset */
+ swap_temp = (u32) (req->req.dma + req->req.actual);
+ dtd->buff_ptr0 = cpu_to_hc32(swap_temp);
+ dtd->buff_ptr1 = cpu_to_hc32(swap_temp + 0x1000);
+ dtd->buff_ptr2 = cpu_to_hc32(swap_temp + 0x2000);
+ dtd->buff_ptr3 = cpu_to_hc32(swap_temp + 0x3000);
+ dtd->buff_ptr4 = cpu_to_hc32(swap_temp + 0x4000);
+
+ req->req.actual += *length;
+
+ /* zlp is needed if req->req.zero is set */
+ if (req->req.zero) {
+ if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+ *is_last = 1;
+ else
+ *is_last = 0;
+ } else if (req->req.length == req->req.actual)
+ *is_last = 1;
+ else
+ *is_last = 0;
+
+ if ((*is_last) == 0)
+ VDBG("multi-dtd request!");
+ /* Fill in the transfer size; set active bit */
+ swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+
+ /* Enable interrupt for the last dtd of a request */
+ if (*is_last && !req->req.no_interrupt)
+ swap_temp |= DTD_IOC;
+
+ dtd->size_ioc_sts = cpu_to_hc32(swap_temp);
+
+ mb();
+
+ VDBG("length = %d address= 0x%x", *length, (int)*dma);
+
+ return dtd;
+}
+
+/* Generate dtd chain for a request */
+static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
+{
+ unsigned count;
+ int is_last;
+ int is_first = 1;
+ struct ep_td_struct *last_dtd = NULL, *dtd;
+ dma_addr_t dma;
+
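+ /* build one dTD at a time, linking each new dTD to its
+ * predecessor both by DMA address (for the controller) and by
+ * virtual pointer (so done() can walk the chain and free it) */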
+ do {
+ dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
+ if (dtd == NULL)
+ return -ENOMEM;
+
+ if (is_first) {
+ is_first = 0;
+ req->head = dtd;
+ } else {
+ last_dtd->next_td_ptr = cpu_to_hc32(dma);
+ last_dtd->next_td_virt = dtd;
+ }
+ last_dtd = dtd;
+
+ req->dtd_count++;
+ } while (!is_last);
+
+ dtd->next_td_ptr = cpu_to_hc32(DTD_NEXT_TERMINATE);
+
+ req->tail = dtd;
+
+ return 0;
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
+ struct fsl_req *req = container_of(_req, struct fsl_req, req);
+ struct fsl_udc *udc;
+ unsigned long flags;
+ int ret;
+
+ /* catch various bogus parameters */
+ if (!_req || !req->req.complete || !req->req.buf
+ || !list_empty(&req->queue)) {
+ VDBG("%s, bad params", __func__);
+ return -EINVAL;
+ }
+ if (unlikely(!_ep || !ep->ep.desc)) {
+ VDBG("%s, bad ep", __func__);
+ return -EINVAL;
+ }
+ if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
+ if (req->req.length > ep->ep.maxpacket)
+ return -EMSGSIZE;
+ }
+
+ udc = ep->udc;
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ req->ep = ep;
+
+ ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
+ if (ret)
+ return ret;
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->dtd_count = 0;
+
+ /* build dtds and push them to device queue */
+ if (!fsl_req_to_dtd(req, gfp_flags)) {
+ spin_lock_irqsave(&udc->lock, flags);
+ fsl_queue_td(ep, req);
+ } else {
+ return -ENOMEM;
+ }
+
+ /* irq handler advances the queue */
+ if (req != NULL)
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
+ struct fsl_req *req = NULL;
+ struct fsl_req *iter;
+ unsigned long flags;
+ int ep_num, stopped, ret = 0;
+ u32 epctrl;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ stopped = ep->stopped;
+
+ /* Stop the ep before we deal with the queue */
+ ep->stopped = 1;
+ ep_num = ep_index(ep);
+ epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (ep_is_in(ep))
+ epctrl &= ~EPCTRL_TX_ENABLE;
+ else
+ epctrl &= ~EPCTRL_RX_ENABLE;
+ fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* The request is in progress, or completed but not dequeued */
+ if (ep->queue.next == &req->queue) {
+ _req->status = -ECONNRESET;
+ fsl_ep_fifo_flush(_ep); /* flush current transfer */
+
+ /* The request isn't the last request in this ep queue */
+ if (req->queue.next != &ep->queue) {
+ struct fsl_req *next_req;
+
+ next_req = list_entry(req->queue.next, struct fsl_req,
+ queue);
+
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
+ }
+ /* The request hasn't been processed, patch up the TD chain */
+ } else {
+ struct fsl_req *prev_req;
+
+ prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
+ }
+
+ done(ep, req, -ECONNRESET);
+
+ /* Enable EP */
+out: epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+ if (ep_is_in(ep))
+ epctrl |= EPCTRL_TX_ENABLE;
+ else
+ epctrl |= EPCTRL_RX_ENABLE;
+ fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+ ep->stopped = stopped;
+
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------
+ * modify the endpoint halt feature
+ * @ep: the non-isochronous endpoint being stalled
+ * @value: 1--set halt 0--clear halt
+ * Returns zero, or a negative error code.
+*----------------------------------------------------------------*/
+static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct fsl_ep *ep = NULL;
+ unsigned long flags;
+ int status = -EOPNOTSUPP; /* operation not supported */
+ unsigned char ep_dir = 0, ep_num = 0;
+ struct fsl_udc *udc = NULL;
+
+ ep = container_of(_ep, struct fsl_ep, ep);
+ udc = ep->udc;
+ if (!_ep || !ep->ep.desc) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
+ status = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Attempts to halt an IN ep will fail if any transfer requests
+ * are still queued */
+ if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
+ status = -EAGAIN;
+ goto out;
+ }
+
+ status = 0;
+ ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
+ ep_num = (unsigned char)(ep_index(ep));
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ dr_ep_change_stall(ep_num, ep_dir, value);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ if (ep_index(ep) == 0) {
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+ }
+out:
+ VDBG(" %s %s halt stat %d", ep->ep.name,
+ value ? "set" : "clear", status);
+
+ return status;
+}
+
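+/*
+ * Report how many bytes are left to transfer in the endpoint's queue
+ * head, provided the endpoint is currently primed.
+ */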
+static int fsl_ep_fifo_status(struct usb_ep *_ep)
+{
+ struct fsl_ep *ep;
+ struct fsl_udc *udc;
+ int size = 0;
+ u32 bitmask;
+ struct ep_queue_head *qh;
+
+ if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress & 0xF))
+ return -ENODEV;
+
+ ep = container_of(_ep, struct fsl_ep, ep);
+
+ udc = (struct fsl_udc *)ep->udc;
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ qh = get_qh_by_ep(ep);
+
+ bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
+ (1 << (ep_index(ep)));
+
+ if (fsl_readl(&dr_regs->endptstatus) & bitmask)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ >> DTD_LENGTH_BIT_POS;
+
+ pr_debug("%s %u\n", __func__, size);
+ return size;
+}
+
+static void fsl_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct fsl_ep *ep;
+ int ep_num, ep_dir;
+ u32 bits;
+ unsigned long timeout;
+#define FSL_UDC_FLUSH_TIMEOUT 1000
+
+ if (!_ep) {
+ return;
+ } else {
+ ep = container_of(_ep, struct fsl_ep, ep);
+ if (!ep->ep.desc)
+ return;
+ }
+ ep_num = ep_index(ep);
+ ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
+
+ if (ep_num == 0)
+ bits = (1 << 16) | 1;
+ else if (ep_dir == USB_SEND)
+ bits = 1 << (16 + ep_num);
+ else
+ bits = 1 << ep_num;
+
+ timeout = jiffies + FSL_UDC_FLUSH_TIMEOUT;
+ do {
+ fsl_writel(bits, &dr_regs->endptflush);
+
+ /* Wait until flush complete */
+ while (fsl_readl(&dr_regs->endptflush)) {
+ if (time_after(jiffies, timeout)) {
+ ERR("ep flush timeout\n");
+ return;
+ }
+ cpu_relax();
+ }
+ /* See if we need to flush again */
+ } while (fsl_readl(&dr_regs->endptstatus) & bits);
+}
+
+static const struct usb_ep_ops fsl_ep_ops = {
+ .enable = fsl_ep_enable,
+ .disable = fsl_ep_disable,
+
+ .alloc_request = fsl_alloc_request,
+ .free_request = fsl_free_request,
+
+ .queue = fsl_ep_queue,
+ .dequeue = fsl_ep_dequeue,
+
+ .set_halt = fsl_ep_set_halt,
+ .fifo_status = fsl_ep_fifo_status,
+ .fifo_flush = fsl_ep_fifo_flush, /* flush fifo */
+};
+
+/*-------------------------------------------------------------------------
+ Gadget Driver Layer Operations
+-------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------
+ * Get the current frame number (from DR frame_index Reg )
+ *----------------------------------------------------------------------*/
+static int fsl_get_frame(struct usb_gadget *gadget)
+{
+ return (int)(fsl_readl(&dr_regs->frindex) & USB_FRINDEX_MASKS);
+}
+
+/*-----------------------------------------------------------------------
+ * Tries to wake up the host connected to this gadget
+ -----------------------------------------------------------------------*/
+static int fsl_wakeup(struct usb_gadget *gadget)
+{
+ struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
+ u32 portsc;
+
+ /* Remote wakeup feature not enabled by host */
+ if (!udc->remote_wakeup)
+ return -ENOTSUPP;
+
+ portsc = fsl_readl(&dr_regs->portsc1);
+ /* not suspended? */
+ if (!(portsc & PORTSCX_PORT_SUSPEND))
+ return 0;
+ /* trigger force resume */
+ portsc |= PORTSCX_PORT_FORCE_RESUME;
+ fsl_writel(portsc, &dr_regs->portsc1);
+ return 0;
+}
+
+static int can_pullup(struct fsl_udc *udc)
+{
+ return udc->driver && udc->softconnect && udc->vbus_active;
+}
+
+/* Notify controller that VBUS is powered, Called by whatever
+ detects VBUS sessions */
+static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct fsl_udc *udc;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct fsl_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+ VDBG("VBUS %s", is_active ? "on" : "off");
+ udc->vbus_active = (is_active != 0);
+ if (can_pullup(udc))
+ fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+ &dr_regs->usbcmd);
+ else
+ fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+ &dr_regs->usbcmd);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume. For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct fsl_udc *udc;
+
+ udc = container_of(gadget, struct fsl_udc, gadget);
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ return usb_phy_set_power(udc->transceiver, mA);
+ return -ENOTSUPP;
+}
+
+/* Change Data+ pullup status
+ * this func is used by usb_gadget_connect/disconnect
+ */
+static int fsl_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct fsl_udc *udc;
+
+ udc = container_of(gadget, struct fsl_udc, gadget);
+
+ if (!udc->vbus_active)
+ return -EOPNOTSUPP;
+
+ udc->softconnect = (is_on != 0);
+ if (can_pullup(udc))
+ fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+ &dr_regs->usbcmd);
+ else
+ fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+ &dr_regs->usbcmd);
+
+ return 0;
+}
+
+static int fsl_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int fsl_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops fsl_gadget_ops = {
+ .get_frame = fsl_get_frame,
+ .wakeup = fsl_wakeup,
+/* .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
+ .vbus_session = fsl_vbus_session,
+ .vbus_draw = fsl_vbus_draw,
+ .pullup = fsl_pullup,
+ .udc_start = fsl_udc_start,
+ .udc_stop = fsl_udc_stop,
+};
+
+/*
+ * Empty complete function used by this driver to fill in the req->complete
+ * field when creating a request since the complete field is mandatory.
+ */
+static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req) { }
+
+/* Set protocol stall on ep0, protocol stall will automatically be cleared
+ on new transaction */
+static void ep0stall(struct fsl_udc *udc)
+{
+ u32 tmp;
+
+ /* must set tx and rx to stall at the same time */
+ tmp = fsl_readl(&dr_regs->endptctrl[0]);
+ tmp |= EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL;
+ fsl_writel(tmp, &dr_regs->endptctrl[0]);
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = 0;
+}
+
+/* Prime a status phase for ep0 */
+static int ep0_prime_status(struct fsl_udc *udc, int direction)
+{
+ struct fsl_req *req = udc->status_req;
+ struct fsl_ep *ep;
+ int ret;
+
+ if (direction == EP_DIR_IN)
+ udc->ep0_dir = USB_DIR_IN;
+ else
+ udc->ep0_dir = USB_DIR_OUT;
+
+ ep = &udc->eps[0];
+ if (udc->ep0_state != DATA_STATE_XMIT)
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
+
+ req->ep = ep;
+ req->req.length = 0;
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = fsl_noop_complete;
+ req->dtd_count = 0;
+
+ ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
+ if (ret)
+ return ret;
+
+ if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
+ fsl_queue_td(ep, req);
+ else
+ return -ENOMEM;
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ return 0;
+}
+
+static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
+{
+ struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
+
+ if (ep->ep.name)
+ nuke(ep, -ESHUTDOWN);
+}
+
+/*
+ * ch9 Set address
+ */
+static void ch9setaddress(struct fsl_udc *udc, u16 value, u16 index, u16 length)
+{
+ /* Save the new address; it is written to the hardware only after
+ * the status phase completes, in ep0_req_complete() */
+ udc->device_address = (u8) value;
+ /* Update usb state */
+ udc->usb_state = USB_STATE_ADDRESS;
+ /* Status phase */
+ if (ep0_prime_status(udc, EP_DIR_IN))
+ ep0stall(udc);
+}
+
+/*
+ * ch9 Get status
+ */
+static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
+ u16 index, u16 length)
+{
+ u16 tmp = 0; /* Status, cpu endian */
+ struct fsl_req *req;
+ struct fsl_ep *ep;
+ int ret;
+
+ ep = &udc->eps[0];
+
+ if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ /* Get device status */
+ tmp = udc->gadget.is_selfpowered;
+ tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+ /* Get interface status */
+ /* We don't have interface information in udc driver */
+ tmp = 0;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+ /* Get endpoint status */
+ struct fsl_ep *target_ep;
+
+ target_ep = get_ep_by_pipe(udc, get_pipe_by_windex(index));
+
+ /* stall if endpoint doesn't exist */
+ if (!target_ep->ep.desc)
+ goto stall;
+ tmp = dr_ep_get_stall(ep_index(target_ep), ep_is_in(target_ep))
+ << USB_ENDPOINT_HALT;
+ }
+
+ udc->ep0_dir = USB_DIR_IN;
+ /* Borrow the per device status_req */
+ req = udc->status_req;
+ /* Fill in the request structure */
+ *((u16 *) req->req.buf) = cpu_to_le16(tmp);
+
+ req->ep = ep;
+ req->req.length = 2;
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->req.complete = fsl_noop_complete;
+ req->dtd_count = 0;
+
+ ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
+ if (ret)
+ goto stall;
+
+ /* prime the data phase */
+ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
+ fsl_queue_td(ep, req);
+ else /* no mem */
+ goto stall;
+
+ list_add_tail(&req->queue, &ep->queue);
+ udc->ep0_state = DATA_STATE_XMIT;
+ if (ep0_prime_status(udc, EP_DIR_OUT))
+ ep0stall(udc);
+
+ return;
+stall:
+ ep0stall(udc);
+}
+
+static void setup_received_irq(struct fsl_udc *udc,
+ struct usb_ctrlrequest *setup)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+ u16 wValue = le16_to_cpu(setup->wValue);
+ u16 wIndex = le16_to_cpu(setup->wIndex);
+ u16 wLength = le16_to_cpu(setup->wLength);
+
+ udc_reset_ep_queue(udc, 0);
+
+ /* We process some standard setup requests here */
+ switch (setup->bRequest) {
+ case USB_REQ_GET_STATUS:
+ /* Data+Status phase from udc */
+ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+ != (USB_DIR_IN | USB_TYPE_STANDARD))
+ break;
+ ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength);
+ return;
+
+ case USB_REQ_SET_ADDRESS:
+ /* Status phase from udc */
+ if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+ | USB_RECIP_DEVICE))
+ break;
+ ch9setaddress(udc, wValue, wIndex, wLength);
+ return;
+
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ /* Status phase from udc */
+ {
+ int rc = -EOPNOTSUPP;
+ u16 ptc = 0;
+
+ if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+ == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+ int pipe = get_pipe_by_windex(wIndex);
+ struct fsl_ep *ep;
+
+ if (wValue != 0 || wLength != 0 || pipe >= udc->max_ep)
+ break;
+ ep = get_ep_by_pipe(udc, pipe);
+
+ spin_unlock(&udc->lock);
+ rc = fsl_ep_set_halt(&ep->ep,
+ (setup->bRequest == USB_REQ_SET_FEATURE)
+ ? 1 : 0);
+ spin_lock(&udc->lock);
+
+ } else if ((setup->bRequestType & (USB_RECIP_MASK
+ | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+ | USB_TYPE_STANDARD)) {
+ /* Note: The driver does not include OTG support yet.
+ * This will be set when OTG support is added */
+ if (wValue == USB_DEVICE_TEST_MODE)
+ ptc = wIndex >> 8;
+ else if (gadget_is_otg(&udc->gadget)) {
+ if (setup->bRequest ==
+ USB_DEVICE_B_HNP_ENABLE)
+ udc->gadget.b_hnp_enable = 1;
+ else if (setup->bRequest ==
+ USB_DEVICE_A_HNP_SUPPORT)
+ udc->gadget.a_hnp_support = 1;
+ else if (setup->bRequest ==
+ USB_DEVICE_A_ALT_HNP_SUPPORT)
+ udc->gadget.a_alt_hnp_support = 1;
+ }
+ rc = 0;
+ } else
+ break;
+
+ if (rc == 0) {
+ if (ep0_prime_status(udc, EP_DIR_IN))
+ ep0stall(udc);
+ }
+ if (ptc) {
+ u32 tmp;
+
+ mdelay(10);
+ tmp = fsl_readl(&dr_regs->portsc1) | (ptc << 16);
+ fsl_writel(tmp, &dr_regs->portsc1);
+ printk(KERN_INFO "udc: switch to test mode %d.\n", ptc);
+ }
+
+ return;
+ }
+
+ default:
+ break;
+ }
+
+ /* Requests handled by gadget */
+ if (wLength) {
+ /* Data phase from gadget, status phase from udc */
+ udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+ ? USB_DIR_IN : USB_DIR_OUT;
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget,
+ &udc->local_setup_buff) < 0)
+ ep0stall(udc);
+ spin_lock(&udc->lock);
+ udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+ ? DATA_STATE_XMIT : DATA_STATE_RECV;
+ /*
+ * If the data stage is IN, send status prime immediately.
+ * See 2.0 Spec chapter 8.5.3.3 for detail.
+ */
+ if (udc->ep0_state == DATA_STATE_XMIT)
+ if (ep0_prime_status(udc, EP_DIR_OUT))
+ ep0stall(udc);
+
+ } else {
+ /* No data phase, IN status from gadget */
+ udc->ep0_dir = USB_DIR_IN;
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget,
+ &udc->local_setup_buff) < 0)
+ ep0stall(udc);
+ spin_lock(&udc->lock);
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
+ }
+}
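+
+/* Worked example of the flow above (hypothetical request): for a
+ * GET_DESCRIPTOR with wLength != 0, driver->setup() queues the IN data
+ * phase, ep0_state becomes DATA_STATE_XMIT, and ep0_prime_status(udc,
+ * EP_DIR_OUT) pre-arms the zero-length OUT status stage (USB 2.0 spec,
+ * 8.5.3).  A no-data SET_CONFIGURATION takes the else branch instead:
+ * ep0_dir is set to USB_DIR_IN and the state machine waits in
+ * WAIT_FOR_OUT_STATUS for the status stage to complete.
+ */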
+
+/* Process request for Data or Status phase of ep0
+ * prime status phase if needed */
+static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
+ struct fsl_req *req)
+{
+ if (udc->usb_state == USB_STATE_ADDRESS) {
+ /* Set the new address */
+ u32 new_address = (u32) udc->device_address;
+ fsl_writel(new_address << USB_DEVICE_ADDRESS_BIT_POS,
+ &dr_regs->deviceaddr);
+ }
+
+ done(ep0, req, 0);
+
+ switch (udc->ep0_state) {
+ case DATA_STATE_XMIT:
+ /* already primed at setup_received_irq */
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
+ break;
+ case DATA_STATE_RECV:
+ /* send status phase */
+ if (ep0_prime_status(udc, EP_DIR_IN))
+ ep0stall(udc);
+ break;
+ case WAIT_FOR_OUT_STATUS:
+ udc->ep0_state = WAIT_FOR_SETUP;
+ break;
+ case WAIT_FOR_SETUP:
+ ERR("Unexpected ep0 packets\n");
+ break;
+ default:
+ ep0stall(udc);
+ break;
+ }
+}
+
+/* Tripwire mechanism to ensure a setup packet payload is extracted without
+ * being corrupted by another incoming setup packet */
+static void tripwire_handler(struct fsl_udc *udc, u8 ep_num, u8 *buffer_ptr)
+{
+ u32 temp;
+ struct ep_queue_head *qh;
+ struct fsl_usb2_platform_data *pdata = udc->pdata;
+
+ qh = &udc->ep_qh[ep_num * 2 + EP_DIR_OUT];
+
+ /* Clear bit in ENDPTSETUPSTAT */
+ temp = fsl_readl(&dr_regs->endptsetupstat);
+ fsl_writel(temp | (1 << ep_num), &dr_regs->endptsetupstat);
+
+ /* repeat while a hazard exists, i.e. a new setup packet arrives mid-copy */
+ do {
+ /* Set Setup Tripwire */
+ temp = fsl_readl(&dr_regs->usbcmd);
+ fsl_writel(temp | USB_CMD_SUTW, &dr_regs->usbcmd);
+
+ /* Copy the setup packet to local buffer */
+ if (pdata->le_setup_buf) {
+ u32 *p = (u32 *)buffer_ptr;
+ u32 *s = (u32 *)qh->setup_buffer;
+
+ /* Convert little endian setup buffer to CPU endian */
+ *p++ = le32_to_cpu(*s++);
+ *p = le32_to_cpu(*s);
+ } else {
+ memcpy(buffer_ptr, (u8 *) qh->setup_buffer, 8);
+ }
+ } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_SUTW));
+
+ /* Clear Setup Tripwire */
+ temp = fsl_readl(&dr_regs->usbcmd);
+ fsl_writel(temp & ~USB_CMD_SUTW, &dr_regs->usbcmd);
+}
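+
+/* A sketch of the race the tripwire closes: without USB_CMD_SUTW, a
+ * second SETUP packet could overwrite qh->setup_buffer between the two
+ * word copies above, leaving buffer_ptr with 4 bytes of each packet.
+ * The controller clears SUTW whenever a new SETUP arrives, so the
+ * do/while loop re-copies until one intact 8-byte snapshot is taken.
+ */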
+
+/* process_ep_req(): reap the completed dTDs for this req */
+static int process_ep_req(struct fsl_udc *udc, int pipe,
+ struct fsl_req *curr_req)
+{
+ struct ep_td_struct *curr_td;
+ int actual, remaining_length, j, tmp;
+ int status = 0;
+ int errors = 0;
+ struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
+ int direction = pipe % 2;
+
+ curr_td = curr_req->head;
+ actual = curr_req->req.length;
+
+ for (j = 0; j < curr_req->dtd_count; j++) {
+ remaining_length = (hc32_to_cpu(curr_td->size_ioc_sts)
+ & DTD_PACKET_SIZE)
+ >> DTD_LENGTH_BIT_POS;
+ actual -= remaining_length;
+
+ errors = hc32_to_cpu(curr_td->size_ioc_sts);
+ if (errors & DTD_ERROR_MASK) {
+ if (errors & DTD_STATUS_HALTED) {
+ ERR("dTD error %08x QH=%d\n", errors, pipe);
+ /* Clear the errors and Halt condition */
+ tmp = hc32_to_cpu(curr_qh->size_ioc_int_sts);
+ tmp &= ~errors;
+ curr_qh->size_ioc_int_sts = cpu_to_hc32(tmp);
+ status = -EPIPE;
+ /* FIXME: continue with next queued TD? */
+
+ break;
+ }
+ if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+ VDBG("Transfer overflow");
+ status = -EPROTO;
+ break;
+ } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+ VDBG("ISO error");
+ status = -EILSEQ;
+ break;
+ } else
+ ERR("Unknown error has occurred (0x%x)!\n",
+ errors);
+
+ } else if (hc32_to_cpu(curr_td->size_ioc_sts)
+ & DTD_STATUS_ACTIVE) {
+ VDBG("Request not complete");
+ status = REQ_UNCOMPLETE;
+ return status;
+ } else if (remaining_length) {
+ if (direction) {
+ VDBG("Transmit dTD remaining length not zero");
+ status = -EPROTO;
+ break;
+ } else {
+ break;
+ }
+ } else {
+ VDBG("dTD transmitted successful");
+ }
+
+ if (j != curr_req->dtd_count - 1)
+ curr_td = (struct ep_td_struct *)curr_td->next_td_virt;
+ }
+
+ if (status)
+ return status;
+
+ curr_req->req.actual = actual;
+
+ return 0;
+}
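+
+#if 0
+/* Compiled-out illustration of the size_ioc_sts decoding done by
+ * process_ep_req(); the value is hypothetical. */
+static void dtd_decode_example(void)
+{
+ u32 size_ioc_sts = 0x00400080; /* hypothetical dTD status word */
+
+ /* (0x00400080 & 0x7FFF0000) >> 16 = 0x40: 64 bytes untransferred */
+ int remaining = (size_ioc_sts & DTD_PACKET_SIZE)
+ >> DTD_LENGTH_BIT_POS;
+
+ /* low byte 0x80 has DTD_STATUS_ACTIVE set: the dTD is still owned
+ * by the controller, so the request would be REQ_UNCOMPLETE */
+ if (size_ioc_sts & DTD_STATUS_ACTIVE)
+ VDBG("still active, %d bytes left", remaining);
+}
+#endif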
+
+/* Process a DTD completion interrupt */
+static void dtd_complete_irq(struct fsl_udc *udc)
+{
+ u32 bit_pos;
+ int i, ep_num, direction, bit_mask, status;
+ struct fsl_ep *curr_ep;
+ struct fsl_req *curr_req, *temp_req;
+
+ /* Clear the bits in the register */
+ bit_pos = fsl_readl(&dr_regs->endptcomplete);
+ fsl_writel(bit_pos, &dr_regs->endptcomplete);
+
+ if (!bit_pos)
+ return;
+
+ for (i = 0; i < udc->max_ep; i++) {
+ ep_num = i >> 1;
+ direction = i % 2;
+
+ bit_mask = 1 << (ep_num + 16 * direction);
+
+ if (!(bit_pos & bit_mask))
+ continue;
+
+ curr_ep = get_ep_by_pipe(udc, i);
+
+ /* Skip endpoints that are not configured */
+ if (!curr_ep->ep.name) {
+ WARNING("Invalid EP?");
+ continue;
+ }
+
+ /* process the req queue until the first incomplete request */
+ list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
+ queue) {
+ status = process_ep_req(udc, i, curr_req);
+
+ VDBG("status of process_ep_req= %d, ep = %d",
+ status, ep_num);
+ if (status == REQ_UNCOMPLETE)
+ break;
+ /* write back status to req */
+ curr_req->req.status = status;
+
+ if (ep_num == 0) {
+ ep0_req_complete(udc, curr_ep, curr_req);
+ break;
+ } else
+ done(curr_ep, curr_req, status);
+ }
+ }
+}
+
+static inline enum usb_device_speed portscx_device_speed(u32 reg)
+{
+ switch (reg & PORTSCX_PORT_SPEED_MASK) {
+ case PORTSCX_PORT_SPEED_HIGH:
+ return USB_SPEED_HIGH;
+ case PORTSCX_PORT_SPEED_FULL:
+ return USB_SPEED_FULL;
+ case PORTSCX_PORT_SPEED_LOW:
+ return USB_SPEED_LOW;
+ default:
+ return USB_SPEED_UNKNOWN;
+ }
+}
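+
+/* For illustration: a portsc1 value of 0x08001005 (hypothetical) has
+ * (reg & PORTSCX_PORT_SPEED_MASK) == PORTSCX_PORT_SPEED_HIGH, so the
+ * helper above returns USB_SPEED_HIGH; all other PORTSC bits (connect
+ * status, port power, ...) are ignored by the speed decode.
+ */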
+
+/* Process a port change interrupt */
+static void port_change_irq(struct fsl_udc *udc)
+{
+ if (udc->bus_reset)
+ udc->bus_reset = 0;
+
+ /* Bus resetting is finished */
+ if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET))
+ /* Get the speed */
+ udc->gadget.speed =
+ portscx_device_speed(fsl_readl(&dr_regs->portsc1));
+
+ /* Update USB state */
+ if (!udc->resume_state)
+ udc->usb_state = USB_STATE_DEFAULT;
+}
+
+/* Process suspend interrupt */
+static void suspend_irq(struct fsl_udc *udc)
+{
+ udc->resume_state = udc->usb_state;
+ udc->usb_state = USB_STATE_SUSPENDED;
+
+ /* report suspend to the driver; serial.c does not support this */
+ if (udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+}
+
+static void bus_resume(struct fsl_udc *udc)
+{
+ udc->usb_state = udc->resume_state;
+ udc->resume_state = 0;
+
+ /* report resume to the driver; serial.c does not support this */
+ if (udc->driver->resume)
+ udc->driver->resume(&udc->gadget);
+}
+
+/* Clean out all ep queues */
+static int reset_queues(struct fsl_udc *udc, bool bus_reset)
+{
+ u8 pipe;
+
+ for (pipe = 0; pipe < udc->max_pipes; pipe++)
+ udc_reset_ep_queue(udc, pipe);
+
+ /* report disconnect; the driver is already quiesced */
+ spin_unlock(&udc->lock);
+ if (bus_reset)
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ else
+ udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+
+ return 0;
+}
+
+/* Process reset interrupt */
+static void reset_irq(struct fsl_udc *udc)
+{
+ u32 temp;
+ unsigned long timeout;
+
+ /* Clear the device address */
+ temp = fsl_readl(&dr_regs->deviceaddr);
+ fsl_writel(temp & ~USB_DEVICE_ADDRESS_MASK, &dr_regs->deviceaddr);
+
+ udc->device_address = 0;
+
+ /* Clear usb state */
+ udc->resume_state = 0;
+ udc->ep0_dir = 0;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->remote_wakeup = 0; /* default to 0 on reset */
+ udc->gadget.b_hnp_enable = 0;
+ udc->gadget.a_hnp_support = 0;
+ udc->gadget.a_alt_hnp_support = 0;
+
+ /* Clear all the setup token semaphores */
+ temp = fsl_readl(&dr_regs->endptsetupstat);
+ fsl_writel(temp, &dr_regs->endptsetupstat);
+
+ /* Clear all the endpoint complete status bits */
+ temp = fsl_readl(&dr_regs->endptcomplete);
+ fsl_writel(temp, &dr_regs->endptcomplete);
+
+ timeout = jiffies + 100;
+ while (fsl_readl(&dr_regs->endpointprime)) {
+ /* Wait until all endptprime bits cleared */
+ if (time_after(jiffies, timeout)) {
+ ERR("Timeout for reset\n");
+ break;
+ }
+ cpu_relax();
+ }
+
+ /* Write 1s to the flush register */
+ fsl_writel(0xffffffff, &dr_regs->endptflush);
+
+ if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
+ VDBG("Bus reset");
+ /* Bus is resetting */
+ udc->bus_reset = 1;
+ /* Reset all the queues, including XD, dTD, EP queue
+ * head and TR Queue */
+ reset_queues(udc, true);
+ udc->usb_state = USB_STATE_DEFAULT;
+ } else {
+ VDBG("Controller reset");
+ /* initialize usb hw regs except for the EP regs; do not
+ * touch the usbintr reg */
+ dr_controller_setup(udc);
+
+ /* Reset all internal used Queues */
+ reset_queues(udc, false);
+
+ ep0_setup(udc);
+
+ /* Enable DR IRQ reg, Set Run bit, change udc state */
+ dr_controller_run(udc);
+ udc->usb_state = USB_STATE_ATTACHED;
+ }
+}
+
+/*
+ * USB device controller interrupt handler
+ */
+static irqreturn_t fsl_udc_irq(int irq, void *_udc)
+{
+ struct fsl_udc *udc = _udc;
+ u32 irq_src;
+ irqreturn_t status = IRQ_NONE;
+ unsigned long flags;
+
+ /* Disable ISR for OTG host mode */
+ if (udc->stopped)
+ return IRQ_NONE;
+ spin_lock_irqsave(&udc->lock, flags);
+ irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
+ /* Clear notification bits */
+ fsl_writel(irq_src, &dr_regs->usbsts);
+
+ /* VDBG("irq_src [0x%8x]", irq_src); */
+
+ /* Need to resume? */
+ if (udc->usb_state == USB_STATE_SUSPENDED)
+ if ((fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_SUSPEND) == 0)
+ bus_resume(udc);
+
+ /* USB Interrupt */
+ if (irq_src & USB_STS_INT) {
+ VDBG("Packet int");
+ /* Setup packet; we only support ep0 as the control ep */
+ if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
+ tripwire_handler(udc, 0,
+ (u8 *) (&udc->local_setup_buff));
+ setup_received_irq(udc, &udc->local_setup_buff);
+ status = IRQ_HANDLED;
+ }
+
+ /* completion of dtd */
+ if (fsl_readl(&dr_regs->endptcomplete)) {
+ dtd_complete_irq(udc);
+ status = IRQ_HANDLED;
+ }
+ }
+
+ /* SOF (for ISO transfer) */
+ if (irq_src & USB_STS_SOF) {
+ status = IRQ_HANDLED;
+ }
+
+ /* Port Change */
+ if (irq_src & USB_STS_PORT_CHANGE) {
+ port_change_irq(udc);
+ status = IRQ_HANDLED;
+ }
+
+ /* Reset Received */
+ if (irq_src & USB_STS_RESET) {
+ VDBG("reset int");
+ reset_irq(udc);
+ status = IRQ_HANDLED;
+ }
+
+ /* Sleep Enable (Suspend) */
+ if (irq_src & USB_STS_SUSPEND) {
+ suspend_irq(udc);
+ status = IRQ_HANDLED;
+ }
+
+ if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
+ VDBG("Error IRQ %x", irq_src);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return status;
+}
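+
+/* Note on the ack above: usbsts is write-1-to-clear, so writing irq_src
+ * back acks exactly the events handled in this pass; masking with
+ * usbintr first keeps disabled interrupt sources from being acked or
+ * acted upon.
+ */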
+
+/*----------------------------------------------------------------*
+ * Hook to gadget drivers
+ * Called by initialization code of gadget drivers
+*----------------------------------------------------------------*/
+static int fsl_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ /* a lock is needed; it is unclear whether this lock or another should be used */
+ spin_lock_irqsave(&udc_controller->lock, flags);
+
+ /* hook up the driver */
+ udc_controller->driver = driver;
+ spin_unlock_irqrestore(&udc_controller->lock, flags);
+ g->is_selfpowered = 1;
+
+ if (!IS_ERR_OR_NULL(udc_controller->transceiver)) {
+ /* Suspend the controller until OTG enables it */
+ udc_controller->stopped = 1;
+ printk(KERN_INFO "Suspend udc for OTG auto detect\n");
+
+ /* connect to bus through transceiver; the check
+ * above already guarantees it is valid */
+ retval = otg_set_peripheral(
+ udc_controller->transceiver->otg,
+ &udc_controller->gadget);
+ if (retval < 0) {
+ ERR("can't bind to transceiver\n");
+ udc_controller->driver = NULL;
+ return retval;
+ }
+ } else {
+ /* Enable DR IRQ reg and set USBCMD reg Run bit */
+ dr_controller_run(udc_controller);
+ udc_controller->usb_state = USB_STATE_ATTACHED;
+ udc_controller->ep0_state = WAIT_FOR_SETUP;
+ udc_controller->ep0_dir = 0;
+ }
+
+ return retval;
+}
+
+/* Disconnect from gadget driver */
+static int fsl_udc_stop(struct usb_gadget *g)
+{
+ struct fsl_ep *loop_ep;
+ unsigned long flags;
+
+ if (!IS_ERR_OR_NULL(udc_controller->transceiver))
+ otg_set_peripheral(udc_controller->transceiver->otg, NULL);
+
+ /* stop DR, disable intr */
+ dr_controller_stop(udc_controller);
+
+ /* in fact, not needed */
+ udc_controller->usb_state = USB_STATE_ATTACHED;
+ udc_controller->ep0_state = WAIT_FOR_SETUP;
+ udc_controller->ep0_dir = 0;
+
+ /* standard operation */
+ spin_lock_irqsave(&udc_controller->lock, flags);
+ udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+ nuke(&udc_controller->eps[0], -ESHUTDOWN);
+ list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
+ ep.ep_list)
+ nuke(loop_ep, -ESHUTDOWN);
+ spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+ udc_controller->driver = NULL;
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------
+ PROC File System Support
+-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char proc_filename[] = "driver/fsl_usb2_udc";
+
+static int fsl_proc_read(struct seq_file *m, void *v)
+{
+ unsigned long flags;
+ int i;
+ u32 tmp_reg;
+ struct fsl_ep *ep = NULL;
+ struct fsl_req *req;
+
+ struct fsl_udc *udc = udc_controller;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* ------basic driver information ---- */
+ seq_printf(m,
+ DRIVER_DESC "\n"
+ "%s version: %s\n"
+ "Gadget driver: %s\n\n",
+ driver_name, DRIVER_VERSION,
+ udc->driver ? udc->driver->driver.name : "(none)");
+
+ /* ------ DR Registers ----- */
+ tmp_reg = fsl_readl(&dr_regs->usbcmd);
+ seq_printf(m,
+ "USBCMD reg:\n"
+ "SetupTW: %d\n"
+ "Run/Stop: %s\n\n",
+ (tmp_reg & USB_CMD_SUTW) ? 1 : 0,
+ (tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
+
+ tmp_reg = fsl_readl(&dr_regs->usbsts);
+ seq_printf(m,
+ "USB Status Reg:\n"
+ "Dr Suspend: %d Reset Received: %d System Error: %s "
+ "USB Error Interrupt: %s\n\n",
+ (tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
+ (tmp_reg & USB_STS_RESET) ? 1 : 0,
+ (tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
+ (tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
+
+ tmp_reg = fsl_readl(&dr_regs->usbintr);
+ seq_printf(m,
+ "USB Interrupt Enable Reg:\n"
+ "Sleep Enable: %d SOF Received Enable: %d "
+ "Reset Enable: %d\n"
+ "System Error Enable: %d "
+ "Port Change Detected Enable: %d\n"
+ "USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
+ (tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
+ (tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
+ (tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
+ (tmp_reg & USB_INTR_SYS_ERR_EN) ? 1 : 0,
+ (tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
+ (tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
+ (tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
+
+ tmp_reg = fsl_readl(&dr_regs->frindex);
+ seq_printf(m,
+ "USB Frame Index Reg: Frame Number is 0x%x\n\n",
+ (tmp_reg & USB_FRINDEX_MASKS));
+
+ tmp_reg = fsl_readl(&dr_regs->deviceaddr);
+ seq_printf(m,
+ "USB Device Address Reg: Device Addr is 0x%x\n\n",
+ (tmp_reg & USB_DEVICE_ADDRESS_MASK));
+
+ tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
+ seq_printf(m,
+ "USB Endpoint List Address Reg: "
+ "Device Addr is 0x%x\n\n",
+ (tmp_reg & USB_EP_LIST_ADDRESS_MASK));
+
+ tmp_reg = fsl_readl(&dr_regs->portsc1);
+ seq_printf(m,
+ "USB Port Status&Control Reg:\n"
+ "Port Transceiver Type : %s Port Speed: %s\n"
+ "PHY Low Power Suspend: %s Port Reset: %s "
+ "Port Suspend Mode: %s\n"
+ "Over-current Change: %s "
+ "Port Enable/Disable Change: %s\n"
+ "Port Enabled/Disabled: %s "
+ "Current Connect Status: %s\n\n", ( {
+ const char *s;
+ switch (tmp_reg & PORTSCX_PTS_FSLS) {
+ case PORTSCX_PTS_UTMI:
+ s = "UTMI"; break;
+ case PORTSCX_PTS_ULPI:
+ s = "ULPI "; break;
+ case PORTSCX_PTS_FSLS:
+ s = "FS/LS Serial"; break;
+ default:
+ s = "None"; break;
+ }
+ s;} ),
+ usb_speed_string(portscx_device_speed(tmp_reg)),
+ (tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
+ "Low power mode" : "Normal PHY mode",
+ (tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
+ "Not in Reset",
+ (tmp_reg & PORTSCX_PORT_SUSPEND) ? "In " : "Not in",
+ (tmp_reg & PORTSCX_OVER_CURRENT_CHG) ? "Detected" :
+ "No",
+ (tmp_reg & PORTSCX_PORT_EN_DIS_CHANGE) ? "Changed" :
+ "Not changed",
+ (tmp_reg & PORTSCX_PORT_ENABLE) ? "Enabled" :
+ "Disabled",
+ (tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
+ "Attached" : "Not-Att");
+
+ tmp_reg = fsl_readl(&dr_regs->usbmode);
+ seq_printf(m,
+ "USB Mode Reg: Controller Mode is: %s\n\n", ( {
+ const char *s;
+ switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
+ case USB_MODE_CTRL_MODE_IDLE:
+ s = "Idle"; break;
+ case USB_MODE_CTRL_MODE_DEVICE:
+ s = "Device Controller"; break;
+ case USB_MODE_CTRL_MODE_HOST:
+ s = "Host Controller"; break;
+ default:
+ s = "None"; break;
+ }
+ s;
+ } ));
+
+ tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
+ seq_printf(m,
+ "Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
+ (tmp_reg & EP_SETUP_STATUS_MASK));
+
+ for (i = 0; i < udc->max_ep / 2; i++) {
+ tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
+ seq_printf(m, "EP Ctrl Reg [0x%x]: = [0x%x]\n", i, tmp_reg);
+ }
+ tmp_reg = fsl_readl(&dr_regs->endpointprime);
+ seq_printf(m, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
+
+ if (udc->pdata->have_sysif_regs) {
+ tmp_reg = usb_sys_regs->snoop1;
+ seq_printf(m, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
+
+ tmp_reg = usb_sys_regs->control;
+ seq_printf(m, "General Control Reg : = [0x%x]\n\n", tmp_reg);
+ }
+
+ /* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
+ ep = &udc->eps[0];
+ seq_printf(m, "For %s Maxpkt is 0x%x index is 0x%x\n",
+ ep->ep.name, ep_maxpacket(ep), ep_index(ep));
+
+ if (list_empty(&ep->queue)) {
+ seq_puts(m, "its req queue is empty\n\n");
+ } else {
+ list_for_each_entry(req, &ep->queue, queue) {
+ seq_printf(m,
+ "req %p actual 0x%x length 0x%x buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ }
+ }
+ /* other eps on gadget->ep_list */
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+ if (ep->ep.desc) {
+ seq_printf(m,
+ "\nFor %s Maxpkt is 0x%x "
+ "index is 0x%x\n",
+ ep->ep.name, ep_maxpacket(ep),
+ ep_index(ep));
+
+ if (list_empty(&ep->queue)) {
+ seq_puts(m, "its req queue is empty\n\n");
+ } else {
+ list_for_each_entry(req, &ep->queue, queue) {
+ seq_printf(m,
+ "req %p actual 0x%x length "
+ "0x%x buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ } /* end of list_for_each_entry over ep reqs */
+ } /* end of else (queue not empty) */
+ } /* end of if (ep->ep.desc) */
+ } /* end of list_for_each_entry over gadget.ep_list */
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+#define create_proc_file() \
+ proc_create_single(proc_filename, 0, NULL, fsl_proc_read)
+#define remove_proc_file() remove_proc_entry(proc_filename, NULL)
+
+#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
+
+#define create_proc_file() do {} while (0)
+#define remove_proc_file() do {} while (0)
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*-------------------------------------------------------------------------*/
+
+/* Release udc structures */
+static void fsl_udc_release(struct device *dev)
+{
+ complete(udc_controller->done);
+ dma_free_coherent(dev->parent, udc_controller->ep_qh_size,
+ udc_controller->ep_qh, udc_controller->ep_qh_dma);
+ kfree(udc_controller);
+}
+
+/******************************************************************
+ Internal structure setup functions
+*******************************************************************/
+/*------------------------------------------------------------------
+ * init resources for the global controller, called by fsl_udc_probe()
+ * On success the udc handle is initialized; on failure the fields
+ * touched here are reset back to their initial state.
+ * Return 0 on success and -1 on allocation failure
+ ------------------------------------------------------------------*/
+static int struct_udc_setup(struct fsl_udc *udc,
+ struct platform_device *pdev)
+{
+ struct fsl_usb2_platform_data *pdata;
+ size_t size;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ udc->phy_mode = pdata->phy_mode;
+
+ udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
+ if (!udc->eps) {
+ ERR("kmalloc udc endpoint status failed\n");
+ goto eps_alloc_failed;
+ }
+
+ /* initialize QHs, taking care of alignment */
+ size = udc->max_ep * sizeof(struct ep_queue_head);
+ if (size < QH_ALIGNMENT)
+ size = QH_ALIGNMENT;
+ else if ((size % QH_ALIGNMENT) != 0) {
+ /* round up to the next QH_ALIGNMENT boundary */
+ size += QH_ALIGNMENT - 1;
+ size &= ~(QH_ALIGNMENT - 1);
+ }
+ udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
+ &udc->ep_qh_dma, GFP_KERNEL);
+ if (!udc->ep_qh) {
+ ERR("malloc QHs for udc failed\n");
+ goto ep_queue_alloc_failed;
+ }
+
+ udc->ep_qh_size = size;
+
+ /* Initialize ep0 status request structure */
+ /* FIXME: fsl_alloc_request() ignores ep argument */
+ udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
+ struct fsl_req, req);
+ if (!udc->status_req) {
+ ERR("kzalloc for udc status request failed\n");
+ goto udc_status_alloc_failed;
+ }
+
+ /* allocate a small buffer (8 bytes) to hold the status response */
+ udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+ if (!udc->status_req->req.buf) {
+ ERR("kzalloc for udc request buffer failed\n");
+ goto udc_req_buf_alloc_failed;
+ }
+
+ udc->resume_state = USB_STATE_NOTATTACHED;
+ udc->usb_state = USB_STATE_POWERED;
+ udc->ep0_dir = 0;
+ udc->remote_wakeup = 0; /* default to 0 on reset */
+
+ return 0;
+
+udc_req_buf_alloc_failed:
+ kfree(udc->status_req);
+udc_status_alloc_failed:
+ /* ep_qh was allocated with dma_alloc_coherent(), not kmalloc() */
+ dma_free_coherent(&pdev->dev, udc->ep_qh_size,
+ udc->ep_qh, udc->ep_qh_dma);
+ udc->ep_qh_size = 0;
+ep_queue_alloc_failed:
+ kfree(udc->eps);
+eps_alloc_failed:
+ udc->phy_mode = 0;
+ return -1;
+}
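+
+/* Worked example of the QH sizing in struct_udc_setup(), assuming a
+ * hypothetical DCCPARAMS DEN of 3 (max_ep = 6): each ep_queue_head is
+ * 64 bytes, so size = 6 * 64 = 384 < QH_ALIGNMENT and the list is
+ * padded up to one 2048-byte block, which satisfies the controller's
+ * ENDPOINTLISTADDR alignment requirement.
+ */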
+
+/*----------------------------------------------------------------
+ * Set up the fsl_ep struct for an endpoint and
+ * link fsl_ep->ep into gadget->ep_list.
+ * ep0out is not used, so nothing is done for it here;
+ * ep0in must be taken care of by the caller.
+ *--------------------------------------------------------------*/
+static int struct_ep_setup(struct fsl_udc *udc, unsigned char index,
+ char *name, int link)
+{
+ struct fsl_ep *ep = &udc->eps[index];
+
+ ep->udc = udc;
+ strcpy(ep->name, name);
+ ep->ep.name = ep->name;
+
+ ep->ep.ops = &fsl_ep_ops;
+ ep->stopped = 0;
+
+ if (index == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ if (index & 1)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
+
+ /* for ep0: maxP is defined in the desc
+ * for other eps, maxP is set by ep_autoconfig() called by the gadget layer
+ */
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+
+ /* the queue lists any req for this ep */
+ INIT_LIST_HEAD(&ep->queue);
+
+ /* gadget.ep_list is used by ep_autoconfig, so ep0 is not added */
+ if (link)
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ ep->gadget = &udc->gadget;
+ ep->qh = &udc->ep_qh[index];
+
+ return 0;
+}
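+
+/* Illustrative call from the probe path below:
+ *	struct_ep_setup(udc_controller, 3, "ep1in", 1);
+ * index 3 is odd, so caps.dir_in is set, and link=1 puts "ep1in" on
+ * gadget.ep_list where ep_autoconfig() can later claim it.
+ */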
+
+/* Driver probe function
+ * all initialization operations implemented here except enabling usb_intr reg
+ * board setup should have been done in the platform code
+ */
+static int fsl_udc_probe(struct platform_device *pdev)
+{
+ struct fsl_usb2_platform_data *pdata;
+ struct resource *res;
+ int ret = -ENODEV;
+ unsigned int i;
+ u32 dccparams;
+
+ udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
+ if (udc_controller == NULL)
+ return -ENOMEM;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ udc_controller->pdata = pdata;
+ spin_lock_init(&udc_controller->lock);
+ udc_controller->stopped = 1;
+
+#ifdef CONFIG_USB_OTG
+ if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+ udc_controller->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
+ ERR("Can't find OTG driver!\n");
+ ret = -ENODEV;
+ goto err_kfree;
+ }
+ }
+#endif
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENXIO;
+ goto err_kfree;
+ }
+
+ if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
+ if (!request_mem_region(res->start, resource_size(res),
+ driver_name)) {
+ ERR("request mem region for %s failed\n", pdev->name);
+ ret = -EBUSY;
+ goto err_kfree;
+ }
+ }
+
+ dr_regs = ioremap(res->start, resource_size(res));
+ if (!dr_regs) {
+ ret = -ENOMEM;
+ goto err_release_mem_region;
+ }
+
+ pdata->regs = (void __iomem *)dr_regs;
+
+ /*
+ * do platform specific init: check the clock, grab/config pins, etc.
+ */
+ if (pdata->init && pdata->init(pdev)) {
+ ret = -ENODEV;
+ goto err_iounmap;
+ }
+
+ /* Set accessors only after pdata->init() ! */
+ fsl_set_accessors(pdata);
+
+ if (pdata->have_sysif_regs)
+ usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
+
+ /* Read Device Controller Capability Parameters register */
+ dccparams = fsl_readl(&dr_regs->dccparams);
+ if (!(dccparams & DCCPARAMS_DC)) {
+ ERR("This SOC doesn't support device role\n");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ /* Get max device endpoints */
+ /* DEN counts bidirectional ep pairs; max_ep counts each direction */
+ udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto err_exit;
+ }
+ udc_controller->irq = ret;
+
+ ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
+ driver_name, udc_controller);
+ if (ret != 0) {
+ ERR("cannot request irq %d err %d\n",
+ udc_controller->irq, ret);
+ goto err_exit;
+ }
+
+ /* Initialize the udc structure, including the QH and other members */
+ if (struct_udc_setup(udc_controller, pdev)) {
+ ERR("Can't initialize udc data structure\n");
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
+ /* initialize usb hw regs except for the EP regs;
+ * leave the usbintr reg untouched */
+ dr_controller_setup(udc_controller);
+ }
+
+ /* Setup gadget structure */
+ udc_controller->gadget.ops = &fsl_gadget_ops;
+ udc_controller->gadget.max_speed = USB_SPEED_HIGH;
+ udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
+ INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
+ udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+ udc_controller->gadget.name = driver_name;
+
+ /* Setup gadget.dev and register with kernel */
+ dev_set_name(&udc_controller->gadget.dev, "gadget");
+ udc_controller->gadget.dev.of_node = pdev->dev.of_node;
+
+ if (!IS_ERR_OR_NULL(udc_controller->transceiver))
+ udc_controller->gadget.is_otg = 1;
+
+ /* setup QH and epctrl for ep0 */
+ ep0_setup(udc_controller);
+
+ /* setup udc->eps[] for ep0 */
+ struct_ep_setup(udc_controller, 0, "ep0", 0);
+ /* for ep0: the desc is defined here;
+ * for other eps, the gadget layer calls ep_enable with a defined desc
+ */
+ udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
+ usb_ep_set_maxpacket_limit(&udc_controller->eps[0].ep,
+ USB_MAX_CTRL_PAYLOAD);
+
+ /* setup the udc->eps[] for non-control endpoints and link
+ * to gadget.ep_list */
+ for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
+ char name[14];
+
+ sprintf(name, "ep%dout", i);
+ struct_ep_setup(udc_controller, i * 2, name, 1);
+ sprintf(name, "ep%din", i);
+ struct_ep_setup(udc_controller, i * 2 + 1, name, 1);
+ }
+
+ /* use dma_pool for TD management */
+ udc_controller->td_pool = dma_pool_create("udc_td", &pdev->dev,
+ sizeof(struct ep_td_struct),
+ DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
+ if (udc_controller->td_pool == NULL) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ ret = usb_add_gadget_udc_release(&pdev->dev, &udc_controller->gadget,
+ fsl_udc_release);
+ if (ret)
+ goto err_del_udc;
+
+ create_proc_file();
+ return 0;
+
+err_del_udc:
+ dma_pool_destroy(udc_controller->td_pool);
+err_free_irq:
+ free_irq(udc_controller->irq, udc_controller);
+err_exit:
+ if (pdata->exit)
+ pdata->exit(pdev);
+err_iounmap:
+ iounmap(dr_regs);
+err_release_mem_region:
+ if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+ release_mem_region(res->start, resource_size(res));
+err_kfree:
+ kfree(udc_controller);
+ udc_controller = NULL;
+ return ret;
+}
+
+/* Driver removal function
+ * Free resources and finish pending transactions
+ */
+static int fsl_udc_remove(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (!udc_controller)
+ return -ENODEV;
+
+ udc_controller->done = &done;
+ usb_del_gadget_udc(&udc_controller->gadget);
+
+ /* DR has been stopped in usb_gadget_unregister_driver() */
+ remove_proc_file();
+
+ /* Free allocated memory */
+ kfree(udc_controller->status_req->req.buf);
+ kfree(udc_controller->status_req);
+ kfree(udc_controller->eps);
+
+ dma_pool_destroy(udc_controller->td_pool);
+ free_irq(udc_controller->irq, udc_controller);
+ iounmap(dr_regs);
+ if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
+ release_mem_region(res->start, resource_size(res));
+
+ /* free udc -- wait for release() to finish */
+ wait_for_completion(&done);
+
+ /*
+ * do platform specific un-initialization:
+ * release iomux pins, etc.
+ */
+ if (pdata->exit)
+ pdata->exit(pdev);
+
+ return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Modify power management attributes
+ * Used by the OTG state machine to temporarily disable the gadget
+ -----------------------------------------------------------------*/
+static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ dr_controller_stop(udc_controller);
+ return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Invoked on USB resume. May be called in interrupt context.
+ * Here we start the DR controller and enable the irq
+ *-----------------------------------------------------------------*/
+static int fsl_udc_resume(struct platform_device *pdev)
+{
+ /* Enable DR irq reg and set controller Run */
+ if (udc_controller->stopped) {
+ dr_controller_setup(udc_controller);
+ dr_controller_run(udc_controller);
+ }
+ udc_controller->usb_state = USB_STATE_ATTACHED;
+ udc_controller->ep0_state = WAIT_FOR_SETUP;
+ udc_controller->ep0_dir = 0;
+ return 0;
+}
+
+static int fsl_udc_otg_suspend(struct device *dev, pm_message_t state)
+{
+ struct fsl_udc *udc = udc_controller;
+ u32 mode, usbcmd;
+
+ mode = fsl_readl(&dr_regs->usbmode) & USB_MODE_CTRL_MODE_MASK;
+
+ pr_debug("%s(): mode 0x%x stopped %d\n", __func__, mode, udc->stopped);
+
+ /*
+ * If the controller is already stopped, then this must be a
+ * PM suspend. Remember this fact, so that we will leave the
+ * controller stopped at PM resume time.
+ */
+ if (udc->stopped) {
+ pr_debug("gadget already stopped, leaving early\n");
+ udc->already_stopped = 1;
+ return 0;
+ }
+
+ if (mode != USB_MODE_CTRL_MODE_DEVICE) {
+ pr_debug("gadget not in device mode, leaving early\n");
+ return 0;
+ }
+
+ /* stop the controller */
+ usbcmd = fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP;
+ fsl_writel(usbcmd, &dr_regs->usbcmd);
+
+ udc->stopped = 1;
+
+ pr_info("USB Gadget suspended\n");
+
+ return 0;
+}
+
+static int fsl_udc_otg_resume(struct device *dev)
+{
+ pr_debug("%s(): stopped %d already_stopped %d\n", __func__,
+ udc_controller->stopped, udc_controller->already_stopped);
+
+ /*
+ * If the controller was stopped at suspend time, then
+ * don't resume it now.
+ */
+ if (udc_controller->already_stopped) {
+ udc_controller->already_stopped = 0;
+ pr_debug("gadget was already stopped, leaving early\n");
+ return 0;
+ }
+
+ pr_info("USB Gadget resume\n");
+
+ return fsl_udc_resume(NULL);
+}
+/*-------------------------------------------------------------------------
+ Register entry point for the peripheral controller driver
+--------------------------------------------------------------------------*/
+static const struct platform_device_id fsl_udc_devtype[] = {
+ {
+ .name = "fsl-usb2-udc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
+static struct platform_driver udc_driver = {
+ .remove = fsl_udc_remove,
+ .id_table = fsl_udc_devtype,
+ /* these suspend and resume are not usb suspend and resume */
+ .suspend = fsl_udc_suspend,
+ .resume = fsl_udc_resume,
+ .driver = {
+ .name = driver_name,
+ /* udc suspend/resume called from OTG driver */
+ .suspend = fsl_udc_otg_suspend,
+ .resume = fsl_udc_otg_resume,
+ },
+};
+
+module_platform_driver_probe(udc_driver, fsl_udc_probe);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fsl-usb2-udc");
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
new file mode 100644
index 000000000..2efc5a930
--- /dev/null
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2004,2012 Freescale Semiconductor, Inc
+ * All rights reserved.
+ *
+ * Freescale USB device/endpoint management registers
+ */
+#ifndef __FSL_USB2_UDC_H
+#define __FSL_USB2_UDC_H
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* ### define USB registers here
+ */
+#define USB_MAX_CTRL_PAYLOAD 64
+#define USB_DR_SYS_OFFSET 0x400
+
+ /* USB DR device mode registers (Little Endian) */
+struct usb_dr_device {
+ /* Capability register */
+ u8 res1[256];
+ u16 caplength; /* Capability Register Length */
+ u16 hciversion; /* Host Controller Interface Version */
+ u32 hcsparams; /* Host Controller Structural Parameters */
+ u32 hccparams; /* Host Controller Capability Parameters */
+ u8 res2[20];
+ u32 dciversion; /* Device Controller Interface Version */
+ u32 dccparams; /* Device Controller Capability Parameters */
+ u8 res3[24];
+ /* Operation register */
+ u32 usbcmd; /* USB Command Register */
+ u32 usbsts; /* USB Status Register */
+ u32 usbintr; /* USB Interrupt Enable Register */
+ u32 frindex; /* Frame Index Register */
+ u8 res4[4];
+ u32 deviceaddr; /* Device Address */
+ u32 endpointlistaddr; /* Endpoint List Address Register */
+ u8 res5[4];
+ u32 burstsize; /* Master Interface Data Burst Size Register */
+ u32 txttfilltuning; /* Transmit FIFO Tuning Controls Register */
+ u8 res6[24];
+ u32 configflag; /* Configure Flag Register */
+ u32 portsc1; /* Port 1 Status and Control Register */
+ u8 res7[28];
+ u32 otgsc; /* On-The-Go Status and Control */
+ u32 usbmode; /* USB Mode Register */
+ u32 endptsetupstat; /* Endpoint Setup Status Register */
+ u32 endpointprime; /* Endpoint Initialization Register */
+ u32 endptflush; /* Endpoint Flush Register */
+ u32 endptstatus; /* Endpoint Status Register */
+ u32 endptcomplete; /* Endpoint Complete Register */
+ u32 endptctrl[6]; /* Endpoint Control Registers */
+};
+
+ /* USB DR host mode registers (Little Endian) */
+struct usb_dr_host {
+ /* Capability register */
+ u8 res1[256];
+ u16 caplength; /* Capability Register Length */
+ u16 hciversion; /* Host Controller Interface Version */
+ u32 hcsparams; /* Host Controller Structural Parameters */
+ u32 hccparams; /* Host Controller Capability Parameters */
+ u8 res2[20];
+ u32 dciversion; /* Device Controller Interface Version */
+ u32 dccparams; /* Device Controller Capability Parameters */
+ u8 res3[24];
+ /* Operation register */
+ u32 usbcmd; /* USB Command Register */
+ u32 usbsts; /* USB Status Register */
+ u32 usbintr; /* USB Interrupt Enable Register */
+ u32 frindex; /* Frame Index Register */
+ u8 res4[4];
+ u32 periodiclistbase; /* Periodic Frame List Base Address Register */
+ u32 asynclistaddr; /* Current Asynchronous List Address Register */
+ u8 res5[4];
+ u32 burstsize; /* Master Interface Data Burst Size Register */
+ u32 txttfilltuning; /* Transmit FIFO Tuning Controls Register */
+ u8 res6[24];
+ u32 configflag; /* Configure Flag Register */
+ u32 portsc1; /* Port 1 Status and Control Register */
+ u8 res7[28];
+ u32 otgsc; /* On-The-Go Status and Control */
+ u32 usbmode; /* USB Mode Register */
+ u32 endptsetupstat; /* Endpoint Setup Status Register */
+ u32 endpointprime; /* Endpoint Initialization Register */
+ u32 endptflush; /* Endpoint Flush Register */
+ u32 endptstatus; /* Endpoint Status Register */
+ u32 endptcomplete; /* Endpoint Complete Register */
+ u32 endptctrl[6]; /* Endpoint Control Registers */
+};
+
+ /* non-EHCI USB system interface registers (Big Endian) */
+struct usb_sys_interface {
+ u32 snoop1;
+ u32 snoop2;
+ u32 age_cnt_thresh; /* Age Count Threshold Register */
+ u32 pri_ctrl; /* Priority Control Register */
+ u32 si_ctrl; /* System Interface Control Register */
+ u8 res[236];
+ u32 control; /* General Purpose Control Register */
+};
+
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP 0
+#define DATA_STATE_XMIT 1
+#define DATA_STATE_NEED_ZLP 2
+#define WAIT_FOR_OUT_STATUS 3
+#define DATA_STATE_RECV 4
+
+/* Device Controller Capability Parameter register */
+#define DCCPARAMS_DC 0x00000080
+#define DCCPARAMS_DEN_MASK 0x0000001f
+
+/* Frame Index Register Bit Masks */
+#define USB_FRINDEX_MASKS 0x3fff
+/* USB CMD Register Bit Masks */
+#define USB_CMD_RUN_STOP 0x00000001
+#define USB_CMD_CTRL_RESET 0x00000002
+#define USB_CMD_PERIODIC_SCHEDULE_EN 0x00000010
+#define USB_CMD_ASYNC_SCHEDULE_EN 0x00000020
+#define USB_CMD_INT_AA_DOORBELL 0x00000040
+#define USB_CMD_ASP 0x00000300
+#define USB_CMD_ASYNC_SCH_PARK_EN 0x00000800
+#define USB_CMD_SUTW 0x00002000
+#define USB_CMD_ATDTW 0x00004000
+#define USB_CMD_ITC 0x00FF0000
+
+/* bit 15,3,2 are frame list size */
+#define USB_CMD_FRAME_SIZE_1024 0x00000000
+#define USB_CMD_FRAME_SIZE_512 0x00000004
+#define USB_CMD_FRAME_SIZE_256 0x00000008
+#define USB_CMD_FRAME_SIZE_128 0x0000000C
+#define USB_CMD_FRAME_SIZE_64 0x00008000
+#define USB_CMD_FRAME_SIZE_32 0x00008004
+#define USB_CMD_FRAME_SIZE_16 0x00008008
+#define USB_CMD_FRAME_SIZE_8 0x0000800C
+
+/* bit 9-8 are async schedule park mode count */
+#define USB_CMD_ASP_00 0x00000000
+#define USB_CMD_ASP_01 0x00000100
+#define USB_CMD_ASP_10 0x00000200
+#define USB_CMD_ASP_11 0x00000300
+#define USB_CMD_ASP_BIT_POS 8
+
+/* bit 23-16 are interrupt threshold control */
+#define USB_CMD_ITC_NO_THRESHOLD 0x00000000
+#define USB_CMD_ITC_1_MICRO_FRM 0x00010000
+#define USB_CMD_ITC_2_MICRO_FRM 0x00020000
+#define USB_CMD_ITC_4_MICRO_FRM 0x00040000
+#define USB_CMD_ITC_8_MICRO_FRM 0x00080000
+#define USB_CMD_ITC_16_MICRO_FRM 0x00100000
+#define USB_CMD_ITC_32_MICRO_FRM 0x00200000
+#define USB_CMD_ITC_64_MICRO_FRM 0x00400000
+#define USB_CMD_ITC_BIT_POS 16
+
+/* USB STS Register Bit Masks */
+#define USB_STS_INT 0x00000001
+#define USB_STS_ERR 0x00000002
+#define USB_STS_PORT_CHANGE 0x00000004
+#define USB_STS_FRM_LST_ROLL 0x00000008
+#define USB_STS_SYS_ERR 0x00000010
+#define USB_STS_IAA 0x00000020
+#define USB_STS_RESET 0x00000040
+#define USB_STS_SOF 0x00000080
+#define USB_STS_SUSPEND 0x00000100
+#define USB_STS_HC_HALTED 0x00001000
+#define USB_STS_RCL 0x00002000
+#define USB_STS_PERIODIC_SCHEDULE 0x00004000
+#define USB_STS_ASYNC_SCHEDULE 0x00008000
+
+/* USB INTR Register Bit Masks */
+#define USB_INTR_INT_EN 0x00000001
+#define USB_INTR_ERR_INT_EN 0x00000002
+#define USB_INTR_PTC_DETECT_EN 0x00000004
+#define USB_INTR_FRM_LST_ROLL_EN 0x00000008
+#define USB_INTR_SYS_ERR_EN 0x00000010
+#define USB_INTR_ASYN_ADV_EN 0x00000020
+#define USB_INTR_RESET_EN 0x00000040
+#define USB_INTR_SOF_EN 0x00000080
+#define USB_INTR_DEVICE_SUSPEND 0x00000100
+
+/* Device Address bit masks */
+#define USB_DEVICE_ADDRESS_MASK 0xFE000000
+#define USB_DEVICE_ADDRESS_BIT_POS 25
+
+/* endpoint list address bit masks */
+#define USB_EP_LIST_ADDRESS_MASK 0xfffff800
+
+/* PORTSCX Register Bit Masks */
+#define PORTSCX_CURRENT_CONNECT_STATUS 0x00000001
+#define PORTSCX_CONNECT_STATUS_CHANGE 0x00000002
+#define PORTSCX_PORT_ENABLE 0x00000004
+#define PORTSCX_PORT_EN_DIS_CHANGE 0x00000008
+#define PORTSCX_OVER_CURRENT_ACT 0x00000010
+#define PORTSCX_OVER_CURRENT_CHG 0x00000020
+#define PORTSCX_PORT_FORCE_RESUME 0x00000040
+#define PORTSCX_PORT_SUSPEND 0x00000080
+#define PORTSCX_PORT_RESET 0x00000100
+#define PORTSCX_LINE_STATUS_BITS 0x00000C00
+#define PORTSCX_PORT_POWER 0x00001000
+#define PORTSCX_PORT_INDICTOR_CTRL 0x0000C000
+#define PORTSCX_PORT_TEST_CTRL 0x000F0000
+#define PORTSCX_WAKE_ON_CONNECT_EN 0x00100000
+#define PORTSCX_WAKE_ON_CONNECT_DIS 0x00200000
+#define PORTSCX_WAKE_ON_OVER_CURRENT 0x00400000
+#define PORTSCX_PHY_LOW_POWER_SPD 0x00800000
+#define PORTSCX_PORT_FORCE_FULL_SPEED 0x01000000
+#define PORTSCX_PORT_SPEED_MASK 0x0C000000
+#define PORTSCX_PORT_WIDTH 0x10000000
+#define PORTSCX_PHY_TYPE_SEL 0xC0000000
+
+/* bit 11-10 are line status */
+#define PORTSCX_LINE_STATUS_SE0 0x00000000
+#define PORTSCX_LINE_STATUS_JSTATE 0x00000400
+#define PORTSCX_LINE_STATUS_KSTATE 0x00000800
+#define PORTSCX_LINE_STATUS_UNDEF 0x00000C00
+#define PORTSCX_LINE_STATUS_BIT_POS 10
+
+/* bit 15-14 are port indicator control */
+#define PORTSCX_PIC_OFF 0x00000000
+#define PORTSCX_PIC_AMBER 0x00004000
+#define PORTSCX_PIC_GREEN 0x00008000
+#define PORTSCX_PIC_UNDEF 0x0000C000
+#define PORTSCX_PIC_BIT_POS 14
+
+/* bit 19-16 are port test control */
+#define PORTSCX_PTC_DISABLE 0x00000000
+#define PORTSCX_PTC_JSTATE 0x00010000
+#define PORTSCX_PTC_KSTATE 0x00020000
+#define PORTSCX_PTC_SEQNAK 0x00030000
+#define PORTSCX_PTC_PACKET 0x00040000
+#define PORTSCX_PTC_FORCE_EN 0x00050000
+#define PORTSCX_PTC_BIT_POS 16
+
+/* bit 27-26 are port speed */
+#define PORTSCX_PORT_SPEED_FULL 0x00000000
+#define PORTSCX_PORT_SPEED_LOW 0x04000000
+#define PORTSCX_PORT_SPEED_HIGH 0x08000000
+#define PORTSCX_PORT_SPEED_UNDEF 0x0C000000
+#define PORTSCX_SPEED_BIT_POS 26
+
+/* bit 28 is parallel transceiver width for UTMI interface */
+#define PORTSCX_PTW 0x10000000
+#define PORTSCX_PTW_8BIT 0x00000000
+#define PORTSCX_PTW_16BIT 0x10000000
+
+/* bit 31-30 are port transceiver select */
+#define PORTSCX_PTS_UTMI 0x00000000
+#define PORTSCX_PTS_ULPI 0x80000000
+#define PORTSCX_PTS_FSLS 0xC0000000
+#define PORTSCX_PTS_BIT_POS 30
+
+/* otgsc Register Bit Masks */
+#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
+#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
+#define OTGSC_CTRL_OTG_TERM 0x00000008
+#define OTGSC_CTRL_DATA_PULSING 0x00000010
+#define OTGSC_STS_USB_ID 0x00000100
+#define OTGSC_STS_A_VBUS_VALID 0x00000200
+#define OTGSC_STS_A_SESSION_VALID 0x00000400
+#define OTGSC_STS_B_SESSION_VALID 0x00000800
+#define OTGSC_STS_B_SESSION_END 0x00001000
+#define OTGSC_STS_1MS_TOGGLE 0x00002000
+#define OTGSC_STS_DATA_PULSING 0x00004000
+#define OTGSC_INTSTS_USB_ID 0x00010000
+#define OTGSC_INTSTS_A_VBUS_VALID 0x00020000
+#define OTGSC_INTSTS_A_SESSION_VALID 0x00040000
+#define OTGSC_INTSTS_B_SESSION_VALID 0x00080000
+#define OTGSC_INTSTS_B_SESSION_END 0x00100000
+#define OTGSC_INTSTS_1MS 0x00200000
+#define OTGSC_INTSTS_DATA_PULSING 0x00400000
+#define OTGSC_INTR_USB_ID 0x01000000
+#define OTGSC_INTR_A_VBUS_VALID 0x02000000
+#define OTGSC_INTR_A_SESSION_VALID 0x04000000
+#define OTGSC_INTR_B_SESSION_VALID 0x08000000
+#define OTGSC_INTR_B_SESSION_END 0x10000000
+#define OTGSC_INTR_1MS_TIMER 0x20000000
+#define OTGSC_INTR_DATA_PULSING 0x40000000
+
+/* USB MODE Register Bit Masks */
+#define USB_MODE_CTRL_MODE_IDLE 0x00000000
+#define USB_MODE_CTRL_MODE_DEVICE 0x00000002
+#define USB_MODE_CTRL_MODE_HOST 0x00000003
+#define USB_MODE_CTRL_MODE_MASK 0x00000003
+#define USB_MODE_CTRL_MODE_RSV 0x00000001
+#define USB_MODE_ES 0x00000004 /* Endian Select */
+#define USB_MODE_SETUP_LOCK_OFF 0x00000008
+#define USB_MODE_STREAM_DISABLE 0x00000010
+/* Endpoint Flush Register */
+#define EPFLUSH_TX_OFFSET 0x00010000
+#define EPFLUSH_RX_OFFSET 0x00000000
+
+/* Endpoint Setup Status bit masks */
+#define EP_SETUP_STATUS_MASK 0x0000003F
+#define EP_SETUP_STATUS_EP0 0x00000001
+
+/* ENDPOINTCTRLx Register Bit Masks */
+#define EPCTRL_TX_ENABLE 0x00800000
+#define EPCTRL_TX_DATA_TOGGLE_RST 0x00400000 /* Not EP0 */
+#define EPCTRL_TX_DATA_TOGGLE_INH 0x00200000 /* Not EP0 */
+#define EPCTRL_TX_TYPE 0x000C0000
+#define EPCTRL_TX_DATA_SOURCE 0x00020000 /* Not EP0 */
+#define EPCTRL_TX_EP_STALL 0x00010000
+#define EPCTRL_RX_ENABLE 0x00000080
+#define EPCTRL_RX_DATA_TOGGLE_RST 0x00000040 /* Not EP0 */
+#define EPCTRL_RX_DATA_TOGGLE_INH 0x00000020 /* Not EP0 */
+#define EPCTRL_RX_TYPE 0x0000000C
+#define EPCTRL_RX_DATA_SINK 0x00000002 /* Not EP0 */
+#define EPCTRL_RX_EP_STALL 0x00000001
+
+/* bit 19-18 and 3-2 are endpoint type */
+#define EPCTRL_EP_TYPE_CONTROL 0
+#define EPCTRL_EP_TYPE_ISO 1
+#define EPCTRL_EP_TYPE_BULK 2
+#define EPCTRL_EP_TYPE_INTERRUPT 3
+#define EPCTRL_TX_EP_TYPE_SHIFT 18
+#define EPCTRL_RX_EP_TYPE_SHIFT 2
+
+/* SNOOPn Register Bit Masks */
+#define SNOOP_ADDRESS_MASK 0xFFFFF000
+#define SNOOP_SIZE_ZERO 0x00 /* snooping disable */
+#define SNOOP_SIZE_4KB 0x0B /* 4KB snoop size */
+#define SNOOP_SIZE_8KB 0x0C
+#define SNOOP_SIZE_16KB 0x0D
+#define SNOOP_SIZE_32KB 0x0E
+#define SNOOP_SIZE_64KB 0x0F
+#define SNOOP_SIZE_128KB 0x10
+#define SNOOP_SIZE_256KB 0x11
+#define SNOOP_SIZE_512KB 0x12
+#define SNOOP_SIZE_1MB 0x13
+#define SNOOP_SIZE_2MB 0x14
+#define SNOOP_SIZE_4MB 0x15
+#define SNOOP_SIZE_8MB 0x16
+#define SNOOP_SIZE_16MB 0x17
+#define SNOOP_SIZE_32MB 0x18
+#define SNOOP_SIZE_64MB 0x19
+#define SNOOP_SIZE_128MB 0x1A
+#define SNOOP_SIZE_256MB 0x1B
+#define SNOOP_SIZE_512MB 0x1C
+#define SNOOP_SIZE_1GB 0x1D
+#define SNOOP_SIZE_2GB 0x1E /* 2GB snoop size */
+
+/* pri_ctrl Register Bit Masks */
+#define PRI_CTRL_PRI_LVL1 0x0000000C
+#define PRI_CTRL_PRI_LVL0 0x00000003
+
+/* si_ctrl Register Bit Masks */
+#define SI_CTRL_ERR_DISABLE 0x00000010
+#define SI_CTRL_IDRC_DISABLE 0x00000008
+#define SI_CTRL_RD_SAFE_EN 0x00000004
+#define SI_CTRL_RD_PREFETCH_DISABLE 0x00000002
+#define SI_CTRL_RD_PREFEFETCH_VAL 0x00000001
+
+/* control Register Bit Masks */
+#define USB_CTRL_IOENB 0x00000004
+#define USB_CTRL_ULPI_INT0EN 0x00000001
+#define USB_CTRL_UTMI_PHY_EN 0x00000200
+#define USB_CTRL_USB_EN 0x00000004
+#define USB_CTRL_ULPI_PHY_CLK_SEL 0x00000400
+
+/* Endpoint Queue Head data struct
+ * Rem: all the variables of qh are LittleEndian Mode
+ * and NEXT_POINTER_MASK should operate on a LittleEndian, Phy Addr
+ */
+struct ep_queue_head {
+ u32 max_pkt_length; /* Mult(31-30) , Zlt(29) , Max Pkt len
+ and IOS(15) */
+ u32 curr_dtd_ptr; /* Current dTD Pointer(31-5) */
+ u32 next_dtd_ptr; /* Next dTD Pointer(31-5), T(0) */
+ u32 size_ioc_int_sts; /* Total bytes (30-16), IOC (15),
+ MultO(11-10), STS (7-0) */
+ u32 buff_ptr0; /* Buffer pointer Page 0 (31-12) */
+ u32 buff_ptr1; /* Buffer pointer Page 1 (31-12) */
+ u32 buff_ptr2; /* Buffer pointer Page 2 (31-12) */
+ u32 buff_ptr3; /* Buffer pointer Page 3 (31-12) */
+ u32 buff_ptr4; /* Buffer pointer Page 4 (31-12) */
+ u32 res1;
+ u8 setup_buffer[8]; /* Setup data 8 bytes */
+ u32 res2[4];
+};
+
+/* Endpoint Queue Head Bit Masks */
+#define EP_QUEUE_HEAD_MULT_POS 30
+#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000
+#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16
+#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff)
+#define EP_QUEUE_HEAD_IOS 0x00008000
+#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001
+#define EP_QUEUE_HEAD_IOC 0x00008000
+#define EP_QUEUE_HEAD_MULTO 0x00000C00
+#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040
+#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080
+#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF
+#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0
+#define EP_QUEUE_FRINDEX_MASK 0x000007FF
+#define EP_MAX_LENGTH_TRANSFER 0x4000
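+
+/* For illustration: a hypothetical max_pkt_length word of 0x20400000
+ * decodes via EP_QUEUE_HEAD_MAX_PKT_LEN() to (0x2040 & 0x07ff) = 0x40,
+ * i.e. a 64-byte max packet, with EP_QUEUE_HEAD_ZLT_SEL set (which,
+ * per the controller reference manuals, disables automatic zero-length
+ * packet termination).
+ */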
+
+/* Endpoint Transfer Descriptor data struct */
+/* Rem: all the variables of td are LittleEndian Mode */
+struct ep_td_struct {
+ u32 next_td_ptr; /* Next TD pointer(31-5), T(0) set
+ indicate invalid */
+ u32 size_ioc_sts; /* Total bytes (30-16), IOC (15),
+ MultO(11-10), STS (7-0) */
+ u32 buff_ptr0; /* Buffer pointer Page 0 */
+ u32 buff_ptr1; /* Buffer pointer Page 1 */
+ u32 buff_ptr2; /* Buffer pointer Page 2 */
+ u32 buff_ptr3; /* Buffer pointer Page 3 */
+ u32 buff_ptr4; /* Buffer pointer Page 4 */
+ u32 res;
+ /* 32 bytes */
+ dma_addr_t td_dma; /* dma address for this td */
+ /* virtual address of next td specified in next_td_ptr */
+ struct ep_td_struct *next_td_virt;
+};
+
+/* Endpoint Transfer Descriptor bit Masks */
+#define DTD_NEXT_TERMINATE 0x00000001
+#define DTD_IOC 0x00008000
+#define DTD_STATUS_ACTIVE 0x00000080
+#define DTD_STATUS_HALTED 0x00000040
+#define DTD_STATUS_DATA_BUFF_ERR 0x00000020
+#define DTD_STATUS_TRANSACTION_ERR 0x00000008
+#define DTD_RESERVED_FIELDS 0x80007300
+#define DTD_ADDR_MASK 0xFFFFFFE0
+#define DTD_PACKET_SIZE 0x7FFF0000
+#define DTD_LENGTH_BIT_POS 16
+#define DTD_ERROR_MASK (DTD_STATUS_HALTED | \
+ DTD_STATUS_DATA_BUFF_ERR | \
+ DTD_STATUS_TRANSACTION_ERR)
+/* Alignment requirements; must be a power of two */
+#define DTD_ALIGNMENT 0x20
+#define QH_ALIGNMENT 2048
+
+/* Controller dma boundary */
+#define UDC_DMA_BOUNDARY 0x1000
+
+/*-------------------------------------------------------------------------*/
+
+/* ### driver private data
+ */
+struct fsl_req {
+ struct usb_request req;
+ struct list_head queue;
+ /* ep_queue() will add the request to
+ the tail of the endpoint's queue */
+ struct fsl_ep *ep;
+ unsigned mapped:1;
+
+ struct ep_td_struct *head, *tail; /* For dTD List
+ cpu endian Virtual addr */
+ unsigned int dtd_count;
+};
+
+#define REQ_UNCOMPLETE 1
+
+struct fsl_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct fsl_udc *udc;
+ struct ep_queue_head *qh;
+ struct usb_gadget *gadget;
+
+ char name[14];
+ unsigned stopped:1;
+};
+
+#define EP_DIR_IN 1
+#define EP_DIR_OUT 0
+
+struct fsl_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct fsl_usb2_platform_data *pdata;
+ struct completion *done; /* to make sure release() is done */
+ struct fsl_ep *eps;
+ unsigned int max_ep;
+ unsigned int irq;
+
+ struct usb_ctrlrequest local_setup_buff;
+ spinlock_t lock;
+ struct usb_phy *transceiver;
+ unsigned softconnect:1;
+ unsigned vbus_active:1;
+ unsigned stopped:1;
+ unsigned remote_wakeup:1;
+ unsigned already_stopped:1;
+ unsigned big_endian_desc:1;
+
+ struct ep_queue_head *ep_qh; /* Endpoints Queue-Head */
+ struct fsl_req *status_req; /* ep0 status request */
+ struct dma_pool *td_pool; /* dma pool for DTD */
+ enum fsl_usb2_phy_modes phy_mode;
+
+ size_t ep_qh_size; /* size after alignment adjustment*/
+ dma_addr_t ep_qh_dma; /* dma address of QH */
+
+ u32 max_pipes; /* Device max pipes */
+ u32 bus_reset; /* Device is bus resetting */
+ u32 resume_state; /* USB state to resume */
+ u32 usb_state; /* USB current state */
+ u32 ep0_state; /* Endpoint zero state */
+ u32 ep0_dir; /* Endpoint zero direction: can be
+ USB_DIR_IN or USB_DIR_OUT */
+ u8 device_address; /* Device USB address */
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef DEBUG
+#define DBG(fmt, args...) printk(KERN_DEBUG "[%s] " fmt "\n", \
+ __func__, ## args)
+#else
+#define DBG(fmt, args...) do {} while (0)
+#endif
+
+#if 0
+static void dump_msg(const char *label, const u8 * buf, unsigned int length)
+{
+ unsigned int start, num, i;
+ char line[52], *p;
+
+ if (length >= 512)
+ return;
+ DBG("%s, length %u:\n", label, length);
+ start = 0;
+ while (length > 0) {
+ num = min(length, 16u);
+ p = line;
+ for (i = 0; i < num; ++i) {
+ if (i == 8)
+ *p++ = ' ';
+ sprintf(p, " %02x", buf[i]);
+ p += 3;
+ }
+ *p = 0;
+ printk(KERN_DEBUG "%6x: %s\n", start, line);
+ buf += num;
+ start += num;
+ length -= num;
+ }
+}
+#endif
+
+#ifdef VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(stuff...) do {} while (0)
+#endif
+
+#define ERR(stuff...) pr_err("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
+#define INFO(stuff...) pr_info("udc: " stuff)
+
+/*-------------------------------------------------------------------------*/
+
+/* ### Add board specific defines here
+ */
+
+/*
+ * ### pipe direction macro from device view
+ */
+#define USB_RECV 0 /* OUT EP */
+#define USB_SEND 1 /* IN EP */
+
+/*
+ * ### internal used help routines.
+ */
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
+#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
+#define ep_is_in(EP) ((ep_index(EP) == 0) ? ((EP)->udc->ep0_dir == \
+ USB_DIR_IN) : (((EP)->ep.desc->bEndpointAddress \
+ & USB_DIR_IN) == USB_DIR_IN))
+#define get_ep_by_pipe(udc, pipe) (((pipe) == 1) ? &(udc)->eps[0] : \
+ &(udc)->eps[pipe])
+#define get_pipe_by_windex(windex) (((windex) & USB_ENDPOINT_NUMBER_MASK) \
+ * 2 + (((windex) & USB_DIR_IN) ? 1 : 0))
+#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
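+
+#if 0
+/* Compiled-out illustration of the pipe numbering scheme above; this
+ * hypothetical helper is not used by the driver.  For wIndex 0x81 (IN
+ * endpoint 1): (0x81 & 0x0f) * 2 + 1 = pipe 3, i.e. udc->eps[3], the
+ * "ep1in" slot.  Pipe 1 maps back to eps[0] because ep0 has a single
+ * fsl_ep for both directions. */
+static inline struct fsl_ep *ep_from_windex_example(struct fsl_udc *udc,
+ u16 windex)
+{
+ return get_ep_by_pipe(udc, get_pipe_by_windex(windex));
+}
+#endif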
+
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
+#endif
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
new file mode 100644
index 000000000..08ba9c8c1
--- /dev/null
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -0,0 +1,1517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fusb300 UDC (USB gadget)
+ *
+ * Copyright (C) 2010 Faraday Technology Corp.
+ *
+ * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "fusb300_udc.h"
+
+MODULE_DESCRIPTION("FUSB300 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
+MODULE_ALIAS("platform:fusb300_udc");
+
+#define DRIVER_VERSION "20 October 2010"
+
+static const char udc_name[] = "fusb300_udc";
+static const char * const fusb300_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9",
+ "ep10", "ep11", "ep12", "ep13", "ep14", "ep15"
+};
+
+static void done(struct fusb300_ep *ep, struct fusb300_request *req,
+ int status);
+
+static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset,
+ u32 value)
+{
+ u32 reg = ioread32(fusb300->reg + offset);
+
+ reg |= value;
+ iowrite32(reg, fusb300->reg + offset);
+}
+
+static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset,
+ u32 value)
+{
+ u32 reg = ioread32(fusb300->reg + offset);
+
+ reg &= ~value;
+ iowrite32(reg, fusb300->reg + offset);
+}
+
+static void fusb300_ep_setting(struct fusb300_ep *ep,
+ struct fusb300_ep_info info)
+{
+ ep->epnum = info.epnum;
+ ep->type = info.type;
+}
+
+static int fusb300_ep_release(struct fusb300_ep *ep)
+{
+ if (!ep->epnum)
+ return 0;
+ ep->epnum = 0;
+ ep->stall = 0;
+ ep->wedged = 0;
+ return 0;
+}
+
+static void fusb300_set_fifo_entry(struct fusb300 *fusb300,
+ u32 ep)
+{
+ u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+
+ val &= ~FUSB300_EPSET1_FIFOENTRY_MSK;
+ val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM);
+ iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+}
+
+static void fusb300_set_start_entry(struct fusb300 *fusb300,
+ u8 ep)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+ u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM;
+
+ reg &= ~FUSB300_EPSET1_START_ENTRY_MSK;
+ reg |= FUSB300_EPSET1_START_ENTRY(start_entry);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+ if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) {
+ fusb300->fifo_entry_num = 0;
+ fusb300->addrofs = 0;
+ pr_err("fifo entry is over the maximum number!\n");
+ } else
+ fusb300->fifo_entry_num++;
+}
+
+/* call fusb300_set_start_entry() before fusb300_set_epaddrofs() */
+static void fusb300_set_epaddrofs(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+
+ reg &= ~FUSB300_EPSET2_ADDROFS_MSK;
+ reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+ fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM;
+}
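+
+/* For illustration: addrofs advances in 8-byte FIFO words.  A
+ * hypothetical 512-byte bulk endpoint reserves (512 + 7) / 8 = 64
+ * words per FIFO entry, times FUSB300_FIFO_ENTRY_NUM entries, before
+ * the next endpoint's address offset is programmed.
+ */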
+
+static void ep_fifo_setting(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ fusb300_set_fifo_entry(fusb300, info.epnum);
+ fusb300_set_start_entry(fusb300, info.epnum);
+ fusb300_set_epaddrofs(fusb300, info);
+}
+
+static void fusb300_set_eptype(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+ reg &= ~FUSB300_EPSET1_TYPE_MSK;
+ reg |= FUSB300_EPSET1_TYPE(info.type);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_epdir(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg;
+
+ if (!info.dir_in)
+ return;
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+ reg &= ~FUSB300_EPSET1_DIR_MSK;
+ reg |= FUSB300_EPSET1_DIRIN;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_ep_active(struct fusb300 *fusb300,
+ u8 ep)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+
+ reg |= FUSB300_EPSET1_ACTEN;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
+}
+
+static void fusb300_set_epmps(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+
+ reg &= ~FUSB300_EPSET2_MPS_MSK;
+ reg |= FUSB300_EPSET2_MPS(info.maxpacket);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
+}
+
+static void fusb300_set_interval(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+ reg &= ~FUSB300_EPSET1_INTERVAL(0x7);
+ reg |= FUSB300_EPSET1_INTERVAL(info.interval);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void fusb300_set_bwnum(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+
+ reg &= ~FUSB300_EPSET1_BWNUM(0x3);
+ reg |= FUSB300_EPSET1_BWNUM(info.bw_num);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
+}
+
+static void set_ep_reg(struct fusb300 *fusb300,
+ struct fusb300_ep_info info)
+{
+ fusb300_set_eptype(fusb300, info);
+ fusb300_set_epdir(fusb300, info);
+ fusb300_set_epmps(fusb300, info);
+
+ if (info.interval)
+ fusb300_set_interval(fusb300, info);
+
+ if (info.bw_num)
+ fusb300_set_bwnum(fusb300, info);
+
+ fusb300_set_ep_active(fusb300, info.epnum);
+}
+
+static int config_ep(struct fusb300_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+ struct fusb300_ep_info info;
+
+ ep->ep.desc = desc;
+
+ info.interval = 0;
+ info.addrofs = 0;
+ info.bw_num = 0;
+
+ info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+ info.maxpacket = usb_endpoint_maxp(desc);
+ info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+ if ((info.type == USB_ENDPOINT_XFER_INT) ||
+ (info.type == USB_ENDPOINT_XFER_ISOC)) {
+ info.interval = desc->bInterval;
+ if (info.type == USB_ENDPOINT_XFER_ISOC)
+ info.bw_num = usb_endpoint_maxp_mult(desc);
+ }
+
+ ep_fifo_setting(fusb300, info);
+
+ set_ep_reg(fusb300, info);
+
+ fusb300_ep_setting(ep, info);
+
+ fusb300->ep[info.epnum] = ep;
+
+ return 0;
+}
+
+static int fusb300_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct fusb300_ep *ep;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+
+ if (ep->fusb300->reenum) {
+ ep->fusb300->fifo_entry_num = 0;
+ ep->fusb300->addrofs = 0;
+ ep->fusb300->reenum = 0;
+ }
+
+ return config_ep(ep, desc);
+}
+
+static int fusb300_disable(struct usb_ep *_ep)
+{
+ struct fusb300_ep *ep;
+ struct fusb300_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+
+ BUG_ON(!ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct fusb300_request, queue);
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+ done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+ }
+
+ return fusb300_ep_release(ep);
+}
+
+static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct fusb300_request *req;
+
+ req = kzalloc(sizeof(struct fusb300_request), gfp_flags);
+ if (!req)
+ return NULL;
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fusb300_request *req;
+
+ req = container_of(_req, struct fusb300_request, req);
+ kfree(req);
+}
+
+static int enable_fifo_int(struct fusb300_ep *ep)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ if (ep->epnum) {
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
+ } else {
+ pr_err("can't enable_fifo_int ep0\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int disable_fifo_int(struct fusb300_ep *ep)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ if (ep->epnum) {
+ fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
+ } else {
+ pr_err("can't disable_fifo_int ep0\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length)
+{
+ u32 reg;
+
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
+ reg &= ~FUSB300_CSR_LEN_MSK;
+ reg |= FUSB300_CSR_LEN(length);
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR);
+}
+
+/* write data to cx fifo */
+static void fusb300_wrcxf(struct fusb300_ep *ep,
+ struct fusb300_request *req)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data;
+ struct fusb300 *fusb300 = ep->fusb300;
+ u32 length = req->req.length - req->req.actual;
+
+ tmp = req->req.buf + req->req.actual;
+
+ if (length > SS_CTL_MAX_PACKET_SIZE) {
+ fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE);
+ for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) {
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
+ *(tmp + 3) << 24;
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ tmp += 4;
+ }
+ req->req.actual += SS_CTL_MAX_PACKET_SIZE;
+ } else { /* length is less than max packet size */
+ fusb300_set_cxlen(fusb300, length);
+ for (i = length >> 2; i > 0; i--) {
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
+ *(tmp + 3) << 24;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ tmp = tmp + 4;
+ }
+ switch (length % 4) {
+ case 1:
+ data = *tmp;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ break;
+ case 2:
+ data = *tmp | *(tmp + 1) << 8;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ break;
+ case 3:
+ data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
+ printk(KERN_DEBUG " 0x%x\n", data);
+ iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
+ break;
+ default:
+ break;
+ }
+ req->req.actual += length;
+ }
+}
+
+static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
+ FUSB300_EPSET0_STL);
+}
+
+static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+
+ if (reg & FUSB300_EPSET0_STL) {
+ printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep);
+ reg |= FUSB300_EPSET0_STL_CLR;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+ }
+}
+
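+/*
+ * Start an ep0 request.  IN data is pushed to the CX FIFO immediately;
+ * for OUT the CX-out interrupt is enabled and the FIFO is drained later
+ * in fusb300_ep0out().
+ */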
+static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req)
+{
+ if (ep->fusb300->ep0_dir) { /* if IN */
+ if (req->req.length) {
+ fusb300_wrcxf(ep, req);
+ } else
+ printk(KERN_DEBUG "%s : req->req.length = 0x%x\n",
+ __func__, req->req.length);
+ if ((req->req.length == req->req.actual) ||
+ (req->req.actual < ep->ep.maxpacket))
+ done(ep, req, 0);
+ } else { /* OUT */
+ if (!req->req.length)
+ done(ep, req, 0);
+ else
+ fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1,
+ FUSB300_IGER1_CX_OUT_INT);
+ }
+}
+
+static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct fusb300_ep *ep;
+ struct fusb300_request *req;
+ unsigned long flags;
+ int request = 0;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+ req = container_of(_req, struct fusb300_request, req);
+
+ if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+
+ if (list_empty(&ep->queue))
+ request = 1;
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+
+ if (ep->ep.desc == NULL) /* ep0 */
+ ep0_queue(ep, req);
+ else if (request && !ep->stall)
+ enable_fifo_int(ep);
+
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+
+ return 0;
+}
+
+static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct fusb300_ep *ep;
+ struct fusb300_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+ req = container_of(_req, struct fusb300_request, req);
+
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+ if (!list_empty(&ep->queue))
+ done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+
+ return 0;
+}
+
+static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
+{
+ struct fusb300_ep *ep;
+ struct fusb300 *fusb300;
+ unsigned long flags;
+ int ret = 0;
+
+ ep = container_of(_ep, struct fusb300_ep, ep);
+
+ fusb300 = ep->fusb300;
+
+ spin_lock_irqsave(&ep->fusb300->lock, flags);
+
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (value) {
+ fusb300_set_epnstall(fusb300, ep->epnum);
+ ep->stall = 1;
+ if (wedge)
+ ep->wedged = 1;
+ } else {
+ fusb300_clear_epnstall(fusb300, ep->epnum);
+ ep->stall = 0;
+ ep->wedged = 0;
+ }
+
+out:
+ spin_unlock_irqrestore(&ep->fusb300->lock, flags);
+ return ret;
+}
+
+static int fusb300_set_halt(struct usb_ep *_ep, int value)
+{
+ return fusb300_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int fusb300_set_wedge(struct usb_ep *_ep)
+{
+ return fusb300_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static void fusb300_fifo_flush(struct usb_ep *_ep)
+{
+}
+
+static const struct usb_ep_ops fusb300_ep_ops = {
+ .enable = fusb300_enable,
+ .disable = fusb300_disable,
+
+ .alloc_request = fusb300_alloc_request,
+ .free_request = fusb300_free_request,
+
+ .queue = fusb300_queue,
+ .dequeue = fusb300_dequeue,
+
+ .set_halt = fusb300_set_halt,
+ .fifo_flush = fusb300_fifo_flush,
+ .set_wedge = fusb300_set_wedge,
+};
+
+/*****************************************************************************/
+static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset,
+ u32 value)
+{
+ iowrite32(value, fusb300->reg + offset);
+}
+
+static void fusb300_reset(void)
+{
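+ /* empty stub: reset recovery is not implemented */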
+}
+
+static void fusb300_set_cxstall(struct fusb300 *fusb300)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
+ FUSB300_CSR_STL);
+}
+
+static void fusb300_set_cxdone(struct fusb300 *fusb300)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
+ FUSB300_CSR_DONE);
+}
+
+/* read data from cx fifo */
+static void fusb300_rdcxf(struct fusb300 *fusb300,
+ u8 *buffer, u32 length)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data;
+
+ tmp = buffer;
+
+ for (i = (length >> 2); i > 0; i--) {
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ *(tmp + 3) = (data >> 24) & 0xFF;
+ tmp = tmp + 4;
+ }
+
+ switch (length % 4) {
+ case 1:
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ break;
+ case 2:
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ break;
+ case 3:
+ data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
+ printk(KERN_DEBUG " 0x%x\n", data);
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ break;
+ default:
+ break;
+ }
+}
+
+static void fusb300_rdfifo(struct fusb300_ep *ep,
+ struct fusb300_request *req,
+ u32 length)
+{
+ int i = 0;
+ u8 *tmp;
+ u32 data, reg;
+ struct fusb300 *fusb300 = ep->fusb300;
+
+ tmp = req->req.buf + req->req.actual;
+ req->req.actual += length;
+
+ if (req->req.actual > req->req.length)
+ printk(KERN_DEBUG "req->req.actual > req->req.length\n");
+
+ for (i = (length >> 2); i > 0; i--) {
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ *(tmp + 3) = (data >> 24) & 0xFF;
+ tmp = tmp + 4;
+ }
+
+ switch (length % 4) {
+ case 1:
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ break;
+ case 2:
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ break;
+ case 3:
+ data = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPPORT(ep->epnum));
+ *tmp = data & 0xFF;
+ *(tmp + 1) = (data >> 8) & 0xFF;
+ *(tmp + 2) = (data >> 16) & 0xFF;
+ break;
+ default:
+ break;
+ }
+
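+ /* wait for the sync FIFO to report empty before returning */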
+ do {
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+ reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
+ if (i)
+ printk(KERN_INFO "sync fifo is not empty!\n");
+ i++;
+ } while (!reg);
+}
+
+static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
+{
+ u8 value;
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
+
+ value = reg & FUSB300_EPSET0_STL;
+
+ return value;
+}
+
+static u8 fusb300_get_cxstall(struct fusb300 *fusb300)
+{
+ u8 value;
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
+
+ value = (reg & FUSB300_CSR_STL) >> 1;
+
+ return value;
+}
+
+static void request_error(struct fusb300 *fusb300)
+{
+ fusb300_set_cxstall(fusb300);
+ printk(KERN_DEBUG "request error!!\n");
+}
+
+static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+__releases(fusb300->lock)
+__acquires(fusb300->lock)
+{
+ u8 ep;
+ u16 status = 0;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ break;
+ case USB_RECIP_INTERFACE:
+ status = 0;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (ep) {
+ if (fusb300_get_epnstall(fusb300, ep))
+ status = 1 << USB_ENDPOINT_HALT;
+ } else {
+ if (fusb300_get_cxstall(fusb300))
+ status = 0;
+ }
+ break;
+
+ default:
+ request_error(fusb300);
+ return; /* exit */
+ }
+
+ fusb300->ep0_data = cpu_to_le16(status);
+ fusb300->ep0_req->buf = &fusb300->ep0_data;
+ fusb300->ep0_req->length = 2;
+
+ spin_unlock(&fusb300->lock);
+ fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL);
+ spin_lock(&fusb300->lock);
+}
+
+static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u8 ep;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_INTERFACE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = w_index & USB_ENDPOINT_NUMBER_MASK;
+ if (ep)
+ fusb300_set_epnstall(fusb300, ep);
+ else
+ fusb300_set_cxstall(fusb300);
+ fusb300_set_cxdone(fusb300);
+ }
+ break;
+ default:
+ request_error(fusb300);
+ break;
+ }
+}
+
+static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep)
+{
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
+ FUSB300_EPSET0_CLRSEQNUM);
+}
+
+static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ struct fusb300_ep *ep =
+ fusb300->ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_INTERFACE:
+ fusb300_set_cxdone(fusb300);
+ break;
+ case USB_RECIP_ENDPOINT:
+ if (w_index & USB_ENDPOINT_NUMBER_MASK) {
+ if (ep->wedged) {
+ fusb300_set_cxdone(fusb300);
+ break;
+ }
+ if (ep->stall) {
+ ep->stall = 0;
+ fusb300_clear_seqnum(fusb300, ep->epnum);
+ fusb300_clear_epnstall(fusb300, ep->epnum);
+ if (!list_empty(&ep->queue))
+ enable_fifo_int(ep);
+ }
+ }
+ fusb300_set_cxdone(fusb300);
+ break;
+ default:
+ request_error(fusb300);
+ break;
+ }
+}
+
+static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR);
+
+ reg &= ~FUSB300_DAR_DRVADDR_MSK;
+ reg |= FUSB300_DAR_DRVADDR(addr);
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR);
+}
+
+static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ if (w_value >= 0x0100)
+ request_error(fusb300);
+ else {
+ fusb300_set_dev_addr(fusb300, w_value);
+ fusb300_set_cxdone(fusb300);
+ }
+}
+
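+/*
+ * Decode a SETUP packet read from the CX FIFO.  Standard requests the
+ * controller can answer by itself are handled here; a non-zero return
+ * asks the caller to forward the request to the gadget driver's setup().
+ */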
+static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
+{
+ u8 *p = (u8 *)ctrl;
+ u8 ret = 0;
+ u8 i = 0;
+
+ fusb300_rdcxf(fusb300, p, 8);
+ fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN;
+ fusb300->ep0_length = le16_to_cpu(ctrl->wLength);
+
+ /* check request */
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ get_status(fusb300, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ clear_feature(fusb300, ctrl);
+ break;
+ case USB_REQ_SET_FEATURE:
+ set_feature(fusb300, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ set_address(fusb300, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR,
+ FUSB300_DAR_SETCONFG);
+ /* clear sequence number */
+ for (i = 1; i < FUSB300_MAX_NUM_EP; i++)
+ fusb300_clear_seqnum(fusb300, i);
+ fusb300->reenum = 1;
+ ret = 1;
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ } else
+ ret = 1;
+
+ return ret;
+}
+
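+/*
+ * Complete a request: unlink it, hand it back to the gadget driver with
+ * the lock dropped, then re-arm the endpoint FIFO interrupt (or signal
+ * CX done for ep0).
+ */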
+static void done(struct fusb300_ep *ep, struct fusb300_request *req,
+ int status)
+{
+ list_del_init(&req->queue);
+
+ /* don't modify queue heads during completion callback */
+ if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
+ req->req.status = -ESHUTDOWN;
+ else
+ req->req.status = status;
+
+ spin_unlock(&ep->fusb300->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->fusb300->lock);
+
+ if (ep->epnum) {
+ disable_fifo_int(ep);
+ if (!list_empty(&ep->queue))
+ enable_fifo_int(ep);
+ } else
+ fusb300_set_cxdone(ep->fusb300);
+}
+
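+/*
+ * Program a one-entry PRD table for the endpoint's internal DMA engine:
+ * a single buffer marked hardware-owned (H), first/last (F/L), with a
+ * completion interrupt (I), then flag the PRD as ready.
+ */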
+static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d,
+ u32 len)
+{
+ u32 value;
+ u32 reg;
+
+ /* wait SW owner */
+ do {
+ reg = ioread32(ep->fusb300->reg +
+ FUSB300_OFFSET_EPPRD_W0(ep->epnum));
+ reg &= FUSB300_EPPRD0_H;
+ } while (reg);
+
+ iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum));
+
+ value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H |
+ FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
+ iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));
+
+ iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum));
+
+ fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY,
+ FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum));
+}
+
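+/*
+ * Busy-wait for the PRD completion interrupt; a bus reset or VBUS change
+ * seen while polling aborts the wait and masks this endpoint's PRD
+ * interrupt instead.
+ */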
+static void fusb300_wait_idma_finished(struct fusb300_ep *ep)
+{
+ u32 reg;
+
+ do {
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1);
+ if (reg & (FUSB300_IGR1_VBUS_CHG_INT |
+ FUSB300_IGR1_WARM_RST_INT |
+ FUSB300_IGR1_HOT_RST_INT |
+ FUSB300_IGR1_USBRST_INT))
+ goto IDMA_RESET;
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0);
+ reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum);
+ } while (!reg);
+
+ fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0,
+ FUSB300_IGR0_EPn_PRD_INT(ep->epnum));
+ return;
+
+IDMA_RESET:
+ reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGER0);
+ reg &= ~FUSB300_IGER0_EEPn_PRD_INT(ep->epnum);
+ iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_IGER0);
+}
+
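+/*
+ * Run one IN transfer through the IDMA engine.  Note this maps the
+ * request, starts the PRD and then busy-waits for completion, all from
+ * the interrupt path.
+ */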
+static void fusb300_set_idma(struct fusb300_ep *ep,
+ struct fusb300_request *req)
+{
+ int ret;
+
+ ret = usb_gadget_map_request(&ep->fusb300->gadget,
+ &req->req, DMA_TO_DEVICE);
+ if (ret)
+ return;
+
+ fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
+ FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
+
+ fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length);
+ /* check idma is done */
+ fusb300_wait_idma_finished(ep);
+
+ usb_gadget_unmap_request(&ep->fusb300->gadget,
+ &req->req, DMA_TO_DEVICE);
+}
+
+static void in_ep_fifo_handler(struct fusb300_ep *ep)
+{
+ struct fusb300_request *req = list_entry(ep->queue.next,
+ struct fusb300_request, queue);
+
+ if (req->req.length)
+ fusb300_set_idma(ep, req);
+ done(ep, req, 0);
+}
+
+static void out_ep_fifo_handler(struct fusb300_ep *ep)
+{
+ struct fusb300 *fusb300 = ep->fusb300;
+ struct fusb300_request *req = list_entry(ep->queue.next,
+ struct fusb300_request, queue);
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
+ u32 length = reg & FUSB300_FFR_BYCNT;
+
+ fusb300_rdfifo(ep, req, length);
+
+ /* finish out transfer */
+ if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket))
+ done(ep, req, 0);
+}
+
+static void check_device_mode(struct fusb300 *fusb300)
+{
+ u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR);
+
+ switch (reg & FUSB300_GCR_DEVEN_MSK) {
+ case FUSB300_GCR_DEVEN_SS:
+ fusb300->gadget.speed = USB_SPEED_SUPER;
+ break;
+ case FUSB300_GCR_DEVEN_HS:
+ fusb300->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case FUSB300_GCR_DEVEN_FS:
+ fusb300->gadget.speed = USB_SPEED_FULL;
+ break;
+ default:
+ fusb300->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK));
+}
+
+static void fusb300_ep0out(struct fusb300 *fusb300)
+{
+ struct fusb300_ep *ep = fusb300->ep[0];
+ u32 reg;
+
+ if (!list_empty(&ep->queue)) {
+ struct fusb300_request *req;
+
+ req = list_first_entry(&ep->queue,
+ struct fusb300_request, queue);
+ if (req->req.length)
+ fusb300_rdcxf(ep->fusb300, req->req.buf,
+ req->req.length);
+ done(ep, req, 0);
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
+ reg &= ~FUSB300_IGER1_CX_OUT_INT;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1);
+ } else
+ pr_err("%s : empty queue\n", __func__);
+}
+
+static void fusb300_ep0in(struct fusb300 *fusb300)
+{
+ struct fusb300_request *req;
+ struct fusb300_ep *ep = fusb300->ep[0];
+
+ if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) {
+ req = list_entry(ep->queue.next,
+ struct fusb300_request, queue);
+ if (req->req.length)
+ fusb300_wrcxf(ep, req);
+ if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+ done(ep, req, 0);
+ } else
+ fusb300_set_cxdone(fusb300);
+}
+
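+/*
+ * Interrupt groups 2-5 (stream and stall events) are not handled yet;
+ * the empty stubs keep the IRQ dispatch below explicit.
+ */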
+static void fusb300_grp2_handler(void)
+{
+}
+
+static void fusb300_grp3_handler(void)
+{
+}
+
+static void fusb300_grp4_handler(void)
+{
+}
+
+static void fusb300_grp5_handler(void)
+{
+}
+
+static irqreturn_t fusb300_irq(int irq, void *_fusb300)
+{
+ struct fusb300 *fusb300 = _fusb300;
+ u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
+ u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
+ u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
+ u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0);
+ struct usb_ctrlrequest ctrl;
+ u8 in;
+ u32 reg;
+ int i;
+
+ spin_lock(&fusb300->lock);
+
+ int_grp1 &= int_grp1_en;
+ int_grp0 &= int_grp0_en;
+
+ if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_WARM_RST_INT);
+ printk(KERN_INFO "fusb300_warmreset\n");
+ fusb300_reset();
+ }
+
+ if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_HOT_RST_INT);
+ printk(KERN_INFO "fusb300_hotreset\n");
+ fusb300_reset();
+ }
+
+ if (int_grp1 & FUSB300_IGR1_USBRST_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_USBRST_INT);
+ fusb300_reset();
+ }
+ /* CX_COMABT_INT has the highest priority */
+
+ if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_CX_COMABT_INT);
+ printk(KERN_INFO "fusb300_ep0abt\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_VBUS_CHG_INT);
+ printk(KERN_INFO "fusb300_vbus_change\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U3_EXIT_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_EXIT_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_EXIT_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_ENTRY_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_ENTRY_FAIL_INT);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U3_EXIT_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_EXIT_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_EXIT_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U3_ENTRY_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n");
+ fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1,
+ FUSB300_SSCR1_GO_U3_DONE);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U2_ENTRY_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_U1_ENTRY_INT);
+ printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_RESM_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_RESM_INT);
+ printk(KERN_INFO "fusb300_resume\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_SUSP_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_SUSP_INT);
+ printk(KERN_INFO "fusb300_suspend\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_HS_LPM_INT);
+ printk(KERN_INFO "fusb300_HS_LPM_INT\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) {
+ fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
+ FUSB300_IGR1_DEV_MODE_CHG_INT);
+ check_device_mode(fusb300);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) {
+ fusb300_set_cxstall(fusb300);
+ printk(KERN_INFO "fusb300_ep0fail\n");
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) {
+ printk(KERN_INFO "fusb300_ep0setup\n");
+ if (setup_packet(fusb300, &ctrl)) {
+ spin_unlock(&fusb300->lock);
+ if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0)
+ fusb300_set_cxstall(fusb300);
+ spin_lock(&fusb300->lock);
+ }
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT)
+ printk(KERN_INFO "fusb300_cmdend\n");
+
+ if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) {
+ printk(KERN_INFO "fusb300_cxout\n");
+ fusb300_ep0out(fusb300);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_CX_IN_INT) {
+ printk(KERN_INFO "fusb300_cxin\n");
+ fusb300_ep0in(fusb300);
+ }
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP5)
+ fusb300_grp5_handler();
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP4)
+ fusb300_grp4_handler();
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP3)
+ fusb300_grp3_handler();
+
+ if (int_grp1 & FUSB300_IGR1_INTGRP2)
+ fusb300_grp2_handler();
+
+ if (int_grp0) {
+ for (i = 1; i < FUSB300_MAX_NUM_EP; i++) {
+ if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) {
+ reg = ioread32(fusb300->reg +
+ FUSB300_OFFSET_EPSET1(i));
+ in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0;
+ if (in)
+ in_ep_fifo_handler(fusb300->ep[i]);
+ else
+ out_ep_fifo_handler(fusb300->ep[i]);
+ }
+ }
+ }
+
+ spin_unlock(&fusb300->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void fusb300_set_u2_timeout(struct fusb300 *fusb300,
+ u32 time)
+{
+ u32 reg;
+
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
+ reg &= ~0xff;
+ reg |= FUSB300_SSCR2_U2TIMEOUT(time);
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
+}
+
+static void fusb300_set_u1_timeout(struct fusb300 *fusb300,
+ u32 time)
+{
+ u32 reg;
+
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
+ reg &= ~(0xff << 8);
+ reg |= FUSB300_SSCR2_U1TIMEOUT(time);
+
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
+}
+
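+/*
+ * One-time controller setup: enable AHB split transfers, permit HS LPM,
+ * program the U1/U2 inactivity timeouts and unmask the group-1 interrupts.
+ */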
+static void init_controller(struct fusb300 *fusb300)
+{
+ u32 reg;
+ u32 mask = 0;
+ u32 val = 0;
+
+ /* split on */
+ mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON;
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR);
+ reg &= ~mask;
+ reg |= val;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR);
+
+ /* enable high-speed LPM */
+ mask = val = FUSB300_HSCR_HS_LPM_PERMIT;
+ reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR);
+ reg &= ~mask;
+ reg |= val;
+ iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR);
+
+ /* set the U1/U2 timeout timers */
+ fusb300_set_u2_timeout(fusb300, 0xff);
+ fusb300_set_u1_timeout(fusb300, 0xff);
+
+ /* enable the group-1 interrupts; CX_OUT, CX_CMDEND and the sync-FIFO
+ * empty sources stay masked (CX_OUT is enabled on demand by ep0_queue)
+ */
+ iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
+}
+/*------------------------------------------------------------------------*/
+static int fusb300_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct fusb300 *fusb300 = to_fusb300(g);
+
+ /* hook up the driver */
+ fusb300->driver = driver;
+
+ return 0;
+}
+
+static int fusb300_udc_stop(struct usb_gadget *g)
+{
+ struct fusb300 *fusb300 = to_fusb300(g);
+
+ init_controller(fusb300);
+ fusb300->driver = NULL;
+
+ return 0;
+}
+/*--------------------------------------------------------------------------*/
+
+static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ return 0;
+}
+
+static const struct usb_gadget_ops fusb300_gadget_ops = {
+ .pullup = fusb300_udc_pullup,
+ .udc_start = fusb300_udc_start,
+ .udc_stop = fusb300_udc_stop,
+};
+
+static int fusb300_remove(struct platform_device *pdev)
+{
+ struct fusb300 *fusb300 = platform_get_drvdata(pdev);
+ int i;
+
+ usb_del_gadget_udc(&fusb300->gadget);
+ iounmap(fusb300->reg);
+ free_irq(platform_get_irq(pdev, 0), fusb300);
+ free_irq(platform_get_irq(pdev, 1), fusb300);
+
+ fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
+ kfree(fusb300);
+
+ return 0;
+}
+
+static int fusb300_probe(struct platform_device *pdev)
+{
+ struct resource *res, *ires, *ires1;
+ void __iomem *reg = NULL;
+ struct fusb300 *fusb300 = NULL;
+ struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP];
+ int ret = 0;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ pr_err("platform_get_resource error.\n");
+ goto clean_up;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev,
+ "platform_get_resource IORESOURCE_IRQ error.\n");
+ goto clean_up;
+ }
+
+ ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!ires1) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev,
+ "platform_get_resource IORESOURCE_IRQ 1 error.\n");
+ goto clean_up;
+ }
+
+ reg = ioremap(res->start, resource_size(res));
+ if (reg == NULL) {
+ ret = -ENOMEM;
+ pr_err("ioremap error.\n");
+ goto clean_up;
+ }
+
+ /* initialize udc */
+ fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL);
+ if (fusb300 == NULL) {
+ ret = -ENOMEM;
+ goto clean_up;
+ }
+
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++) {
+ _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL);
+ if (_ep[i] == NULL) {
+ ret = -ENOMEM;
+ goto clean_up;
+ }
+ fusb300->ep[i] = _ep[i];
+ }
+
+ spin_lock_init(&fusb300->lock);
+
+ platform_set_drvdata(pdev, fusb300);
+
+ fusb300->gadget.ops = &fusb300_gadget_ops;
+
+ fusb300->gadget.max_speed = USB_SPEED_HIGH;
+ fusb300->gadget.name = udc_name;
+ fusb300->reg = reg;
+
+ ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED,
+ udc_name, fusb300);
+ if (ret < 0) {
+ pr_err("request_irq error (%d)\n", ret);
+ goto clean_up;
+ }
+
+ ret = request_irq(ires1->start, fusb300_irq,
+ IRQF_SHARED, udc_name, fusb300);
+ if (ret < 0) {
+ pr_err("request_irq1 error (%d)\n", ret);
+ goto err_request_irq1;
+ }
+
+ INIT_LIST_HEAD(&fusb300->gadget.ep_list);
+
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++) {
+ struct fusb300_ep *ep = fusb300->ep[i];
+
+ if (i != 0) {
+ INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list);
+ list_add_tail(&fusb300->ep[i]->ep.ep_list,
+ &fusb300->gadget.ep_list);
+ }
+ ep->fusb300 = fusb300;
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.name = fusb300_ep_name[i];
+ ep->ep.ops = &fusb300_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ }
+ usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE);
+ fusb300->ep[0]->epnum = 0;
+ fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
+ INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);
+
+ fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
+ GFP_KERNEL);
+ if (fusb300->ep0_req == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_request;
+ }
+
+ init_controller(fusb300);
+ ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
+ if (ret)
+ goto err_add_udc;
+
+ dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+
+ return 0;
+
+err_add_udc:
+ fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+
+err_alloc_request:
+ free_irq(ires1->start, fusb300);
+
+err_request_irq1:
+ free_irq(ires->start, fusb300);
+
+clean_up:
+ if (fusb300) {
+ if (fusb300->ep0_req)
+ fusb300_free_request(&fusb300->ep[0]->ep,
+ fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
+ kfree(fusb300);
+ }
+ if (reg)
+ iounmap(reg);
+
+ return ret;
+}
+
+static struct platform_driver fusb300_driver = {
+ .remove = fusb300_remove,
+ .driver = {
+ .name = udc_name,
+ },
+};
+
+module_platform_driver_probe(fusb300_driver, fusb300_probe);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h
new file mode 100644
index 000000000..eb3d6d379
--- /dev/null
+++ b/drivers/usb/gadget/udc/fusb300_udc.h
@@ -0,0 +1,675 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Fusb300 UDC (USB gadget)
+ *
+ * Copyright (C) 2010 Faraday Technology Corp.
+ *
+ * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
+ */
+
+#ifndef __FUSB300_UDC_H__
+#define __FUSB300_UDC_H__
+
+#include <linux/kernel.h>
+
+#define FUSB300_OFFSET_GCR 0x00
+#define FUSB300_OFFSET_GTM 0x04
+#define FUSB300_OFFSET_DAR 0x08
+#define FUSB300_OFFSET_CSR 0x0C
+#define FUSB300_OFFSET_CXPORT 0x10
+#define FUSB300_OFFSET_EPSET0(n) (0x20 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSET1(n) (0x24 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSET2(n) (0x28 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPFFR(n) (0x2c + (n - 1) * 0x30)
+#define FUSB300_OFFSET_EPSTRID(n) (0x40 + (n - 1) * 0x30)
+#define FUSB300_OFFSET_HSPTM 0x300
+#define FUSB300_OFFSET_HSCR 0x304
+#define FUSB300_OFFSET_SSCR0 0x308
+#define FUSB300_OFFSET_SSCR1 0x30C
+#define FUSB300_OFFSET_TT 0x310
+#define FUSB300_OFFSET_DEVNOTF 0x314
+#define FUSB300_OFFSET_DNC1 0x318
+#define FUSB300_OFFSET_CS 0x31C
+#define FUSB300_OFFSET_SOF 0x324
+#define FUSB300_OFFSET_EFCS 0x328
+#define FUSB300_OFFSET_IGR0 0x400
+#define FUSB300_OFFSET_IGR1 0x404
+#define FUSB300_OFFSET_IGR2 0x408
+#define FUSB300_OFFSET_IGR3 0x40C
+#define FUSB300_OFFSET_IGR4 0x410
+#define FUSB300_OFFSET_IGR5 0x414
+#define FUSB300_OFFSET_IGER0 0x420
+#define FUSB300_OFFSET_IGER1 0x424
+#define FUSB300_OFFSET_IGER2 0x428
+#define FUSB300_OFFSET_IGER3 0x42C
+#define FUSB300_OFFSET_IGER4 0x430
+#define FUSB300_OFFSET_IGER5 0x434
+#define FUSB300_OFFSET_DMAHMER 0x500
+#define FUSB300_OFFSET_EPPRDRDY 0x504
+#define FUSB300_OFFSET_DMAEPMR 0x508
+#define FUSB300_OFFSET_DMAENR 0x50C
+#define FUSB300_OFFSET_DMAAPR 0x510
+#define FUSB300_OFFSET_AHBCR 0x514
+#define FUSB300_OFFSET_EPPRD_W0(n) (0x520 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPPRD_W1(n) (0x524 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPPRD_W2(n) (0x528 + (n - 1) * 0x10)
+#define FUSB300_OFFSET_EPRD_PTR(n) (0x52C + (n - 1) * 0x10)
+#define FUSB300_OFFSET_BUFDBG_START 0x800
+#define FUSB300_OFFSET_BUFDBG_END 0xBFC
+#define FUSB300_OFFSET_EPPORT(n) (0x1010 + (n - 1) * 0x10)
+
+/*
+ * Global Control Register (offset = 000H)
+ */
+#define FUSB300_GCR_SF_RST (1 << 8)
+#define FUSB300_GCR_VBUS_STATUS (1 << 7)
+#define FUSB300_GCR_FORCE_HS_SUSP (1 << 6)
+#define FUSB300_GCR_SYNC_FIFO1_CLR (1 << 5)
+#define FUSB300_GCR_SYNC_FIFO0_CLR (1 << 4)
+#define FUSB300_GCR_FIFOCLR (1 << 3)
+#define FUSB300_GCR_GLINTEN (1 << 2)
+#define FUSB300_GCR_DEVEN_FS 0x3
+#define FUSB300_GCR_DEVEN_HS 0x2
+#define FUSB300_GCR_DEVEN_SS 0x1
+#define FUSB300_GCR_DEVDIS 0x0
+#define FUSB300_GCR_DEVEN_MSK 0x3
+
+/*
+ * Global Test Mode (offset = 004H)
+ */
+#define FUSB300_GTM_TST_DIS_SOFGEN (1 << 16)
+#define FUSB300_GTM_TST_CUR_EP_ENTRY(n) ((n & 0xF) << 12)
+#define FUSB300_GTM_TST_EP_ENTRY(n) ((n & 0xF) << 8)
+#define FUSB300_GTM_TST_EP_NUM(n) ((n & 0xF) << 4)
+#define FUSB300_GTM_TST_FIFO_DEG (1 << 1)
+#define FUSB300_GTM_TSTMODE (1 << 0)
+
+/*
+ * Device Address Register (offset = 008H)
+ */
+#define FUSB300_DAR_SETCONFG (1 << 7)
+#define FUSB300_DAR_DRVADDR(x) (x & 0x7F)
+#define FUSB300_DAR_DRVADDR_MSK 0x7F
+
+/*
+ * Control Transfer Configuration and Status Register
+ * (CX_Config_Status, offset = 00CH)
+ */
+#define FUSB300_CSR_LEN(x) ((x & 0xFFFF) << 8)
+#define FUSB300_CSR_LEN_MSK (0xFFFF << 8)
+#define FUSB300_CSR_EMP (1 << 4)
+#define FUSB300_CSR_FUL (1 << 3)
+#define FUSB300_CSR_CLR (1 << 2)
+#define FUSB300_CSR_STL (1 << 1)
+#define FUSB300_CSR_DONE (1 << 0)
+
+/*
+ * EPn Setting 0 (EPn_SET0, offset = 020H+(n-1)*30H, n=1~15)
+ */
+#define FUSB300_EPSET0_STL_CLR (1 << 3)
+#define FUSB300_EPSET0_CLRSEQNUM (1 << 2)
+#define FUSB300_EPSET0_STL (1 << 0)
+
+/*
+ * EPn Setting 1 (EPn_SET1, offset = 024H+(n-1)*30H, n=1~15)
+ */
+#define FUSB300_EPSET1_START_ENTRY(x) ((x & 0xFF) << 24)
+#define FUSB300_EPSET1_START_ENTRY_MSK (0xFF << 24)
+#define FUSB300_EPSET1_FIFOENTRY(x) ((x & 0x1F) << 12)
+#define FUSB300_EPSET1_FIFOENTRY_MSK (0x1f << 12)
+#define FUSB300_EPSET1_INTERVAL(x) ((x & 0x7) << 6)
+#define FUSB300_EPSET1_BWNUM(x) ((x & 0x3) << 4)
+#define FUSB300_EPSET1_TYPEISO (1 << 2)
+#define FUSB300_EPSET1_TYPEBLK (2 << 2)
+#define FUSB300_EPSET1_TYPEINT (3 << 2)
+#define FUSB300_EPSET1_TYPE(x) ((x & 0x3) << 2)
+#define FUSB300_EPSET1_TYPE_MSK (0x3 << 2)
+#define FUSB300_EPSET1_DIROUT (0 << 1)
+#define FUSB300_EPSET1_DIRIN (1 << 1)
+#define FUSB300_EPSET1_DIR(x) ((x & 0x1) << 1)
+#define FUSB300_EPSET1_DIR_MSK ((0x1) << 1)
+#define FUSB300_EPSET1_ACTDIS 0
+#define FUSB300_EPSET1_ACTEN 1
+
+/*
+ * EPn Setting 2 (EPn_SET2, offset = 028H+(n-1)*30H, n=1~15)
+ */
+#define FUSB300_EPSET2_ADDROFS(x) ((x & 0x7FFF) << 16)
+#define FUSB300_EPSET2_ADDROFS_MSK (0x7fff << 16)
+#define FUSB300_EPSET2_MPS(x) (x & 0x7FF)
+#define FUSB300_EPSET2_MPS_MSK 0x7FF
+
+/*
+ * EPn FIFO Register (offset = 02CH+(n-1)*30H)
+ */
+#define FUSB300_FFR_RST (1 << 31)
+#define FUSB300_FF_FUL (1 << 30)
+#define FUSB300_FF_EMPTY (1 << 29)
+#define FUSB300_FFR_BYCNT 0x1FFFF
+
+/*
+ * EPn Stream ID (EPn_STR_ID, offset = 040H+(n-1)*30H, n=1~15)
+ */
+#define FUSB300_STRID_STREN (1 << 16)
+#define FUSB300_STRID_STRID(x) (x & 0xFFFF)
+
+/*
+ * HS PHY Test Mode (offset = 300H)
+ */
+#define FUSB300_HSPTM_TSTPKDONE (1 << 4)
+#define FUSB300_HSPTM_TSTPKT (1 << 3)
+#define FUSB300_HSPTM_TSTSET0NAK (1 << 2)
+#define FUSB300_HSPTM_TSTKSTA (1 << 1)
+#define FUSB300_HSPTM_TSTJSTA (1 << 0)
+
+/*
+ * HS Control Register (offset = 304H)
+ */
+#define FUSB300_HSCR_HS_LPM_PERMIT (1 << 8)
+#define FUSB300_HSCR_HS_LPM_RMWKUP (1 << 7)
+#define FUSB300_HSCR_CAP_LPM_RMWKUP (1 << 6)
+#define FUSB300_HSCR_HS_GOSUSP (1 << 5)
+#define FUSB300_HSCR_HS_GORMWKU (1 << 4)
+#define FUSB300_HSCR_CAP_RMWKUP (1 << 3)
+#define FUSB300_HSCR_IDLECNT_0MS 0
+#define FUSB300_HSCR_IDLECNT_1MS 1
+#define FUSB300_HSCR_IDLECNT_2MS 2
+#define FUSB300_HSCR_IDLECNT_3MS 3
+#define FUSB300_HSCR_IDLECNT_4MS 4
+#define FUSB300_HSCR_IDLECNT_5MS 5
+#define FUSB300_HSCR_IDLECNT_6MS 6
+#define FUSB300_HSCR_IDLECNT_7MS 7
+
+/*
+ * SS Controller Register 0 (offset = 308H)
+ */
+#define FUSB300_SSCR0_MAX_INTERVAL(x) ((x & 0x7) << 4)
+#define FUSB300_SSCR0_U2_FUN_EN (1 << 1)
+#define FUSB300_SSCR0_U1_FUN_EN (1 << 0)
+
+/*
+ * SS Controller Register 1 (offset = 30CH)
+ */
+#define FUSB300_SSCR1_GO_U3_DONE (1 << 8)
+#define FUSB300_SSCR1_TXDEEMPH_LEVEL (1 << 7)
+#define FUSB300_SSCR1_DIS_SCRMB (1 << 6)
+#define FUSB300_SSCR1_FORCE_RECOVERY (1 << 5)
+#define FUSB300_SSCR1_U3_WAKEUP_EN (1 << 4)
+#define FUSB300_SSCR1_U2_EXIT_EN (1 << 3)
+#define FUSB300_SSCR1_U1_EXIT_EN (1 << 2)
+#define FUSB300_SSCR1_U2_ENTRY_EN (1 << 1)
+#define FUSB300_SSCR1_U1_ENTRY_EN (1 << 0)
+
+/*
+ * SS Controller Register 2 (offset = 310H)
+ */
+#define FUSB300_SSCR2_SS_TX_SWING (1 << 25)
+#define FUSB300_SSCR2_FORCE_LINKPM_ACCEPT (1 << 24)
+#define FUSB300_SSCR2_U2_INACT_TIMEOUT(x) ((x & 0xFF) << 16)
+#define FUSB300_SSCR2_U1TIMEOUT(x) ((x & 0xFF) << 8)
+#define FUSB300_SSCR2_U2TIMEOUT(x) (x & 0xFF)
+
+/*
+ * SS Device Notification Control (DEV_NOTF, offset = 314H)
+ */
+#define FUSB300_DEVNOTF_CONTEXT0(x) ((x & 0xFFFFFF) << 8)
+#define FUSB300_DEVNOTF_TYPE_DIS 0
+#define FUSB300_DEVNOTF_TYPE_FUNCWAKE 1
+#define FUSB300_DEVNOTF_TYPE_LTM 2
+#define FUSB300_DEVNOTF_TYPE_BUSINT_ADJMSG 3
+
+/*
+ * BFM Arbiter Priority Register (BFM_ARB, offset = 31CH)
+ */
+#define FUSB300_BFMARB_ARB_M1 (1 << 3)
+#define FUSB300_BFMARB_ARB_M0 (1 << 2)
+#define FUSB300_BFMARB_ARB_S1 (1 << 1)
+#define FUSB300_BFMARB_ARB_S0 1
+
+/*
+ * Vendor Specific IO Control Register (offset = 320H)
+ */
+#define FUSB300_VSIC_VCTLOAD_N (1 << 8)
+#define FUSB300_VSIC_VCTL(x) (x & 0x3F)
+
+/*
+ * SOF Mask Timer (offset = 324H)
+ */
+#define FUSB300_SOF_MASK_TIMER_HS 0x044c
+#define FUSB300_SOF_MASK_TIMER_FS 0x2710
+
+/*
+ * Error Flag and Control Status (offset = 328H)
+ */
+#define FUSB300_EFCS_PM_STATE_U3 3
+#define FUSB300_EFCS_PM_STATE_U2 2
+#define FUSB300_EFCS_PM_STATE_U1 1
+#define FUSB300_EFCS_PM_STATE_U0 0
+
+/*
+ * Interrupt Group 0 Register (offset = 400H)
+ */
+#define FUSB300_IGR0_EP15_PRD_INT (1 << 31)
+#define FUSB300_IGR0_EP14_PRD_INT (1 << 30)
+#define FUSB300_IGR0_EP13_PRD_INT (1 << 29)
+#define FUSB300_IGR0_EP12_PRD_INT (1 << 28)
+#define FUSB300_IGR0_EP11_PRD_INT (1 << 27)
+#define FUSB300_IGR0_EP10_PRD_INT (1 << 26)
+#define FUSB300_IGR0_EP9_PRD_INT (1 << 25)
+#define FUSB300_IGR0_EP8_PRD_INT (1 << 24)
+#define FUSB300_IGR0_EP7_PRD_INT (1 << 23)
+#define FUSB300_IGR0_EP6_PRD_INT (1 << 22)
+#define FUSB300_IGR0_EP5_PRD_INT (1 << 21)
+#define FUSB300_IGR0_EP4_PRD_INT (1 << 20)
+#define FUSB300_IGR0_EP3_PRD_INT (1 << 19)
+#define FUSB300_IGR0_EP2_PRD_INT (1 << 18)
+#define FUSB300_IGR0_EP1_PRD_INT (1 << 17)
+#define FUSB300_IGR0_EPn_PRD_INT(n) (1 << (n + 16))
+
+#define FUSB300_IGR0_EP15_FIFO_INT (1 << 15)
+#define FUSB300_IGR0_EP14_FIFO_INT (1 << 14)
+#define FUSB300_IGR0_EP13_FIFO_INT (1 << 13)
+#define FUSB300_IGR0_EP12_FIFO_INT (1 << 12)
+#define FUSB300_IGR0_EP11_FIFO_INT (1 << 11)
+#define FUSB300_IGR0_EP10_FIFO_INT (1 << 10)
+#define FUSB300_IGR0_EP9_FIFO_INT (1 << 9)
+#define FUSB300_IGR0_EP8_FIFO_INT (1 << 8)
+#define FUSB300_IGR0_EP7_FIFO_INT (1 << 7)
+#define FUSB300_IGR0_EP6_FIFO_INT (1 << 6)
+#define FUSB300_IGR0_EP5_FIFO_INT (1 << 5)
+#define FUSB300_IGR0_EP4_FIFO_INT (1 << 4)
+#define FUSB300_IGR0_EP3_FIFO_INT (1 << 3)
+#define FUSB300_IGR0_EP2_FIFO_INT (1 << 2)
+#define FUSB300_IGR0_EP1_FIFO_INT (1 << 1)
+#define FUSB300_IGR0_EPn_FIFO_INT(n) (1 << n)
+
+/*
+ * Interrupt Group 1 Register (offset = 404H)
+ */
+#define FUSB300_IGR1_INTGRP5 (1 << 31)
+#define FUSB300_IGR1_VBUS_CHG_INT (1 << 30)
+#define FUSB300_IGR1_SYNF1_EMPTY_INT (1 << 29)
+#define FUSB300_IGR1_SYNF0_EMPTY_INT (1 << 28)
+#define FUSB300_IGR1_U3_EXIT_FAIL_INT (1 << 27)
+#define FUSB300_IGR1_U2_EXIT_FAIL_INT (1 << 26)
+#define FUSB300_IGR1_U1_EXIT_FAIL_INT (1 << 25)
+#define FUSB300_IGR1_U2_ENTRY_FAIL_INT (1 << 24)
+#define FUSB300_IGR1_U1_ENTRY_FAIL_INT (1 << 23)
+#define FUSB300_IGR1_U3_EXIT_INT (1 << 22)
+#define FUSB300_IGR1_U2_EXIT_INT (1 << 21)
+#define FUSB300_IGR1_U1_EXIT_INT (1 << 20)
+#define FUSB300_IGR1_U3_ENTRY_INT (1 << 19)
+#define FUSB300_IGR1_U2_ENTRY_INT (1 << 18)
+#define FUSB300_IGR1_U1_ENTRY_INT (1 << 17)
+#define FUSB300_IGR1_HOT_RST_INT (1 << 16)
+#define FUSB300_IGR1_WARM_RST_INT (1 << 15)
+#define FUSB300_IGR1_RESM_INT (1 << 14)
+#define FUSB300_IGR1_SUSP_INT (1 << 13)
+#define FUSB300_IGR1_HS_LPM_INT (1 << 12)
+#define FUSB300_IGR1_USBRST_INT (1 << 11)
+#define FUSB300_IGR1_DEV_MODE_CHG_INT (1 << 9)
+#define FUSB300_IGR1_CX_COMABT_INT (1 << 8)
+#define FUSB300_IGR1_CX_COMFAIL_INT (1 << 7)
+#define FUSB300_IGR1_CX_CMDEND_INT (1 << 6)
+#define FUSB300_IGR1_CX_OUT_INT (1 << 5)
+#define FUSB300_IGR1_CX_IN_INT (1 << 4)
+#define FUSB300_IGR1_CX_SETUP_INT (1 << 3)
+#define FUSB300_IGR1_INTGRP4 (1 << 2)
+#define FUSB300_IGR1_INTGRP3 (1 << 1)
+#define FUSB300_IGR1_INTGRP2 (1 << 0)
+
+/*
+ * Interrupt Group 2 Register (offset = 408H)
+ */
+#define FUSB300_IGR2_EP6_STR_ACCEPT_INT (1 << 29)
+#define FUSB300_IGR2_EP6_STR_RESUME_INT (1 << 28)
+#define FUSB300_IGR2_EP6_STR_REQ_INT (1 << 27)
+#define FUSB300_IGR2_EP6_STR_NOTRDY_INT (1 << 26)
+#define FUSB300_IGR2_EP6_STR_PRIME_INT (1 << 25)
+#define FUSB300_IGR2_EP5_STR_ACCEPT_INT (1 << 24)
+#define FUSB300_IGR2_EP5_STR_RESUME_INT (1 << 23)
+#define FUSB300_IGR2_EP5_STR_REQ_INT (1 << 22)
+#define FUSB300_IGR2_EP5_STR_NOTRDY_INT (1 << 21)
+#define FUSB300_IGR2_EP5_STR_PRIME_INT (1 << 20)
+#define FUSB300_IGR2_EP4_STR_ACCEPT_INT (1 << 19)
+#define FUSB300_IGR2_EP4_STR_RESUME_INT (1 << 18)
+#define FUSB300_IGR2_EP4_STR_REQ_INT (1 << 17)
+#define FUSB300_IGR2_EP4_STR_NOTRDY_INT (1 << 16)
+#define FUSB300_IGR2_EP4_STR_PRIME_INT (1 << 15)
+#define FUSB300_IGR2_EP3_STR_ACCEPT_INT (1 << 14)
+#define FUSB300_IGR2_EP3_STR_RESUME_INT (1 << 13)
+#define FUSB300_IGR2_EP3_STR_REQ_INT (1 << 12)
+#define FUSB300_IGR2_EP3_STR_NOTRDY_INT (1 << 11)
+#define FUSB300_IGR2_EP3_STR_PRIME_INT (1 << 10)
+#define FUSB300_IGR2_EP2_STR_ACCEPT_INT (1 << 9)
+#define FUSB300_IGR2_EP2_STR_RESUME_INT (1 << 8)
+#define FUSB300_IGR2_EP2_STR_REQ_INT (1 << 7)
+#define FUSB300_IGR2_EP2_STR_NOTRDY_INT (1 << 6)
+#define FUSB300_IGR2_EP2_STR_PRIME_INT (1 << 5)
+#define FUSB300_IGR2_EP1_STR_ACCEPT_INT (1 << 4)
+#define FUSB300_IGR2_EP1_STR_RESUME_INT (1 << 3)
+#define FUSB300_IGR2_EP1_STR_REQ_INT (1 << 2)
+#define FUSB300_IGR2_EP1_STR_NOTRDY_INT (1 << 1)
+#define FUSB300_IGR2_EP1_STR_PRIME_INT (1 << 0)
+
+#define FUSB300_IGR2_EP_STR_ACCEPT_INT(n) (1 << (5 * n - 1))
+#define FUSB300_IGR2_EP_STR_RESUME_INT(n) (1 << (5 * n - 2))
+#define FUSB300_IGR2_EP_STR_REQ_INT(n) (1 << (5 * n - 3))
+#define FUSB300_IGR2_EP_STR_NOTRDY_INT(n) (1 << (5 * n - 4))
+#define FUSB300_IGR2_EP_STR_PRIME_INT(n) (1 << (5 * n - 5))
+
+/*
+ * Interrupt Group 3 Register (offset = 40CH)
+ */
+#define FUSB300_IGR3_EP12_STR_ACCEPT_INT (1 << 29)
+#define FUSB300_IGR3_EP12_STR_RESUME_INT (1 << 28)
+#define FUSB300_IGR3_EP12_STR_REQ_INT (1 << 27)
+#define FUSB300_IGR3_EP12_STR_NOTRDY_INT (1 << 26)
+#define FUSB300_IGR3_EP12_STR_PRIME_INT (1 << 25)
+#define FUSB300_IGR3_EP11_STR_ACCEPT_INT (1 << 24)
+#define FUSB300_IGR3_EP11_STR_RESUME_INT (1 << 23)
+#define FUSB300_IGR3_EP11_STR_REQ_INT (1 << 22)
+#define FUSB300_IGR3_EP11_STR_NOTRDY_INT (1 << 21)
+#define FUSB300_IGR3_EP11_STR_PRIME_INT (1 << 20)
+#define FUSB300_IGR3_EP10_STR_ACCEPT_INT (1 << 19)
+#define FUSB300_IGR3_EP10_STR_RESUME_INT (1 << 18)
+#define FUSB300_IGR3_EP10_STR_REQ_INT (1 << 17)
+#define FUSB300_IGR3_EP10_STR_NOTRDY_INT (1 << 16)
+#define FUSB300_IGR3_EP10_STR_PRIME_INT (1 << 15)
+#define FUSB300_IGR3_EP9_STR_ACCEPT_INT (1 << 14)
+#define FUSB300_IGR3_EP9_STR_RESUME_INT (1 << 13)
+#define FUSB300_IGR3_EP9_STR_REQ_INT (1 << 12)
+#define FUSB300_IGR3_EP9_STR_NOTRDY_INT (1 << 11)
+#define FUSB300_IGR3_EP9_STR_PRIME_INT (1 << 10)
+#define FUSB300_IGR3_EP8_STR_ACCEPT_INT (1 << 9)
+#define FUSB300_IGR3_EP8_STR_RESUME_INT (1 << 8)
+#define FUSB300_IGR3_EP8_STR_REQ_INT (1 << 7)
+#define FUSB300_IGR3_EP8_STR_NOTRDY_INT (1 << 6)
+#define FUSB300_IGR3_EP8_STR_PRIME_INT (1 << 5)
+#define FUSB300_IGR3_EP7_STR_ACCEPT_INT (1 << 4)
+#define FUSB300_IGR3_EP7_STR_RESUME_INT (1 << 3)
+#define FUSB300_IGR3_EP7_STR_REQ_INT (1 << 2)
+#define FUSB300_IGR3_EP7_STR_NOTRDY_INT (1 << 1)
+#define FUSB300_IGR3_EP7_STR_PRIME_INT (1 << 0)
+
+#define FUSB300_IGR3_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
+#define FUSB300_IGR3_EP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
+#define FUSB300_IGR3_EP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
+#define FUSB300_IGR3_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
+#define FUSB300_IGR3_EP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
+
+/*
+ * Interrupt Group 4 Register (offset = 410H)
+ */
+#define FUSB300_IGR4_EP15_RX0_INT (1 << 31)
+#define FUSB300_IGR4_EP14_RX0_INT (1 << 30)
+#define FUSB300_IGR4_EP13_RX0_INT (1 << 29)
+#define FUSB300_IGR4_EP12_RX0_INT (1 << 28)
+#define FUSB300_IGR4_EP11_RX0_INT (1 << 27)
+#define FUSB300_IGR4_EP10_RX0_INT (1 << 26)
+#define FUSB300_IGR4_EP9_RX0_INT (1 << 25)
+#define FUSB300_IGR4_EP8_RX0_INT (1 << 24)
+#define FUSB300_IGR4_EP7_RX0_INT (1 << 23)
+#define FUSB300_IGR4_EP6_RX0_INT (1 << 22)
+#define FUSB300_IGR4_EP5_RX0_INT (1 << 21)
+#define FUSB300_IGR4_EP4_RX0_INT (1 << 20)
+#define FUSB300_IGR4_EP3_RX0_INT (1 << 19)
+#define FUSB300_IGR4_EP2_RX0_INT (1 << 18)
+#define FUSB300_IGR4_EP1_RX0_INT (1 << 17)
+#define FUSB300_IGR4_EP_RX0_INT(x) (1 << (x + 16))
+#define FUSB300_IGR4_EP15_STR_ACCEPT_INT (1 << 14)
+#define FUSB300_IGR4_EP15_STR_RESUME_INT (1 << 13)
+#define FUSB300_IGR4_EP15_STR_REQ_INT (1 << 12)
+#define FUSB300_IGR4_EP15_STR_NOTRDY_INT (1 << 11)
+#define FUSB300_IGR4_EP15_STR_PRIME_INT (1 << 10)
+#define FUSB300_IGR4_EP14_STR_ACCEPT_INT (1 << 9)
+#define FUSB300_IGR4_EP14_STR_RESUME_INT (1 << 8)
+#define FUSB300_IGR4_EP14_STR_REQ_INT (1 << 7)
+#define FUSB300_IGR4_EP14_STR_NOTRDY_INT (1 << 6)
+#define FUSB300_IGR4_EP14_STR_PRIME_INT (1 << 5)
+#define FUSB300_IGR4_EP13_STR_ACCEPT_INT (1 << 4)
+#define FUSB300_IGR4_EP13_STR_RESUME_INT (1 << 3)
+#define FUSB300_IGR4_EP13_STR_REQ_INT (1 << 2)
+#define FUSB300_IGR4_EP13_STR_NOTRDY_INT (1 << 1)
+#define FUSB300_IGR4_EP13_STR_PRIME_INT (1 << 0)
+
+#define FUSB300_IGR4_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 12) - 1))
+#define FUSB300_IGR4_EP_STR_RESUME_INT(n) (1 << (5 * (n - 12) - 2))
+#define FUSB300_IGR4_EP_STR_REQ_INT(n) (1 << (5 * (n - 12) - 3))
+#define FUSB300_IGR4_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 12) - 4))
+#define FUSB300_IGR4_EP_STR_PRIME_INT(n) (1 << (5 * (n - 12) - 5))
+
+/*
+ * Interrupt Group 5 Register (offset = 414H)
+ */
+#define FUSB300_IGR5_EP_STL_INT(n) (1 << n)
+
+/*
+ * Interrupt Enable Group 0 Register (offset = 420H)
+ */
+#define FUSB300_IGER0_EEP15_PRD_INT (1 << 31)
+#define FUSB300_IGER0_EEP14_PRD_INT (1 << 30)
+#define FUSB300_IGER0_EEP13_PRD_INT (1 << 29)
+#define FUSB300_IGER0_EEP12_PRD_INT (1 << 28)
+#define FUSB300_IGER0_EEP11_PRD_INT (1 << 27)
+#define FUSB300_IGER0_EEP10_PRD_INT (1 << 26)
+#define FUSB300_IGER0_EEP9_PRD_INT (1 << 25)
+#define FUSB300_IGER0_EEP8_PRD_INT (1 << 24)
+#define FUSB300_IGER0_EEP7_PRD_INT (1 << 23)
+#define FUSB300_IGER0_EEP6_PRD_INT (1 << 22)
+#define FUSB300_IGER0_EEP5_PRD_INT (1 << 21)
+#define FUSB300_IGER0_EEP4_PRD_INT (1 << 20)
+#define FUSB300_IGER0_EEP3_PRD_INT (1 << 19)
+#define FUSB300_IGER0_EEP2_PRD_INT (1 << 18)
+#define FUSB300_IGER0_EEP1_PRD_INT (1 << 17)
+#define FUSB300_IGER0_EEPn_PRD_INT(n) (1 << (n + 16))
+
+#define FUSB300_IGER0_EEP15_FIFO_INT (1 << 15)
+#define FUSB300_IGER0_EEP14_FIFO_INT (1 << 14)
+#define FUSB300_IGER0_EEP13_FIFO_INT (1 << 13)
+#define FUSB300_IGER0_EEP12_FIFO_INT (1 << 12)
+#define FUSB300_IGER0_EEP11_FIFO_INT (1 << 11)
+#define FUSB300_IGER0_EEP10_FIFO_INT (1 << 10)
+#define FUSB300_IGER0_EEP9_FIFO_INT (1 << 9)
+#define FUSB300_IGER0_EEP8_FIFO_INT (1 << 8)
+#define FUSB300_IGER0_EEP7_FIFO_INT (1 << 7)
+#define FUSB300_IGER0_EEP6_FIFO_INT (1 << 6)
+#define FUSB300_IGER0_EEP5_FIFO_INT (1 << 5)
+#define FUSB300_IGER0_EEP4_FIFO_INT (1 << 4)
+#define FUSB300_IGER0_EEP3_FIFO_INT (1 << 3)
+#define FUSB300_IGER0_EEP2_FIFO_INT (1 << 2)
+#define FUSB300_IGER0_EEP1_FIFO_INT (1 << 1)
+#define FUSB300_IGER0_EEPn_FIFO_INT(n) (1 << n)
+
+/*
+ * Interrupt Enable Group 1 Register (offset = 424H)
+ */
+#define FUSB300_IGER1_EINT_GRP5 (1 << 31)
+#define FUSB300_IGER1_VBUS_CHG_INT (1 << 30)
+#define FUSB300_IGER1_SYNF1_EMPTY_INT (1 << 29)
+#define FUSB300_IGER1_SYNF0_EMPTY_INT (1 << 28)
+#define FUSB300_IGER1_U3_EXIT_FAIL_INT (1 << 27)
+#define FUSB300_IGER1_U2_EXIT_FAIL_INT (1 << 26)
+#define FUSB300_IGER1_U1_EXIT_FAIL_INT (1 << 25)
+#define FUSB300_IGER1_U2_ENTRY_FAIL_INT (1 << 24)
+#define FUSB300_IGER1_U1_ENTRY_FAIL_INT (1 << 23)
+#define FUSB300_IGER1_U3_EXIT_INT (1 << 22)
+#define FUSB300_IGER1_U2_EXIT_INT (1 << 21)
+#define FUSB300_IGER1_U1_EXIT_INT (1 << 20)
+#define FUSB300_IGER1_U3_ENTRY_INT (1 << 19)
+#define FUSB300_IGER1_U2_ENTRY_INT (1 << 18)
+#define FUSB300_IGER1_U1_ENTRY_INT (1 << 17)
+#define FUSB300_IGER1_HOT_RST_INT (1 << 16)
+#define FUSB300_IGER1_WARM_RST_INT (1 << 15)
+#define FUSB300_IGER1_RESM_INT (1 << 14)
+#define FUSB300_IGER1_SUSP_INT (1 << 13)
+#define FUSB300_IGER1_LPM_INT (1 << 12)
+#define FUSB300_IGER1_HS_RST_INT (1 << 11)
+#define FUSB300_IGER1_EDEV_MODE_CHG_INT (1 << 9)
+#define FUSB300_IGER1_CX_COMABT_INT (1 << 8)
+#define FUSB300_IGER1_CX_COMFAIL_INT (1 << 7)
+#define FUSB300_IGER1_CX_CMDEND_INT (1 << 6)
+#define FUSB300_IGER1_CX_OUT_INT (1 << 5)
+#define FUSB300_IGER1_CX_IN_INT (1 << 4)
+#define FUSB300_IGER1_CX_SETUP_INT (1 << 3)
+#define FUSB300_IGER1_INTGRP4 (1 << 2)
+#define FUSB300_IGER1_INTGRP3 (1 << 1)
+#define FUSB300_IGER1_INTGRP2 (1 << 0)
+
+/*
+ * Interrupt Enable Group 2 Register (offset = 428H)
+ */
+#define FUSB300_IGER2_EEP_STR_ACCEPT_INT(n) (1 << (5 * n - 1))
+#define FUSB300_IGER2_EEP_STR_RESUME_INT(n) (1 << (5 * n - 2))
+#define FUSB300_IGER2_EEP_STR_REQ_INT(n) (1 << (5 * n - 3))
+#define FUSB300_IGER2_EEP_STR_NOTRDY_INT(n) (1 << (5 * n - 4))
+#define FUSB300_IGER2_EEP_STR_PRIME_INT(n) (1 << (5 * n - 5))
+
+/*
+ * Interrupt Enable Group 3 Register (offset = 42CH)
+ */
+#define FUSB300_IGER3_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
+#define FUSB300_IGER3_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
+#define FUSB300_IGER3_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
+#define FUSB300_IGER3_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
+#define FUSB300_IGER3_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
+
+/*
+ * Interrupt Enable Group 4 Register (offset = 430H)
+ */
+#define FUSB300_IGER4_EEP_RX0_INT(n) (1 << (n + 16))
+#define FUSB300_IGER4_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
+#define FUSB300_IGER4_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
+#define FUSB300_IGER4_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
+#define FUSB300_IGER4_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
+#define FUSB300_IGER4_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
+
+/* EP PRD Ready (EP_PRD_RDY, offset = 504H) */
+
+#define FUSB300_EPPRDR_EP15_PRD_RDY (1 << 15)
+#define FUSB300_EPPRDR_EP14_PRD_RDY (1 << 14)
+#define FUSB300_EPPRDR_EP13_PRD_RDY (1 << 13)
+#define FUSB300_EPPRDR_EP12_PRD_RDY (1 << 12)
+#define FUSB300_EPPRDR_EP11_PRD_RDY (1 << 11)
+#define FUSB300_EPPRDR_EP10_PRD_RDY (1 << 10)
+#define FUSB300_EPPRDR_EP9_PRD_RDY (1 << 9)
+#define FUSB300_EPPRDR_EP8_PRD_RDY (1 << 8)
+#define FUSB300_EPPRDR_EP7_PRD_RDY (1 << 7)
+#define FUSB300_EPPRDR_EP6_PRD_RDY (1 << 6)
+#define FUSB300_EPPRDR_EP5_PRD_RDY (1 << 5)
+#define FUSB300_EPPRDR_EP4_PRD_RDY (1 << 4)
+#define FUSB300_EPPRDR_EP3_PRD_RDY (1 << 3)
+#define FUSB300_EPPRDR_EP2_PRD_RDY (1 << 2)
+#define FUSB300_EPPRDR_EP1_PRD_RDY (1 << 1)
+#define FUSB300_EPPRDR_EP_PRD_RDY(n) (1 << n)
+
+/* AHB Bus Control Register (offset = 514H) */
+#define FUSB300_AHBBCR_S1_SPLIT_ON (1 << 17)
+#define FUSB300_AHBBCR_S0_SPLIT_ON (1 << 16)
+#define FUSB300_AHBBCR_S1_1entry (0 << 12)
+#define FUSB300_AHBBCR_S1_4entry (3 << 12)
+#define FUSB300_AHBBCR_S1_8entry (5 << 12)
+#define FUSB300_AHBBCR_S1_16entry (7 << 12)
+#define FUSB300_AHBBCR_S0_1entry (0 << 8)
+#define FUSB300_AHBBCR_S0_4entry (3 << 8)
+#define FUSB300_AHBBCR_S0_8entry (5 << 8)
+#define FUSB300_AHBBCR_S0_16entry (7 << 8)
+#define FUSB300_AHBBCR_M1_BURST_SINGLE (0 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR (1 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR4 (3 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR8 (5 << 4)
+#define FUSB300_AHBBCR_M1_BURST_INCR16 (7 << 4)
+#define FUSB300_AHBBCR_M0_BURST_SINGLE 0
+#define FUSB300_AHBBCR_M0_BURST_INCR 1
+#define FUSB300_AHBBCR_M0_BURST_INCR4 3
+#define FUSB300_AHBBCR_M0_BURST_INCR8 5
+#define FUSB300_AHBBCR_M0_BURST_INCR16 7
+#define FUSB300_IGER5_EEP_STL_INT(n) (1 << (n))
+
+/* WORD 0 Data Structure of PRD Table */
+#define FUSB300_EPPRD0_M (1 << 30)
+#define FUSB300_EPPRD0_O (1 << 29)
+/* marks the finished PRD */
+#define FUSB300_EPPRD0_F (1 << 28)
+#define FUSB300_EPPRD0_I (1 << 27)
+#define FUSB300_EPPRD0_A (1 << 26)
+/* makes the HW point back to the first PRD next time */
+#define FUSB300_EPPRD0_L (1 << 25)
+#define FUSB300_EPPRD0_H (1 << 24)
+#define FUSB300_EPPRD0_BTC(n) ((n) & 0xFFFFFF)
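+/* word 0 is built by OR-ing flag bits with FUSB300_EPPRD0_BTC(byte_count) */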
+
+/*----------------------------------------------------------------------*/
+#define FUSB300_MAX_NUM_EP 16
+
+#define FUSB300_FIFO_ENTRY_NUM 8
+#define FUSB300_MAX_FIFO_ENTRY 8
+
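+/* SuperSpeed maxpacket limits: 512-byte control, 1024-byte bulk/int/iso */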
+#define SS_CTL_MAX_PACKET_SIZE 0x200
+#define SS_BULK_MAX_PACKET_SIZE 0x400
+#define SS_INT_MAX_PACKET_SIZE 0x400
+#define SS_ISO_MAX_PACKET_SIZE 0x400
+
+#define HS_BULK_MAX_PACKET_SIZE 0x200
+#define HS_CTL_MAX_PACKET_SIZE 0x40
+#define HS_INT_MAX_PACKET_SIZE 0x400
+#define HS_ISO_MAX_PACKET_SIZE 0x400
+
+struct fusb300_ep_info {
+ u8 epnum;
+ u8 type;
+ u8 interval;
+ u8 dir_in;
+ u16 maxpacket;
+ u16 addrofs;
+ u16 bw_num;
+};
+
+struct fusb300_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct fusb300_ep {
+ struct usb_ep ep;
+ struct fusb300 *fusb300;
+
+ struct list_head queue;
+ unsigned stall:1;
+ unsigned wedged:1;
+ unsigned use_dma:1;
+
+ unsigned char epnum;
+ unsigned char type;
+};
+
+struct fusb300 {
+ spinlock_t lock;
+ void __iomem *reg;
+
+ unsigned long irq_trigger;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct fusb300_ep *ep[FUSB300_MAX_NUM_EP];
+
+ struct usb_request *ep0_req; /* for internal request */
+ __le16 ep0_data;
+ u32 ep0_length; /* for internal request */
+ u8 ep0_dir; /* 0/0x80 out/in */
+
+ u8 fifo_entry_num; /* next start fifo entry */
+ u32 addrofs; /* next fifo address offset */
+ u8 reenum; /* if re-enumeration */
+};
+
+#define to_fusb300(g) (container_of((g), struct fusb300, gadget))
+
+#endif
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
new file mode 100644
index 000000000..5ffb3d5c6
--- /dev/null
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
+ *
+ * Copyright (C) 2000-2002 Lineo
+ * by Stuart Lynne, Tom Rushworth, and Bruce Balden
+ * Copyright (C) 2002 Toshiba Corporation
+ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
+ */
+
+/*
+ * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
+ *
+ * - Endpoint numbering is fixed: ep{1,2,3}-bulk
+ * - Gadget drivers can choose ep maxpacket (8/16/32/64)
+ * - Gadget drivers can choose direction (IN, OUT)
+ * - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
+ */
+
+// #define VERBOSE /* extra debug messages (success too) */
+// #define USB_TRACE /* packet-level success messages */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+
+#include "goku_udc.h"
+
+#define DRIVER_DESC "TC86C001 USB Device Controller"
+#define DRIVER_VERSION "30-Oct 2003"
+
+static const char driver_name [] = "goku_udc";
+static const char driver_desc [] = DRIVER_DESC;
+
+MODULE_AUTHOR("source@mvista.com");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+
+/*
+ * IN dma behaves ok under testing, though the IN-dma abort paths don't
+ * seem to behave quite as expected.  Used by default.
+ *
+ * The chip documentation notes design problems with OUT dma and the
+ * common "short packet" transfer termination policy; OUT dma couldn't
+ * be enabled by default even if the OUT-dma abort problems had a
+ * resolution.
+ */
+static unsigned use_dma = 1;
+
+#if 0
+//#include <linux/moduleparam.h>
+/* "modprobe goku_udc use_dma=1" etc
+ * 0 to disable dma
+ * 1 to use IN dma only (normal operation)
+ * 2 to use IN and OUT dma
+ */
+module_param(use_dma, uint, S_IRUGO);
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static void nuke(struct goku_ep *, int status);
+
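+/* write an endpoint command, then give the controller time to latch it */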
+static inline void
+command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
+{
+ writel(COMMAND_EP(epnum) | command, &regs->Command);
+ udelay(300);
+}
+
+static int
+goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct goku_udc *dev;
+ struct goku_ep *ep;
+ u32 mode;
+ u16 max;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct goku_ep, ep);
+ if (!_ep || !desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+ dev = ep->dev;
+ if (ep == &dev->ep[0])
+ return -EINVAL;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+ if (ep->num != usb_endpoint_num(desc))
+ return -EINVAL;
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
+ != EPxSTATUS_EP_INVALID)
+ return -EBUSY;
+
+ /* enabling the no-toggle interrupt mode would need an api hook */
+ mode = 0;
+ max = get_unaligned_le16(&desc->wMaxPacketSize);
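+	/* encode maxpacket 8/16/32/64 as size code 0..3 in mode bits 4:3 */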
+ switch (max) {
+ case 64:
+ mode++;
+ fallthrough;
+ case 32:
+ mode++;
+ fallthrough;
+ case 16:
+ mode++;
+ fallthrough;
+ case 8:
+ mode <<= 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mode |= 2 << 1; /* bulk, or intr-with-toggle */
+
+ /* ep1/ep2 dma direction is chosen early; it works in the other
+ * direction, with pio. be cautious with out-dma.
+ */
+ ep->is_in = usb_endpoint_dir_in(desc);
+ if (ep->is_in) {
+ mode |= 1;
+ ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
+ } else {
+ ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
+ if (ep->dma)
+ DBG(dev, "%s out-dma hides short packets\n",
+ ep->ep.name);
+ }
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+
+ /* ep1 and ep2 can do double buffering and/or dma */
+ if (ep->num < 3) {
+ struct goku_udc_regs __iomem *regs = ep->dev->regs;
+ u32 tmp;
+
+ /* double buffer except (for now) with pio in */
+ tmp = ((ep->dma || !ep->is_in)
+ ? 0x10 /* double buffered */
+ : 0x11 /* single buffer */
+ ) << ep->num;
+ tmp |= readl(&regs->EPxSingle);
+ writel(tmp, &regs->EPxSingle);
+
+ tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
+ tmp |= readl(&regs->EPxBCS);
+ writel(tmp, &regs->EPxBCS);
+ }
+ writel(mode, ep->reg_mode);
+ command(ep->dev->regs, COMMAND_RESET, ep->num);
+ ep->ep.maxpacket = max;
+ ep->stopped = 0;
+ ep->ep.desc = desc;
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+ DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
+ ep->is_in ? "IN" : "OUT",
+ ep->dma ? "dma" : "pio",
+ max);
+
+ return 0;
+}
+
+static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
+{
+ struct goku_udc *dev = ep->dev;
+
+ if (regs) {
+ command(regs, COMMAND_INVALID, ep->num);
+ if (ep->num) {
+ if (ep->num == UDC_MSTWR_ENDPOINT)
+ dev->int_enable &= ~(INT_MSTWREND
+ |INT_MSTWRTMOUT);
+ else if (ep->num == UDC_MSTRD_ENDPOINT)
+ dev->int_enable &= ~INT_MSTRDEND;
+ dev->int_enable &= ~INT_EPxDATASET (ep->num);
+ } else
+ dev->int_enable &= ~INT_EP0;
+ writel(dev->int_enable, &regs->int_enable);
+ readl(&regs->int_enable);
+ if (ep->num < 3) {
+ struct goku_udc_regs __iomem *r = ep->dev->regs;
+ u32 tmp;
+
+ tmp = readl(&r->EPxSingle);
+ tmp &= ~(0x11 << ep->num);
+ writel(tmp, &r->EPxSingle);
+
+ tmp = readl(&r->EPxBCS);
+ tmp &= ~(0x11 << ep->num);
+ writel(tmp, &r->EPxBCS);
+ }
+ /* reset dma in case we're still using it */
+ if (ep->dma) {
+ u32 master;
+
+ master = readl(&regs->dma_master) & MST_RW_BITS;
+ if (ep->num == UDC_MSTWR_ENDPOINT) {
+ master &= ~MST_W_BITS;
+ master |= MST_WR_RESET;
+ } else {
+ master &= ~MST_R_BITS;
+ master |= MST_RD_RESET;
+ }
+ writel(master, &regs->dma_master);
+ }
+ }
+
+ usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+ ep->irqs = 0;
+ ep->dma = 0;
+}
+
+static int goku_ep_disable(struct usb_ep *_ep)
+{
+ struct goku_ep *ep;
+ struct goku_udc *dev;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct goku_ep, ep);
+ if (!_ep || !ep->ep.desc)
+ return -ENODEV;
+ dev = ep->dev;
+ if (dev->ep0state == EP0_SUSPEND)
+ return -EBUSY;
+
+ VDBG(dev, "disable %s\n", _ep->name);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ nuke(ep, -ESHUTDOWN);
+ ep_reset(dev->regs, ep);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct goku_request *req;
+
+ if (!_ep)
+ return NULL;
+ req = kzalloc(sizeof *req, gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+static void
+goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct goku_request *req;
+
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct goku_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+done(struct goku_ep *ep, struct goku_request *req, int status)
+{
+ struct goku_udc *dev;
+ unsigned stopped = ep->stopped;
+
+ list_del_init(&req->queue);
+
+ if (likely(req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+
+ if (ep->dma)
+ usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
+
+#ifndef USB_TRACE
+ if (status && status != -ESHUTDOWN)
+#endif
+ VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&dev->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
+{
+ unsigned length, count;
+
+ length = min(req->req.length - req->req.actual, max);
+ req->req.actual += length;
+
+ count = length;
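+	/* the FIFO register consumes one byte per 32-bit write */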
+ while (likely(count--))
+ writel(*buf++, fifo);
+ return length;
+}
+
+// return: 0 = still running, 1 = completed, negative = errno
+static int write_fifo(struct goku_ep *ep, struct goku_request *req)
+{
+ struct goku_udc *dev = ep->dev;
+ u32 tmp;
+ u8 *buf;
+ unsigned count;
+ int is_last;
+
+ tmp = readl(&dev->regs->DataSet);
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
+ return -EL2HLT;
+
+ /* NOTE: just single-buffered PIO-IN for now. */
+ if (unlikely((tmp & DATASET_A(ep->num)) != 0))
+ return 0;
+
+ /* clear our "packet available" irq */
+ if (ep->num != 0)
+ writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
+
+ count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
+
+ /* last packet often short (sometimes a zlp, especially on ep0) */
+ if (unlikely(count != ep->ep.maxpacket)) {
+ writel(~(1<<ep->num), &dev->regs->EOP);
+ if (ep->num == 0) {
+ dev->ep[0].stopped = 1;
+ dev->ep0state = EP0_STATUS;
+ }
+ is_last = 1;
+ } else {
+ if (likely(req->req.length != req->req.actual)
+ || req->req.zero)
+ is_last = 0;
+ else
+ is_last = 1;
+ }
+#if 0 /* printk seemed to trash is_last...*/
+//#ifdef USB_TRACE
+ VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
+ ep->ep.name, count, is_last ? "/last" : "",
+ req->req.length - req->req.actual, req);
+#endif
+
+ /* requests complete when all IN data is in the FIFO,
+ * or sometimes later, if a zlp was needed.
+ */
+ if (is_last) {
+ done(ep, req, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int read_fifo(struct goku_ep *ep, struct goku_request *req)
+{
+ struct goku_udc_regs __iomem *regs;
+ u32 size, set;
+ u8 *buf;
+ unsigned bufferspace, is_short, dbuff;
+
+ regs = ep->dev->regs;
+top:
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+
+ if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
+ return -EL2HLT;
+
+ dbuff = (ep->num == 1 || ep->num == 2);
+ do {
+ /* ack dataset irq matching the status we'll handle */
+ if (ep->num != 0)
+ writel(~INT_EPxDATASET(ep->num), &regs->int_status);
+
+ set = readl(&regs->DataSet) & DATASET_AB(ep->num);
+ size = readl(&regs->EPxSizeLA[ep->num]);
+ bufferspace = req->req.length - req->req.actual;
+
+ /* usually do nothing without an OUT packet */
+ if (likely(ep->num != 0 || bufferspace != 0)) {
+ if (unlikely(set == 0))
+ break;
+ /* use ep1/ep2 double-buffering for OUT */
+ if (!(size & PACKET_ACTIVE))
+ size = readl(&regs->EPxSizeLB[ep->num]);
+ if (!(size & PACKET_ACTIVE)) /* "can't happen" */
+ break;
+ size &= DATASIZE; /* EPxSizeH == 0 */
+
+ /* ep0out no-out-data case for set_config, etc */
+ } else
+ size = 0;
+
+ /* read all bytes from this packet */
+ req->req.actual += size;
+ is_short = (size < ep->ep.maxpacket);
+#ifdef USB_TRACE
+ VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
+ ep->ep.name, size, is_short ? "/S" : "",
+ req, req->req.actual, req->req.length);
+#endif
+ while (likely(size-- != 0)) {
+ u8 byte = (u8) readl(ep->reg_fifo);
+
+ if (unlikely(bufferspace == 0)) {
+ /* this happens when the driver's buffer
+ * is smaller than what the host sent.
+ * discard the extra data in this packet.
+ */
+ if (req->req.status != -EOVERFLOW)
+ DBG(ep->dev, "%s overflow %u\n",
+ ep->ep.name, size);
+ req->req.status = -EOVERFLOW;
+ } else {
+ *buf++ = byte;
+ bufferspace--;
+ }
+ }
+
+ /* completion */
+ if (unlikely(is_short || req->req.actual == req->req.length)) {
+ if (unlikely(ep->num == 0)) {
+ /* non-control endpoints now usable? */
+ if (ep->dev->req_config)
+ writel(ep->dev->configured
+ ? USBSTATE_CONFIGURED
+ : 0,
+ &regs->UsbState);
+ /* ep0out status stage */
+ writel(~(1<<0), &regs->EOP);
+ ep->stopped = 1;
+ ep->dev->ep0state = EP0_STATUS;
+ }
+ done(ep, req, 0);
+
+ /* empty the second buffer asap */
+ if (dbuff && !list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct goku_request, queue);
+ goto top;
+ }
+ return 1;
+ }
+ } while (dbuff);
+ return 0;
+}
+
+static inline void
+pio_irq_enable(struct goku_udc *dev,
+ struct goku_udc_regs __iomem *regs, int epnum)
+{
+ dev->int_enable |= INT_EPxDATASET (epnum);
+ writel(dev->int_enable, &regs->int_enable);
+ /* write may still be posted */
+}
+
+static inline void
+pio_irq_disable(struct goku_udc *dev,
+ struct goku_udc_regs __iomem *regs, int epnum)
+{
+ dev->int_enable &= ~INT_EPxDATASET (epnum);
+ writel(dev->int_enable, &regs->int_enable);
+ /* write may still be posted */
+}
+
+static inline void
+pio_advance(struct goku_ep *ep)
+{
+ struct goku_request *req;
+
+ if (unlikely(list_empty (&ep->queue)))
+ return;
+ req = list_entry(ep->queue.next, struct goku_request, queue);
+ (ep->is_in ? write_fifo : read_fifo)(ep, req);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+// return: 0 = q running, 1 = q stopped, negative = errno
+static int start_dma(struct goku_ep *ep, struct goku_request *req)
+{
+ struct goku_udc_regs __iomem *regs = ep->dev->regs;
+ u32 master;
+ u32 start = req->req.dma;
+ u32 end = start + req->req.length - 1;
+
+ master = readl(&regs->dma_master) & MST_RW_BITS;
+
+ /* re-init the bits affecting IN dma; careful with zlps */
+ if (likely(ep->is_in)) {
+ if (unlikely(master & MST_RD_ENA)) {
+ DBG (ep->dev, "start, IN active dma %03x!!\n",
+ master);
+// return -EL2HLT;
+ }
+ writel(end, &regs->in_dma_end);
+ writel(start, &regs->in_dma_start);
+
+ master &= ~MST_R_BITS;
+ if (unlikely(req->req.length == 0))
+ master |= MST_RD_ENA | MST_RD_EOPB;
+ else if ((req->req.length % ep->ep.maxpacket) != 0
+ || req->req.zero)
+ master |= MST_RD_ENA | MST_EOPB_ENA;
+ else
+ master |= MST_RD_ENA | MST_EOPB_DIS;
+
+ ep->dev->int_enable |= INT_MSTRDEND;
+
+ /* Goku DMA-OUT merges short packets, which plays poorly with
+ * protocols where short packets mark the transfer boundaries.
+ * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
+ * ending transfers after 3 SOFs; we don't turn it on.
+ */
+ } else {
+ if (unlikely(master & MST_WR_ENA)) {
+ DBG (ep->dev, "start, OUT active dma %03x!!\n",
+ master);
+// return -EL2HLT;
+ }
+ writel(end, &regs->out_dma_end);
+ writel(start, &regs->out_dma_start);
+
+ master &= ~MST_W_BITS;
+ master |= MST_WR_ENA | MST_TIMEOUT_DIS;
+
+ ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
+ }
+
+ writel(master, &regs->dma_master);
+ writel(ep->dev->int_enable, &regs->int_enable);
+ return 0;
+}
+
+static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
+{
+ struct goku_request *req;
+ struct goku_udc_regs __iomem *regs = ep->dev->regs;
+ u32 master;
+
+ master = readl(&regs->dma_master);
+
+ if (unlikely(list_empty(&ep->queue))) {
+stop:
+ if (ep->is_in)
+ dev->int_enable &= ~INT_MSTRDEND;
+ else
+ dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
+ writel(dev->int_enable, &regs->int_enable);
+ return;
+ }
+ req = list_entry(ep->queue.next, struct goku_request, queue);
+
+ /* normal hw dma completion (not abort) */
+ if (likely(ep->is_in)) {
+ if (unlikely(master & MST_RD_ENA))
+ return;
+ req->req.actual = readl(&regs->in_dma_current);
+ } else {
+ if (unlikely(master & MST_WR_ENA))
+ return;
+
+ /* hardware merges short packets, and also hides packet
+ * overruns. a partial packet MAY be in the fifo here.
+ */
+ req->req.actual = readl(&regs->out_dma_current);
+ }
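+	/* the dma_current registers hold the address of the last byte moved */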
+ req->req.actual -= req->req.dma;
+ req->req.actual++;
+
+#ifdef USB_TRACE
+ VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
+ ep->ep.name, ep->is_in ? "IN" : "OUT",
+ req->req.actual, req->req.length, req);
+#endif
+ done(ep, req, 0);
+ if (list_empty(&ep->queue))
+ goto stop;
+ req = list_entry(ep->queue.next, struct goku_request, queue);
+ (void) start_dma(ep, req);
+}
+
+static void abort_dma(struct goku_ep *ep, int status)
+{
+ struct goku_udc_regs __iomem *regs = ep->dev->regs;
+ struct goku_request *req;
+ u32 curr, master;
+
+ /* NAK future host requests, hoping the implicit delay lets the
+ * dma engine finish reading (or writing) its latest packet and
+ * empty the dma buffer (up to 16 bytes).
+ *
+ * This avoids needing to clean up a partial packet in the fifo;
+ * we can't do that for IN without side effects to HALT and TOGGLE.
+ */
+ command(regs, COMMAND_FIFO_DISABLE, ep->num);
+ req = list_entry(ep->queue.next, struct goku_request, queue);
+ master = readl(&regs->dma_master) & MST_RW_BITS;
+
+ /* FIXME using these resets isn't usably documented. this may
+ * not work unless it's followed by disabling the endpoint.
+ *
+ * FIXME the OUT reset path doesn't even behave consistently.
+ */
+ if (ep->is_in) {
+ if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
+ goto finished;
+ curr = readl(&regs->in_dma_current);
+
+ writel(curr, &regs->in_dma_end);
+ writel(curr, &regs->in_dma_start);
+
+ master &= ~MST_R_BITS;
+ master |= MST_RD_RESET;
+ writel(master, &regs->dma_master);
+
+ if (readl(&regs->dma_master) & MST_RD_ENA)
+ DBG(ep->dev, "IN dma active after reset!\n");
+
+ } else {
+ if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
+ goto finished;
+ curr = readl(&regs->out_dma_current);
+
+ writel(curr, &regs->out_dma_end);
+ writel(curr, &regs->out_dma_start);
+
+ master &= ~MST_W_BITS;
+ master |= MST_WR_RESET;
+ writel(master, &regs->dma_master);
+
+ if (readl(&regs->dma_master) & MST_WR_ENA)
+ DBG(ep->dev, "OUT dma active after reset!\n");
+ }
+ req->req.actual = (curr - req->req.dma) + 1;
+ req->req.status = status;
+
+ VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
+ ep->is_in ? "IN" : "OUT",
+ req->req.actual, req->req.length);
+
+ command(regs, COMMAND_FIFO_ENABLE, ep->num);
+
+ return;
+
+finished:
+ /* dma already completed; no abort needed */
+ command(regs, COMMAND_FIFO_ENABLE, ep->num);
+ req->req.actual = req->req.length;
+ req->req.status = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct goku_request *req;
+ struct goku_ep *ep;
+ struct goku_udc *dev;
+ unsigned long flags;
+ int status;
+
+ /* always require a cpu-view buffer so pio works */
+ req = container_of(_req, struct goku_request, req);
+ if (unlikely(!_req || !_req->complete
+ || !_req->buf || !list_empty(&req->queue)))
+ return -EINVAL;
+ ep = container_of(_ep, struct goku_ep, ep);
+ if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0)))
+ return -EINVAL;
+ dev = ep->dev;
+ if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+
+ /* can't touch registers when suspended */
+ if (dev->ep0state == EP0_SUSPEND)
+ return -EBUSY;
+
+ /* set up dma mapping in case the caller didn't */
+ if (ep->dma) {
+ status = usb_gadget_map_request(&dev->gadget, &req->req,
+ ep->is_in);
+ if (status)
+ return status;
+ }
+
+#ifdef USB_TRACE
+ VDBG(dev, "%s queue req %p, len %u buf %p\n",
+ _ep->name, _req, _req->length, _req->buf);
+#endif
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /* for ep0 IN without premature status, zlp is required and
+ * writing EOP starts the status stage (OUT).
+ */
+ if (unlikely(ep->num == 0 && ep->is_in))
+ _req->zero = 1;
+
+ /* kickstart this i/o queue? */
+ status = 0;
+ if (list_empty(&ep->queue) && likely(!ep->stopped)) {
+ /* dma: done after dma completion IRQ (or error)
+ * pio: done after last fifo operation
+ */
+ if (ep->dma)
+ status = start_dma(ep, req);
+ else
+ status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
+
+ if (unlikely(status != 0)) {
+ if (status > 0)
+ status = 0;
+ req = NULL;
+ }
+
+ } /* else pio or dma irq handler advances the queue. */
+
+ if (likely(req != NULL))
+ list_add_tail(&req->queue, &ep->queue);
+
+ if (likely(!list_empty(&ep->queue))
+ && likely(ep->num != 0)
+ && !ep->dma
+ && !(dev->int_enable & INT_EPxDATASET (ep->num)))
+ pio_irq_enable(dev, dev->regs, ep->num);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* pci writes may still be posted */
+ return status;
+}
+
+/* dequeue ALL requests */
+static void nuke(struct goku_ep *ep, int status)
+{
+ struct goku_request *req;
+
+ ep->stopped = 1;
+ if (list_empty(&ep->queue))
+ return;
+ if (ep->dma)
+ abort_dma(ep, status);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct goku_request, queue);
+ done(ep, req, status);
+ }
+}
+
+/* dequeue JUST ONE request */
+static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct goku_request *req = NULL, *iter;
+ struct goku_ep *ep;
+ struct goku_udc *dev;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct goku_ep, ep);
+ if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver)
+ return -ESHUTDOWN;
+
+ /* we can't touch (dma) registers when suspended */
+ if (dev->ep0state == EP0_SUSPEND)
+ return -EBUSY;
+
+ VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
+ ep->is_in ? "IN" : "OUT",
+ ep->dma ? "dma" : "pio",
+ _req);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ spin_unlock_irqrestore (&dev->lock, flags);
+ return -EINVAL;
+ }
+
+ if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
+ abort_dma(ep, -ECONNRESET);
+ done(ep, req, -ECONNRESET);
+ dma_advance(dev, ep);
+ } else if (!list_empty(&req->queue))
+ done(ep, req, -ECONNRESET);
+ else
+ req = NULL;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return req ? 0 : -EOPNOTSUPP;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void goku_clear_halt(struct goku_ep *ep)
+{
+ // assert (ep->num !=0)
+ VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
+ command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
+ command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
+ if (ep->stopped) {
+ ep->stopped = 0;
+ if (ep->dma) {
+ struct goku_request *req;
+
+ if (list_empty(&ep->queue))
+ return;
+ req = list_entry(ep->queue.next, struct goku_request,
+ queue);
+ (void) start_dma(ep, req);
+ } else
+ pio_advance(ep);
+ }
+}
+
+static int goku_set_halt(struct usb_ep *_ep, int value)
+{
+ struct goku_ep *ep;
+ unsigned long flags;
+ int retval = 0;
+
+ if (!_ep)
+ return -ENODEV;
+ ep = container_of (_ep, struct goku_ep, ep);
+
+ if (ep->num == 0) {
+ if (value) {
+ ep->dev->ep0state = EP0_STALL;
+ ep->dev->ep[0].stopped = 1;
+ } else
+ return -EINVAL;
+
+ /* don't change EPxSTATUS_EP_INVALID to READY */
+ } else if (!ep->ep.desc) {
+ DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ if (!list_empty(&ep->queue))
+ retval = -EAGAIN;
+ else if (ep->is_in && value
+ /* data in (either) packet buffer? */
+ && (readl(&ep->dev->regs->DataSet)
+ & DATASET_AB(ep->num)))
+ retval = -EAGAIN;
+ else if (!value)
+ goku_clear_halt(ep);
+ else {
+ ep->stopped = 1;
+ VDBG(ep->dev, "%s set halt\n", ep->ep.name);
+ command(ep->dev->regs, COMMAND_STALL, ep->num);
+ readl(ep->reg_status);
+ }
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return retval;
+}
+
+static int goku_fifo_status(struct usb_ep *_ep)
+{
+ struct goku_ep *ep;
+ struct goku_udc_regs __iomem *regs;
+ u32 size;
+
+ if (!_ep)
+ return -ENODEV;
+ ep = container_of(_ep, struct goku_ep, ep);
+
+ /* size is only reported sanely for OUT */
+ if (ep->is_in)
+ return -EOPNOTSUPP;
+
+ /* ignores 16-byte dma buffer; SizeH == 0 */
+ regs = ep->dev->regs;
+ size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
+ size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
+ VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
+ return size;
+}
+
+static void goku_fifo_flush(struct usb_ep *_ep)
+{
+ struct goku_ep *ep;
+ struct goku_udc_regs __iomem *regs;
+ u32 size;
+
+ if (!_ep)
+ return;
+ ep = container_of(_ep, struct goku_ep, ep);
+ VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);
+
+ /* don't change EPxSTATUS_EP_INVALID to READY */
+ if (!ep->ep.desc && ep->num != 0) {
+ DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
+ return;
+ }
+
+ regs = ep->dev->regs;
+ size = readl(&regs->EPxSizeLA[ep->num]);
+ size &= DATASIZE;
+
+	/* Undesirable behavior: FIFO_CLEAR also clears the
+	 * endpoint halt feature.  For OUT, we _could_ just read
+	 * the bytes out (PIO, if !ep->dma); for IN, no choice.
+	 */
+ if (size)
+ command(regs, COMMAND_FIFO_CLEAR, ep->num);
+}
+
+static const struct usb_ep_ops goku_ep_ops = {
+ .enable = goku_ep_enable,
+ .disable = goku_ep_disable,
+
+ .alloc_request = goku_alloc_request,
+ .free_request = goku_free_request,
+
+ .queue = goku_queue,
+ .dequeue = goku_dequeue,
+
+ .set_halt = goku_set_halt,
+ .fifo_status = goku_fifo_status,
+ .fifo_flush = goku_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int goku_get_frame(struct usb_gadget *_gadget)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct usb_ep *goku_match_ep(struct usb_gadget *g,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ struct goku_udc *dev = to_goku_udc(g);
+ struct usb_ep *ep;
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_INT:
+ /* single buffering is enough */
+ ep = &dev->ep[3].ep;
+ if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+ return ep;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_endpoint_dir_in(desc)) {
+ /* DMA may be available */
+ ep = &dev->ep[2].ep;
+ if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+ return ep;
+ }
+ break;
+ default:
+ /* nothing */ ;
+ }
+
+ return NULL;
+}
+
+static int goku_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int goku_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops goku_ops = {
+ .get_frame = goku_get_frame,
+ .udc_start = goku_udc_start,
+ .udc_stop = goku_udc_stop,
+ .match_ep = goku_match_ep,
+ // no remote wakeup
+ // not selfpowered
+};
+
+/*-------------------------------------------------------------------------*/
+
+static inline const char *dmastr(void)
+{
+ if (use_dma == 0)
+ return "(dma disabled)";
+ else if (use_dma == 2)
+ return "(dma IN and OUT)";
+ else
+ return "(dma IN)";
+}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static const char proc_node_name [] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS FOURBITS FOURBITS
+
+static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
+{
+ /* int_status is the same format ... */
+ seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
+ label, mask,
+ (mask & INT_PWRDETECT) ? " power" : "",
+ (mask & INT_SYSERROR) ? " sys" : "",
+ (mask & INT_MSTRDEND) ? " in-dma" : "",
+ (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
+
+ (mask & INT_MSTWREND) ? " out-dma" : "",
+ (mask & INT_MSTWRSET) ? " wrset" : "",
+ (mask & INT_ERR) ? " err" : "",
+ (mask & INT_SOF) ? " sof" : "",
+
+ (mask & INT_EP3NAK) ? " ep3nak" : "",
+ (mask & INT_EP2NAK) ? " ep2nak" : "",
+ (mask & INT_EP1NAK) ? " ep1nak" : "",
+ (mask & INT_EP3DATASET) ? " ep3" : "",
+
+ (mask & INT_EP2DATASET) ? " ep2" : "",
+ (mask & INT_EP1DATASET) ? " ep1" : "",
+ (mask & INT_STATUSNAK) ? " ep0snak" : "",
+ (mask & INT_STATUS) ? " ep0status" : "",
+
+ (mask & INT_SETUP) ? " setup" : "",
+ (mask & INT_ENDPOINT0) ? " ep0" : "",
+ (mask & INT_USBRESET) ? " reset" : "",
+ (mask & INT_SUSPEND) ? " suspend" : "");
+}
+
+static const char *udc_ep_state(enum ep0state state)
+{
+ switch (state) {
+ case EP0_DISCONNECT:
+ return "ep0_disconnect";
+ case EP0_IDLE:
+ return "ep0_idle";
+ case EP0_IN:
+ return "ep0_in";
+ case EP0_OUT:
+ return "ep0_out";
+ case EP0_STATUS:
+ return "ep0_status";
+ case EP0_STALL:
+ return "ep0_stall";
+ case EP0_SUSPEND:
+ return "ep0_suspend";
+ }
+
+ return "ep0_?";
+}
+
+static const char *udc_ep_status(u32 status)
+{
+ switch (status & EPxSTATUS_EP_MASK) {
+ case EPxSTATUS_EP_READY:
+ return "ready";
+ case EPxSTATUS_EP_DATAIN:
+ return "packet";
+ case EPxSTATUS_EP_FULL:
+ return "full";
+ case EPxSTATUS_EP_TX_ERR: /* host will retry */
+ return "tx_err";
+ case EPxSTATUS_EP_RX_ERR:
+ return "rx_err";
+ case EPxSTATUS_EP_BUSY: /* ep0 only */
+ return "busy";
+ case EPxSTATUS_EP_STALL:
+ return "stall";
+ case EPxSTATUS_EP_INVALID: /* these "can't happen" */
+ return "invalid";
+ }
+
+ return "?";
+}
+
+static int udc_proc_read(struct seq_file *m, void *v)
+{
+ struct goku_udc *dev = m->private;
+ struct goku_udc_regs __iomem *regs = dev->regs;
+ unsigned long flags;
+ int i, is_usb_connected;
+ u32 tmp;
+
+ local_irq_save(flags);
+
+ /* basic device status */
+ tmp = readl(&regs->power_detect);
+ is_usb_connected = tmp & PW_DETECT;
+ seq_printf(m,
+ "%s - %s\n"
+ "%s version: %s %s\n"
+ "Gadget driver: %s\n"
+ "Host %s, %s\n"
+ "\n",
+ pci_name(dev->pdev), driver_desc,
+ driver_name, DRIVER_VERSION, dmastr(),
+ dev->driver ? dev->driver->driver.name : "(none)",
+ is_usb_connected
+ ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
+ : "disconnected",
+ udc_ep_state(dev->ep0state));
+
+ dump_intmask(m, "int_status", readl(&regs->int_status));
+ dump_intmask(m, "int_enable", readl(&regs->int_enable));
+
+ if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
+ goto done;
+
+ /* registers for (active) device and ep0 */
+ seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n",
+ dev->irqs, readl(&regs->DataSet),
+ readl(&regs->EPxSingle), readl(&regs->EPxBCS),
+ readl(&regs->UsbState),
+ readl(&regs->address));
+ if (seq_has_overflowed(m))
+ goto done;
+
+ tmp = readl(&regs->dma_master);
+ seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n",
+ tmp,
+ (tmp & MST_EOPB_DIS) ? " eopb-" : "",
+ (tmp & MST_EOPB_ENA) ? " eopb+" : "",
+ (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
+ (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
+
+ (tmp & MST_RD_EOPB) ? " eopb" : "",
+ (tmp & MST_RD_RESET) ? " in_reset" : "",
+ (tmp & MST_WR_RESET) ? " out_reset" : "",
+ (tmp & MST_RD_ENA) ? " IN" : "",
+
+ (tmp & MST_WR_ENA) ? " OUT" : "",
+ (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in");
+ if (seq_has_overflowed(m))
+ goto done;
+
+ /* dump endpoint queues */
+ for (i = 0; i < 4; i++) {
+ struct goku_ep *ep = &dev->ep [i];
+ struct goku_request *req;
+
+ if (i && !ep->ep.desc)
+ continue;
+
+ tmp = readl(ep->reg_status);
+ seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n",
+ ep->ep.name,
+ ep->is_in ? "in" : "out",
+ ep->ep.maxpacket,
+ ep->dma ? "dma" : "pio",
+ ep->irqs,
+ tmp, udc_ep_status(tmp),
+ (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
+ (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
+ (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
+ (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : "");
+ if (seq_has_overflowed(m))
+ goto done;
+
+ if (list_empty(&ep->queue)) {
+ seq_puts(m, "\t(nothing queued)\n");
+ if (seq_has_overflowed(m))
+ goto done;
+ continue;
+ }
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (ep->dma && req->queue.prev == &ep->queue) {
+ if (i == UDC_MSTRD_ENDPOINT)
+ tmp = readl(&regs->in_dma_current);
+ else
+ tmp = readl(&regs->out_dma_current);
+ tmp -= req->req.dma;
+ tmp++;
+ } else
+ tmp = req->req.actual;
+
+ seq_printf(m, "\treq %p len %u/%u buf %p\n",
+ &req->req, tmp, req->req.length,
+ req->req.buf);
+ if (seq_has_overflowed(m))
+ goto done;
+ }
+ }
+
+done:
+ local_irq_restore(flags);
+ return 0;
+}
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*-------------------------------------------------------------------------*/
+
+static void udc_reinit (struct goku_udc *dev)
+{
+ static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
+
+ unsigned i;
+
+ INIT_LIST_HEAD (&dev->gadget.ep_list);
+ dev->gadget.ep0 = &dev->ep [0].ep;
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ dev->ep0state = EP0_DISCONNECT;
+ dev->irqs = 0;
+
+ for (i = 0; i < 4; i++) {
+ struct goku_ep *ep = &dev->ep[i];
+
+ ep->num = i;
+ ep->ep.name = names[i];
+ ep->reg_fifo = &dev->regs->ep_fifo [i];
+ ep->reg_status = &dev->regs->ep_status [i];
+ ep->reg_mode = &dev->regs->ep_mode[i];
+
+ ep->ep.ops = &goku_ep_ops;
+ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+ ep->dev = dev;
+ INIT_LIST_HEAD (&ep->queue);
+
+ ep_reset(NULL, ep);
+
+ if (i == 0)
+ ep->ep.caps.type_control = true;
+ else
+ ep->ep.caps.type_bulk = true;
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ }
+
+ dev->ep[0].reg_mode = NULL;
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
+ list_del_init (&dev->ep[0].ep.ep_list);
+}
+
+static void udc_reset(struct goku_udc *dev)
+{
+ struct goku_udc_regs __iomem *regs = dev->regs;
+
+ writel(0, &regs->power_detect);
+ writel(0, &regs->int_enable);
+ readl(&regs->int_enable);
+ dev->int_enable = 0;
+
+ /* deassert reset, leave USB D+ at hi-Z (no pullup)
+ * don't let INT_PWRDETECT sequence begin
+ */
+ udelay(250);
+ writel(PW_RESETB, &regs->power_detect);
+ readl(&regs->int_enable);
+}
+
+static void ep0_start(struct goku_udc *dev)
+{
+ struct goku_udc_regs __iomem *regs = dev->regs;
+ unsigned i;
+
+ VDBG(dev, "%s\n", __func__);
+
+ udc_reset(dev);
+ udc_reinit (dev);
+ //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
+
+ /* hw handles set_address, set_feature, get_status; maybe more */
+ writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
+ | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
+ | G_REQMODE_GET_DESC
+ | G_REQMODE_CLEAR_FEAT
+ , &regs->reqmode);
+
+ for (i = 0; i < 4; i++)
+ dev->ep[i].irqs = 0;
+
+ /* can't modify descriptors after writing UsbReady */
+ for (i = 0; i < DESC_LEN; i++)
+ writel(0, &regs->descriptors[i]);
+ writel(0, &regs->UsbReady);
+
+ /* expect ep0 requests when the host drops reset */
+ writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
+ dev->int_enable = INT_DEVWIDE | INT_EP0;
+ writel(dev->int_enable, &dev->regs->int_enable);
+ readl(&regs->int_enable);
+ dev->gadget.speed = USB_SPEED_FULL;
+ dev->ep0state = EP0_IDLE;
+}
+
+static void udc_enable(struct goku_udc *dev)
+{
+ /* start enumeration now, or after power detect irq */
+ if (readl(&dev->regs->power_detect) & PW_DETECT)
+ ep0_start(dev);
+ else {
+ DBG(dev, "%s\n", __func__);
+ dev->int_enable = INT_PWRDETECT;
+ writel(dev->int_enable, &dev->regs->int_enable);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* keeping it simple:
+ * - one bus driver, initted first;
+ * - one function driver, initted second
+ */
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests. then usb traffic follows until a
+ * disconnect is reported. then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int goku_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct goku_udc *dev = to_goku_udc(g);
+
+ /* hook up the driver */
+ dev->driver = driver;
+
+ /*
+ * then enable host detection and ep0; and we're ready
+ * for set_configuration as well as eventual disconnect.
+ */
+ udc_enable(dev);
+
+ return 0;
+}
+
+static void stop_activity(struct goku_udc *dev)
+{
+ unsigned i;
+
+ DBG (dev, "%s\n", __func__);
+
+	/* disconnect gadget driver after quiescing hw and the driver */
+ udc_reset (dev);
+ for (i = 0; i < 4; i++)
+ nuke(&dev->ep [i], -ESHUTDOWN);
+
+ if (dev->driver)
+ udc_enable(dev);
+}
+
+static int goku_udc_stop(struct usb_gadget *g)
+{
+ struct goku_udc *dev = to_goku_udc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->driver = NULL;
+ stop_activity(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ep0_setup(struct goku_udc *dev)
+{
+ struct goku_udc_regs __iomem *regs = dev->regs;
+ struct usb_ctrlrequest ctrl;
+ int tmp;
+
+ /* read SETUP packet and enter DATA stage */
+ ctrl.bRequestType = readl(&regs->bRequestType);
+ ctrl.bRequest = readl(&regs->bRequest);
+ ctrl.wValue = cpu_to_le16((readl(&regs->wValueH) << 8)
+ | readl(&regs->wValueL));
+ ctrl.wIndex = cpu_to_le16((readl(&regs->wIndexH) << 8)
+ | readl(&regs->wIndexL));
+ ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
+ | readl(&regs->wLengthL));
+ writel(0, &regs->SetupRecv);
+
+ nuke(&dev->ep[0], 0);
+ dev->ep[0].stopped = 0;
+ if (likely(ctrl.bRequestType & USB_DIR_IN)) {
+ dev->ep[0].is_in = 1;
+ dev->ep0state = EP0_IN;
+ /* detect early status stages */
+ writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
+ } else {
+ dev->ep[0].is_in = 0;
+ dev->ep0state = EP0_OUT;
+
+ /* NOTE: CLEAR_FEATURE is done in software so that we can
+ * synchronize transfer restarts after bulk IN stalls. data
+ * won't even enter the fifo until the halt is cleared.
+ */
+ switch (ctrl.bRequest) {
+ case USB_REQ_CLEAR_FEATURE:
+ switch (ctrl.bRequestType) {
+ case USB_RECIP_ENDPOINT:
+ tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
+ /* active endpoint */
+ if (tmp > 3 ||
+ (!dev->ep[tmp].ep.desc && tmp != 0))
+ goto stall;
+ if (ctrl.wIndex & cpu_to_le16(
+ USB_DIR_IN)) {
+ if (!dev->ep[tmp].is_in)
+ goto stall;
+ } else {
+ if (dev->ep[tmp].is_in)
+ goto stall;
+ }
+ if (ctrl.wValue != cpu_to_le16(
+ USB_ENDPOINT_HALT))
+ goto stall;
+ if (tmp)
+ goku_clear_halt(&dev->ep[tmp]);
+succeed:
+ /* start ep0out status stage */
+ writel(~(1<<0), &regs->EOP);
+ dev->ep[0].stopped = 1;
+ dev->ep0state = EP0_STATUS;
+ return;
+ case USB_RECIP_DEVICE:
+ /* device remote wakeup: always clear */
+ if (ctrl.wValue != cpu_to_le16(1))
+ goto stall;
+ VDBG(dev, "clear dev remote wakeup\n");
+ goto succeed;
+ case USB_RECIP_INTERFACE:
+ goto stall;
+ default: /* pass to gadget driver */
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+#ifdef USB_TRACE
+ VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bRequestType, ctrl.bRequest,
+ le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
+ le16_to_cpu(ctrl.wLength));
+#endif
+
+ /* hw wants to know when we're configured (or not) */
+ dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
+ && ctrl.bRequestType == USB_RECIP_DEVICE);
+ if (unlikely(dev->req_config))
+ dev->configured = (ctrl.wValue != cpu_to_le16(0));
+
+ /* delegate everything to the gadget driver.
+ * it may respond after this irq handler returns.
+ */
+ spin_unlock (&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &ctrl);
+ spin_lock (&dev->lock);
+ if (unlikely(tmp < 0)) {
+stall:
+#ifdef USB_TRACE
+ VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
+ ctrl.bRequestType, ctrl.bRequest, tmp);
+#endif
+ command(regs, COMMAND_STALL, 0);
+ dev->ep[0].stopped = 1;
+ dev->ep0state = EP0_STALL;
+ }
+
+ /* expect at least one data or status stage irq */
+}
+
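+/* acknowledge an irq by writing 0 to its int_status bit */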
+#define ACK(irqbit) do { \
+	stat &= ~irqbit; \
+	writel(~irqbit, &regs->int_status); \
+	handled = 1; \
+	} while (0)
+
+static irqreturn_t goku_irq(int irq, void *_dev)
+{
+ struct goku_udc *dev = _dev;
+ struct goku_udc_regs __iomem *regs = dev->regs;
+ struct goku_ep *ep;
+ u32 stat, handled = 0;
+ unsigned i, rescans = 5;
+
+ spin_lock(&dev->lock);
+
+rescan:
+ stat = readl(&regs->int_status) & dev->int_enable;
+ if (!stat)
+ goto done;
+ dev->irqs++;
+
+ /* device-wide irqs */
+ if (unlikely(stat & INT_DEVWIDE)) {
+ if (stat & INT_SYSERROR) {
+ ERROR(dev, "system error\n");
+ stop_activity(dev);
+ stat = 0;
+ handled = 1;
+ // FIXME have a neater way to prevent re-enumeration
+ dev->driver = NULL;
+ goto done;
+ }
+ if (stat & INT_PWRDETECT) {
+ writel(~stat, &regs->int_status);
+ if (readl(&dev->regs->power_detect) & PW_DETECT) {
+ VDBG(dev, "connect\n");
+ ep0_start(dev);
+ } else {
+ DBG(dev, "disconnect\n");
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ stop_activity(dev);
+ dev->ep0state = EP0_DISCONNECT;
+ dev->int_enable = INT_DEVWIDE;
+ writel(dev->int_enable, &dev->regs->int_enable);
+ }
+ stat = 0;
+ handled = 1;
+ goto done;
+ }
+ if (stat & INT_SUSPEND) {
+ ACK(INT_SUSPEND);
+ if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
+ switch (dev->ep0state) {
+ case EP0_DISCONNECT:
+ case EP0_SUSPEND:
+ goto pm_next;
+ default:
+ break;
+ }
+ DBG(dev, "USB suspend\n");
+ dev->ep0state = EP0_SUSPEND;
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+ dev->driver->suspend(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ } else {
+ if (dev->ep0state != EP0_SUSPEND) {
+ DBG(dev, "bogus USB resume %d\n",
+ dev->ep0state);
+ goto pm_next;
+ }
+ DBG(dev, "USB resume\n");
+ dev->ep0state = EP0_IDLE;
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->resume) {
+ spin_unlock(&dev->lock);
+ dev->driver->resume(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ }
+ }
+pm_next:
+ if (stat & INT_USBRESET) { /* hub reset done */
+ ACK(INT_USBRESET);
+ INFO(dev, "USB reset done, gadget %s\n",
+ dev->driver->driver.name);
+ }
+ // and INT_ERR on some endpoint's crc/bitstuff/... problem
+ }
+
+ /* progress ep0 setup, data, or status stages.
+ * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
+ */
+ if (stat & INT_SETUP) {
+ ACK(INT_SETUP);
+ dev->ep[0].irqs++;
+ ep0_setup(dev);
+ }
+ if (stat & INT_STATUSNAK) {
+ ACK(INT_STATUSNAK|INT_ENDPOINT0);
+ if (dev->ep0state == EP0_IN) {
+ ep = &dev->ep[0];
+ ep->irqs++;
+ nuke(ep, 0);
+ writel(~(1<<0), &regs->EOP);
+ dev->ep0state = EP0_STATUS;
+ }
+ }
+ if (stat & INT_ENDPOINT0) {
+ ACK(INT_ENDPOINT0);
+ ep = &dev->ep[0];
+ ep->irqs++;
+ pio_advance(ep);
+ }
+
+ /* dma completion */
+ if (stat & INT_MSTRDEND) { /* IN */
+ ACK(INT_MSTRDEND);
+ ep = &dev->ep[UDC_MSTRD_ENDPOINT];
+ ep->irqs++;
+ dma_advance(dev, ep);
+ }
+ if (stat & INT_MSTWREND) { /* OUT */
+ ACK(INT_MSTWREND);
+ ep = &dev->ep[UDC_MSTWR_ENDPOINT];
+ ep->irqs++;
+ dma_advance(dev, ep);
+ }
+ if (stat & INT_MSTWRTMOUT) { /* OUT */
+ ACK(INT_MSTWRTMOUT);
+ ep = &dev->ep[UDC_MSTWR_ENDPOINT];
+ ep->irqs++;
+ ERROR(dev, "%s write timeout ?\n", ep->ep.name);
+ // reset dma? then dma_advance()
+ }
+
+ /* pio */
+ for (i = 1; i < 4; i++) {
+ u32 tmp = INT_EPxDATASET(i);
+
+ if (!(stat & tmp))
+ continue;
+ ep = &dev->ep[i];
+ pio_advance(ep);
+ if (list_empty (&ep->queue))
+ pio_irq_disable(dev, regs, i);
+ stat &= ~tmp;
+ handled = 1;
+ ep->irqs++;
+ }
+
+ if (rescans--)
+ goto rescan;
+
+done:
+ (void)readl(&regs->int_enable);
+ spin_unlock(&dev->lock);
+ if (stat)
+ DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
+ readl(&regs->int_status), dev->int_enable);
+ return IRQ_RETVAL(handled);
+}
+
+#undef ACK
+
+/*-------------------------------------------------------------------------*/
+
+static void gadget_release(struct device *_dev)
+{
+ struct goku_udc *dev = dev_get_drvdata(_dev);
+
+ kfree(dev);
+}
+
+/* tear down the binding between this driver and the pci device */
+
+static void goku_remove(struct pci_dev *pdev)
+{
+ struct goku_udc *dev = pci_get_drvdata(pdev);
+
+ DBG(dev, "%s\n", __func__);
+
+ usb_del_gadget_udc(&dev->gadget);
+
+ BUG_ON(dev->driver);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ remove_proc_entry(proc_node_name, NULL);
+#endif
+ if (dev->regs)
+ udc_reset(dev);
+ if (dev->got_irq)
+ free_irq(pdev->irq, dev);
+ if (dev->regs)
+ iounmap(dev->regs);
+ if (dev->got_region)
+ release_mem_region(pci_resource_start (pdev, 0),
+ pci_resource_len (pdev, 0));
+ if (dev->enabled)
+ pci_disable_device(pdev);
+
+ dev->regs = NULL;
+
+ INFO(dev, "unbind\n");
+}
+
+/* wrap this driver around the specified pci device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+
+static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct goku_udc *dev = NULL;
+ unsigned long resource, len;
+ void __iomem *base = NULL;
+ int retval;
+
+ if (!pdev->irq) {
+ printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
+ retval = -ENODEV;
+ goto err;
+ }
+
+ /* alloc, and start init */
+ dev = kzalloc (sizeof *dev, GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ spin_lock_init(&dev->lock);
+ dev->pdev = pdev;
+ dev->gadget.ops = &goku_ops;
+ dev->gadget.max_speed = USB_SPEED_FULL;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev->gadget.name = driver_name;
+
+ /* now all the pci goodies ... */
+ retval = pci_enable_device(pdev);
+ if (retval < 0) {
+ DBG(dev, "can't enable, %d\n", retval);
+ goto err;
+ }
+ dev->enabled = 1;
+
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!request_mem_region(resource, len, driver_name)) {
+ DBG(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto err;
+ }
+ dev->got_region = 1;
+
+ base = ioremap(resource, len);
+ if (base == NULL) {
+ DBG(dev, "can't map memory\n");
+ retval = -EFAULT;
+ goto err;
+ }
+ dev->regs = (struct goku_udc_regs __iomem *) base;
+
+ INFO(dev, "%s\n", driver_desc);
+ INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
+ INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+
+ /* init to known state, then setup irqs */
+ udc_reset(dev);
+ udc_reinit (dev);
+ if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
+ driver_name, dev) != 0) {
+ DBG(dev, "request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+ goto err;
+ }
+ dev->got_irq = 1;
+ if (use_dma)
+ pci_set_master(pdev);
+
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ proc_create_single_data(proc_node_name, 0, NULL, udc_proc_read, dev);
+#endif
+
+ retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
+ gadget_release);
+ if (retval)
+ goto err;
+
+ return 0;
+
+err:
+ if (dev)
+ goku_remove (pdev);
+ /* gadget_release is not registered yet, kfree explicitly */
+ kfree(dev);
+ return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static const struct pci_device_id pci_ids[] = { {
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = ~0,
+ .vendor = 0x102f, /* Toshiba */
+ .device = 0x0107, /* this UDC */
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+
+}, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE (pci, pci_ids);
+
+static struct pci_driver goku_pci_driver = {
+ .name = driver_name,
+ .id_table = pci_ids,
+
+ .probe = goku_probe,
+ .remove = goku_remove,
+
+ /* FIXME add power management support */
+};
+
+module_pci_driver(goku_pci_driver);
diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
new file mode 100644
index 000000000..70023d401
--- /dev/null
+++ b/drivers/usb/gadget/udc/goku_udc.h
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
+ *
+ * Copyright (C) 2000-2002 Lineo
+ * by Stuart Lynne, Tom Rushworth, and Bruce Balden
+ * Copyright (C) 2002 Toshiba Corporation
+ * Copyright (C) 2003 MontaVista Software (source@mvista.com)
+ */
+
+/*
+ * PCI BAR 0 points to these registers.
+ */
+struct goku_udc_regs {
+ /* irq management */
+ u32 int_status; /* 0x000 */
+ u32 int_enable;
+#define INT_SUSPEND 0x00001 /* or resume */
+#define INT_USBRESET 0x00002
+#define INT_ENDPOINT0 0x00004
+#define INT_SETUP 0x00008
+#define INT_STATUS 0x00010
+#define INT_STATUSNAK 0x00020
+#define INT_EPxDATASET(n) (0x00020 << (n)) /* 0 < n < 4 */
+# define INT_EP1DATASET 0x00040
+# define INT_EP2DATASET 0x00080
+# define INT_EP3DATASET 0x00100
+#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */
+# define INT_EP1NAK 0x00200
+# define INT_EP2NAK 0x00400
+# define INT_EP3NAK 0x00800
+#define INT_SOF 0x01000
+#define INT_ERR 0x02000
+#define INT_MSTWRSET 0x04000
+#define INT_MSTWREND 0x08000
+#define INT_MSTWRTMOUT 0x10000
+#define INT_MSTRDEND 0x20000
+#define INT_SYSERROR 0x40000
+#define INT_PWRDETECT 0x80000
+
+#define INT_DEVWIDE \
+ (INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
+#define INT_EP0 \
+ (INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
+
+ u32 dma_master;
+#define MST_EOPB_DIS 0x0800
+#define MST_EOPB_ENA 0x0400
+#define MST_TIMEOUT_DIS 0x0200
+#define MST_TIMEOUT_ENA 0x0100
+#define MST_RD_EOPB 0x0080 /* write-only */
+#define MST_RD_RESET 0x0040
+#define MST_WR_RESET 0x0020
+#define MST_RD_ENA 0x0004 /* 1:start, 0:ignore */
+#define MST_WR_ENA 0x0002 /* 1:start, 0:ignore */
+#define MST_CONNECTION 0x0001 /* 0 for ep1out/ep2in */
+
+#define MST_R_BITS (MST_EOPB_DIS|MST_EOPB_ENA \
+ |MST_RD_ENA|MST_RD_RESET)
+#define MST_W_BITS (MST_TIMEOUT_DIS|MST_TIMEOUT_ENA \
+ |MST_WR_ENA|MST_WR_RESET)
+#define MST_RW_BITS (MST_R_BITS|MST_W_BITS \
+ |MST_CONNECTION)
+
+/* these values assume (dma_master & MST_CONNECTION) == 0 */
+#define UDC_MSTWR_ENDPOINT 1
+#define UDC_MSTRD_ENDPOINT 2
+
+ /* dma master write */
+ u32 out_dma_start;
+ u32 out_dma_end;
+ u32 out_dma_current;
+
+ /* dma master read */
+ u32 in_dma_start;
+ u32 in_dma_end;
+ u32 in_dma_current;
+
+ u32 power_detect;
+#define PW_DETECT 0x04
+#define PW_RESETB 0x02
+#define PW_PULLUP 0x01
+
+ u8 _reserved0 [0x1d8];
+
+ /* endpoint registers */
+ u32 ep_fifo [4]; /* 0x200 */
+ u8 _reserved1 [0x10];
+ u32 ep_mode [4]; /* only 1-3 valid */
+ u8 _reserved2 [0x10];
+
+ u32 ep_status [4];
+#define EPxSTATUS_TOGGLE 0x40
+#define EPxSTATUS_SUSPEND 0x20
+#define EPxSTATUS_EP_MASK (0x07<<2)
+# define EPxSTATUS_EP_READY (0<<2)
+# define EPxSTATUS_EP_DATAIN (1<<2)
+# define EPxSTATUS_EP_FULL (2<<2)
+# define EPxSTATUS_EP_TX_ERR (3<<2)
+# define EPxSTATUS_EP_RX_ERR (4<<2)
+# define EPxSTATUS_EP_BUSY (5<<2)
+# define EPxSTATUS_EP_STALL (6<<2)
+# define EPxSTATUS_EP_INVALID (7<<2)
+#define EPxSTATUS_FIFO_DISABLE 0x02
+#define EPxSTATUS_STAGE_ERROR 0x01
+
+ u8 _reserved3 [0x10];
+ u32 EPxSizeLA[4];
+#define PACKET_ACTIVE (1<<7)
+#define DATASIZE 0x7f
+ u8 _reserved3a [0x10];
+ u32 EPxSizeLB[4]; /* only 1,2 valid */
+ u8 _reserved3b [0x10];
+ u32 EPxSizeHA[4]; /* only 1-3 valid */
+ u8 _reserved3c [0x10];
+ u32 EPxSizeHB[4]; /* only 1,2 valid */
+ u8 _reserved4[0x30];
+
+ /* SETUP packet contents */
+ u32 bRequestType; /* 0x300 */
+ u32 bRequest;
+ u32 wValueL;
+ u32 wValueH;
+ u32 wIndexL;
+ u32 wIndexH;
+ u32 wLengthL;
+ u32 wLengthH;
+
+ /* command interaction/handshaking */
+ u32 SetupRecv; /* 0x320 */
+ u32 CurrConfig;
+ u32 StdRequest;
+ u32 Request;
+ u32 DataSet;
+#define DATASET_A(epnum) (1<<(2*(epnum)))
+#define DATASET_B(epnum) (2<<(2*(epnum)))
+#define DATASET_AB(epnum) (3<<(2*(epnum)))
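+/* e.g. DATASET_AB(1) == 0x0c covers both buffer-status bits for ep1 */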
+ u8 _reserved5[4];
+
+ u32 UsbState;
+#define USBSTATE_CONFIGURED 0x04
+#define USBSTATE_ADDRESSED 0x02
+#define USBSTATE_DEFAULT 0x01
+
+ u32 EOP;
+
+ u32 Command; /* 0x340 */
+#define COMMAND_SETDATA0 2
+#define COMMAND_RESET 3
+#define COMMAND_STALL 4
+#define COMMAND_INVALID 5
+#define COMMAND_FIFO_DISABLE 7
+#define COMMAND_FIFO_ENABLE 8
+#define COMMAND_INIT_DESCRIPTOR 9
+#define COMMAND_FIFO_CLEAR 10 /* also stall */
+#define COMMAND_STALL_CLEAR 11
+#define COMMAND_EP(n) ((n) << 4)
+
+ u32 EPxSingle;
+ u8 _reserved6[4];
+ u32 EPxBCS;
+ u8 _reserved7[8];
+ u32 IntControl;
+#define ICONTROL_STATUSNAK 1
+ u8 _reserved8[4];
+
+ u32 reqmode; // 0x360 standard request mode, low 8 bits
+#define G_REQMODE_SET_INTF (1<<7)
+#define G_REQMODE_GET_INTF (1<<6)
+#define G_REQMODE_SET_CONF (1<<5)
+#define G_REQMODE_GET_CONF (1<<4)
+#define G_REQMODE_GET_DESC (1<<3)
+#define G_REQMODE_SET_FEAT (1<<2)
+#define G_REQMODE_CLEAR_FEAT (1<<1)
+#define G_REQMODE_GET_STATUS (1<<0)
+
+ u32 ReqMode;
+ u8 _reserved9[0x18];
+ u32 PortStatus; /* 0x380 */
+ u8 _reserved10[8];
+ u32 address;
+ u32 buff_test;
+ u8 _reserved11[4];
+ u32 UsbReady;
+ u8 _reserved12[4];
+ u32 SetDescStall; /* 0x3a0 */
+ u8 _reserved13[0x45c];
+
+ /* hardware could handle limited GET_DESCRIPTOR duties */
+#define DESC_LEN 0x80
+ u32 descriptors[DESC_LEN]; /* 0x800 */
+ u8 _reserved14[0x600];
+
+} __attribute__ ((packed));
+
+#define MAX_FIFO_SIZE 64
+#define MAX_EP0_SIZE 8 /* ep0 fifo is bigger, though */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+
+struct goku_ep {
+ struct usb_ep ep;
+ struct goku_udc *dev;
+ unsigned long irqs;
+
+ unsigned num:8,
+ dma:1,
+ is_in:1,
+ stopped:1;
+
+ /* analogous to a host-side qh */
+ struct list_head queue;
+
+ u32 __iomem *reg_fifo;
+ u32 __iomem *reg_mode;
+ u32 __iomem *reg_status;
+};
+
+struct goku_request {
+ struct usb_request req;
+ struct list_head queue;
+
+ unsigned mapped:1;
+};
+
+enum ep0state {
+ EP0_DISCONNECT, /* no host */
+ EP0_IDLE, /* between STATUS ack and SETUP report */
+ EP0_IN, EP0_OUT, /* data stage */
+ EP0_STATUS, /* status stage */
+ EP0_STALL, /* data or status stages */
+ EP0_SUSPEND, /* usb suspend */
+};
+
+struct goku_udc {
+ /* each pci device provides one gadget, several endpoints */
+ struct usb_gadget gadget;
+ spinlock_t lock;
+ struct goku_ep ep[4];
+ struct usb_gadget_driver *driver;
+
+ enum ep0state ep0state;
+ unsigned got_irq:1,
+ got_region:1,
+ req_config:1,
+ configured:1,
+ enabled:1;
+
+ /* pci state used to access those endpoints */
+ struct pci_dev *pdev;
+ struct goku_udc_regs __iomem *regs;
+ u32 int_enable;
+
+ /* statistics... */
+ unsigned long irqs;
+};
+#define to_goku_udc(g) (container_of((g), struct goku_udc, gadget))
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(dev,level,fmt,args...) \
+ printk(level "%s %s: " fmt , driver_name , \
+ pci_name(dev->pdev) , ## args)
+
+#ifdef DEBUG
+#define DBG(dev,fmt,args...) \
+ xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev,fmt,args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(dev,fmt,args...) \
+ do { } while (0)
+#endif /* VERBOSE */
+
+#define ERROR(dev,fmt,args...) \
+ xprintk(dev , KERN_ERR , fmt , ## args)
+#define WARNING(dev,fmt,args...) \
+ xprintk(dev , KERN_WARNING , fmt , ## args)
+#define INFO(dev,fmt,args...) \
+ xprintk(dev , KERN_INFO , fmt , ## args)
+
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
new file mode 100644
index 000000000..097625599
--- /dev/null
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -0,0 +1,2258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
+ *
+ * 2013 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRUSBDC USB Device Controller cores available in the
+ * GRLIB VHDL IP core library.
+ *
+ * Full documentation of the GRUSBDC core can be found here:
+ * https://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * Contributors:
+ * - Andreas Larsson <andreas@gaisler.com>
+ * - Marko Isomaki
+ */
+
+/*
+ * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
+ * individually configurable to any of the four USB transfer types. This driver
+ * only supports cores in DMA mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/byteorder.h>
+
+#include "gr_udc.h"
+
+#define DRIVER_NAME "gr_udc"
+#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
+
+static const char driver_name[] = DRIVER_NAME;
+
+#define gr_read32(x) (ioread32be((x)))
+#define gr_write32(x, v) (iowrite32be((v), (x)))
+
+/* USB speed and corresponding string calculated from status register value */
+#define GR_SPEED(status) \
+ ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
+#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))
+
+/* Size of hardware buffer calculated from epctrl register value */
+#define GR_BUFFER_SIZE(epctrl) \
+ ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
+ GR_EPCTRL_BUFSZ_SCALER)
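+
+/*
+ * Illustration (made-up value): a BUFSZ field of 128 in epctrl corresponds
+ * to a 128 * 8 = 1024 byte hardware buffer, i.e.
+ * GR_BUFFER_SIZE(128 << GR_EPCTRL_BUFSZ_POS) == 1024.
+ */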
+
+/* ---------------------------------------------------------------------- */
+/* Debug printout functionality */
+
+static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
+
+static const char *gr_ep0state_string(enum gr_ep0state state)
+{
+ static const char *const names[] = {
+ [GR_EP0_DISCONNECT] = "disconnect",
+ [GR_EP0_SETUP] = "setup",
+ [GR_EP0_IDATA] = "idata",
+ [GR_EP0_ODATA] = "odata",
+ [GR_EP0_ISTATUS] = "istatus",
+ [GR_EP0_OSTATUS] = "ostatus",
+ [GR_EP0_STALL] = "stall",
+ [GR_EP0_SUSPEND] = "suspend",
+ };
+
+ if (state < 0 || state >= ARRAY_SIZE(names))
+ return "UNKNOWN";
+
+ return names[state];
+}
+
+#ifdef VERBOSE_DEBUG
+
+static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
+ struct gr_request *req)
+{
+ int buflen = ep->is_in ? req->req.length : req->req.actual;
+ int rowlen = 32;
+ int plen = min(rowlen, buflen);
+
+ dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
+ (buflen > plen ? " (truncated)" : ""));
+ print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
+ rowlen, 4, req->req.buf, plen, false);
+}
+
+static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index, u16 length)
+{
+ dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
+ type, request, value, index, length);
+}
+#else /* !VERBOSE_DEBUG */
+
+static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
+ struct gr_request *req) {}
+
+static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index, u16 length) {}
+
+#endif /* VERBOSE_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+/* Debugfs functionality */
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+
+static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
+{
+ u32 epctrl = gr_read32(&ep->regs->epctrl);
+ u32 epstat = gr_read32(&ep->regs->epstat);
+ int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
+ struct gr_request *req;
+
+ seq_printf(seq, "%s:\n", ep->ep.name);
+ seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
+ seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
+ seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
+ seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
+ seq_printf(seq, " dma_start = %d\n", ep->dma_start);
+ seq_printf(seq, " stopped = %d\n", ep->stopped);
+ seq_printf(seq, " wedged = %d\n", ep->wedged);
+ seq_printf(seq, " callback = %d\n", ep->callback);
+ seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
+ seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
+ seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
+ if (mode == 1 || mode == 3)
+ seq_printf(seq, " nt = %d\n",
+ (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
+
+ seq_printf(seq, " Buffer 0: %s %s%d\n",
+ epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
+ epstat & GR_EPSTAT_BS ? " " : "selected ",
+ (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
+ seq_printf(seq, " Buffer 1: %s %s%d\n",
+ epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
+ epstat & GR_EPSTAT_BS ? "selected " : " ",
+ (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
+
+ if (list_empty(&ep->queue)) {
+ seq_puts(seq, " Queue: empty\n\n");
+ return;
+ }
+
+ seq_puts(seq, " Queue:\n");
+ list_for_each_entry(req, &ep->queue, queue) {
+ struct gr_dma_desc *desc;
+ struct gr_dma_desc *next;
+
+ seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
+ &req->req.buf, req->req.actual, req->req.length);
+
+ next = req->first_desc;
+ do {
+ desc = next;
+ next = desc->next_desc;
+ seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
+ desc == req->curr_desc ? 'c' : ' ',
+ desc, desc->paddr, desc->ctrl, desc->data);
+ } while (desc != req->last_desc);
+ }
+ seq_puts(seq, "\n");
+}
+
+static int gr_dfs_show(struct seq_file *seq, void *v)
+{
+ struct gr_udc *dev = seq->private;
+ u32 control = gr_read32(&dev->regs->control);
+ u32 status = gr_read32(&dev->regs->status);
+ struct gr_ep *ep;
+
+ seq_printf(seq, "usb state = %s\n",
+ usb_state_string(dev->gadget.state));
+ seq_printf(seq, "address = %d\n",
+ (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
+ seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
+ seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
+ seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
+ seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
+ seq_printf(seq, "test_mode = %d\n", dev->test_mode);
+ seq_puts(seq, "\n");
+
+ list_for_each_entry(ep, &dev->ep_list, ep_list)
+ gr_seq_ep_show(seq, ep);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(gr_dfs);
+
+static void gr_dfs_create(struct gr_udc *dev)
+{
+ const char *name = "gr_udc_state";
+ struct dentry *root;
+
+ root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
+ debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
+}
+
+static void gr_dfs_delete(struct gr_udc *dev)
+{
+ debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
+}
+
+#else /* !CONFIG_USB_GADGET_DEBUG_FS */
+
+static void gr_dfs_create(struct gr_udc *dev) {}
+static void gr_dfs_delete(struct gr_udc *dev) {}
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FS */
+
+/* ---------------------------------------------------------------------- */
+/* DMA and request handling */
+
+/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
+static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
+{
+ dma_addr_t paddr;
+ struct gr_dma_desc *dma_desc;
+
+ dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
+ if (!dma_desc) {
+ dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
+ return NULL;
+ }
+
+ dma_desc->paddr = paddr;
+
+ return dma_desc;
+}
+
+static inline void gr_free_dma_desc(struct gr_udc *dev,
+ struct gr_dma_desc *desc)
+{
+ dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
+}
+
+/* Frees the chain of struct gr_dma_desc for the given request */
+static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
+{
+ struct gr_dma_desc *desc;
+ struct gr_dma_desc *next;
+
+ next = req->first_desc;
+ if (!next)
+ return;
+
+ do {
+ desc = next;
+ next = desc->next_desc;
+ gr_free_dma_desc(dev, desc);
+ } while (desc != req->last_desc);
+
+ req->first_desc = NULL;
+ req->curr_desc = NULL;
+ req->last_desc = NULL;
+}
+
+static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
+
+/*
+ * Frees allocated resources and calls the appropriate completion
+ * function or setup packet handler for a finished request.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
+ int status)
+ __releases(&dev->lock)
+ __acquires(&dev->lock)
+{
+ struct gr_udc *dev;
+
+ list_del_init(&req->queue);
+
+ if (likely(req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
+ gr_free_dma_desc_chain(dev, req);
+
+ if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
+ req->req.actual = req->req.length;
+ } else if (req->oddlen && req->req.actual > req->evenlen) {
+ /*
+ * Copy to user buffer in this case where length was not evenly
+ * divisible by ep->ep.maxpacket and the last descriptor was
+ * actually used.
+ */
+ char *buftail = ((char *)req->req.buf + req->evenlen);
+
+ memcpy(buftail, ep->tailbuf, req->oddlen);
+
+ if (req->req.actual > req->req.length) {
+ /* We got more data than was requested */
+ dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
+ ep->ep.name);
+ gr_dbgprint_request("OVFL", ep, req);
+ req->req.status = -EOVERFLOW;
+ }
+ }
+
+ if (!status) {
+ if (ep->is_in)
+ gr_dbgprint_request("SENT", ep, req);
+ else
+ gr_dbgprint_request("RECV", ep, req);
+ }
+
+ /* Prevent changes to ep->queue during callback */
+ ep->callback = 1;
+ if (req == dev->ep0reqo && !status) {
+ if (req->setup)
+ gr_ep0_setup(dev, req);
+ else
+ dev_err(dev->dev,
+ "Unexpected non setup packet on ep0in\n");
+ } else if (req->req.complete) {
+ spin_unlock(&dev->lock);
+
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+
+ spin_lock(&dev->lock);
+ }
+ ep->callback = 0;
+}
+
+static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct gr_request *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+/*
+ * Starts DMA for endpoint ep if there are requests in the queue.
+ *
+ * Must be called with dev->lock held and with !ep->stopped.
+ */
+static void gr_start_dma(struct gr_ep *ep)
+{
+ struct gr_request *req;
+ u32 dmactrl;
+
+ if (list_empty(&ep->queue)) {
+ ep->dma_start = 0;
+ return;
+ }
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+
+ /* A descriptor should already have been allocated */
+ BUG_ON(!req->curr_desc);
+
+ /*
+	 * The DMA controller cannot handle OUT buffers smaller than
+	 * ep->ep.maxpacket. That could lead to buffer overruns if an
+	 * unexpectedly long packet is received. Therefore an internal
+	 * bounce buffer is used when such a request is enabled.
+ */
+ if (!ep->is_in && req->oddlen)
+ req->last_desc->data = ep->tailbuf_paddr;
+
+ wmb(); /* Make sure all is settled before handing it over to DMA */
+
+ /* Set the descriptor pointer in the hardware */
+ gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);
+
+ /* Announce available descriptors */
+ dmactrl = gr_read32(&ep->regs->dmactrl);
+ gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);
+
+ ep->dma_start = 1;
+}
+
+/*
+ * Finishes the first request in the ep's queue and, if available, starts the
+ * next request in queue.
+ *
+ * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+ */
+static void gr_dma_advance(struct gr_ep *ep, int status)
+{
+ struct gr_request *req;
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ gr_finish_request(ep, req, status);
+ gr_start_dma(ep); /* Regardless of ep->dma_start */
+}
+
+/*
+ * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
+ * transfer to be canceled and clears GR_DMACTRL_DA.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_abort_dma(struct gr_ep *ep)
+{
+ u32 dmactrl;
+
+ dmactrl = gr_read32(&ep->regs->dmactrl);
+ gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
+}
+
+/*
+ * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
+ * chain.
+ *
+ * Size is not used for OUT endpoints. The hardware cannot be instructed to
+ * handle a buffer smaller than MAXPL in the OUT direction.
+ */
+static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
+ dma_addr_t data, unsigned size, gfp_t gfp_flags)
+{
+ struct gr_dma_desc *desc;
+
+ desc = gr_alloc_dma_desc(ep, gfp_flags);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->data = data;
+ if (ep->is_in)
+ desc->ctrl =
+ (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
+ else
+ desc->ctrl = GR_DESC_OUT_CTRL_IE;
+
+ if (!req->first_desc) {
+ req->first_desc = desc;
+ req->curr_desc = desc;
+ } else {
+ req->last_desc->next_desc = desc;
+ req->last_desc->next = desc->paddr;
+ req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
+ }
+ req->last_desc = desc;
+
+ return 0;
+}
+
+/*
+ * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
+ * together cover req->req.length bytes of the buffer at DMA address
+ * req->req.dma for the OUT direction.
+ *
+ * The first descriptor in the chain is enabled, the rest disabled. The
+ * interrupt handler will later enable them one by one when needed so we can
+ * find out when the transfer is finished. For OUT endpoints, all descriptors
+ * therefore generate interrupts.
+ */
+static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
+ gfp_t gfp_flags)
+{
+ u16 bytes_left; /* Bytes left to provide descriptors for */
+ u16 bytes_used; /* Bytes accommodated for */
+ int ret = 0;
+
+ req->first_desc = NULL; /* Signals that no allocation is done yet */
+ bytes_left = req->req.length;
+ bytes_used = 0;
+ while (bytes_left > 0) {
+ dma_addr_t start = req->req.dma + bytes_used;
+ u16 size = min(bytes_left, ep->bytes_per_buffer);
+
+ if (size < ep->bytes_per_buffer) {
+ /* Prepare using bounce buffer */
+ req->evenlen = req->req.length - bytes_left;
+ req->oddlen = size;
+ }
+
+ ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
+ if (ret)
+ goto alloc_err;
+
+ bytes_left -= size;
+ bytes_used += size;
+ }
+
+ req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
+
+ return 0;
+
+alloc_err:
+ gr_free_dma_desc_chain(ep->dev, req);
+
+ return ret;
+}
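+
+/*
+ * Worked example for the loop above (illustrative numbers only): with
+ * req.length = 100 and ep->bytes_per_buffer = 64 we get two descriptors, one
+ * for bytes 0..63 and one for the 36 byte tail. Since 36 < 64, evenlen = 64
+ * and oddlen = 36, and gr_start_dma() will point the last descriptor at the
+ * bounce buffer. Only the first descriptor has GR_DESC_OUT_CTRL_EN set here.
+ */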
+
+/*
+ * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
+ * together cover req->req.length bytes of the buffer at DMA address
+ * req->req.dma for the IN direction.
+ *
+ * When more data is provided than the maximum payload size, the hardware
+ * splits it up into several payloads automatically. Moreover,
+ * ep->bytes_per_buffer is always set to a multiple of the maximum payload
+ * (restricted to the valid number of maximum payloads during high bandwidth
+ * isochronous or interrupt transfers).
+ *
+ * All descriptors are enabled from the beginning and we only generate an
+ * interrupt for the last one indicating that the entire request has been pushed
+ * to hardware.
+ */
+static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
+ gfp_t gfp_flags)
+{
+ u16 bytes_left; /* Bytes left in req to provide descriptors for */
+ u16 bytes_used; /* Bytes in req accommodated for */
+ int ret = 0;
+
+ req->first_desc = NULL; /* Signals that no allocation is done yet */
+ bytes_left = req->req.length;
+ bytes_used = 0;
+ do { /* Allow for zero length packets */
+ dma_addr_t start = req->req.dma + bytes_used;
+ u16 size = min(bytes_left, ep->bytes_per_buffer);
+
+ ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
+ if (ret)
+ goto alloc_err;
+
+ bytes_left -= size;
+ bytes_used += size;
+ } while (bytes_left > 0);
+
+ /*
+ * Send an extra zero length packet to indicate that no more data is
+	 * available when req->req.zero is set and the data length is an even
+	 * multiple of ep->ep.maxpacket.
+ */
+ if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
+ ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
+ if (ret)
+ goto alloc_err;
+ }
+
+ /*
+ * For IN packets we only want to know when the last packet has been
+ * transmitted (not just put into internal buffers).
+ */
+ req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;
+
+ return 0;
+
+alloc_err:
+ gr_free_dma_desc_chain(ep->dev, req);
+
+ return ret;
+}
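+
+/*
+ * Worked example (illustrative numbers only): req.length = 128 with
+ * ep->bytes_per_buffer = 64, maxpacket = 64 and req.zero set gives two
+ * 64 byte descriptors plus one zero-length descriptor, and only that final
+ * descriptor gets GR_DESC_IN_CTRL_PI set.
+ */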
+
+/* Must be called with dev->lock held */
+static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
+{
+ struct gr_udc *dev = ep->dev;
+ int ret;
+
+ if (unlikely(!ep->ep.desc && ep->num != 0)) {
+ dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
+ return -EINVAL;
+ }
+
+ if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
+ dev_err(dev->dev,
+ "Invalid request for %s: buf=%p list_empty=%d\n",
+ ep->ep.name, req->req.buf, list_empty(&req->queue));
+ return -EINVAL;
+ }
+
+ if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+ dev_err(dev->dev, "-ESHUTDOWN");
+ return -ESHUTDOWN;
+ }
+
+ /* Can't touch registers when suspended */
+ if (dev->ep0state == GR_EP0_SUSPEND) {
+ dev_err(dev->dev, "-EBUSY");
+ return -EBUSY;
+ }
+
+ /* Set up DMA mapping in case the caller didn't */
+ ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
+ if (ret) {
+ dev_err(dev->dev, "usb_gadget_map_request");
+ return ret;
+ }
+
+ if (ep->is_in)
+ ret = gr_setup_in_desc_list(ep, req, gfp_flags);
+ else
+ ret = gr_setup_out_desc_list(ep, req, gfp_flags);
+ if (ret)
+ return ret;
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ list_add_tail(&req->queue, &ep->queue);
+
+ /* Start DMA if not started, otherwise interrupt handler handles it */
+ if (!ep->dma_start && likely(!ep->stopped))
+ gr_start_dma(ep);
+
+ return 0;
+}
+
+/*
+ * Queue a request from within the driver.
+ *
+ * Must be called with dev->lock held.
+ */
+static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
+ gfp_t gfp_flags)
+{
+ if (ep->is_in)
+ gr_dbgprint_request("RESP", ep, req);
+
+ return gr_queue(ep, req, gfp_flags);
+}
+
+/* ---------------------------------------------------------------------- */
+/* General helper functions */
+
+/*
+ * Dequeue ALL requests.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static void gr_ep_nuke(struct gr_ep *ep)
+{
+ struct gr_request *req;
+
+ ep->stopped = 1;
+ ep->dma_start = 0;
+ gr_abort_dma(ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ gr_finish_request(ep, req, -ESHUTDOWN);
+ }
+}
+
+/*
+ * Reset the hardware state of this endpoint.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_ep_reset(struct gr_ep *ep)
+{
+ gr_write32(&ep->regs->epctrl, 0);
+ gr_write32(&ep->regs->dmactrl, 0);
+
+ ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+ ep->dma_start = 0;
+}
+
+/*
+ * Generate STALL on ep0in/out.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_control_stall(struct gr_udc *dev)
+{
+ u32 epctrl;
+
+ epctrl = gr_read32(&dev->epo[0].regs->epctrl);
+ gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
+ epctrl = gr_read32(&dev->epi[0].regs->epctrl);
+ gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
+
+ dev->ep0state = GR_EP0_STALL;
+}
+
+/*
+ * Halts, halts and wedges, or clears halt for an endpoint.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
+{
+ u32 epctrl;
+ int retval = 0;
+
+ if (ep->num && !ep->ep.desc)
+ return -EINVAL;
+
+ if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+ return -EOPNOTSUPP;
+
+ /* Never actually halt ep0, and therefore never clear halt for ep0 */
+ if (!ep->num) {
+ if (halt && !fromhost) {
+ /* ep0 halt from gadget - generate protocol stall */
+ gr_control_stall(ep->dev);
+ dev_dbg(ep->dev->dev, "EP: stall ep0\n");
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
+ (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
+
+ epctrl = gr_read32(&ep->regs->epctrl);
+ if (halt) {
+ /* Set HALT */
+ gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
+ ep->stopped = 1;
+ if (wedge)
+ ep->wedged = 1;
+ } else {
+ gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
+ ep->stopped = 0;
+ ep->wedged = 0;
+
+ /* Things might have been queued up in the meantime */
+ if (!ep->dma_start)
+ gr_start_dma(ep);
+ }
+
+ return retval;
+}
+
+/* Must be called with dev->lock held */
+static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
+{
+ if (dev->ep0state != value)
+ dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
+ gr_ep0state_string(value));
+ dev->ep0state = value;
+}
+
+/*
+ * Should only be called when endpoints cannot generate interrupts.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
+{
+ gr_write32(&dev->regs->control, 0);
+ wmb(); /* Make sure that we do not deny one of our interrupts */
+ dev->irq_enabled = 0;
+}
+
+/*
+ * Stop all device activity and disable data line pullup.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static void gr_stop_activity(struct gr_udc *dev)
+{
+ struct gr_ep *ep;
+
+ list_for_each_entry(ep, &dev->ep_list, ep_list)
+ gr_ep_nuke(ep);
+
+ gr_disable_interrupts_and_pullup(dev);
+
+ gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+ usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
+}
+
+/* ---------------------------------------------------------------------- */
+/* ep0 setup packet handling */
+
+static void gr_ep0_testmode_complete(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct gr_ep *ep;
+ struct gr_udc *dev;
+ u32 control;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ dev = ep->dev;
+
+ spin_lock(&dev->lock);
+
+ control = gr_read32(&dev->regs->control);
+ control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
+ gr_write32(&dev->regs->control, control);
+
+ spin_unlock(&dev->lock);
+}
+
+static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
+{
+ /* Nothing needs to be done here */
+}
+
+/*
+ * Queue a response on ep0in.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
+ void (*complete)(struct usb_ep *ep,
+ struct usb_request *req))
+{
+ u8 *reqbuf = dev->ep0reqi->req.buf;
+ int status;
+ int i;
+
+ for (i = 0; i < length; i++)
+ reqbuf[i] = buf[i];
+ dev->ep0reqi->req.length = length;
+ dev->ep0reqi->req.complete = complete;
+
+ status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
+ if (status < 0)
+ dev_err(dev->dev,
+ "Could not queue ep0in setup response: %d\n", status);
+
+ return status;
+}
+
+/*
+ * Queue a 2 byte response on ep0in.
+ *
+ * Must be called with dev->lock held.
+ */
+static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
+{
+ __le16 le_response = cpu_to_le16(response);
+
+ return gr_ep0_respond(dev, (u8 *)&le_response, 2,
+ gr_ep0_dummy_complete);
+}
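+
+/*
+ * Illustration: USB multi-byte fields are little-endian on the wire, so a
+ * GET_STATUS self-powered reply of 0x0001 is sent as the bytes 01 00; the
+ * cpu_to_le16() above guarantees that also on big-endian CPUs.
+ */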
+
+/*
+ * Queue a ZLP response on ep0in.
+ *
+ * Must be called with dev->lock held.
+ */
+static inline int gr_ep0_respond_empty(struct gr_udc *dev)
+{
+ return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
+}
+
+/*
+ * This is run when a SET_ADDRESS request is received. It first writes the
+ * new address to the control register, which the hardware takes into use
+ * internally when the next IN packet is ACKed.
+ *
+ * Must be called with dev->lock held.
+ */
+static void gr_set_address(struct gr_udc *dev, u8 address)
+{
+ u32 control;
+
+ control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
+ control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
+ control |= GR_CONTROL_SU;
+ gr_write32(&dev->regs->control, control);
+}
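+
+/*
+ * Illustration (made-up address): gr_set_address(dev, 5) sets the UA field
+ * to 5 << GR_CONTROL_UA_POS = 0x0a and adds GR_CONTROL_SU, so the core
+ * starts using the new address only after the next IN packet is ACKed.
+ */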
+
+/*
+ * Returns negative for STALL, 0 for successful handling and positive for
+ * delegation.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index)
+{
+ u16 response;
+ u8 test;
+
+ switch (request) {
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
+ gr_set_address(dev, value & 0xff);
+ if (value)
+ usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
+ return gr_ep0_respond_empty(dev);
+
+ case USB_REQ_GET_STATUS:
+ /* Self powered | remote wakeup */
+ response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
+ return gr_ep0_respond_u16(dev, response);
+
+ case USB_REQ_SET_FEATURE:
+ switch (value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ /* Allow remote wakeup */
+ dev->remote_wakeup = 1;
+ return gr_ep0_respond_empty(dev);
+
+ case USB_DEVICE_TEST_MODE:
+ /* The hardware does not support USB_TEST_FORCE_ENABLE */
+ test = index >> 8;
+ if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
+ dev->test_mode = test;
+ return gr_ep0_respond(dev, NULL, 0,
+ gr_ep0_testmode_complete);
+ }
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ /* Disallow remote wakeup */
+ dev->remote_wakeup = 0;
+ return gr_ep0_respond_empty(dev);
+ }
+ break;
+ }
+
+ return 1; /* Delegate the rest */
+}
+
+/*
+ * Returns negative for STALL, 0 for successful handling and positive for
+ * delegation.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index)
+{
+ if (dev->gadget.state != USB_STATE_CONFIGURED)
+ return -1;
+
+ /*
+	 * Should return STALL for invalid interfaces, but the UDC driver does
+	 * not know anything about interface numbers. However, many gadget
+	 * drivers do not handle GET_STATUS, so we need to take care of that.
+ */
+
+ switch (request) {
+ case USB_REQ_GET_STATUS:
+ return gr_ep0_respond_u16(dev, 0x0000);
+
+ case USB_REQ_SET_FEATURE:
+ case USB_REQ_CLEAR_FEATURE:
+ /*
+ * No possible valid standard requests. Still let gadget drivers
+ * have a go at it.
+ */
+ break;
+ }
+
+ return 1; /* Delegate the rest */
+}
+
+/*
+ * Returns negative for STALL, 0 for successful handling and positive for
+ * delegation.
+ *
+ * Must be called with dev->lock held.
+ */
+static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
+ u16 value, u16 index)
+{
+ struct gr_ep *ep;
+ int status;
+ int halted;
+ u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
+ u8 is_in = index & USB_ENDPOINT_DIR_MASK;
+
+ if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
+ return -1;
+
+ if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
+ return -1;
+
+ ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);
+
+ switch (request) {
+ case USB_REQ_GET_STATUS:
+ halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
+ return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);
+
+ case USB_REQ_SET_FEATURE:
+ switch (value) {
+ case USB_ENDPOINT_HALT:
+ status = gr_ep_halt_wedge(ep, 1, 0, 1);
+ if (status >= 0)
+ status = gr_ep0_respond_empty(dev);
+ return status;
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (value) {
+ case USB_ENDPOINT_HALT:
+ if (ep->wedged)
+ return -1;
+ status = gr_ep_halt_wedge(ep, 0, 0, 1);
+ if (status >= 0)
+ status = gr_ep0_respond_empty(dev);
+ return status;
+ }
+ break;
+ }
+
+ return 1; /* Delegate the rest */
+}
+
+/* Must be called with dev->lock held */
+static void gr_ep0out_requeue(struct gr_udc *dev)
+{
+ int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
+
+ if (ret)
+ dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
+ ret);
+}
+
+/*
+ * The main function dealing with setup requests on ep0.
+ *
+ * Must be called with dev->lock held and irqs disabled
+ */
+static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
+ __releases(&dev->lock)
+ __acquires(&dev->lock)
+{
+ union {
+ struct usb_ctrlrequest ctrl;
+ u8 raw[8];
+ u32 word[2];
+ } u;
+ u8 type;
+ u8 request;
+ u16 value;
+ u16 index;
+ u16 length;
+ int i;
+ int status;
+
+ /* Restore from ep0 halt */
+ if (dev->ep0state == GR_EP0_STALL) {
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ if (!req->req.actual)
+ goto out;
+ }
+
+ if (dev->ep0state == GR_EP0_ISTATUS) {
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ if (req->req.actual > 0)
+ dev_dbg(dev->dev,
+ "Unexpected setup packet at state %s\n",
+ gr_ep0state_string(GR_EP0_ISTATUS));
+ else
+ goto out; /* Got expected ZLP */
+ } else if (dev->ep0state != GR_EP0_SETUP) {
+ dev_info(dev->dev,
+ "Unexpected ep0out request at state %s - stalling\n",
+ gr_ep0state_string(dev->ep0state));
+ gr_control_stall(dev);
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ goto out;
+ } else if (!req->req.actual) {
+ dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
+ gr_ep0state_string(dev->ep0state));
+ goto out;
+ }
+
+ /* Handle SETUP packet */
+ for (i = 0; i < req->req.actual; i++)
+ u.raw[i] = ((u8 *)req->req.buf)[i];
+
+ type = u.ctrl.bRequestType;
+ request = u.ctrl.bRequest;
+ value = le16_to_cpu(u.ctrl.wValue);
+ index = le16_to_cpu(u.ctrl.wIndex);
+ length = le16_to_cpu(u.ctrl.wLength);
+
+ gr_dbgprint_devreq(dev, type, request, value, index, length);
+
+ /* Check for data stage */
+ if (length) {
+ if (type & USB_DIR_IN)
+ gr_set_ep0state(dev, GR_EP0_IDATA);
+ else
+ gr_set_ep0state(dev, GR_EP0_ODATA);
+ }
+
+ status = 1; /* Positive status flags delegation */
+ if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (type & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = gr_device_request(dev, type, request,
+ value, index);
+ break;
+ case USB_RECIP_ENDPOINT:
+ status = gr_endpoint_request(dev, type, request,
+ value, index);
+ break;
+ case USB_RECIP_INTERFACE:
+ status = gr_interface_request(dev, type, request,
+ value, index);
+ break;
+ }
+ }
+
+ if (status > 0) {
+ spin_unlock(&dev->lock);
+
+ dev_vdbg(dev->dev, "DELEGATE\n");
+ status = dev->driver->setup(&dev->gadget, &u.ctrl);
+
+ spin_lock(&dev->lock);
+ }
+
+ /* Generate STALL on both ep0out and ep0in if requested */
+ if (unlikely(status < 0)) {
+ dev_vdbg(dev->dev, "STALL\n");
+ gr_control_stall(dev);
+ }
+
+ if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
+ request == USB_REQ_SET_CONFIGURATION) {
+ if (!value) {
+ dev_dbg(dev->dev, "STATUS: deconfigured\n");
+ usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
+ } else if (status >= 0) {
+			/* Not configured unless the gadget accepts it */
+ dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
+ usb_gadget_set_state(&dev->gadget,
+ USB_STATE_CONFIGURED);
+ }
+ }
+
+ /* Get ready for next stage */
+ if (dev->ep0state == GR_EP0_ODATA)
+ gr_set_ep0state(dev, GR_EP0_OSTATUS);
+ else if (dev->ep0state == GR_EP0_IDATA)
+ gr_set_ep0state(dev, GR_EP0_ISTATUS);
+ else
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+
+out:
+ gr_ep0out_requeue(dev);
+}
+
+/* ---------------------------------------------------------------------- */
+/* VBUS and USB reset handling */
+
+/* Must be called with dev->lock held and irqs disabled */
+static void gr_vbus_connected(struct gr_udc *dev, u32 status)
+{
+ u32 control;
+
+ dev->gadget.speed = GR_SPEED(status);
+ usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);
+
+ /* Turn on full interrupts and pullup */
+ control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
+ GR_CONTROL_SP | GR_CONTROL_EP);
+ gr_write32(&dev->regs->control, control);
+}
+
+/* Must be called with dev->lock held */
+static void gr_enable_vbus_detect(struct gr_udc *dev)
+{
+ u32 status;
+
+ dev->irq_enabled = 1;
+ wmb(); /* Make sure we do not ignore an interrupt */
+ gr_write32(&dev->regs->control, GR_CONTROL_VI);
+
+	/* Take care of the case where we are already plugged in at this point */
+ status = gr_read32(&dev->regs->status);
+ if (status & GR_STATUS_VB)
+ gr_vbus_connected(dev, status);
+}
+
+/* Must be called with dev->lock held and irqs disabled */
+static void gr_vbus_disconnected(struct gr_udc *dev)
+{
+ gr_stop_activity(dev);
+
+ /* Report disconnect */
+ if (dev->driver && dev->driver->disconnect) {
+ spin_unlock(&dev->lock);
+
+ dev->driver->disconnect(&dev->gadget);
+
+ spin_lock(&dev->lock);
+ }
+
+ gr_enable_vbus_detect(dev);
+}
+
+/* Must be called with dev->lock held and irqs disabled */
+static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
+{
+ gr_set_address(dev, 0);
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
+ dev->gadget.speed = GR_SPEED(status);
+
+ gr_ep_nuke(&dev->epo[0]);
+ gr_ep_nuke(&dev->epi[0]);
+ dev->epo[0].stopped = 0;
+ dev->epi[0].stopped = 0;
+ gr_ep0out_requeue(dev);
+}
+
+/* ---------------------------------------------------------------------- */
+/* Irq handling */
+
+/*
+ * Handles interrupts from in endpoints. Returns whether something was handled.
+ *
+ * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+ */
+static int gr_handle_in_ep(struct gr_ep *ep)
+{
+ struct gr_request *req;
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ if (!req->last_desc)
+ return 0;
+
+ if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ return 0; /* Not put in hardware buffers yet */
+
+ if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
+ return 0; /* Not transmitted yet, still in hardware buffers */
+
+ /* Write complete */
+ gr_dma_advance(ep, 0);
+
+ return 1;
+}
+
+/*
+ * Handles interrupts from out endpoints. Returns whether something was handled.
+ *
+ * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+ */
+static int gr_handle_out_ep(struct gr_ep *ep)
+{
+ u32 ep_dmactrl;
+ u32 ctrl;
+ u16 len;
+ struct gr_request *req;
+ struct gr_udc *dev = ep->dev;
+
+ req = list_first_entry(&ep->queue, struct gr_request, queue);
+ if (!req->curr_desc)
+ return 0;
+
+ ctrl = READ_ONCE(req->curr_desc->ctrl);
+ if (ctrl & GR_DESC_OUT_CTRL_EN)
+ return 0; /* Not received yet */
+
+ /* Read complete */
+ len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
+ req->req.actual += len;
+ if (ctrl & GR_DESC_OUT_CTRL_SE)
+ req->setup = 1;
+
+ if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
+ /* Short packet or >= expected size - we are done */
+
+ if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
+ /*
+ * Send a status stage ZLP to ack the DATA stage in the
+ * OUT direction. This needs to be done before
+ * gr_dma_advance as that can lead to a call to
+ * ep0_setup that can change dev->ep0state.
+ */
+ gr_ep0_respond_empty(dev);
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ }
+
+ gr_dma_advance(ep, 0);
+ } else {
+ /* Not done yet. Enable the next descriptor to receive more. */
+ req->curr_desc = req->curr_desc->next_desc;
+ req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
+
+ ep_dmactrl = gr_read32(&ep->regs->dmactrl);
+ gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
+ }
+
+ return 1;
+}
+
+/*
+ * Handle state changes. Returns whether something was handled.
+ *
+ * Must be called with dev->lock held and irqs disabled.
+ */
+static int gr_handle_state_changes(struct gr_udc *dev)
+{
+ u32 status = gr_read32(&dev->regs->status);
+ int handled = 0;
+ int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
+ dev->gadget.state == USB_STATE_ATTACHED);
+
+ /* VBUS valid detected */
+ if (!powstate && (status & GR_STATUS_VB)) {
+ dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
+ gr_vbus_connected(dev, status);
+ handled = 1;
+ }
+
+ /* Disconnect */
+ if (powstate && !(status & GR_STATUS_VB)) {
+ dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
+ gr_vbus_disconnected(dev);
+ handled = 1;
+ }
+
+ /* USB reset detected */
+ if (status & GR_STATUS_UR) {
+ dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
+ GR_SPEED_STR(status));
+ gr_write32(&dev->regs->status, GR_STATUS_UR);
+ gr_udc_usbreset(dev, status);
+ handled = 1;
+ }
+
+ /* Speed change */
+ if (dev->gadget.speed != GR_SPEED(status)) {
+ dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
+ GR_SPEED_STR(status));
+ dev->gadget.speed = GR_SPEED(status);
+ handled = 1;
+ }
+
+ /* Going into suspend */
+ if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
+ dev_dbg(dev->dev, "STATUS: USB suspend\n");
+ gr_set_ep0state(dev, GR_EP0_SUSPEND);
+ dev->suspended_from = dev->gadget.state;
+ usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);
+
+ if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
+ dev->driver && dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+
+ dev->driver->suspend(&dev->gadget);
+
+ spin_lock(&dev->lock);
+ }
+ handled = 1;
+ }
+
+ /* Coming out of suspend */
+ if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
+ dev_dbg(dev->dev, "STATUS: USB resume\n");
+ if (dev->suspended_from == USB_STATE_POWERED)
+ gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+ else
+ gr_set_ep0state(dev, GR_EP0_SETUP);
+ usb_gadget_set_state(&dev->gadget, dev->suspended_from);
+
+ if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
+ dev->driver && dev->driver->resume) {
+ spin_unlock(&dev->lock);
+
+ dev->driver->resume(&dev->gadget);
+
+ spin_lock(&dev->lock);
+ }
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/* Non-interrupt context irq handler */
+static irqreturn_t gr_irq_handler(int irq, void *_dev)
+{
+ struct gr_udc *dev = _dev;
+ struct gr_ep *ep;
+ int handled = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (!dev->irq_enabled)
+ goto out;
+
+ /*
+ * Check IN ep interrupts. We check these before the OUT eps because
+	 * some gadgets reuse a request that may currently be outstanding and
+	 * needs to be completed (mainly setup requests).
+ */
+ for (i = 0; i < dev->nepi; i++) {
+ ep = &dev->epi[i];
+ if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
+ handled = gr_handle_in_ep(ep) || handled;
+ }
+
+ /* Check OUT ep interrupts */
+ for (i = 0; i < dev->nepo; i++) {
+ ep = &dev->epo[i];
+ if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
+ handled = gr_handle_out_ep(ep) || handled;
+ }
+
+ /* Check status interrupts */
+ handled = gr_handle_state_changes(dev) || handled;
+
+ /*
+ * Check AMBA DMA errors. Only check if we didn't find anything else to
+ * handle because this shouldn't happen if we did everything right.
+ */
+ if (!handled) {
+ list_for_each_entry(ep, &dev->ep_list, ep_list) {
+ if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
+ dev_err(dev->dev,
+ "AMBA Error occurred for %s\n",
+ ep->ep.name);
+ handled = 1;
+ }
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* Interrupt context irq handler */
+static irqreturn_t gr_irq(int irq, void *_dev)
+{
+ struct gr_udc *dev = _dev;
+
+ if (!dev->irq_enabled)
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
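+
+/*
+ * Note: gr_irq() (hard irq context) and gr_irq_handler() (thread context)
+ * are registered as a threaded pair via devm_request_threaded_irq() in
+ * gr_request_irq() below.
+ */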
+
+/* ---------------------------------------------------------------------- */
+/* USB ep ops */
+
+/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
+static int gr_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct gr_udc *dev;
+ struct gr_ep *ep;
+ u8 mode;
+ u8 nt;
+ u16 max;
+ u16 buffer_size = 0;
+ u32 epctrl;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ dev = ep->dev;
+
+ /* 'ep0' IN and OUT are reserved */
+ if (ep == &dev->epo[0] || ep == &dev->epi[0])
+ return -EINVAL;
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* Make sure we are clear for enabling */
+ epctrl = gr_read32(&ep->regs->epctrl);
+ if (epctrl & GR_EPCTRL_EV)
+ return -EBUSY;
+
+ /* Check that directions match */
+ if (!ep->is_in != !usb_endpoint_dir_in(desc))
+ return -EINVAL;
+
+ /* Check ep num */
+ if ((!ep->is_in && ep->num >= dev->nepo) ||
+ (ep->is_in && ep->num >= dev->nepi))
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_control(desc)) {
+ mode = 0;
+ } else if (usb_endpoint_xfer_isoc(desc)) {
+ mode = 1;
+ } else if (usb_endpoint_xfer_bulk(desc)) {
+ mode = 2;
+ } else if (usb_endpoint_xfer_int(desc)) {
+ mode = 3;
+ } else {
+ dev_err(dev->dev, "Unknown transfer type for %s\n",
+ ep->ep.name);
+ return -EINVAL;
+ }
+
+ /*
+ * Bits 10-0 set the max payload. 12-11 set the number of
+ * additional transactions.
+ */
+ max = usb_endpoint_maxp(desc);
+ nt = usb_endpoint_maxp_mult(desc) - 1;
+ buffer_size = GR_BUFFER_SIZE(epctrl);
+ if (nt && (mode == 0 || mode == 2)) {
+ dev_err(dev->dev,
+ "%s mode: multiple trans./microframe not valid\n",
+ (mode == 2 ? "Bulk" : "Control"));
+ return -EINVAL;
+ } else if (nt == 0x3) {
+ dev_err(dev->dev,
+ "Invalid value 0x3 for additional trans./microframe\n");
+ return -EINVAL;
+ } else if ((nt + 1) * max > buffer_size) {
+ dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
+ buffer_size, (nt + 1), max);
+ return -EINVAL;
+ } else if (max == 0) {
+ dev_err(dev->dev, "Max payload cannot be set to 0\n");
+ return -EINVAL;
+ } else if (max > ep->ep.maxpacket_limit) {
+ dev_err(dev->dev, "Requested max payload %d > limit %d\n",
+ max, ep->ep.maxpacket_limit);
+ return -EINVAL;
+ }
+
+ spin_lock(&ep->dev->lock);
+
+ if (!ep->stopped) {
+ spin_unlock(&ep->dev->lock);
+ return -EBUSY;
+ }
+
+ ep->stopped = 0;
+ ep->wedged = 0;
+ ep->ep.desc = desc;
+ ep->ep.maxpacket = max;
+ ep->dma_start = 0;
+
+ if (nt) {
+ /*
+ * Maximum possible size of all payloads in one microframe
+ * regardless of direction when using high-bandwidth mode.
+ */
+ ep->bytes_per_buffer = (nt + 1) * max;
+ } else if (ep->is_in) {
+ /*
+ * The biggest multiple of maximum packet size that fits into
+ * the buffer. The hardware will split up into many packets in
+ * the IN direction.
+ */
+ ep->bytes_per_buffer = (buffer_size / max) * max;
+ } else {
+ /*
+		 * Only single packets will be placed in the buffers in the
+		 * OUT direction.
+ */
+ ep->bytes_per_buffer = max;
+ }
+
+ epctrl = (max << GR_EPCTRL_MAXPL_POS)
+ | (nt << GR_EPCTRL_NT_POS)
+ | (mode << GR_EPCTRL_TT_POS)
+ | GR_EPCTRL_EV;
+ if (ep->is_in)
+ epctrl |= GR_EPCTRL_PI;
+ gr_write32(&ep->regs->epctrl, epctrl);
+
+ gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
+
+ spin_unlock(&ep->dev->lock);
+
+ dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
+ ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
+ return 0;
+}
+
+/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
+static int gr_ep_disable(struct usb_ep *_ep)
+{
+ struct gr_ep *ep;
+ struct gr_udc *dev;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ if (!_ep || !ep->ep.desc)
+ return -ENODEV;
+
+ dev = ep->dev;
+
+ /* 'ep0' IN and OUT are reserved */
+ if (ep == &dev->epo[0] || ep == &dev->epi[0])
+ return -EINVAL;
+
+ if (dev->ep0state == GR_EP0_SUSPEND)
+ return -EBUSY;
+
+ dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ gr_ep_nuke(ep);
+ gr_ep_reset(ep);
+ ep->ep.desc = NULL;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Frees a request, but not any DMA buffers associated with it
+ * (gr_finish_request should already have taken care of that).
+ */
+static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct gr_request *req;
+
+ if (!_ep || !_req)
+ return;
+ req = container_of(_req, struct gr_request, req);
+
+ /* Leads to memory leak */
+ WARN(!list_empty(&req->queue),
+ "request not dequeued properly before freeing\n");
+
+ kfree(req);
+}
+
+/* Queue a request from the gadget */
+static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct gr_ep *ep;
+ struct gr_request *req;
+ struct gr_udc *dev;
+ int ret;
+
+ if (unlikely(!_ep || !_req))
+ return -EINVAL;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ req = container_of(_req, struct gr_request, req);
+ dev = ep->dev;
+
+ spin_lock(&ep->dev->lock);
+
+ /*
+ * The ep0 pointer in the gadget struct is used both for ep0in and
+ * ep0out. In a data stage in the out direction ep0out needs to be used
+ * instead of the default ep0in. Completion functions might use
+ * driver_data, so that needs to be copied as well.
+ */
+ if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
+ ep = &dev->epo[0];
+ ep->ep.driver_data = dev->epi[0].ep.driver_data;
+ }
+
+ if (ep->is_in)
+ gr_dbgprint_request("EXTERN", ep, req);
+
+ ret = gr_queue(ep, req, GFP_ATOMIC);
+
+ spin_unlock(&ep->dev->lock);
+
+ return ret;
+}
+
+/* Dequeue JUST ONE request */
+static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct gr_request *req = NULL, *iter;
+ struct gr_ep *ep;
+ struct gr_udc *dev;
+ int ret = 0;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct gr_ep, ep);
+ if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver)
+ return -ESHUTDOWN;
+
+ /* We can't touch (DMA) registers when suspended */
+ if (dev->ep0state == GR_EP0_SUSPEND)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* Make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
+ /* This request is currently being processed */
+ gr_abort_dma(ep);
+ if (ep->stopped)
+ gr_finish_request(ep, req, -ECONNRESET);
+ else
+ gr_dma_advance(ep, -ECONNRESET);
+ } else if (!list_empty(&req->queue)) {
+ /* Not being processed - gr_finish_request dequeues it */
+ gr_finish_request(ep, req, -ECONNRESET);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
+}
+
+/* Helper for gr_set_halt and gr_set_wedge */
+static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+ int ret;
+ struct gr_ep *ep;
+
+ if (!_ep)
+ return -ENODEV;
+ ep = container_of(_ep, struct gr_ep, ep);
+
+ spin_lock(&ep->dev->lock);
+
+ /* Halting an IN endpoint should fail if queue is not empty */
+ if (halt && ep->is_in && !list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
+
+out:
+ spin_unlock(&ep->dev->lock);
+
+ return ret;
+}
+
+/* Halt endpoint */
+static int gr_set_halt(struct usb_ep *_ep, int halt)
+{
+ return gr_set_halt_wedge(_ep, halt, 0);
+}
+
+/* Halt and wedge endpoint */
+static int gr_set_wedge(struct usb_ep *_ep)
+{
+ return gr_set_halt_wedge(_ep, 1, 1);
+}
+
+/*
+ * Return the total number of bytes currently stored in the internal buffers of
+ * the endpoint.
+ */
+static int gr_fifo_status(struct usb_ep *_ep)
+{
+ struct gr_ep *ep;
+ u32 epstat;
+ u32 bytes = 0;
+
+ if (!_ep)
+ return -ENODEV;
+ ep = container_of(_ep, struct gr_ep, ep);
+
+ epstat = gr_read32(&ep->regs->epstat);
+
+ if (epstat & GR_EPSTAT_B0)
+ bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
+ if (epstat & GR_EPSTAT_B1)
+ bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
+
+ return bytes;
+}
+
+/* Empty data from internal buffers of an endpoint. */
+static void gr_fifo_flush(struct usb_ep *_ep)
+{
+ struct gr_ep *ep;
+ u32 epctrl;
+
+ if (!_ep)
+ return;
+ ep = container_of(_ep, struct gr_ep, ep);
+ dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
+
+ spin_lock(&ep->dev->lock);
+
+ epctrl = gr_read32(&ep->regs->epctrl);
+ epctrl |= GR_EPCTRL_CB;
+ gr_write32(&ep->regs->epctrl, epctrl);
+
+ spin_unlock(&ep->dev->lock);
+}
+
+static const struct usb_ep_ops gr_ep_ops = {
+ .enable = gr_ep_enable,
+ .disable = gr_ep_disable,
+
+ .alloc_request = gr_alloc_request,
+ .free_request = gr_free_request,
+
+ .queue = gr_queue_ext,
+ .dequeue = gr_dequeue,
+
+ .set_halt = gr_set_halt,
+ .set_wedge = gr_set_wedge,
+ .fifo_status = gr_fifo_status,
+ .fifo_flush = gr_fifo_flush,
+};
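+
+/*
+ * Illustrative sketch (hypothetical gadget code, names made up): a function
+ * driver reaches these ops through the usb_ep API, e.g.
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ *
+ * usb_ep_queue() ends up in gr_queue_ext() above.
+ */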
+
+/* ---------------------------------------------------------------------- */
+/* USB Gadget ops */
+
+static int gr_get_frame(struct usb_gadget *_gadget)
+{
+ struct gr_udc *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct gr_udc, gadget);
+ return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
+}
+
+static int gr_wakeup(struct usb_gadget *_gadget)
+{
+ struct gr_udc *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct gr_udc, gadget);
+
+	/* Remote wakeup feature not enabled by host */
+ if (!dev->remote_wakeup)
+ return -EINVAL;
+
+ spin_lock(&dev->lock);
+
+ gr_write32(&dev->regs->control,
+ gr_read32(&dev->regs->control) | GR_CONTROL_RW);
+
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int gr_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct gr_udc *dev;
+ u32 control;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct gr_udc, gadget);
+
+ spin_lock(&dev->lock);
+
+ control = gr_read32(&dev->regs->control);
+ if (is_on)
+ control |= GR_CONTROL_EP;
+ else
+ control &= ~GR_CONTROL_EP;
+ gr_write32(&dev->regs->control, control);
+
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int gr_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct gr_udc *dev = to_gr_udc(gadget);
+
+ spin_lock(&dev->lock);
+
+ /* Hook up the driver */
+ dev->driver = driver;
+
+ /* Get ready for host detection */
+ gr_enable_vbus_detect(dev);
+
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int gr_udc_stop(struct usb_gadget *gadget)
+{
+ struct gr_udc *dev = to_gr_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ dev->driver = NULL;
+ gr_stop_activity(dev);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops gr_ops = {
+ .get_frame = gr_get_frame,
+ .wakeup = gr_wakeup,
+ .pullup = gr_pullup,
+ .udc_start = gr_udc_start,
+ .udc_stop = gr_udc_stop,
+ /* Other operations not supported */
+};
+
+/* ---------------------------------------------------------------------- */
+/* Module probe, removal and of-matching */
+
+static const char * const onames[] = {
+ "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
+ "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
+ "ep12out", "ep13out", "ep14out", "ep15out"
+};
+
+static const char * const inames[] = {
+ "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
+ "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
+ "ep12in", "ep13in", "ep14in", "ep15in"
+};
+
+/* Must be called with dev->lock held */
+static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
+{
+ struct gr_ep *ep;
+ struct gr_request *req;
+ struct usb_request *_req;
+ void *buf;
+
+ if (is_in) {
+ ep = &dev->epi[num];
+ ep->ep.name = inames[num];
+ ep->regs = &dev->regs->epi[num];
+ } else {
+ ep = &dev->epo[num];
+ ep->ep.name = onames[num];
+ ep->regs = &dev->regs->epo[num];
+ }
+
+ gr_ep_reset(ep);
+ ep->num = num;
+ ep->is_in = is_in;
+ ep->dev = dev;
+ ep->ep.ops = &gr_ep_ops;
+ INIT_LIST_HEAD(&ep->queue);
+
+ if (num == 0) {
+ _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
+ if (!_req)
+ return -ENOMEM;
+
+ buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
+ if (!buf) {
+ gr_free_request(&ep->ep, _req);
+ return -ENOMEM;
+ }
+
+ req = container_of(_req, struct gr_request, req);
+ req->req.buf = buf;
+ req->req.length = MAX_CTRL_PL_SIZE;
+
+ if (is_in)
+ dev->ep0reqi = req; /* Complete gets set as used */
+ else
+ dev->ep0reqo = req; /* Completion treated separately */
+
+ usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
+ ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
+
+ ep->ep.caps.type_control = true;
+ } else {
+ usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+ list_add_tail(&ep->ep_list, &dev->ep_list);
+
+ if (is_in)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
+
+ ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
+ &ep->tailbuf_paddr, GFP_ATOMIC);
+ if (!ep->tailbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Must be called with dev->lock held */
+static int gr_udc_init(struct gr_udc *dev)
+{
+ struct device_node *np = dev->dev->of_node;
+ u32 epctrl_val;
+ u32 dmactrl_val;
+ int i;
+ int ret = 0;
+ u32 bufsize;
+
+ gr_set_address(dev, 0);
+
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ dev->gadget.ep0 = &dev->epi[0].ep;
+
+ INIT_LIST_HEAD(&dev->ep_list);
+ gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+
+ for (i = 0; i < dev->nepo; i++) {
+ if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
+ bufsize = 1024;
+ ret = gr_ep_init(dev, i, 0, bufsize);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < dev->nepi; i++) {
+ if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
+ bufsize = 1024;
+ ret = gr_ep_init(dev, i, 1, bufsize);
+ if (ret)
+ return ret;
+ }
+
+ /* Must be disabled by default */
+ dev->remote_wakeup = 0;
+
+ /* Enable ep0out and ep0in */
+ epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
+ dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
+ gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
+ gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
+ gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
+ gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
+
+ return 0;
+}
+
+static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
+{
+ struct gr_ep *ep;
+
+ if (is_in)
+ ep = &dev->epi[num];
+ else
+ ep = &dev->epo[num];
+
+ if (ep->tailbuf)
+ dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
+ ep->tailbuf, ep->tailbuf_paddr);
+}
+
+static int gr_remove(struct platform_device *pdev)
+{
+ struct gr_udc *dev = platform_get_drvdata(pdev);
+ int i;
+
+ if (dev->added)
+ usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
+ if (dev->driver)
+ return -EBUSY;
+
+ gr_dfs_delete(dev);
+ dma_pool_destroy(dev->desc_pool);
+ platform_set_drvdata(pdev, NULL);
+
+ gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
+ gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
+
+ for (i = 0; i < dev->nepo; i++)
+ gr_ep_remove(dev, i, 0);
+ for (i = 0; i < dev->nepi; i++)
+ gr_ep_remove(dev, i, 1);
+
+ return 0;
+}
+
+static int gr_request_irq(struct gr_udc *dev, int irq)
+{
+ return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
+ IRQF_SHARED, driver_name, dev);
+}
+
+static int gr_probe(struct platform_device *pdev)
+{
+ struct gr_udc *dev;
+ struct gr_regs __iomem *regs;
+ int retval;
+ u32 status;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->dev = &pdev->dev;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq <= 0)
+ return -ENODEV;
+
+	/* Some core configurations have separate irqs for IN and OUT events */
+ dev->irqi = platform_get_irq(pdev, 1);
+ if (dev->irqi > 0) {
+ dev->irqo = platform_get_irq(pdev, 2);
+ if (dev->irqo <= 0)
+ return -ENODEV;
+ } else {
+ dev->irqi = 0;
+ }
+
+ dev->gadget.name = driver_name;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
+ dev->gadget.ops = &gr_ops;
+
+ spin_lock_init(&dev->lock);
+ dev->regs = regs;
+
+ platform_set_drvdata(pdev, dev);
+
+ /* Determine number of endpoints and data interface mode */
+ status = gr_read32(&dev->regs->status);
+ dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
+ dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
+
+ if (!(status & GR_STATUS_DM)) {
+ dev_err(dev->dev, "Slave mode cores are not supported\n");
+ return -ENODEV;
+ }
+
+ /* --- Effects of the following calls might need explicit cleanup --- */
+
+ /* Create DMA pool for descriptors */
+ dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
+ sizeof(struct gr_dma_desc), 4, 0);
+ if (!dev->desc_pool) {
+ dev_err(dev->dev, "Could not allocate DMA pool");
+ return -ENOMEM;
+ }
+
+ /* Inside lock so that no gadget can use this udc until probe is done */
+ retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
+ if (retval) {
+ dev_err(dev->dev, "Could not add gadget udc");
+ goto out;
+ }
+ dev->added = 1;
+
+ spin_lock(&dev->lock);
+
+ retval = gr_udc_init(dev);
+ if (retval) {
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
+ /* Clear all interrupt enables that might be left on since last boot */
+ gr_disable_interrupts_and_pullup(dev);
+
+ spin_unlock(&dev->lock);
+
+ gr_dfs_create(dev);
+
+ retval = gr_request_irq(dev, dev->irq);
+ if (retval) {
+ dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
+ goto out;
+ }
+
+ if (dev->irqi) {
+ retval = gr_request_irq(dev, dev->irqi);
+ if (retval) {
+ dev_err(dev->dev, "Failed to request irqi %d\n",
+ dev->irqi);
+ goto out;
+ }
+ retval = gr_request_irq(dev, dev->irqo);
+ if (retval) {
+ dev_err(dev->dev, "Failed to request irqo %d\n",
+ dev->irqo);
+ goto out;
+ }
+ }
+
+ if (dev->irqi)
+ dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
+ dev->irq, dev->irqi, dev->irqo);
+ else
+ dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
+
+out:
+ if (retval)
+ gr_remove(pdev);
+
+ return retval;
+}
+
+static const struct of_device_id gr_match[] = {
+ {.name = "GAISLER_USBDC"},
+ {.name = "01_021"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, gr_match);
+
+static struct platform_driver gr_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = gr_match,
+ },
+ .probe = gr_probe,
+ .remove = gr_remove,
+};
+module_platform_driver(gr_driver);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/gr_udc.h b/drivers/usb/gadget/udc/gr_udc.h
new file mode 100644
index 000000000..701342391
--- /dev/null
+++ b/drivers/usb/gadget/udc/gr_udc.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
+ *
+ * 2013 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRUSBDC USB Device Controller cores available in the
+ * GRLIB VHDL IP core library.
+ *
+ * Full documentation of the GRUSBDC core can be found here:
+ * https://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * Contributors:
+ * - Andreas Larsson <andreas@gaisler.com>
+ * - Marko Isomaki
+ */
+
+/* Control registers on the AMBA bus */
+
+#define GR_MAXEP 16 /* Max # endpoints for *each* direction */
+
+struct gr_epregs {
+ u32 epctrl;
+ union {
+ struct { /* Slave mode */
+ u32 slvctrl;
+ u32 slvdata;
+ };
+ struct { /* DMA mode */
+ u32 dmactrl;
+ u32 dmaaddr;
+ };
+ };
+ u32 epstat;
+};
+
+struct gr_regs {
+ struct gr_epregs epo[GR_MAXEP]; /* 0x000 - 0x0fc */
+ struct gr_epregs epi[GR_MAXEP]; /* 0x100 - 0x1fc */
+ u32 control; /* 0x200 */
+ u32 status; /* 0x204 */
+};
+
+#define GR_EPCTRL_BUFSZ_SCALER 8
+#define GR_EPCTRL_BUFSZ_MASK 0xffe00000
+#define GR_EPCTRL_BUFSZ_POS 21
+#define GR_EPCTRL_PI BIT(20)
+#define GR_EPCTRL_CB BIT(19)
+#define GR_EPCTRL_CS BIT(18)
+#define GR_EPCTRL_MAXPL_MASK 0x0003ff80
+#define GR_EPCTRL_MAXPL_POS 7
+#define GR_EPCTRL_NT_MASK 0x00000060
+#define GR_EPCTRL_NT_POS 5
+#define GR_EPCTRL_TT_MASK 0x00000018
+#define GR_EPCTRL_TT_POS 3
+#define GR_EPCTRL_EH BIT(2)
+#define GR_EPCTRL_ED BIT(1)
+#define GR_EPCTRL_EV BIT(0)
+
+#define GR_DMACTRL_AE BIT(10)
+#define GR_DMACTRL_AD BIT(3)
+#define GR_DMACTRL_AI BIT(2)
+#define GR_DMACTRL_IE BIT(1)
+#define GR_DMACTRL_DA BIT(0)
+
+#define GR_EPSTAT_PT BIT(29)
+#define GR_EPSTAT_PR BIT(29)
+#define GR_EPSTAT_B1CNT_MASK 0x1fff0000
+#define GR_EPSTAT_B1CNT_POS 16
+#define GR_EPSTAT_B0CNT_MASK 0x0000fff8
+#define GR_EPSTAT_B0CNT_POS 3
+#define GR_EPSTAT_B1 BIT(2)
+#define GR_EPSTAT_B0 BIT(1)
+#define GR_EPSTAT_BS BIT(0)
+
+#define GR_CONTROL_SI BIT(31)
+#define GR_CONTROL_UI BIT(30)
+#define GR_CONTROL_VI BIT(29)
+#define GR_CONTROL_SP BIT(28)
+#define GR_CONTROL_FI BIT(27)
+#define GR_CONTROL_EP BIT(14)
+#define GR_CONTROL_DH BIT(13)
+#define GR_CONTROL_RW BIT(12)
+#define GR_CONTROL_TS_MASK 0x00000e00
+#define GR_CONTROL_TS_POS 9
+#define GR_CONTROL_TM BIT(8)
+#define GR_CONTROL_UA_MASK 0x000000fe
+#define GR_CONTROL_UA_POS 1
+#define GR_CONTROL_SU BIT(0)
+
+#define GR_STATUS_NEPI_MASK 0xf0000000
+#define GR_STATUS_NEPI_POS 28
+#define GR_STATUS_NEPO_MASK 0x0f000000
+#define GR_STATUS_NEPO_POS 24
+#define GR_STATUS_DM BIT(23)
+#define GR_STATUS_SU BIT(17)
+#define GR_STATUS_UR BIT(16)
+#define GR_STATUS_VB BIT(15)
+#define GR_STATUS_SP BIT(14)
+#define GR_STATUS_AF_MASK 0x00003800
+#define GR_STATUS_AF_POS 11
+#define GR_STATUS_FN_MASK 0x000007ff
+#define GR_STATUS_FN_POS 0
+
+
+#define MAX_CTRL_PL_SIZE 64 /* As per USB standard for full and high speed */
+
+/*-------------------------------------------------------------------------*/
+
+/* Driver data structures and utilities */
+
+struct gr_dma_desc {
+ u32 ctrl;
+ u32 data;
+ u32 next;
+
+ /* These must be last because hw uses the previous three */
+ u32 paddr;
+ struct gr_dma_desc *next_desc;
+};
+
+#define GR_DESC_OUT_CTRL_SE BIT(17)
+#define GR_DESC_OUT_CTRL_IE BIT(15)
+#define GR_DESC_OUT_CTRL_NX BIT(14)
+#define GR_DESC_OUT_CTRL_EN BIT(13)
+#define GR_DESC_OUT_CTRL_LEN_MASK 0x00001fff
+
+#define GR_DESC_IN_CTRL_MO BIT(18)
+#define GR_DESC_IN_CTRL_PI BIT(17)
+#define GR_DESC_IN_CTRL_ML BIT(16)
+#define GR_DESC_IN_CTRL_IE BIT(15)
+#define GR_DESC_IN_CTRL_NX BIT(14)
+#define GR_DESC_IN_CTRL_EN BIT(13)
+#define GR_DESC_IN_CTRL_LEN_MASK 0x00001fff
+
+#define GR_DESC_DMAADDR_MASK 0xfffffffc
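+
+/* The low two address bits are masked off, i.e. descriptors must be 32-bit
+ * aligned - hence the alignment of 4 for the descriptor dma_pool in
+ * gr_probe. */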
+
+struct gr_ep {
+ struct usb_ep ep;
+ struct gr_udc *dev;
+ u16 bytes_per_buffer;
+ unsigned int dma_start;
+ struct gr_epregs __iomem *regs;
+
+ unsigned num:8;
+ unsigned is_in:1;
+ unsigned stopped:1;
+ unsigned wedged:1;
+ unsigned callback:1;
+
+ /* analogous to a host-side qh */
+ struct list_head queue;
+
+ struct list_head ep_list;
+
+ /* Bounce buffer for end of "odd" sized OUT requests */
+ void *tailbuf;
+ dma_addr_t tailbuf_paddr;
+};
+
+struct gr_request {
+ struct usb_request req;
+ struct list_head queue;
+
+ /* Chain of dma descriptors */
+ struct gr_dma_desc *first_desc; /* First in the chain */
+ struct gr_dma_desc *curr_desc; /* Current descriptor */
+ struct gr_dma_desc *last_desc; /* Last in the chain */
+
+ u16 evenlen; /* Size of even length head (if oddlen != 0) */
+ u16 oddlen; /* Size of odd length tail if buffer length is "odd" */
+
+ u8 setup; /* Setup packet */
+};
+
+enum gr_ep0state {
+ GR_EP0_DISCONNECT = 0, /* No host */
+ GR_EP0_SETUP, /* Between STATUS ack and SETUP report */
+ GR_EP0_IDATA, /* IN data stage */
+ GR_EP0_ODATA, /* OUT data stage */
+ GR_EP0_ISTATUS, /* Status stage after IN data stage */
+ GR_EP0_OSTATUS, /* Status stage after OUT data stage */
+ GR_EP0_STALL, /* Data or status stages */
+ GR_EP0_SUSPEND, /* USB suspend */
+};
+
+struct gr_udc {
+ struct usb_gadget gadget;
+ struct gr_ep epi[GR_MAXEP];
+ struct gr_ep epo[GR_MAXEP];
+ struct usb_gadget_driver *driver;
+ struct dma_pool *desc_pool;
+ struct device *dev;
+
+ enum gr_ep0state ep0state;
+ struct gr_request *ep0reqo;
+ struct gr_request *ep0reqi;
+
+ struct gr_regs __iomem *regs;
+ int irq;
+ int irqi;
+ int irqo;
+
+ unsigned added:1;
+ unsigned irq_enabled:1;
+ unsigned remote_wakeup:1;
+
+ u8 test_mode;
+
+ enum usb_device_state suspended_from;
+
+ unsigned int nepi;
+ unsigned int nepo;
+
+ struct list_head ep_list;
+
+ spinlock_t lock; /* General lock, a.k.a. "dev->lock" in comments */
+};
+
+#define to_gr_udc(gadget) (container_of((gadget), struct gr_udc, gadget))
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
new file mode 100644
index 000000000..fe62db32d
--- /dev/null
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -0,0 +1,3273 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * USB Gadget driver for LPC32xx
+ *
+ * Authors:
+ * Kevin Wells <kevin.wells@nxp.com>
+ * Mike James
+ * Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright (C) 2006 Philips Semiconductors
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2012 Roland Stigge
+ *
+ * Note: This driver is based on original work done by Mike James for
+ * the LPC3180.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/isp1301.h>
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
+
+/*
+ * USB device configuration structure
+ */
+typedef void (*usc_chg_event)(int);
+struct lpc32xx_usbd_cfg {
+ int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
+ usc_chg_event conn_chgb; /* Connection change event (optional) */
+ usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
+ usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
+};
+
+/*
+ * controller driver data structures
+ */
+
+/* 16 endpoints (not to be confused with 32 hardware endpoints) */
+#define NUM_ENDPOINTS 16
+
+/*
+ * IRQ indices make reading the code a little easier
+ */
+#define IRQ_USB_LP 0
+#define IRQ_USB_HP 1
+#define IRQ_USB_DEVDMA 2
+#define IRQ_USB_ATX 3
+
+#define EP_OUT 0 /* RX (from host) */
+#define EP_IN 1 /* TX (to host) */
+
+/* Returns the interrupt mask for the selected hardware endpoint */
+#define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
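+/* e.g. EP_MASK_SEL(1, EP_IN) == (1 << 3); each endpoint's OUT and IN
+ * interrupts occupy an adjacent pair of bits */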
+
+#define EP_INT_TYPE 0
+#define EP_ISO_TYPE 1
+#define EP_BLK_TYPE 2
+#define EP_CTL_TYPE 3
+
+/* EP0 states */
+#define WAIT_FOR_SETUP 0 /* Wait for setup packet */
+#define DATA_IN 1 /* Expect dev->host transfer */
+#define DATA_OUT 2 /* Expect host->dev transfer */
+
+/* DD (DMA Descriptor) structure, requires word alignment. This is already
+ * defined in the LPC32XX USB device header file, but this version is slightly
+ * modified to tag some work data with each DMA descriptor. */
+struct lpc32xx_usbd_dd_gad {
+ u32 dd_next_phy;
+ u32 dd_setup;
+ u32 dd_buffer_addr;
+ u32 dd_status;
+ u32 dd_iso_ps_mem_addr;
+ u32 this_dma;
+ u32 iso_status[6]; /* 5 spare */
+ u32 dd_next_v;
+};
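+
+/* Only the first five words mirror the hardware DD layout (see
+ * struct lpc32xx_usbd_dd below); this_dma, iso_status[] and dd_next_v are
+ * driver bookkeeping. this_dma + 24 is the DMA address of iso_status[0]. */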
+
+/*
+ * Logical endpoint structure
+ */
+struct lpc32xx_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ struct lpc32xx_udc *udc;
+
+ u32 hwep_num_base; /* Physical hardware EP */
+ u32 hwep_num; /* Maps to hardware endpoint */
+ u32 maxpacket;
+ u32 lep;
+
+ bool is_in;
+ bool req_pending;
+ u32 eptype;
+
+ u32 totalints;
+
+ bool wedge;
+};
+
+enum atx_type {
+ ISP1301,
+ STOTG04,
+};
+
+/*
+ * Common UDC structure
+ */
+struct lpc32xx_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct platform_device *pdev;
+ struct device *dev;
+ spinlock_t lock;
+ struct i2c_client *isp1301_i2c_client;
+
+ /* Board and device specific */
+ struct lpc32xx_usbd_cfg *board;
+ void __iomem *udp_baseaddr;
+ int udp_irq[4];
+ struct clk *usb_slv_clk;
+
+ /* DMA support */
+ u32 *udca_v_base;
+ u32 udca_p_base;
+ struct dma_pool *dd_cache;
+
+ /* Common EP and control data */
+ u32 enabled_devints;
+ u32 enabled_hwepints;
+ u32 dev_status;
+ u32 realized_eps;
+
+ /* VBUS detection, pullup, and power flags */
+ u8 vbus;
+ u8 last_vbus;
+ int pullup;
+ int poweron;
+ enum atx_type atx;
+
+ /* Work queues related to I2C support */
+ struct work_struct pullup_job;
+ struct work_struct power_job;
+
+ /* USB device peripheral - various */
+ struct lpc32xx_ep ep[NUM_ENDPOINTS];
+ bool enabled;
+ bool clocked;
+ bool suspended;
+ int ep0state;
+ atomic_t enabled_ep_cnt;
+ wait_queue_head_t ep_disable_wait_queue;
+};
+
+/*
+ * Endpoint request
+ */
+struct lpc32xx_request {
+ struct usb_request req;
+ struct list_head queue;
+ struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
+ bool mapped;
+ bool send_zlp;
+};
+
+static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
+{
+ return container_of(g, struct lpc32xx_udc, gadget);
+}
+
+#define ep_dbg(epp, fmt, arg...) \
+ dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
+#define ep_err(epp, fmt, arg...) \
+ dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
+#define ep_info(epp, fmt, arg...) \
+ dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
+#define ep_warn(epp, fmt, arg...) \
+ dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)
+
+#define UDCA_BUFF_SIZE (128)
+
+/**********************************************************************
+ * USB device controller register offsets
+ **********************************************************************/
+
+#define USBD_DEVINTST(x) ((x) + 0x200)
+#define USBD_DEVINTEN(x) ((x) + 0x204)
+#define USBD_DEVINTCLR(x) ((x) + 0x208)
+#define USBD_DEVINTSET(x) ((x) + 0x20C)
+#define USBD_CMDCODE(x) ((x) + 0x210)
+#define USBD_CMDDATA(x) ((x) + 0x214)
+#define USBD_RXDATA(x) ((x) + 0x218)
+#define USBD_TXDATA(x) ((x) + 0x21C)
+#define USBD_RXPLEN(x) ((x) + 0x220)
+#define USBD_TXPLEN(x) ((x) + 0x224)
+#define USBD_CTRL(x) ((x) + 0x228)
+#define USBD_DEVINTPRI(x) ((x) + 0x22C)
+#define USBD_EPINTST(x) ((x) + 0x230)
+#define USBD_EPINTEN(x) ((x) + 0x234)
+#define USBD_EPINTCLR(x) ((x) + 0x238)
+#define USBD_EPINTSET(x) ((x) + 0x23C)
+#define USBD_EPINTPRI(x) ((x) + 0x240)
+#define USBD_REEP(x) ((x) + 0x244)
+#define USBD_EPIND(x) ((x) + 0x248)
+#define USBD_EPMAXPSIZE(x) ((x) + 0x24C)
+/* DMA support registers only below */
+/* Set, clear, or get enabled state of the DMA request status. If
+ * enabled, an IN or OUT token will start a DMA transfer for the EP */
+#define USBD_DMARST(x) ((x) + 0x250)
+#define USBD_DMARCLR(x) ((x) + 0x254)
+#define USBD_DMARSET(x) ((x) + 0x258)
+/* DMA UDCA head pointer */
+#define USBD_UDCAH(x) ((x) + 0x280)
+/* EP DMA status, enable, and disable. This is used to specifically
+ * enabled or disable DMA for a specific EP */
+#define USBD_EPDMAST(x) ((x) + 0x284)
+#define USBD_EPDMAEN(x) ((x) + 0x288)
+#define USBD_EPDMADIS(x) ((x) + 0x28C)
+/* DMA master interrupts enable and pending interrupts */
+#define USBD_DMAINTST(x) ((x) + 0x290)
+#define USBD_DMAINTEN(x) ((x) + 0x294)
+/* DMA end of transfer interrupt enable, disable, status */
+#define USBD_EOTINTST(x) ((x) + 0x2A0)
+#define USBD_EOTINTCLR(x) ((x) + 0x2A4)
+#define USBD_EOTINTSET(x) ((x) + 0x2A8)
+/* New DD request interrupt enable, disable, status */
+#define USBD_NDDRTINTST(x) ((x) + 0x2AC)
+#define USBD_NDDRTINTCLR(x) ((x) + 0x2B0)
+#define USBD_NDDRTINTSET(x) ((x) + 0x2B4)
+/* DMA error interrupt enable, disable, status */
+#define USBD_SYSERRTINTST(x) ((x) + 0x2B8)
+#define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC)
+#define USBD_SYSERRTINTSET(x) ((x) + 0x2C0)
+
+/**********************************************************************
+ * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
+ * USBD_DEVINTPRI register definitions
+ **********************************************************************/
+#define USBD_ERR_INT (1 << 9)
+#define USBD_EP_RLZED (1 << 8)
+#define USBD_TXENDPKT (1 << 7)
+#define USBD_RXENDPKT (1 << 6)
+#define USBD_CDFULL (1 << 5)
+#define USBD_CCEMPTY (1 << 4)
+#define USBD_DEV_STAT (1 << 3)
+#define USBD_EP_SLOW (1 << 2)
+#define USBD_EP_FAST (1 << 1)
+#define USBD_FRAME (1 << 0)
+
+/**********************************************************************
+ * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
+ * USBD_EPINTPRI register definitions
+ **********************************************************************/
+/* End point selection macro (RX) */
+#define USBD_RX_EP_SEL(e) (1 << ((e) << 1))
+
+/* End point selection macro (TX) */
+#define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1))
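+/* e.g. USBD_TX_EP_SEL(2) == (1 << 5); RX uses the even bit and TX the odd
+ * bit of each endpoint's pair */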
+
+/**********************************************************************
+ * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
+ * USBD_EPDMAEN/USBD_EPDMADIS/
+ * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
+ * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
+ * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
+ * register definitions
+ **********************************************************************/
+/* Endpoint selection macro */
+#define USBD_EP_SEL(e) (1 << (e))
+
+/**********************************************************************
+ * USBD_DMAINTST/USBD_DMAINTEN
+ **********************************************************************/
+#define USBD_SYS_ERR_INT (1 << 2)
+#define USBD_NEW_DD_INT (1 << 1)
+#define USBD_EOT_INT (1 << 0)
+
+/**********************************************************************
+ * USBD_RXPLEN register definitions
+ **********************************************************************/
+#define USBD_PKT_RDY (1 << 11)
+#define USBD_DV (1 << 10)
+#define USBD_PK_LEN_MASK 0x3FF
+
+/**********************************************************************
+ * USBD_CTRL register definitions
+ **********************************************************************/
+#define USBD_LOG_ENDPOINT(e) ((e) << 2)
+#define USBD_WR_EN (1 << 1)
+#define USBD_RD_EN (1 << 0)
+
+/**********************************************************************
+ * USBD_CMDCODE register definitions
+ **********************************************************************/
+#define USBD_CMD_CODE(c) ((c) << 16)
+#define USBD_CMD_PHASE(p) ((p) << 8)
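+/* e.g. CMD_SET_ADDR below equals USBD_CMD_CODE(0xD0) | USBD_CMD_PHASE(0x05);
+ * phase 0x05 is a command write, 0x01 a data write, 0x02 a data read */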
+
+/**********************************************************************
+ * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
+ **********************************************************************/
+#define USBD_DMAEP(e) (1 << (e))
+
+/* DD (DMA Descriptor) structure, requires word alignment */
+struct lpc32xx_usbd_dd {
+ u32 *dd_next;
+ u32 dd_setup;
+ u32 dd_buffer_addr;
+ u32 dd_status;
+ u32 dd_iso_ps_mem_addr;
+};
+
+/* dd_setup bit defines */
+#define DD_SETUP_ATLE_DMA_MODE 0x01
+#define DD_SETUP_NEXT_DD_VALID 0x04
+#define DD_SETUP_ISO_EP 0x10
+#define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5)
+#define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16)
+
+/* dd_status bit defines */
+#define DD_STATUS_DD_RETIRED 0x01
+#define DD_STATUS_STS_MASK 0x1E
+#define DD_STATUS_STS_NS 0x00 /* Not serviced */
+#define DD_STATUS_STS_BS 0x02 /* Being serviced */
+#define DD_STATUS_STS_NC 0x04 /* Normal completion */
+#define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */
+#define DD_STATUS_STS_DOR 0x08 /* Data overrun */
+#define DD_STATUS_STS_SE 0x12 /* System error */
+#define DD_STATUS_PKT_VAL 0x20 /* Packet valid */
+#define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */
+#define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */
+#define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F)
+#define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF)
+
+/*
+ *
+ * Protocol engine bits below
+ *
+ */
+/* Device Interrupt Bit Definitions */
+#define FRAME_INT 0x00000001
+#define EP_FAST_INT 0x00000002
+#define EP_SLOW_INT 0x00000004
+#define DEV_STAT_INT 0x00000008
+#define CCEMTY_INT 0x00000010
+#define CDFULL_INT 0x00000020
+#define RxENDPKT_INT 0x00000040
+#define TxENDPKT_INT 0x00000080
+#define EP_RLZED_INT 0x00000100
+#define ERR_INT 0x00000200
+
+/* Rx & Tx Packet Length Definitions */
+#define PKT_LNGTH_MASK 0x000003FF
+#define PKT_DV 0x00000400
+#define PKT_RDY 0x00000800
+
+/* USB Control Definitions */
+#define CTRL_RD_EN 0x00000001
+#define CTRL_WR_EN 0x00000002
+
+/* Command Codes */
+#define CMD_SET_ADDR 0x00D00500
+#define CMD_CFG_DEV 0x00D80500
+#define CMD_SET_MODE 0x00F30500
+#define CMD_RD_FRAME 0x00F50500
+#define DAT_RD_FRAME 0x00F50200
+#define CMD_RD_TEST 0x00FD0500
+#define DAT_RD_TEST 0x00FD0200
+#define CMD_SET_DEV_STAT 0x00FE0500
+#define CMD_GET_DEV_STAT 0x00FE0500
+#define DAT_GET_DEV_STAT 0x00FE0200
+#define CMD_GET_ERR_CODE 0x00FF0500
+#define DAT_GET_ERR_CODE 0x00FF0200
+#define CMD_RD_ERR_STAT 0x00FB0500
+#define DAT_RD_ERR_STAT 0x00FB0200
+#define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16))
+#define CMD_SEL_EP(x) (0x00000500 | ((x) << 16))
+#define DAT_SEL_EP(x) (0x00000200 | ((x) << 16))
+#define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16))
+#define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16))
+#define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16))
+#define CMD_CLR_BUF 0x00F20500
+#define DAT_CLR_BUF 0x00F20200
+#define CMD_VALID_BUF 0x00FA0500
+
+/* Device Address Register Definitions */
+#define DEV_ADDR_MASK 0x7F
+#define DEV_EN 0x80
+
+/* Device Configure Register Definitions */
+#define CONF_DVICE 0x01
+
+/* Device Mode Register Definitions */
+#define AP_CLK 0x01
+#define INAK_CI 0x02
+#define INAK_CO 0x04
+#define INAK_II 0x08
+#define INAK_IO 0x10
+#define INAK_BI 0x20
+#define INAK_BO 0x40
+
+/* Device Status Register Definitions */
+#define DEV_CON 0x01
+#define DEV_CON_CH 0x02
+#define DEV_SUS 0x04
+#define DEV_SUS_CH 0x08
+#define DEV_RST 0x10
+
+/* Error Code Register Definitions */
+#define ERR_EC_MASK 0x0F
+#define ERR_EA 0x10
+
+/* Error Status Register Definitions */
+#define ERR_PID 0x01
+#define ERR_UEPKT 0x02
+#define ERR_DCRC 0x04
+#define ERR_TIMOUT 0x08
+#define ERR_EOP 0x10
+#define ERR_B_OVRN 0x20
+#define ERR_BTSTF 0x40
+#define ERR_TGL 0x80
+
+/* Endpoint Select Register Definitions */
+#define EP_SEL_F 0x01
+#define EP_SEL_ST 0x02
+#define EP_SEL_STP 0x04
+#define EP_SEL_PO 0x08
+#define EP_SEL_EPN 0x10
+#define EP_SEL_B_1_FULL 0x20
+#define EP_SEL_B_2_FULL 0x40
+
+/* Endpoint Status Register Definitions */
+#define EP_STAT_ST 0x01
+#define EP_STAT_DA 0x20
+#define EP_STAT_RF_MO 0x40
+#define EP_STAT_CND_ST 0x80
+
+/* Clear Buffer Register Definitions */
+#define CLR_BUF_PO 0x01
+
+/* DMA Interrupt Bit Definitions */
+#define EOT_INT 0x01
+#define NDD_REQ_INT 0x02
+#define SYS_ERR_INT 0x04
+
+#define DRIVER_VERSION "1.03"
+static const char driver_name[] = "lpc32xx_udc";
+
+/*
+ *
+ * proc interface support
+ *
+ */
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
+static const char debug_filename[] = "driver/udc";
+
+static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
+{
+ struct lpc32xx_request *req;
+
+ seq_printf(s, "\n");
+ seq_printf(s, "%12s, maxpacket %4d %3s",
+ ep->ep.name, ep->ep.maxpacket,
+ ep->is_in ? "in" : "out");
+ seq_printf(s, " type %4s", epnames[ep->eptype]);
+ seq_printf(s, " ints: %12d", ep->totalints);
+
+ if (list_empty(&ep->queue))
+ seq_printf(s, "\t(queue empty)\n");
+ else {
+ list_for_each_entry(req, &ep->queue, queue) {
+ u32 length = req->req.actual;
+
+ seq_printf(s, "\treq %p len %d/%d buf %p\n",
+ &req->req, length,
+ req->req.length, req->req.buf);
+ }
+ }
+}
+
+static int udc_show(struct seq_file *s, void *unused)
+{
+ struct lpc32xx_udc *udc = s->private;
+ struct lpc32xx_ep *ep;
+ unsigned long flags;
+
+ seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
+ udc->vbus ? "present" : "off",
+ udc->enabled ? (udc->vbus ? "active" : "enabled") :
+ "disabled",
+ udc->gadget.is_selfpowered ? "self" : "VBUS",
+ udc->suspended ? ", suspended" : "",
+ udc->driver ? udc->driver->driver.name : "(none)");
+
+ if (udc->enabled && udc->vbus) {
+ proc_ep_show(s, &udc->ep[0]);
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
+ proc_ep_show(s, ep);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(udc);
+
+static void create_debug_file(struct lpc32xx_udc *udc)
+{
+ debugfs_create_file(debug_filename, 0, NULL, udc, &udc_fops);
+}
+
+static void remove_debug_file(struct lpc32xx_udc *udc)
+{
+ debugfs_lookup_and_remove(debug_filename, NULL);
+}
+
+#else
+static inline void create_debug_file(struct lpc32xx_udc *udc) {}
+static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
+#endif
+
+/* Primary initialization sequence for the ISP1301 transceiver */
+static void isp1301_udc_configure(struct lpc32xx_udc *udc)
+{
+ u8 value;
+ s32 vendor, product;
+
+ vendor = i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00);
+ product = i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02);
+
+ if (vendor == 0x0483 && product == 0xa0c4)
+ udc->atx = STOTG04;
+
+ /* LPC32XX only supports DAT_SE0 USB mode */
+ /* This sequence is important */
+
+ /* Disable transparent UART mode first */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+ MC1_UART_EN);
+
+ /* Set full speed and SE0 mode */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));
+
+ /*
+ * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
+ */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
+
+ value = MC2_BI_DI;
+ if (udc->atx != STOTG04)
+ value |= MC2_SPD_SUSP_CTRL;
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_2, value);
+
+ /* Drive VBUS_DRV high or low depending on board setup */
+ if (udc->board->vbus_drv_pol != 0)
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
+ else
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
+ OTG1_VBUS_DRV);
+
+ /* Bi-directional mode with suspend control
+ * Enable both pulldowns for now - the pullup will be enabled when VBUS
+ * is detected */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1,
+ (0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
+
+ /* Discharge VBUS (just in case) */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
+ msleep(1);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+ OTG1_VBUS_DISCHRG);
+
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
+ dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n", vendor);
+ dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", product);
+ dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
+ i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
+}
+
+/* Enables or disables the USB device pullup via the ISP1301 transceiver */
+static void isp1301_pullup_set(struct lpc32xx_udc *udc)
+{
+ if (udc->pullup)
+ /* Enable pullup for bus signalling */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
+ else
+ /* Disable pullup for bus signalling */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
+ OTG1_DP_PULLUP);
+}
+
+static void pullup_work(struct work_struct *work)
+{
+ struct lpc32xx_udc *udc =
+ container_of(work, struct lpc32xx_udc, pullup_job);
+
+ isp1301_pullup_set(udc);
+}
+
+static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
+ int block)
+{
+ if (en_pullup == udc->pullup)
+ return;
+
+ udc->pullup = en_pullup;
+ if (block)
+ isp1301_pullup_set(udc);
+ else
+ /* defer slow i2c pull up setting */
+ schedule_work(&udc->pullup_job);
+}
+
+#ifdef CONFIG_PM
+/* Powers up or down the ISP1301 transceiver */
+static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
+{
+ /* There is no "global power down" register for stotg04 */
+ if (udc->atx == STOTG04)
+ return;
+
+ if (enable != 0)
+ /* Power up ISP1301 - this ISP1301 will automatically wake up
+ when VBUS is detected */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
+ MC2_GLOBAL_PWR_DN);
+ else
+ /* Power down ISP1301 */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
+}
+
+static void power_work(struct work_struct *work)
+{
+ struct lpc32xx_udc *udc =
+ container_of(work, struct lpc32xx_udc, power_job);
+
+ isp1301_set_powerstate(udc, udc->poweron);
+}
+#endif
+
+/*
+ *
+ * USB protocol engine command/data read/write helper functions
+ *
+ */
+/* Issues a single command to the USB device state machine */
+static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
+{
+ u32 pass = 0;
+ int to;
+
+ /* EP may lock on CLRI if this read isn't done */
+ u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
+ (void) tmp;
+
+ while (pass == 0) {
+ writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
+
+ /* Write command code */
+ writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
+ to = 10000;
+ while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
+ USBD_CCEMPTY) == 0) && (to > 0)) {
+ to--;
+ }
+
+ if (to > 0)
+ pass = 1;
+
+ cpu_relax();
+ }
+}
+
+/* Issues 2 commands (or command and data) to the USB device state machine */
+static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
+ u32 data)
+{
+ udc_protocol_cmd_w(udc, cmd);
+ udc_protocol_cmd_w(udc, data);
+}
+
+/* Issues a single command to the USB device state machine and reads
+ * response data */
+static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
+{
+ int to = 1000;
+
+ /* Write a command and read data from the protocol engine */
+ writel((USBD_CDFULL | USBD_CCEMPTY),
+ USBD_DEVINTCLR(udc->udp_baseaddr));
+
+ /* Write command code */
+ udc_protocol_cmd_w(udc, cmd);
+
+ while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
+ && (to > 0))
+ to--;
+ if (!to)
+ dev_dbg(udc->dev,
+ "Protocol engine didn't receive response (CDFULL)\n");
+
+ return readl(USBD_CMDDATA(udc->udp_baseaddr));
+}
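+
+/* A CMD_ write is normally paired with a DAT_ read, so fetching e.g. the
+ * device status looks like:
+ *
+ * udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
+ * status = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
+ */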
+
+/*
+ *
+ * USB device interrupt mask support functions
+ *
+ */
+/* Enable one or more USB device interrupts */
+static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
+{
+ udc->enabled_devints |= devmask;
+ writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
+}
+
+/* Disable one or more USB device interrupts */
+static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
+{
+ udc->enabled_devints &= ~mask;
+ writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
+}
+
+/* Clear one or more USB device interrupts */
+static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
+{
+ writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
+}
+
+/*
+ *
+ * Endpoint interrupt disable/enable functions
+ *
+ */
+/* Enable one or more USB endpoint interrupts */
+static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc->enabled_hwepints |= (1 << hwep);
+ writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
+}
+
+/* Disable one or more USB endpoint interrupts */
+static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc->enabled_hwepints &= ~(1 << hwep);
+ writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
+}
+
+/* Clear one or more USB endpoint interrupts */
+static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
+{
+ writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
+}
+
+/* Enable DMA for the HW channel */
+static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
+{
+ writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
+}
+
+/* Disable DMA for the HW channel */
+static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
+{
+ writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
+}
+
+/*
+ *
+ * Endpoint realize/unrealize functions
+ *
+ */
+/* Before an endpoint can be used, it needs to be realized
+ * in the USB protocol engine - this realizes the endpoint.
+ * The interrupt (FIFO or DMA) is not enabled with this function */
+static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
+ u32 maxpacket)
+{
+ int to = 1000;
+
+ writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
+ writel(hwep, USBD_EPIND(udc->udp_baseaddr));
+ udc->realized_eps |= (1 << hwep);
+ writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
+ writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));
+
+ /* Wait until endpoint is realized in hardware */
+ while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
+ USBD_EP_RLZED)) && (to > 0))
+ to--;
+ if (!to)
+ dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
+
+ writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
+}
+
+/* Unrealize an EP */
+static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc->realized_eps &= ~(1 << hwep);
+ writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
+}
+
+/*
+ *
+ * Endpoint support functions
+ *
+ */
+/* Select and clear endpoint interrupt */
+static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
+ return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
+}
+
+/* Disables the endpoint in the USB protocol engine */
+static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
+ DAT_WR_BYTE(EP_STAT_DA));
+}
+
+/* Stalls the endpoint - endpoint will return STALL */
+static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
+ DAT_WR_BYTE(EP_STAT_ST));
+}
+
+/* Clear stall or reset endpoint */
+static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
+ DAT_WR_BYTE(0));
+}
+
+/* Select an endpoint for endpoint status, clear, validate */
+static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
+}
+
+/*
+ *
+ * Endpoint buffer management functions
+ *
+ */
+/* Clear the current endpoint's buffer */
+static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_select_hwep(udc, hwep);
+ udc_protocol_cmd_w(udc, CMD_CLR_BUF);
+}
+
+/* Validate the current endpoint's buffer */
+static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
+{
+ udc_select_hwep(udc, hwep);
+ udc_protocol_cmd_w(udc, CMD_VALID_BUF);
+}
+
+static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
+{
+ /* Clear EP interrupt */
+ uda_clear_hwepint(udc, hwep);
+ return udc_selep_clrint(udc, hwep);
+}
+
+/*
+ *
+ * USB EP DMA support
+ *
+ */
+/* Allocate a DMA Descriptor */
+static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
+{
+ dma_addr_t dma;
+ struct lpc32xx_usbd_dd_gad *dd;
+
+ dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
+ if (dd)
+ dd->this_dma = dma;
+
+ return dd;
+}
+
+/* Free a DMA Descriptor */
+static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
+{
+ dma_pool_free(udc->dd_cache, dd, dd->this_dma);
+}
+
+/*
+ *
+ * USB setup and shutdown functions
+ *
+ */
+/* Enables or disables most of the USB system clocks when low power mode is
+ * needed. Clocks are typically started on a connection event, and disabled
+ * when a cable is disconnected */
+static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
+{
+ if (enable != 0) {
+ if (udc->clocked)
+ return;
+
+ udc->clocked = 1;
+ clk_prepare_enable(udc->usb_slv_clk);
+ } else {
+ if (!udc->clocked)
+ return;
+
+ udc->clocked = 0;
+ clk_disable_unprepare(udc->usb_slv_clk);
+ }
+}
+
+/* Set/reset USB device address */
+static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
+{
+ /* Address will be latched at the end of the status phase, or
+ latched immediately if function is called twice */
+ udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
+ DAT_WR_BYTE(DEV_EN | addr));
+}
+
+/* Set up an IN request for DMA transfer - this consists of determining the
+ * list of DMA addresses for the transfer, allocating DMA Descriptors,
+ * installing the DD into the UDCA, and then enabling the DMA for that EP */
+static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+{
+ struct lpc32xx_request *req;
+ u32 hwep = ep->hwep_num;
+
+ ep->req_pending = 1;
+
+ /* There will always be a request waiting here */
+ req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
+
+ /* Place the DD Descriptor into the UDCA */
+ udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
+
+ /* Enable DMA and interrupt for the HW EP */
+ udc_ep_dma_enable(udc, hwep);
+
+ /* Clear ZLP if last packet is not of MAXP size */
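+ /* (a short final packet already terminates the transfer on the wire) */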
+ if (req->req.length % ep->ep.maxpacket)
+ req->send_zlp = 0;
+
+ return 0;
+}
+
+/* Set up an OUT request for DMA transfer - this consists of determining the
+ * list of DMA addresses for the transfer, allocating DMA Descriptors,
+ * installing the DD into the UDCA, and then enabling the DMA for that EP */
+static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+{
+ struct lpc32xx_request *req;
+ u32 hwep = ep->hwep_num;
+
+ ep->req_pending = 1;
+
+ /* There will always be a request waiting here */
+ req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
+
+ /* Place the DD Descriptor into the UDCA */
+ udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
+
+ /* Enable DMA and interrupt for the HW EP */
+ udc_ep_dma_enable(udc, hwep);
+ return 0;
+}
+
+static void udc_disable(struct lpc32xx_udc *udc)
+{
+ u32 i;
+
+ /* Disable device */
+ udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
+ udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));
+
+ /* Disable all device interrupts (including EP0) */
+ uda_disable_devint(udc, 0x3FF);
+
+ /* Disable and reset all endpoint interrupts */
+ for (i = 0; i < 32; i++) {
+ uda_disable_hwepint(udc, i);
+ uda_clear_hwepint(udc, i);
+ udc_disable_hwep(udc, i);
+ udc_unrealize_hwep(udc, i);
+ udc->udca_v_base[i] = 0;
+
+ /* Disable and clear all interrupts and DMA */
+ udc_ep_dma_disable(udc, i);
+ writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
+ writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
+ writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
+ writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
+ }
+
+ /* Disable DMA interrupts */
+ writel(0, USBD_DMAINTEN(udc->udp_baseaddr));
+
+ writel(0, USBD_UDCAH(udc->udp_baseaddr));
+}
+
+static void udc_enable(struct lpc32xx_udc *udc)
+{
+ u32 i;
+ struct lpc32xx_ep *ep = &udc->ep[0];
+
+ /* Start with known state */
+ udc_disable(udc);
+
+ /* Enable device */
+ udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));
+
+ /* EP interrupts on high priority, FRAME interrupt on low priority */
+ writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
+ writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
+
+ /* Clear any pending device interrupts */
+ writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
+
+ /* Setup UDCA - not yet used (DMA) */
+ writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
+
+ /* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
+ for (i = 0; i <= 1; i++) {
+ udc_realize_hwep(udc, i, ep->ep.maxpacket);
+ uda_enable_hwepint(udc, i);
+ udc_select_hwep(udc, i);
+ udc_clrstall_hwep(udc, i);
+ udc_clr_buffer_hwep(udc, i);
+ }
+
+ /* Device interrupt setup */
+ uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
+ USBD_EP_FAST));
+ uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
+ USBD_EP_FAST));
+
+ /* Set device address to 0 - called twice to force a latch in the USB
+ engine without the need of a setup packet status closure */
+ udc_set_address(udc, 0);
+ udc_set_address(udc, 0);
+
+ /* Enable master DMA interrupts */
+ writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
+ USBD_DMAINTEN(udc->udp_baseaddr));
+
+ udc->dev_status = 0;
+}
+
+/*
+ *
+ * USB device board specific events handled via callbacks
+ *
+ */
+/* Connection change event - notify board function of change */
+static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
+{
+ /* Just notify of a connection change event (optional) */
+ if (udc->board->conn_chgb != NULL)
+ udc->board->conn_chgb(conn);
+}
+
+/* Suspend/resume event - notify board function of change */
+static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
+{
+ /* Just notify of a Suspend/resume change event (optional) */
+ if (udc->board->susp_chgb != NULL)
+ udc->board->susp_chgb(conn);
+
+ if (conn)
+ udc->suspended = 0;
+ else
+ udc->suspended = 1;
+}
+
+/* Remote wakeup enable/disable - notify board function of change */
+static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
+{
+ if (udc->board->rmwk_chgb != NULL)
+ udc->board->rmwk_chgb(udc->dev_status &
+ (1 << USB_DEVICE_REMOTE_WAKEUP));
+}
+
+/* Reads data from FIFO, adjusts for alignment and data size */
+static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
+{
+ int n, i, bl;
+ u16 *p16;
+ u32 *p32, tmp, cbytes;
+
+ /* Use optimal data transfer method based on source address and size */
+ switch (((uintptr_t) data) & 0x3) {
+ case 0: /* 32-bit aligned */
+ p32 = (u32 *) data;
+ cbytes = (bytes & ~0x3);
+
+ /* Copy 32-bit aligned data first */
+ for (n = 0; n < cbytes; n += 4)
+ *p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));
+
+ /* Handle any remaining bytes */
+ bl = bytes - cbytes;
+ if (bl) {
+ tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
+ for (n = 0; n < bl; n++)
+ data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
+
+ }
+ break;
+
+ case 1: /* 8-bit aligned */
+ case 3:
+ /* Each byte has to be handled independently */
+ for (n = 0; n < bytes; n += 4) {
+ tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
+
+ bl = bytes - n;
+ if (bl > 4)
+ bl = 4;
+
+ for (i = 0; i < bl; i++)
+ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
+ }
+ break;
+
+ case 2: /* 16-bit aligned */
+ p16 = (u16 *) data;
+ cbytes = (bytes & ~0x3);
+
+ /* Copy 32-bit sized objects first with 16-bit alignment */
+ for (n = 0; n < cbytes; n += 4) {
+ tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
+ *p16++ = (u16)(tmp & 0xFFFF);
+ *p16++ = (u16)((tmp >> 16) & 0xFFFF);
+ }
+
+ /* Handle any remaining bytes */
+ bl = bytes - cbytes;
+ if (bl) {
+ tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
+ for (n = 0; n < bl; n++)
+ data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
+ }
+ break;
+ }
+}
+
+/* Read data from the FIFO for an endpoint. This function is for endpoints
+ * (such as EP0) that don't use DMA. It should only be called when a packet is
+ * known to be ready to read for the endpoint. Note that the endpoint must be
+ * selected in the protocol engine prior to this call. */
+static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
+ u32 bytes)
+{
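+ /* Hardware EPs come in OUT/IN pairs, so the logical endpoint number is
+ * hwep / 2; (hwep & 0x1E) << 1 places it in the USBD_LOG_ENDPOINT field */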
+ u32 tmpv;
+ int to = 1000;
+ u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;
+
+ /* Setup read of endpoint */
+ writel(hwrep, USBD_CTRL(udc->udp_baseaddr));
+
+ /* Wait until packet is ready */
+ while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
+ PKT_RDY) == 0) && (to > 0))
+ to--;
+ if (!to)
+ dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");
+
+ /* Mask out count */
+ tmp = tmpv & PKT_LNGTH_MASK;
+ if (bytes < tmp)
+ tmp = bytes;
+
+ if ((tmp > 0) && (data != NULL))
+ udc_pop_fifo(udc, (u8 *) data, tmp);
+
+ writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
+
+ /* Clear the buffer */
+ udc_clr_buffer_hwep(udc, hwep);
+
+ return tmp;
+}
+
+/* Stuffs data into the FIFO, adjusts for alignment and data size */
+static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
+{
+ int n, i, bl;
+ u16 *p16;
+ u32 *p32, tmp, cbytes;
+
+ /* Use optimal data transfer method based on source address and size */
+ switch (((uintptr_t) data) & 0x3) {
+ case 0: /* 32-bit aligned */
+ p32 = (u32 *) data;
+ cbytes = (bytes & ~0x3);
+
+ /* Copy 32-bit aligned data first */
+ for (n = 0; n < cbytes; n += 4)
+ writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
+
+ /* Handle any remaining bytes */
+ bl = bytes - cbytes;
+ if (bl) {
+ tmp = 0;
+ for (n = 0; n < bl; n++)
+ tmp |= data[cbytes + n] << (n * 8);
+
+ writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
+ }
+ break;
+
+ case 1: /* 8-bit aligned */
+ case 3:
+ /* Each byte has to be handled independently */
+ for (n = 0; n < bytes; n += 4) {
+ bl = bytes - n;
+ if (bl > 4)
+ bl = 4;
+
+ tmp = 0;
+ for (i = 0; i < bl; i++)
+ tmp |= data[n + i] << (i * 8);
+
+ writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
+ }
+ break;
+
+ case 2: /* 16-bit aligned */
+ p16 = (u16 *) data;
+ cbytes = (bytes & ~0x3);
+
+ /* Copy 32-bit aligned data first */
+ for (n = 0; n < cbytes; n += 4) {
+ tmp = *p16++ & 0xFFFF;
+ tmp |= (*p16++ & 0xFFFF) << 16;
+ writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
+ }
+
+ /* Handle any remaining bytes */
+ bl = bytes - cbytes;
+ if (bl) {
+ tmp = 0;
+ for (n = 0; n < bl; n++)
+ tmp |= data[cbytes + n] << (n * 8);
+
+ writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
+ }
+ break;
+ }
+}
+
+/* Write data to the FIFO for an endpoint. This function is for endpoints (such
+ * as EP0) that don't use DMA. Note that the endpoint must be selected in the
+ * protocol engine prior to this call. */
+static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
+ u32 bytes)
+{
+ u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
+
+ if ((bytes > 0) && (data == NULL))
+ return;
+
+ /* Setup write of endpoint */
+ writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
+
+ writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
+
+ /* Need at least 1 byte to trigger TX */
+ if (bytes == 0)
+ writel(0, USBD_TXDATA(udc->udp_baseaddr));
+ else
+ udc_stuff_fifo(udc, (u8 *) data, bytes);
+
+ writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
+
+ udc_val_buffer_hwep(udc, hwep);
+}
+
+/* USB device reset - resets USB to a default state with just EP0
+ enabled */
+static void uda_usb_reset(struct lpc32xx_udc *udc)
+{
+ u32 i = 0;
+ /* Re-init device controller and EP0 */
+ udc_enable(udc);
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ for (i = 1; i < NUM_ENDPOINTS; i++) {
+ struct lpc32xx_ep *ep = &udc->ep[i];
+ ep->req_pending = 0;
+ }
+}
+
+/* Send a ZLP on EP0 */
+static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
+{
+ udc_write_hwep(udc, EP_IN, NULL, 0);
+}
+
+/* Get current frame number */
+static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
+{
+ u16 flo, fhi;
+
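+ /* The 11-bit frame number is returned one byte per read, low byte first */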
+ udc_protocol_cmd_w(udc, CMD_RD_FRAME);
+ flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
+ fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
+
+ return (fhi << 8) | flo;
+}
+
+/* Set the device as configured - enables all endpoints */
+static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
+{
+ udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
+}
+
+/* Set the device as unconfigured - disables all endpoints */
+static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
+{
+ udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
+}
+
+/* reinit == restore initial software state */
+static void udc_reinit(struct lpc32xx_udc *udc)
+{
+ u32 i;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct lpc32xx_ep *ep = &udc->ep[i];
+
+ if (i != 0)
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
+ INIT_LIST_HEAD(&ep->queue);
+ ep->req_pending = 0;
+ }
+
+ udc->ep0state = WAIT_FOR_SETUP;
+}
+
+/* Must be called with lock */
+static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
+{
+ struct lpc32xx_udc *udc = ep->udc;
+
+ list_del_init(&req->queue);
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (ep->lep) {
+ usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
+
+ /* Free DDs */
+ udc_dd_free(udc, req->dd_desc_ptr);
+ }
+
+ if (status && status != -ESHUTDOWN)
+ ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
+
+ ep->req_pending = 0;
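+ /* Drop the lock around the callback; the completion handler may call
+ * back into the driver, e.g. to queue another request */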
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+}
+
+/* Must be called with lock */
+static void nuke(struct lpc32xx_ep *ep, int status)
+{
+ struct lpc32xx_request *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
+ done(ep, req, status);
+ }
+
+ if (status == -ESHUTDOWN) {
+ uda_disable_hwepint(ep->udc, ep->hwep_num);
+ udc_disable_hwep(ep->udc, ep->hwep_num);
+ }
+}
+
+/* IN endpoint 0 transfer */
+static int udc_ep0_in_req(struct lpc32xx_udc *udc)
+{
+ struct lpc32xx_request *req;
+ struct lpc32xx_ep *ep0 = &udc->ep[0];
+ u32 tsend, ts = 0;
+
+ if (list_empty(&ep0->queue))
+ /* Nothing to send */
+ return 0;
+ else
+ req = list_entry(ep0->queue.next, struct lpc32xx_request,
+ queue);
+
+ tsend = ts = req->req.length - req->req.actual;
+ if (ts == 0) {
+ /* Send a ZLP */
+ udc_ep0_send_zlp(udc);
+ done(ep0, req, 0);
+ return 1;
+ } else if (ts > ep0->ep.maxpacket)
+ ts = ep0->ep.maxpacket; /* Just send what we can */
+
+ /* Write data to the EP0 FIFO and start transfer */
+ udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
+
+ /* Increment data pointer */
+ req->req.actual += ts;
+
+ if (tsend >= ep0->ep.maxpacket)
+ return 0; /* Stay in data transfer state */
+
+ /* Transfer request is complete */
+ udc->ep0state = WAIT_FOR_SETUP;
+ done(ep0, req, 0);
+ return 1;
+}
+
+/* OUT endpoint 0 transfer */
+static int udc_ep0_out_req(struct lpc32xx_udc *udc)
+{
+ struct lpc32xx_request *req;
+ struct lpc32xx_ep *ep0 = &udc->ep[0];
+ u32 tr, bufferspace;
+
+ if (list_empty(&ep0->queue))
+ return 0;
+ else
+ req = list_entry(ep0->queue.next, struct lpc32xx_request,
+ queue);
+
+ if (req) {
+ if (req->req.length == 0) {
+ /* Just dequeue request */
+ done(ep0, req, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return 1;
+ }
+
+ /* Get data from FIFO */
+ bufferspace = req->req.length - req->req.actual;
+ if (bufferspace > ep0->ep.maxpacket)
+ bufferspace = ep0->ep.maxpacket;
+
+ /* Copy data to buffer */
+ prefetchw(req->req.buf + req->req.actual);
+ tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
+ bufferspace);
+ req->req.actual += bufferspace;
+
+ if (tr < ep0->ep.maxpacket) {
+ /* This is the last packet */
+ done(ep0, req, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Must be called with lock */
+static void stop_activity(struct lpc32xx_udc *udc)
+{
+ struct usb_gadget_driver *driver = udc->driver;
+ int i;
+
+ if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->suspended = 0;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ struct lpc32xx_ep *ep = &udc->ep[i];
+ nuke(ep, -ESHUTDOWN);
+ }
+ if (driver) {
+ spin_unlock(&udc->lock);
+ driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+
+ isp1301_pullup_enable(udc, 0, 0);
+ udc_disable(udc);
+ udc_reinit(udc);
+}
+
+/*
+ * Activate or kill host pullup
+ * Can be called with or without lock
+ */
+static void pullup(struct lpc32xx_udc *udc, int is_on)
+{
+ if (!udc->clocked)
+ return;
+
+ if (!udc->enabled || !udc->vbus)
+ is_on = 0;
+
+ if (is_on != udc->pullup)
+ isp1301_pullup_enable(udc, is_on, 0);
+}
+
+/* Must be called without lock */
+static int lpc32xx_ep_disable(struct usb_ep *_ep)
+{
+ struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
+ struct lpc32xx_udc *udc = ep->udc;
+ unsigned long flags;
+
+ if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
+ return -EINVAL;
+ spin_lock_irqsave(&udc->lock, flags);
+
+ nuke(ep, -ESHUTDOWN);
+
+ /* Clear all DMA statuses for this EP */
+ udc_ep_dma_disable(udc, ep->hwep_num);
+ writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
+ writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
+ writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
+ writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
+
+ /* Remove the DD pointer in the UDCA */
+ udc->udca_v_base[ep->hwep_num] = 0;
+
+ /* Disable and reset endpoint and interrupt */
+ uda_clear_hwepint(udc, ep->hwep_num);
+ udc_unrealize_hwep(udc, ep->hwep_num);
+
+ ep->hwep_num = 0;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ atomic_dec(&udc->enabled_ep_cnt);
+ wake_up(&udc->ep_disable_wait_queue);
+
+ return 0;
+}
+
+/* Must be called without lock */
+static int lpc32xx_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
+ struct lpc32xx_udc *udc;
+ u16 maxpacket;
+ u32 tmp;
+ unsigned long flags;
+
+ /* Verify EP data */
+ if ((!_ep) || (!ep) || (!desc) ||
+ (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ udc = ep->udc;
+ maxpacket = usb_endpoint_maxp(desc);
+ if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
+ dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
+ return -EINVAL;
+ }
+
+ /* Don't touch EP0 */
+ if (ep->hwep_num_base == 0) {
+ dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
+ return -EINVAL;
+ }
+
+ /* Is driver ready? */
+ if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
+ dev_dbg(udc->dev, "bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ switch (tmp) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ return -EINVAL;
+
+ case USB_ENDPOINT_XFER_INT:
+ if (maxpacket > ep->maxpacket) {
+ dev_dbg(udc->dev,
+ "Bad INT endpoint maxpacket %d\n", maxpacket);
+ return -EINVAL;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ switch (maxpacket) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+
+ default:
+ dev_dbg(udc->dev,
+ "Bad BULK endpoint maxpacket %d\n", maxpacket);
+ return -EINVAL;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ break;
+ }
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Initialize endpoint to match the selected descriptor */
+ ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
+ ep->ep.maxpacket = maxpacket;
+
+ /* Map hardware endpoint from base and direction */
+ if (ep->is_in)
+ /* IN endpoints are offset 1 from the OUT endpoint */
+ ep->hwep_num = ep->hwep_num_base + EP_IN;
+ else
+ ep->hwep_num = ep->hwep_num_base;
+
+ ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
+ ep->hwep_num, maxpacket, (ep->is_in == 1));
+
+ /* Realize the endpoint; the interrupt is enabled later when
+ * buffers are queued, and IN EPs will NAK until buffers are ready */
+ udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
+ udc_clr_buffer_hwep(udc, ep->hwep_num);
+ uda_disable_hwepint(udc, ep->hwep_num);
+ udc_clrstall_hwep(udc, ep->hwep_num);
+
+ /* Clear all DMA statuses for this EP */
+ udc_ep_dma_disable(udc, ep->hwep_num);
+ writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
+ writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
+ writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
+ writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ atomic_inc(&udc->enabled_ep_cnt);
+ return 0;
+}
+
+/*
+ * Allocate a USB request
+ * Can be called with or without lock
+ */
+static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct lpc32xx_request *req;
+
+ req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+/*
+ * De-allocate a USB request
+ * Can be called with or without lock
+ */
+static void lpc32xx_ep_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct lpc32xx_request *req;
+
+ req = container_of(_req, struct lpc32xx_request, req);
+ BUG_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+/* Must be called without lock */
+static int lpc32xx_ep_queue(struct usb_ep *_ep,
+ struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct lpc32xx_request *req;
+ struct lpc32xx_ep *ep;
+ struct lpc32xx_udc *udc;
+ unsigned long flags;
+ int status = 0;
+
+ req = container_of(_req, struct lpc32xx_request, req);
+ ep = container_of(_ep, struct lpc32xx_ep, ep);
+
+ if (!_ep || !_req || !_req->complete || !_req->buf ||
+ !list_empty(&req->queue))
+ return -EINVAL;
+
+ udc = ep->udc;
+
+ if (udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -EPIPE;
+
+ if (ep->lep) {
+ struct lpc32xx_usbd_dd_gad *dd;
+
+ status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in);
+ if (status)
+ return status;
+
+ /* For the request, build a list of DDs */
+ dd = udc_dd_alloc(udc);
+ if (!dd) {
+ /* Error allocating DD */
+ return -ENOMEM;
+ }
+ req->dd_desc_ptr = dd;
+
+ /* Setup the DMA descriptor */
+ dd->dd_next_phy = dd->dd_next_v = 0;
+ dd->dd_buffer_addr = req->req.dma;
+ dd->dd_status = 0;
+
+ /* Special handling for ISO EPs */
+ if (ep->eptype == EP_ISO_TYPE) {
+ dd->dd_setup = DD_SETUP_ISO_EP |
+ DD_SETUP_PACKETLEN(0) |
+ DD_SETUP_DMALENBYTES(1);
+ dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
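+ /* 24 == offsetof(struct lpc32xx_usbd_dd_gad, iso_status) */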
+ if (ep->is_in)
+ dd->iso_status[0] = req->req.length;
+ else
+ dd->iso_status[0] = 0;
+ } else
+ dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
+ DD_SETUP_DMALENBYTES(req->req.length);
+ }
+
+ ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
+ _req, _req->length, _req->buf, ep->is_in, _req->zero);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+ req->send_zlp = _req->zero;
+
+ /* Kickstart empty queues */
+ if (list_empty(&ep->queue)) {
+ list_add_tail(&req->queue, &ep->queue);
+
+ if (ep->hwep_num_base == 0) {
+ /* Handle expected data direction */
+ if (ep->is_in) {
+ /* IN packet to host */
+ udc->ep0state = DATA_IN;
+ status = udc_ep0_in_req(udc);
+ } else {
+ /* OUT packet from host */
+ udc->ep0state = DATA_OUT;
+ status = udc_ep0_out_req(udc);
+ }
+ } else if (ep->is_in) {
+ /* IN packet to host and kick off transfer */
+ if (!ep->req_pending)
+ udc_ep_in_req_dma(udc, ep);
+		} else {
+			/* OUT packet from host and kick off list */
+			if (!ep->req_pending)
+				udc_ep_out_req_dma(udc, ep);
+		}
+ } else
+ list_add_tail(&req->queue, &ep->queue);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return (status < 0) ? status : 0;
+}
+
+/* Must be called without lock */
+static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct lpc32xx_ep *ep;
+ struct lpc32xx_request *req = NULL, *iter;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct lpc32xx_ep, ep);
+ if (!_ep || ep->hwep_num_base == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return -EINVAL;
+ }
+
+ done(ep, req, -ECONNRESET);
+
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ return 0;
+}
+
+/* Must be called without lock */
+static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
+ struct lpc32xx_udc *udc;
+ unsigned long flags;
+
+ if ((!ep) || (ep->hwep_num <= 1))
+ return -EINVAL;
+
+ /* Don't halt an IN EP */
+ if (ep->is_in)
+ return -EAGAIN;
+
+ udc = ep->udc;
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (value == 1) {
+ /* stall */
+ udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
+ DAT_WR_BYTE(EP_STAT_ST));
+ } else {
+ /* End stall */
+ ep->wedge = 0;
+ udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
+ DAT_WR_BYTE(0));
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/* Set the halt feature; clear requests are ignored */
+static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
+{
+ struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
+
+ if (!_ep || !ep->udc)
+ return -EINVAL;
+
+ ep->wedge = 1;
+
+ return usb_ep_set_halt(_ep);
+}
+
+static const struct usb_ep_ops lpc32xx_ep_ops = {
+ .enable = lpc32xx_ep_enable,
+ .disable = lpc32xx_ep_disable,
+ .alloc_request = lpc32xx_ep_alloc_request,
+ .free_request = lpc32xx_ep_free_request,
+ .queue = lpc32xx_ep_queue,
+ .dequeue = lpc32xx_ep_dequeue,
+ .set_halt = lpc32xx_ep_set_halt,
+ .set_wedge = lpc32xx_ep_set_wedge,
+};
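+
+/*
+ * Illustrative sketch (not part of this driver): a gadget function driver
+ * reaches the ops above through the usb_ep_*() wrappers. Assuming 'ep' was
+ * obtained via usb_ep_autoconfig() and 'buf', 'len' and 'my_complete' are
+ * the caller's, a typical request cycle looks like:
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	if (usb_ep_queue(ep, req, GFP_KERNEL) < 0)
+ *		usb_ep_free_request(ep, req);
+ */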
+
+/* Send a ZLP on a non-0 IN EP */
+static void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+{
+ /* Clear EP status */
+ udc_clearep_getsts(udc, ep->hwep_num);
+
+ /* Send ZLP via FIFO mechanism */
+ udc_write_hwep(udc, ep->hwep_num, NULL, 0);
+}
+
+/*
+ * Handle EP completion for ZLP
+ * This function will only be called when a delayed ZLP needs to be sent out
+ * after a DMA transfer has filled both buffers.
+ */
+static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+{
+ u32 epstatus;
+ struct lpc32xx_request *req;
+
+ if (ep->hwep_num <= 0)
+ return;
+
+ uda_clear_hwepint(udc, ep->hwep_num);
+
+ /* If this interrupt isn't enabled, return now */
+ if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
+ return;
+
+ /* Get endpoint status */
+ epstatus = udc_clearep_getsts(udc, ep->hwep_num);
+
+ /*
+ * This should never happen, but protect against writing to the
+ * buffer when full.
+ */
+ if (epstatus & EP_SEL_F)
+ return;
+
+ if (ep->is_in) {
+ udc_send_in_zlp(udc, ep);
+ uda_disable_hwepint(udc, ep->hwep_num);
+ } else
+ return;
+
+	/* If there isn't a request waiting, something went wrong */
+	if (!list_empty(&ep->queue)) {
+		req = list_first_entry(&ep->queue, struct lpc32xx_request,
+				       queue);
+ done(ep, req, 0);
+
+ /* Start another request if ready */
+ if (!list_empty(&ep->queue)) {
+ if (ep->is_in)
+ udc_ep_in_req_dma(udc, ep);
+ else
+ udc_ep_out_req_dma(udc, ep);
+ } else
+ ep->req_pending = 0;
+ }
+}
+
+/* DMA end of transfer completion */
+static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+{
+ u32 status;
+ struct lpc32xx_request *req;
+ struct lpc32xx_usbd_dd_gad *dd;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ ep->totalints++;
+#endif
+
+	if (list_empty(&ep->queue)) {
+		ep_err(ep, "DMA interrupt on no req!\n");
+		return;
+	}
+	req = list_first_entry(&ep->queue, struct lpc32xx_request, queue);
+ dd = req->dd_desc_ptr;
+
+ /* DMA descriptor should always be retired for this call */
+ if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
+ ep_warn(ep, "DMA descriptor did not retire\n");
+
+ /* Disable DMA */
+ udc_ep_dma_disable(udc, ep->hwep_num);
+ writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
+ writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));
+
+ /* System error? */
+ if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
+ (1 << ep->hwep_num)) {
+ writel((1 << ep->hwep_num),
+ USBD_SYSERRTINTCLR(udc->udp_baseaddr));
+ ep_err(ep, "AHB critical error!\n");
+ ep->req_pending = 0;
+
+ /* The error could have occurred on a packet of a multipacket
+ * transfer, so recovering the transfer is not possible. Close
+ * the request with an error */
+ done(ep, req, -ECONNABORTED);
+ return;
+ }
+
+ /* Handle the current DD's status */
+ status = dd->dd_status;
+ switch (status & DD_STATUS_STS_MASK) {
+ case DD_STATUS_STS_NS:
+ /* DD not serviced? This shouldn't happen! */
+ ep->req_pending = 0;
+ ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
+ status);
+
+ done(ep, req, -ECONNABORTED);
+ return;
+
+ case DD_STATUS_STS_BS:
+ /* Interrupt only fires on EOT - This shouldn't happen! */
+ ep->req_pending = 0;
+ ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
+ status);
+ done(ep, req, -ECONNABORTED);
+ return;
+
+ case DD_STATUS_STS_NC:
+ case DD_STATUS_STS_DUR:
+ /* Really just a short packet, not an underrun */
+ /* This is a good status and what we expect */
+ break;
+
+ default:
+ /* Data overrun, system error, or unknown */
+ ep->req_pending = 0;
+ ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
+ status);
+ done(ep, req, -ECONNABORTED);
+ return;
+ }
+
+ /* ISO endpoints are handled differently */
+ if (ep->eptype == EP_ISO_TYPE) {
+ if (ep->is_in)
+ req->req.actual = req->req.length;
+ else
+ req->req.actual = dd->iso_status[0] & 0xFFFF;
+ } else
+ req->req.actual += DD_STATUS_CURDMACNT(status);
+
+	/* Send a ZLP if necessary. This is needed for non-interrupt
+	 * transfers whose length is an exact multiple of MAXP */
+ if (req->send_zlp) {
+ /*
+ * If at least 1 buffer is available, send the ZLP now.
+ * Otherwise, the ZLP send needs to be deferred until a
+ * buffer is available.
+ */
+ if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
+ udc_clearep_getsts(udc, ep->hwep_num);
+ uda_enable_hwepint(udc, ep->hwep_num);
+ udc_clearep_getsts(udc, ep->hwep_num);
+
+ /* Let the EP interrupt handle the ZLP */
+ return;
+ } else
+ udc_send_in_zlp(udc, ep);
+ }
+
+ /* Transfer request is complete */
+ done(ep, req, 0);
+
+ /* Start another request if ready */
+ udc_clearep_getsts(udc, ep->hwep_num);
+ if (!list_empty((&ep->queue))) {
+ if (ep->is_in)
+ udc_ep_in_req_dma(udc, ep);
+ else
+ udc_ep_out_req_dma(udc, ep);
+ } else
+ ep->req_pending = 0;
+}
+
+/*
+ *
+ * Endpoint 0 functions
+ *
+ */
+static void udc_handle_dev(struct lpc32xx_udc *udc)
+{
+ u32 tmp;
+
+ udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
+ tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
+
+ if (tmp & DEV_RST)
+ uda_usb_reset(udc);
+ else if (tmp & DEV_CON_CH)
+ uda_power_event(udc, (tmp & DEV_CON));
+ else if (tmp & DEV_SUS_CH) {
+ if (tmp & DEV_SUS) {
+ if (udc->vbus == 0)
+ stop_activity(udc);
+ else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
+ udc->driver) {
+ /* Power down transceiver */
+ udc->poweron = 0;
+ schedule_work(&udc->pullup_job);
+ uda_resm_susp_event(udc, 1);
+ }
+ } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
+ udc->driver && udc->vbus) {
+ uda_resm_susp_event(udc, 0);
+ /* Power up transceiver */
+ udc->poweron = 1;
+ schedule_work(&udc->pullup_job);
+ }
+ }
+}
+
+static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
+{
+ struct lpc32xx_ep *ep;
+ u32 ep0buff = 0, tmp;
+
+ switch (reqtype & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
+ break; /* Not supported */
+
+ case USB_RECIP_DEVICE:
+ ep0buff = udc->gadget.is_selfpowered;
+ if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
+ ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
+ break;
+
+ case USB_RECIP_ENDPOINT:
+		tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
+		if ((tmp == 0) || (tmp >= NUM_ENDPOINTS))
+			return -EOPNOTSUPP;
+		ep = &udc->ep[tmp];
+
+ if (wIndex & USB_DIR_IN) {
+ if (!ep->is_in)
+ return -EOPNOTSUPP; /* Something's wrong */
+ } else if (ep->is_in)
+ return -EOPNOTSUPP; /* Not an IN endpoint */
+
+ /* Get status of the endpoint */
+ udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
+ tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
+
+ if (tmp & EP_SEL_ST)
+ ep0buff = (1 << USB_ENDPOINT_HALT);
+ else
+ ep0buff = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Return data */
+ udc_write_hwep(udc, EP_IN, &ep0buff, 2);
+
+ return 0;
+}
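+
+/*
+ * For reference, the two bytes written above follow the standard USB
+ * GET_STATUS layout (USB 2.0 spec, ch. 9.4.5):
+ *
+ *	device status:   bit 0 = self-powered, bit 1 = remote wakeup
+ *	endpoint status: bit 0 = halted
+ *	all other bits:  reserved, zero
+ */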
+
+static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
+{
+ struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
+ struct usb_ctrlrequest ctrlpkt;
+ int i, bytes;
+ u16 wIndex, wValue, reqtype, req, tmp;
+
+ /* Nuke previous transfers */
+ nuke(ep0, -EPROTO);
+
+ /* Get setup packet */
+ bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
+ if (bytes != 8) {
+		ep_warn(ep0, "Incorrectly sized setup packet (should be 8, got %d)!\n",
+ bytes);
+ return;
+ }
+
+ /* Native endianness */
+ wIndex = le16_to_cpu(ctrlpkt.wIndex);
+ wValue = le16_to_cpu(ctrlpkt.wValue);
+ reqtype = le16_to_cpu(ctrlpkt.bRequestType);
+
+ /* Set direction of EP0 */
+ if (likely(reqtype & USB_DIR_IN))
+ ep0->is_in = 1;
+ else
+ ep0->is_in = 0;
+
+ /* Handle SETUP packet */
+ req = le16_to_cpu(ctrlpkt.bRequest);
+ switch (req) {
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ switch (reqtype) {
+ case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
+ if (wValue != USB_DEVICE_REMOTE_WAKEUP)
+ goto stall; /* Nothing else handled */
+
+ /* Tell board about event */
+ if (req == USB_REQ_CLEAR_FEATURE)
+ udc->dev_status &=
+ ~(1 << USB_DEVICE_REMOTE_WAKEUP);
+ else
+ udc->dev_status |=
+ (1 << USB_DEVICE_REMOTE_WAKEUP);
+ uda_remwkp_cgh(udc);
+ goto zlp_send;
+
+ case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
+ tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if ((wValue != USB_ENDPOINT_HALT) ||
+ (tmp >= NUM_ENDPOINTS))
+ break;
+
+ /* Find hardware endpoint from logical endpoint */
+ ep = &udc->ep[tmp];
+ tmp = ep->hwep_num;
+ if (tmp == 0)
+ break;
+
+ if (req == USB_REQ_SET_FEATURE)
+ udc_stall_hwep(udc, tmp);
+ else if (!ep->wedge)
+ udc_clrstall_hwep(udc, tmp);
+
+ goto zlp_send;
+
+ default:
+ break;
+ }
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
+ udc_set_address(udc, wValue);
+ goto zlp_send;
+ }
+ break;
+
+ case USB_REQ_GET_STATUS:
+ udc_get_status(udc, reqtype, wIndex);
+ return;
+
+ default:
+ break; /* Let GadgetFS handle the descriptor instead */
+ }
+
+ if (likely(udc->driver)) {
+		/* device-to-host (IN) or no-data setup command; process
+		 * immediately */
+ spin_unlock(&udc->lock);
+ i = udc->driver->setup(&udc->gadget, &ctrlpkt);
+
+ spin_lock(&udc->lock);
+ if (req == USB_REQ_SET_CONFIGURATION) {
+ /* Configuration is set after endpoints are realized */
+ if (wValue) {
+ /* Set configuration */
+ udc_set_device_configured(udc);
+
+ udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
+ DAT_WR_BYTE(AP_CLK |
+ INAK_BI | INAK_II));
+ } else {
+ /* Clear configuration */
+ udc_set_device_unconfigured(udc);
+
+ /* Disable NAK interrupts */
+ udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
+ DAT_WR_BYTE(AP_CLK));
+ }
+ }
+
+ if (i < 0) {
+ /* setup processing failed, force stall */
+ dev_dbg(udc->dev,
+ "req %02x.%02x protocol STALL; stat %d\n",
+ reqtype, req, i);
+ udc->ep0state = WAIT_FOR_SETUP;
+ goto stall;
+ }
+ }
+
+ if (!ep0->is_in)
+ udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
+
+ return;
+
+stall:
+ udc_stall_hwep(udc, EP_IN);
+ return;
+
+zlp_send:
+ udc_ep0_send_zlp(udc);
+ return;
+}
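+
+/*
+ * A simplified sketch of the EP0 state flow driven by this handler and
+ * the two below (derived from the ep0state transitions in this file):
+ *
+ *	WAIT_FOR_SETUP --SETUP, IN data stage-->  DATA_IN
+ *	WAIT_FOR_SETUP --SETUP, OUT data stage--> DATA_OUT
+ *	DATA_IN/DATA_OUT --completion, stall or error--> WAIT_FOR_SETUP
+ */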
+
+/* IN endpoint 0 transfer */
+static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
+{
+ struct lpc32xx_ep *ep0 = &udc->ep[0];
+ u32 epstatus;
+
+ /* Clear EP interrupt */
+ epstatus = udc_clearep_getsts(udc, EP_IN);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ ep0->totalints++;
+#endif
+
+ /* Stalled? Clear stall and reset buffers */
+ if (epstatus & EP_SEL_ST) {
+ udc_clrstall_hwep(udc, EP_IN);
+ nuke(ep0, -ECONNABORTED);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return;
+ }
+
+ /* Is a buffer available? */
+ if (!(epstatus & EP_SEL_F)) {
+ /* Handle based on current state */
+ if (udc->ep0state == DATA_IN)
+ udc_ep0_in_req(udc);
+ else {
+			/* Unknown state for EP0 or end of DATA IN phase */
+ nuke(ep0, -ECONNABORTED);
+ udc->ep0state = WAIT_FOR_SETUP;
+ }
+ }
+}
+
+/* OUT endpoint 0 transfer */
+static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
+{
+ struct lpc32xx_ep *ep0 = &udc->ep[0];
+ u32 epstatus;
+
+ /* Clear EP interrupt */
+ epstatus = udc_clearep_getsts(udc, EP_OUT);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ ep0->totalints++;
+#endif
+
+ /* Stalled? */
+ if (epstatus & EP_SEL_ST) {
+ udc_clrstall_hwep(udc, EP_OUT);
+ nuke(ep0, -ECONNABORTED);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return;
+ }
+
+ /* A NAK may occur if a packet couldn't be received yet */
+ if (epstatus & EP_SEL_EPN)
+ return;
+ /* Setup packet incoming? */
+ if (epstatus & EP_SEL_STP) {
+ nuke(ep0, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ }
+
+ /* Data available? */
+ if (epstatus & EP_SEL_F)
+ /* Handle based on current state */
+ switch (udc->ep0state) {
+ case WAIT_FOR_SETUP:
+ udc_handle_ep0_setup(udc);
+ break;
+
+ case DATA_OUT:
+ udc_ep0_out_req(udc);
+ break;
+
+ default:
+ /* Unknown state for EP0 */
+ nuke(ep0, -ECONNABORTED);
+ udc->ep0state = WAIT_FOR_SETUP;
+ }
+}
+
+/* Must be called without lock */
+static int lpc32xx_get_frame(struct usb_gadget *gadget)
+{
+ int frame;
+ unsigned long flags;
+ struct lpc32xx_udc *udc = to_udc(gadget);
+
+ if (!udc->clocked)
+ return -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ frame = (int) udc_get_current_frame(udc);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return frame;
+}
+
+static int lpc32xx_wakeup(struct usb_gadget *gadget)
+{
+ return -ENOTSUPP;
+}
+
+static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ gadget->is_selfpowered = (is_on != 0);
+
+ return 0;
+}
+
+/*
+ * vbus is here! turn everything on that's ready
+ * Must be called without lock
+ */
+static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ unsigned long flags;
+ struct lpc32xx_udc *udc = to_udc(gadget);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Doesn't need lock */
+ if (udc->driver) {
+ udc_clk_set(udc, 1);
+ udc_enable(udc);
+ pullup(udc, is_active);
+ } else {
+ stop_activity(udc);
+ pullup(udc, 0);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ /*
+ * Wait for all the endpoints to disable,
+ * before disabling clocks. Don't wait if
+ * endpoints are not enabled.
+ */
+ if (atomic_read(&udc->enabled_ep_cnt))
+ wait_event_interruptible(udc->ep_disable_wait_queue,
+ (atomic_read(&udc->enabled_ep_cnt) == 0));
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ udc_clk_set(udc, 0);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
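+
+/*
+ * The wait above pairs with the endpoint disable path, which is expected
+ * to decrement enabled_ep_cnt and wake ep_disable_wait_queue; clocks are
+ * only gated once every endpoint has actually finished disabling.
+ */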
+
+/* Can be called with or without lock */
+static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct lpc32xx_udc *udc = to_udc(gadget);
+
+ /* Doesn't need lock */
+ pullup(udc, is_on);
+
+ return 0;
+}
+
+static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
+static int lpc32xx_stop(struct usb_gadget *);
+
+static const struct usb_gadget_ops lpc32xx_udc_ops = {
+ .get_frame = lpc32xx_get_frame,
+ .wakeup = lpc32xx_wakeup,
+ .set_selfpowered = lpc32xx_set_selfpowered,
+ .vbus_session = lpc32xx_vbus_session,
+ .pullup = lpc32xx_pullup,
+ .udc_start = lpc32xx_start,
+ .udc_stop = lpc32xx_stop,
+};
+
+static void nop_release(struct device *dev)
+{
+ /* nothing to free */
+}
+
+static const struct lpc32xx_udc controller_template = {
+ .gadget = {
+ .ops = &lpc32xx_udc_ops,
+ .name = driver_name,
+ .dev = {
+ .init_name = "gadget",
+ .release = nop_release,
+ }
+ },
+ .ep[0] = {
+ .ep = {
+ .name = "ep0",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 0,
+ .hwep_num = 0, /* Can be 0 or 1, has special handling */
+ .lep = 0,
+ .eptype = EP_CTL_TYPE,
+ },
+ .ep[1] = {
+ .ep = {
+ .name = "ep1-int",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 2,
+ .hwep_num = 0, /* 2 or 3, will be set later */
+ .lep = 1,
+ .eptype = EP_INT_TYPE,
+ },
+ .ep[2] = {
+ .ep = {
+ .name = "ep2-bulk",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 4,
+ .hwep_num = 0, /* 4 or 5, will be set later */
+ .lep = 2,
+ .eptype = EP_BLK_TYPE,
+ },
+ .ep[3] = {
+ .ep = {
+ .name = "ep3-iso",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 1023,
+ .hwep_num_base = 6,
+ .hwep_num = 0, /* 6 or 7, will be set later */
+ .lep = 3,
+ .eptype = EP_ISO_TYPE,
+ },
+ .ep[4] = {
+ .ep = {
+ .name = "ep4-int",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 8,
+ .hwep_num = 0, /* 8 or 9, will be set later */
+ .lep = 4,
+ .eptype = EP_INT_TYPE,
+ },
+ .ep[5] = {
+ .ep = {
+ .name = "ep5-bulk",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 10,
+ .hwep_num = 0, /* 10 or 11, will be set later */
+ .lep = 5,
+ .eptype = EP_BLK_TYPE,
+ },
+ .ep[6] = {
+ .ep = {
+ .name = "ep6-iso",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 1023,
+ .hwep_num_base = 12,
+ .hwep_num = 0, /* 12 or 13, will be set later */
+ .lep = 6,
+ .eptype = EP_ISO_TYPE,
+ },
+ .ep[7] = {
+ .ep = {
+ .name = "ep7-int",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 14,
+ .hwep_num = 0,
+ .lep = 7,
+ .eptype = EP_INT_TYPE,
+ },
+ .ep[8] = {
+ .ep = {
+ .name = "ep8-bulk",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 16,
+ .hwep_num = 0,
+ .lep = 8,
+ .eptype = EP_BLK_TYPE,
+ },
+ .ep[9] = {
+ .ep = {
+ .name = "ep9-iso",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 1023,
+ .hwep_num_base = 18,
+ .hwep_num = 0,
+ .lep = 9,
+ .eptype = EP_ISO_TYPE,
+ },
+ .ep[10] = {
+ .ep = {
+ .name = "ep10-int",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 20,
+ .hwep_num = 0,
+ .lep = 10,
+ .eptype = EP_INT_TYPE,
+ },
+ .ep[11] = {
+ .ep = {
+ .name = "ep11-bulk",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 22,
+ .hwep_num = 0,
+ .lep = 11,
+ .eptype = EP_BLK_TYPE,
+ },
+ .ep[12] = {
+ .ep = {
+ .name = "ep12-iso",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 1023,
+ .hwep_num_base = 24,
+ .hwep_num = 0,
+ .lep = 12,
+ .eptype = EP_ISO_TYPE,
+ },
+ .ep[13] = {
+ .ep = {
+ .name = "ep13-int",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 26,
+ .hwep_num = 0,
+ .lep = 13,
+ .eptype = EP_INT_TYPE,
+ },
+ .ep[14] = {
+ .ep = {
+ .name = "ep14-bulk",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 64,
+ .hwep_num_base = 28,
+ .hwep_num = 0,
+ .lep = 14,
+ .eptype = EP_BLK_TYPE,
+ },
+ .ep[15] = {
+ .ep = {
+ .name = "ep15-bulk",
+ .ops = &lpc32xx_ep_ops,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .maxpacket = 1023,
+ .hwep_num_base = 30,
+ .hwep_num = 0,
+ .lep = 15,
+ .eptype = EP_BLK_TYPE,
+ },
+};
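+
+/*
+ * As the hwep_num_base values above suggest, each logical endpoint n owns
+ * a pair of hardware endpoints starting at 2 * n; by convention the even
+ * number is the OUT direction and the odd number the IN direction, so
+ * hwep_num resolves to hwep_num_base or hwep_num_base + 1 once the
+ * endpoint's direction is known at enable time.
+ */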
+
+/* ISO and status interrupts */
+static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
+{
+ u32 tmp, devstat;
+ struct lpc32xx_udc *udc = _udc;
+
+ spin_lock(&udc->lock);
+
+ /* Read the device status register */
+ devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
+
+ devstat &= ~USBD_EP_FAST;
+ writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
+ devstat = devstat & udc->enabled_devints;
+
+ /* Device specific handling needed? */
+ if (devstat & USBD_DEV_STAT)
+ udc_handle_dev(udc);
+
+ /* Start of frame? (devstat & FRAME_INT):
+ * The frame interrupt isn't really needed for ISO support,
+ * as the driver will queue the necessary packets */
+
+ /* Error? */
+ if (devstat & ERR_INT) {
+		/* All types of errors, from cable removal during transfer to
+		 * misc protocol and bit errors. These are mostly informational,
+		 * as the USB hardware will work around them. If these errors
+		 * happen a lot, something is wrong. */
+ udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
+ tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
+ dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
+ }
+
+ spin_unlock(&udc->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* EP interrupts */
+static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
+{
+ u32 tmp;
+ struct lpc32xx_udc *udc = _udc;
+
+ spin_lock(&udc->lock);
+
+ /* Read the device status register */
+ writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
+
+ /* Endpoints */
+ tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
+
+ /* Special handling for EP0 */
+ if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
+ /* Handle EP0 IN */
+ if (tmp & (EP_MASK_SEL(0, EP_IN)))
+ udc_handle_ep0_in(udc);
+
+ /* Handle EP0 OUT */
+ if (tmp & (EP_MASK_SEL(0, EP_OUT)))
+ udc_handle_ep0_out(udc);
+ }
+
+ /* All other EPs */
+ if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
+ int i;
+
+ /* Handle other EP interrupts */
+ for (i = 1; i < NUM_ENDPOINTS; i++) {
+ if (tmp & (1 << udc->ep[i].hwep_num))
+ udc_handle_eps(udc, &udc->ep[i]);
+ }
+ }
+
+ spin_unlock(&udc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
+{
+ struct lpc32xx_udc *udc = _udc;
+
+ int i;
+ u32 tmp;
+
+ spin_lock(&udc->lock);
+
+ /* Handle EP DMA EOT interrupts */
+ tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
+ (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
+ readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
+ readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
+ for (i = 1; i < NUM_ENDPOINTS; i++) {
+ if (tmp & (1 << udc->ep[i].hwep_num))
+ udc_handle_dma_ep(udc, &udc->ep[i]);
+ }
+
+ spin_unlock(&udc->lock);
+
+ return IRQ_HANDLED;
+}
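+
+/*
+ * Interrupt routing recap for the three handlers above: the LP handler
+ * fields device-level status changes and bus errors, the HP handler
+ * fields per-endpoint FIFO events (including EP0), and the DMA handler
+ * fields end-of-transfer, new-DD-request and system-error sources for
+ * the DMA-capable endpoints.
+ */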
+
+/*
+ *
+ * VBUS detection, pullup handler, and Gadget cable state notification
+ *
+ */
+static void vbus_work(struct lpc32xx_udc *udc)
+{
+ u8 value;
+
+ if (udc->enabled != 0) {
+		/* Discharge VBUS quickly */
+		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
+
+		/* Give VBUS some time (100 ms) to discharge */
+		msleep(100);
+
+ /* Disable VBUS discharge resistor */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
+ OTG1_VBUS_DISCHRG);
+
+ /* Clear interrupt */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_LATCH |
+ ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
+ /* Get the VBUS status from the transceiver */
+ value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_SOURCE);
+
+ /* VBUS on or off? */
+ if (value & INT_SESS_VLD)
+ udc->vbus = 1;
+ else
+ udc->vbus = 0;
+
+ /* VBUS changed? */
+ if (udc->last_vbus != udc->vbus) {
+ udc->last_vbus = udc->vbus;
+ lpc32xx_vbus_session(&udc->gadget, udc->vbus);
+ }
+ }
+}
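+
+/*
+ * The ISP1301 accesses above use the transceiver's paired-address
+ * convention: writing a mask to reg | ISP1301_I2C_REG_CLEAR_ADDR clears
+ * those bits, while writing to the base address sets them. The 100 ms
+ * discharge delay is a pragmatic settling time; nothing in this code
+ * ties it to a datasheet requirement.
+ */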
+
+static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
+{
+ struct lpc32xx_udc *udc = _udc;
+
+ vbus_work(udc);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct lpc32xx_udc *udc = to_udc(gadget);
+
+ if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
+ dev_err(udc->dev, "bad parameter.\n");
+ return -EINVAL;
+ }
+
+ if (udc->driver) {
+ dev_err(udc->dev, "UDC already has a gadget driver\n");
+ return -EBUSY;
+ }
+
+ udc->driver = driver;
+ udc->gadget.dev.of_node = udc->dev->of_node;
+ udc->enabled = 1;
+ udc->gadget.is_selfpowered = 1;
+ udc->vbus = 0;
+
+ /* Force VBUS process once to check for cable insertion */
+ udc->last_vbus = udc->vbus = 0;
+ vbus_work(udc);
+
+ /* enable interrupts */
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_FALLING, INT_SESS_VLD | INT_VBUS_VLD);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_RISING, INT_SESS_VLD | INT_VBUS_VLD);
+
+ return 0;
+}
+
+static int lpc32xx_stop(struct usb_gadget *gadget)
+{
+ struct lpc32xx_udc *udc = to_udc(gadget);
+
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+ i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
+ ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
+ if (udc->clocked) {
+ spin_lock(&udc->lock);
+ stop_activity(udc);
+ spin_unlock(&udc->lock);
+
+ /*
+ * Wait for all the endpoints to disable,
+ * before disabling clocks. Don't wait if
+ * endpoints are not enabled.
+ */
+ if (atomic_read(&udc->enabled_ep_cnt))
+ wait_event_interruptible(udc->ep_disable_wait_queue,
+ (atomic_read(&udc->enabled_ep_cnt) == 0));
+
+ spin_lock(&udc->lock);
+ udc_clk_set(udc, 0);
+ spin_unlock(&udc->lock);
+ }
+
+ udc->enabled = 0;
+ udc->driver = NULL;
+
+ return 0;
+}
+
+static void lpc32xx_udc_shutdown(struct platform_device *dev)
+{
+ /* Force disconnect on reboot */
+ struct lpc32xx_udc *udc = platform_get_drvdata(dev);
+
+ pullup(udc, 0);
+}
+
+/*
+ * Callbacks to be overridden by options passed via OF (TODO)
+ */
+
+static void lpc32xx_usbd_conn_chg(int conn)
+{
+ /* Do nothing, it might be nice to enable an LED
+ * based on conn state being !0 */
+}
+
+static void lpc32xx_usbd_susp_chg(int susp)
+{
+ /* Device suspend if susp != 0 */
+}
+
+static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
+{
+ /* Enable or disable USB remote wakeup */
+}
+
+static struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
+ .vbus_drv_pol = 0,
+ .conn_chgb = &lpc32xx_usbd_conn_chg,
+ .susp_chgb = &lpc32xx_usbd_susp_chg,
+ .rmwk_chgb = &lpc32xx_rmwkup_chg,
+};
+
+static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
+
+static int lpc32xx_udc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct lpc32xx_udc *udc;
+ int retval, i;
+ dma_addr_t dma_handle;
+ struct device_node *isp1301_node;
+
+ udc = devm_kmemdup(dev, &controller_template, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+	for (i = 0; i < NUM_ENDPOINTS; i++)
+ udc->ep[i].udc = udc;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+
+ /* init software state */
+ udc->gadget.dev.parent = dev;
+ udc->pdev = pdev;
+ udc->dev = &pdev->dev;
+ udc->enabled = 0;
+
+ if (pdev->dev.of_node) {
+ isp1301_node = of_parse_phandle(pdev->dev.of_node,
+ "transceiver", 0);
+ } else {
+ isp1301_node = NULL;
+ }
+
+ udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
+ if (!udc->isp1301_i2c_client) {
+ return -EPROBE_DEFER;
+ }
+
+ dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
+ udc->isp1301_i2c_client->addr);
+
+ pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
+ retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ return retval;
+
+ udc->board = &lpc32xx_usbddata;
+
+ /*
+ * Resources are mapped as follows:
+ * IORESOURCE_MEM, base address and size of USB space
+ * IORESOURCE_IRQ, USB device low priority interrupt number
+ * IORESOURCE_IRQ, USB device high priority interrupt number
+ * IORESOURCE_IRQ, USB device interrupt number
+ * IORESOURCE_IRQ, USB transceiver interrupt number
+ */
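+
+	/*
+	 * A matching device tree node might look like the sketch below; the
+	 * unit address, register window size and interrupt numbers are
+	 * placeholders rather than authoritative values (see the
+	 * nxp,lpc3220-udc binding for the real ones):
+	 *
+	 *	usbd@31020000 {
+	 *		compatible = "nxp,lpc3220-udc";
+	 *		reg = <0x31020000 0x300>;
+	 *		interrupts = <61 0>, <62 0>, <60 0>, <58 0>;
+	 *		transceiver = <&isp1301>;
+	 *	};
+	 */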
+
+ spin_lock_init(&udc->lock);
+
+ /* Get IRQs */
+ for (i = 0; i < 4; i++) {
+ udc->udp_irq[i] = platform_get_irq(pdev, i);
+ if (udc->udp_irq[i] < 0)
+ return udc->udp_irq[i];
+ }
+
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(udc->udp_baseaddr)) {
+ dev_err(udc->dev, "IO map failure\n");
+ return PTR_ERR(udc->udp_baseaddr);
+ }
+
+ /* Get USB device clock */
+ udc->usb_slv_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc->usb_slv_clk)) {
+ dev_err(udc->dev, "failed to acquire USB device clock\n");
+ return PTR_ERR(udc->usb_slv_clk);
+ }
+
+ /* Enable USB device clock */
+ retval = clk_prepare_enable(udc->usb_slv_clk);
+ if (retval < 0) {
+ dev_err(udc->dev, "failed to start USB device clock\n");
+ return retval;
+ }
+
+ /* Setup deferred workqueue data */
+ udc->poweron = udc->pullup = 0;
+ INIT_WORK(&udc->pullup_job, pullup_work);
+#ifdef CONFIG_PM
+ INIT_WORK(&udc->power_job, power_work);
+#endif
+
+ /* All clocks are now on */
+ udc->clocked = 1;
+
+ isp1301_udc_configure(udc);
+ /* Allocate memory for the UDCA */
+ udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
+ &dma_handle,
+ (GFP_KERNEL | GFP_DMA));
+ if (!udc->udca_v_base) {
+ dev_err(udc->dev, "error getting UDCA region\n");
+ retval = -ENOMEM;
+ goto i2c_fail;
+ }
+ udc->udca_p_base = dma_handle;
+ dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
+ UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
+
+ /* Setup the DD DMA memory pool */
+ udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
+ sizeof(struct lpc32xx_usbd_dd_gad),
+ sizeof(u32), 0);
+ if (!udc->dd_cache) {
+ dev_err(udc->dev, "error getting DD DMA region\n");
+ retval = -ENOMEM;
+ goto dma_alloc_fail;
+ }
+
+ /* Clear USB peripheral and initialize gadget endpoints */
+ udc_disable(udc);
+ udc_reinit(udc);
+
+ /* Request IRQs - low and high priority USB device IRQs are routed to
+ * the same handler, while the DMA interrupt is routed elsewhere */
+ retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_LP],
+ lpc32xx_usb_lp_irq, 0, "udc_lp", udc);
+ if (retval < 0) {
+ dev_err(udc->dev, "LP request irq %d failed\n",
+ udc->udp_irq[IRQ_USB_LP]);
+ goto irq_req_fail;
+ }
+ retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_HP],
+ lpc32xx_usb_hp_irq, 0, "udc_hp", udc);
+ if (retval < 0) {
+ dev_err(udc->dev, "HP request irq %d failed\n",
+ udc->udp_irq[IRQ_USB_HP]);
+ goto irq_req_fail;
+ }
+
+ retval = devm_request_irq(dev, udc->udp_irq[IRQ_USB_DEVDMA],
+ lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
+ if (retval < 0) {
+ dev_err(udc->dev, "DEV request irq %d failed\n",
+ udc->udp_irq[IRQ_USB_DEVDMA]);
+ goto irq_req_fail;
+ }
+
+	/* The transceiver interrupt is used for VBUS detection and will
+	 * kick off the VBUS handler function */
+ retval = devm_request_threaded_irq(dev, udc->udp_irq[IRQ_USB_ATX], NULL,
+ lpc32xx_usb_vbus_irq, IRQF_ONESHOT,
+ "udc_otg", udc);
+ if (retval < 0) {
+ dev_err(udc->dev, "VBUS request irq %d failed\n",
+ udc->udp_irq[IRQ_USB_ATX]);
+ goto irq_req_fail;
+ }
+
+ /* Initialize wait queue */
+ init_waitqueue_head(&udc->ep_disable_wait_queue);
+ atomic_set(&udc->enabled_ep_cnt, 0);
+
+ retval = usb_add_gadget_udc(dev, &udc->gadget);
+ if (retval < 0)
+ goto add_gadget_fail;
+
+ dev_set_drvdata(dev, udc);
+ device_init_wakeup(dev, 1);
+ create_debug_file(udc);
+
+ /* Disable clocks for now */
+ udc_clk_set(udc, 0);
+
+ dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
+ return 0;
+
+add_gadget_fail:
+irq_req_fail:
+ dma_pool_destroy(udc->dd_cache);
+dma_alloc_fail:
+ dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
+ udc->udca_v_base, udc->udca_p_base);
+i2c_fail:
+ clk_disable_unprepare(udc->usb_slv_clk);
+ dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
+
+ return retval;
+}
+
+static int lpc32xx_udc_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+ if (udc->driver)
+ return -EBUSY;
+
+ udc_clk_set(udc, 1);
+ udc_disable(udc);
+ pullup(udc, 0);
+
+ device_init_wakeup(&pdev->dev, 0);
+ remove_debug_file(udc);
+
+ dma_pool_destroy(udc->dd_cache);
+ dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
+ udc->udca_v_base, udc->udca_p_base);
+
+ clk_disable_unprepare(udc->usb_slv_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
+
+ if (udc->clocked) {
+ /* Power down ISP */
+ udc->poweron = 0;
+ isp1301_set_powerstate(udc, 0);
+
+ /* Disable clocking */
+ udc_clk_set(udc, 0);
+
+		/* Keep the clock flag on so we know to re-enable clocks
+		 * on resume */
+ udc->clocked = 1;
+
+ /* Kill global USB clock */
+ clk_disable_unprepare(udc->usb_slv_clk);
+ }
+
+ return 0;
+}
+
+static int lpc32xx_udc_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
+
+ if (udc->clocked) {
+ /* Enable global USB clock */
+ clk_prepare_enable(udc->usb_slv_clk);
+
+ /* Enable clocking */
+ udc_clk_set(udc, 1);
+
+ /* ISP back to normal power mode */
+ udc->poweron = 1;
+ isp1301_set_powerstate(udc, 1);
+ }
+
+ return 0;
+}
+#else
+#define lpc32xx_udc_suspend NULL
+#define lpc32xx_udc_resume NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_udc_of_match[] = {
+ { .compatible = "nxp,lpc3220-udc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
+#endif
+
+static struct platform_driver lpc32xx_udc_driver = {
+ .remove = lpc32xx_udc_remove,
+ .shutdown = lpc32xx_udc_shutdown,
+ .suspend = lpc32xx_udc_suspend,
+ .resume = lpc32xx_udc_resume,
+ .driver = {
+ .name = driver_name,
+ .of_match_table = of_match_ptr(lpc32xx_udc_of_match),
+ },
+};
+
+module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);
+
+MODULE_DESCRIPTION("LPC32XX udc driver");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lpc32xx_udc");
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
new file mode 100644
index 000000000..d88871e71
--- /dev/null
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * M66592 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "m66592-udc.h"
+
+MODULE_DESCRIPTION("M66592 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:m66592_udc");
+
+#define DRIVER_VERSION "21 July 2009"
+
+static const char udc_name[] = "m66592_udc";
+static const char *m66592_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7"
+};
+
+static void disable_controller(struct m66592 *m66592);
+static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
+static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
+static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags);
+
+static void transfer_complete(struct m66592_ep *ep,
+ struct m66592_request *req, int status);
+
+/*-------------------------------------------------------------------------*/
+static inline u16 get_usb_speed(struct m66592 *m66592)
+{
+ return (m66592_read(m66592, M66592_DVSTCTR) & M66592_RHST);
+}
+
+static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = m66592_read(m66592, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
+ M66592_INTENB0);
+ m66592_bset(m66592, (1 << pipenum), reg);
+ m66592_write(m66592, tmp, M66592_INTENB0);
+}
+
+static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = m66592_read(m66592, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
+ M66592_INTENB0);
+ m66592_bclr(m66592, (1 << pipenum), reg);
+ m66592_write(m66592, tmp, M66592_INTENB0);
+}
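+
+/*
+ * Both helpers above use the same idiom: INTENB0 is saved, the
+ * BEMP/NRDY/BRDY master enables are masked while the per-pipe bit is
+ * changed, and INTENB0 is then restored, so the interrupt logic never
+ * sees a half-updated enable set.
+ */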
+
+static void m66592_usb_connect(struct m66592 *m66592)
+{
+ m66592_bset(m66592, M66592_CTRE, M66592_INTENB0);
+ m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
+ M66592_INTENB0);
+ m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
+
+ m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
+}
+
+static void m66592_usb_disconnect(struct m66592 *m66592)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
+ M66592_INTENB0);
+ m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+
+ m66592->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock(&m66592->lock);
+ m66592->driver->disconnect(&m66592->gadget);
+ spin_lock(&m66592->lock);
+
+ disable_controller(m66592);
+ INIT_LIST_HEAD(&m66592->ep[0].queue);
+}
+
+static inline u16 control_reg_get_pid(struct m66592 *m66592, u16 pipenum)
+{
+ u16 pid = 0;
+ unsigned long offset;
+
+ if (pipenum == 0)
+ pid = m66592_read(m66592, M66592_DCPCTR) & M66592_PID;
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ pid = m66592_read(m66592, offset) & M66592_PID;
+ } else
+		pr_err("unexpected pipe number (%d)\n", pipenum);
+
+ return pid;
+}
+
+static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum,
+ u16 pid)
+{
+ unsigned long offset;
+
+ if (pipenum == 0)
+ m66592_mdfy(m66592, pid, M66592_PID, M66592_DCPCTR);
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ m66592_mdfy(m66592, pid, M66592_PID, offset);
+ } else
+		pr_err("unexpected pipe number (%d)\n", pipenum);
+}
+
+static inline void pipe_start(struct m66592 *m66592, u16 pipenum)
+{
+ control_reg_set_pid(m66592, pipenum, M66592_PID_BUF);
+}
+
+static inline void pipe_stop(struct m66592 *m66592, u16 pipenum)
+{
+ control_reg_set_pid(m66592, pipenum, M66592_PID_NAK);
+}
+
+static inline void pipe_stall(struct m66592 *m66592, u16 pipenum)
+{
+ control_reg_set_pid(m66592, pipenum, M66592_PID_STALL);
+}
+
+static inline u16 control_reg_get(struct m66592 *m66592, u16 pipenum)
+{
+ u16 ret = 0;
+ unsigned long offset;
+
+ if (pipenum == 0)
+ ret = m66592_read(m66592, M66592_DCPCTR);
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ ret = m66592_read(m66592, offset);
+ } else
+		pr_err("unexpected pipe number (%d)\n", pipenum);
+
+ return ret;
+}
+
+static inline void control_reg_sqclr(struct m66592 *m66592, u16 pipenum)
+{
+ unsigned long offset;
+
+ pipe_stop(m66592, pipenum);
+
+ if (pipenum == 0)
+ m66592_bset(m66592, M66592_SQCLR, M66592_DCPCTR);
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ m66592_bset(m66592, M66592_SQCLR, offset);
+ } else
+		pr_err("unexpected pipe number (%d)\n", pipenum);
+}
+
+static inline int get_buffer_size(struct m66592 *m66592, u16 pipenum)
+{
+ u16 tmp;
+ int size;
+
+ if (pipenum == 0) {
+ tmp = m66592_read(m66592, M66592_DCPCFG);
+ if ((tmp & M66592_CNTMD) != 0)
+ size = 256;
+ else {
+ tmp = m66592_read(m66592, M66592_DCPMAXP);
+ size = tmp & M66592_MAXP;
+ }
+ } else {
+ m66592_write(m66592, pipenum, M66592_PIPESEL);
+ tmp = m66592_read(m66592, M66592_PIPECFG);
+ if ((tmp & M66592_CNTMD) != 0) {
+ tmp = m66592_read(m66592, M66592_PIPEBUF);
+ size = ((tmp >> 10) + 1) * 64;
+ } else {
+ tmp = m66592_read(m66592, M66592_PIPEMAXP);
+ size = tmp & M66592_MXPS;
+ }
+ }
+
+ return size;
+}
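+
+/*
+ * Worked example of the arithmetic above: for a pipe in continuous mode
+ * (CNTMD), the size is ((PIPEBUF >> 10) + 1) * 64 bytes, so the
+ * buf_bsize = 7 used for bulk pipes below would read back as
+ * (7 + 1) * 64 = 512 bytes; otherwise the pipe's MAXP value is used
+ * directly.
+ */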
+
+static inline void pipe_change(struct m66592 *m66592, u16 pipenum)
+{
+ struct m66592_ep *ep = m66592->pipenum2ep[pipenum];
+ unsigned short mbw;
+
+ if (ep->use_dma)
+ return;
+
+ m66592_mdfy(m66592, pipenum, M66592_CURPIPE, ep->fifosel);
+
+ ndelay(450);
+
+ if (m66592->pdata->on_chip)
+ mbw = M66592_MBW_32;
+ else
+ mbw = M66592_MBW_16;
+
+ m66592_bset(m66592, mbw, ep->fifosel);
+}
+
+static int pipe_buffer_setting(struct m66592 *m66592,
+ struct m66592_pipe_info *info)
+{
+ u16 bufnum = 0, buf_bsize = 0;
+ u16 pipecfg = 0;
+
+ if (info->pipe == 0)
+ return -EINVAL;
+
+ m66592_write(m66592, info->pipe, M66592_PIPESEL);
+
+ if (info->dir_in)
+ pipecfg |= M66592_DIR;
+ pipecfg |= info->type;
+ pipecfg |= info->epnum;
+ switch (info->type) {
+ case M66592_INT:
+ bufnum = 4 + (info->pipe - M66592_BASE_PIPENUM_INT);
+ buf_bsize = 0;
+ break;
+ case M66592_BULK:
+ /* isochronous pipes may be used as bulk pipes */
+ if (info->pipe >= M66592_BASE_PIPENUM_BULK)
+ bufnum = info->pipe - M66592_BASE_PIPENUM_BULK;
+ else
+ bufnum = info->pipe - M66592_BASE_PIPENUM_ISOC;
+
+ bufnum = M66592_BASE_BUFNUM + (bufnum * 16);
+ buf_bsize = 7;
+ pipecfg |= M66592_DBLB;
+ if (!info->dir_in)
+ pipecfg |= M66592_SHTNAK;
+ break;
+ case M66592_ISO:
+ bufnum = M66592_BASE_BUFNUM +
+ (info->pipe - M66592_BASE_PIPENUM_ISOC) * 16;
+ buf_bsize = 7;
+ break;
+ }
+
+ if (buf_bsize && ((bufnum + 16) >= M66592_MAX_BUFNUM)) {
+ pr_err("m66592 pipe memory is insufficient\n");
+ return -ENOMEM;
+ }
+
+ m66592_write(m66592, pipecfg, M66592_PIPECFG);
+ m66592_write(m66592, (buf_bsize << 10) | (bufnum), M66592_PIPEBUF);
+ m66592_write(m66592, info->maxpacket, M66592_PIPEMAXP);
+ if (info->interval)
+ info->interval--;
+ m66592_write(m66592, info->interval, M66592_PIPEPERI);
+
+ return 0;
+}
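+
+/*
+ * A sketch of the buffer map implied above: interrupt pipes use the
+ * fixed 64-byte slots 4..7, while each bulk or isochronous pipe claims
+ * 16 consecutive buffer numbers starting at M66592_BASE_BUFNUM (with
+ * buf_bsize = 7 and double buffering that is 2 * 512 bytes per pipe).
+ */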
+
+static void pipe_buffer_release(struct m66592 *m66592,
+ struct m66592_pipe_info *info)
+{
+ if (info->pipe == 0)
+ return;
+
+ if (is_bulk_pipe(info->pipe)) {
+ m66592->bulk--;
+ } else if (is_interrupt_pipe(info->pipe))
+ m66592->interrupt--;
+ else if (is_isoc_pipe(info->pipe)) {
+ m66592->isochronous--;
+ if (info->type == M66592_BULK)
+ m66592->bulk--;
+ } else
+		pr_err("ep_release: unexpected pipe number (%d)\n",
+				info->pipe);
+}
+
+static void pipe_initialize(struct m66592_ep *ep)
+{
+ struct m66592 *m66592 = ep->m66592;
+ unsigned short mbw;
+
+ m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel);
+
+ m66592_write(m66592, M66592_ACLRM, ep->pipectr);
+ m66592_write(m66592, 0, ep->pipectr);
+ m66592_write(m66592, M66592_SQCLR, ep->pipectr);
+ if (ep->use_dma) {
+ m66592_mdfy(m66592, ep->pipenum, M66592_CURPIPE, ep->fifosel);
+
+ ndelay(450);
+
+ if (m66592->pdata->on_chip)
+ mbw = M66592_MBW_32;
+ else
+ mbw = M66592_MBW_16;
+
+ m66592_bset(m66592, mbw, ep->fifosel);
+ }
+}
+
+static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
+ const struct usb_endpoint_descriptor *desc,
+ u16 pipenum, int dma)
+{
+ if ((pipenum != 0) && dma) {
+ if (m66592->num_dma == 0) {
+ m66592->num_dma++;
+ ep->use_dma = 1;
+ ep->fifoaddr = M66592_D0FIFO;
+ ep->fifosel = M66592_D0FIFOSEL;
+ ep->fifoctr = M66592_D0FIFOCTR;
+ ep->fifotrn = M66592_D0FIFOTRN;
+ } else if (!m66592->pdata->on_chip && m66592->num_dma == 1) {
+ m66592->num_dma++;
+ ep->use_dma = 1;
+ ep->fifoaddr = M66592_D1FIFO;
+ ep->fifosel = M66592_D1FIFOSEL;
+ ep->fifoctr = M66592_D1FIFOCTR;
+ ep->fifotrn = M66592_D1FIFOTRN;
+ } else {
+ ep->use_dma = 0;
+ ep->fifoaddr = M66592_CFIFO;
+ ep->fifosel = M66592_CFIFOSEL;
+ ep->fifoctr = M66592_CFIFOCTR;
+ ep->fifotrn = 0;
+ }
+ } else {
+ ep->use_dma = 0;
+ ep->fifoaddr = M66592_CFIFO;
+ ep->fifosel = M66592_CFIFOSEL;
+ ep->fifoctr = M66592_CFIFOCTR;
+ ep->fifotrn = 0;
+ }
+
+ ep->pipectr = get_pipectr_addr(pipenum);
+ ep->pipenum = pipenum;
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
+ m66592->pipenum2ep[pipenum] = ep;
+ m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep;
+ INIT_LIST_HEAD(&ep->queue);
+}
+
+static void m66592_ep_release(struct m66592_ep *ep)
+{
+ struct m66592 *m66592 = ep->m66592;
+ u16 pipenum = ep->pipenum;
+
+ if (pipenum == 0)
+ return;
+
+ if (ep->use_dma)
+ m66592->num_dma--;
+ ep->pipenum = 0;
+ ep->busy = 0;
+ ep->use_dma = 0;
+}
+
+static int alloc_pipe_config(struct m66592_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct m66592 *m66592 = ep->m66592;
+ struct m66592_pipe_info info;
+ int dma = 0;
+ int *counter;
+ int ret;
+
+ ep->ep.desc = desc;
+
+ BUG_ON(ep->pipenum);
+
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (m66592->bulk >= M66592_MAX_NUM_BULK) {
+ if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
+				pr_err("no free bulk pipe\n");
+ return -ENODEV;
+ } else {
+ info.pipe = M66592_BASE_PIPENUM_ISOC
+ + m66592->isochronous;
+ counter = &m66592->isochronous;
+ }
+ } else {
+ info.pipe = M66592_BASE_PIPENUM_BULK + m66592->bulk;
+ counter = &m66592->bulk;
+ }
+ info.type = M66592_BULK;
+ dma = 1;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (m66592->interrupt >= M66592_MAX_NUM_INT) {
+			pr_err("no free interrupt pipe\n");
+ return -ENODEV;
+ }
+ info.pipe = M66592_BASE_PIPENUM_INT + m66592->interrupt;
+ info.type = M66592_INT;
+ counter = &m66592->interrupt;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
+			pr_err("no free isochronous pipe\n");
+ return -ENODEV;
+ }
+ info.pipe = M66592_BASE_PIPENUM_ISOC + m66592->isochronous;
+ info.type = M66592_ISO;
+ counter = &m66592->isochronous;
+ break;
+ default:
+		pr_err("unexpected transfer type\n");
+ return -EINVAL;
+ }
+ ep->type = info.type;
+
+ info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ info.maxpacket = usb_endpoint_maxp(desc);
+ info.interval = desc->bInterval;
+ if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ info.dir_in = 1;
+ else
+ info.dir_in = 0;
+
+ ret = pipe_buffer_setting(m66592, &info);
+ if (ret < 0) {
+ pr_err("pipe_buffer_setting fail\n");
+ return ret;
+ }
+
+ (*counter)++;
+ if ((counter == &m66592->isochronous) && info.type == M66592_BULK)
+ m66592->bulk++;
+
+ m66592_ep_setting(m66592, ep, desc, info.pipe, dma);
+ pipe_initialize(ep);
+
+ return 0;
+}
+
+static int free_pipe_config(struct m66592_ep *ep)
+{
+ struct m66592 *m66592 = ep->m66592;
+ struct m66592_pipe_info info;
+
+ info.pipe = ep->pipenum;
+ info.type = ep->type;
+ pipe_buffer_release(m66592, &info);
+ m66592_ep_release(ep);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static void pipe_irq_enable(struct m66592 *m66592, u16 pipenum)
+{
+ enable_irq_ready(m66592, pipenum);
+ enable_irq_nrdy(m66592, pipenum);
+}
+
+static void pipe_irq_disable(struct m66592 *m66592, u16 pipenum)
+{
+ disable_irq_ready(m66592, pipenum);
+ disable_irq_nrdy(m66592, pipenum);
+}
+
+/* If ccpl is set, the gadget driver's complete function is not called */
+static void control_end(struct m66592 *m66592, unsigned ccpl)
+{
+ m66592->ep[0].internal_ccpl = ccpl;
+ pipe_start(m66592, 0);
+ m66592_bset(m66592, M66592_CCPL, M66592_DCPCTR);
+}
+
+static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ struct m66592 *m66592 = ep->m66592;
+
+ pipe_change(m66592, ep->pipenum);
+ m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0,
+ (M66592_ISEL | M66592_CURPIPE),
+ M66592_CFIFOSEL);
+ m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+ if (req->req.length == 0) {
+ m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+ pipe_start(m66592, 0);
+ transfer_complete(ep, req, 0);
+ } else {
+ m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
+ irq_ep0_write(ep, req);
+ }
+}
+
+static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ struct m66592 *m66592 = ep->m66592;
+ u16 tmp;
+
+ pipe_change(m66592, ep->pipenum);
+ disable_irq_empty(m66592, ep->pipenum);
+ pipe_start(m66592, ep->pipenum);
+
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (unlikely((tmp & M66592_FRDY) == 0))
+ pipe_irq_enable(m66592, ep->pipenum);
+ else
+ irq_packet_write(ep, req);
+}
+
+static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
+{
+ struct m66592 *m66592 = ep->m66592;
+ u16 pipenum = ep->pipenum;
+
+ if (ep->pipenum == 0) {
+ m66592_mdfy(m66592, M66592_PIPE0,
+ (M66592_ISEL | M66592_CURPIPE),
+ M66592_CFIFOSEL);
+ m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+ pipe_start(m66592, pipenum);
+ pipe_irq_enable(m66592, pipenum);
+ } else {
+ if (ep->use_dma) {
+ m66592_bset(m66592, M66592_TRCLR, ep->fifosel);
+ pipe_change(m66592, pipenum);
+ m66592_bset(m66592, M66592_TRENB, ep->fifosel);
+ m66592_write(m66592,
+ (req->req.length + ep->ep.maxpacket - 1)
+ / ep->ep.maxpacket,
+ ep->fifotrn);
+ }
+ pipe_start(m66592, pipenum); /* trigger once */
+ pipe_irq_enable(m66592, pipenum);
+ }
+}
+
+static void start_packet(struct m66592_ep *ep, struct m66592_request *req)
+{
+ if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
+ start_packet_write(ep, req);
+ else
+ start_packet_read(ep, req);
+}
+
+static void start_ep0(struct m66592_ep *ep, struct m66592_request *req)
+{
+ u16 ctsq;
+
+ ctsq = m66592_read(ep->m66592, M66592_INTSTS0) & M66592_CTSQ;
+
+ switch (ctsq) {
+ case M66592_CS_RDDS:
+ start_ep0_write(ep, req);
+ break;
+ case M66592_CS_WRDS:
+ start_packet_read(ep, req);
+ break;
+
+ case M66592_CS_WRND:
+ control_end(ep->m66592, 0);
+ break;
+ default:
+		pr_err("start_ep0: unexpected ctsq (%x)\n", ctsq);
+ break;
+ }
+}
+
+static void init_controller(struct m66592 *m66592)
+{
+ unsigned int endian;
+
+ if (m66592->pdata->on_chip) {
+ if (m66592->pdata->endian)
+ endian = 0; /* big endian */
+ else
+ endian = M66592_LITTLE; /* little endian */
+
+ m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */
+ m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+ m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
+
+		/* This is a workaround for the SH7722 2nd cut */
+ m66592_bset(m66592, 0x8000, M66592_DVSTCTR);
+ m66592_bset(m66592, 0x1000, M66592_TESTMODE);
+ m66592_bclr(m66592, 0x8000, M66592_DVSTCTR);
+
+ m66592_bset(m66592, M66592_INTL, M66592_INTENB1);
+
+ m66592_write(m66592, 0, M66592_CFBCFG);
+ m66592_write(m66592, 0, M66592_D0FBCFG);
+ m66592_bset(m66592, endian, M66592_CFBCFG);
+ m66592_bset(m66592, endian, M66592_D0FBCFG);
+ } else {
+ unsigned int clock, vif, irq_sense;
+
+ if (m66592->pdata->endian)
+ endian = M66592_BIGEND; /* big endian */
+ else
+ endian = 0; /* little endian */
+
+ if (m66592->pdata->vif)
+ vif = M66592_LDRV; /* 3.3v */
+ else
+ vif = 0; /* 1.5v */
+
+ switch (m66592->pdata->xtal) {
+ case M66592_PLATDATA_XTAL_12MHZ:
+ clock = M66592_XTAL12;
+ break;
+ case M66592_PLATDATA_XTAL_24MHZ:
+ clock = M66592_XTAL24;
+ break;
+ case M66592_PLATDATA_XTAL_48MHZ:
+ clock = M66592_XTAL48;
+ break;
+ default:
+ pr_warn("m66592-udc: xtal configuration error\n");
+ clock = 0;
+ }
+
+ switch (m66592->irq_trigger) {
+ case IRQF_TRIGGER_LOW:
+ irq_sense = M66592_INTL;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ irq_sense = 0;
+ break;
+ default:
+ pr_warn("m66592-udc: irq trigger config error\n");
+ irq_sense = 0;
+ }
+
+ m66592_bset(m66592,
+ (vif & M66592_LDRV) | (endian & M66592_BIGEND),
+ M66592_PINCFG);
+ m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */
+ m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL,
+ M66592_SYSCFG);
+ m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+ m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
+
+ m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
+
+ msleep(3);
+
+ m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
+
+ msleep(1);
+
+ m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
+
+ m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1);
+ m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR,
+ M66592_DMA0CFG);
+ }
+}
+
+static void disable_controller(struct m66592 *m66592)
+{
+ m66592_bclr(m66592, M66592_UTST, M66592_TESTMODE);
+ if (!m66592->pdata->on_chip) {
+ m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG);
+ udelay(1);
+ m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG);
+ udelay(1);
+ m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG);
+ udelay(1);
+ m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG);
+ }
+}
+
+static void m66592_start_xclock(struct m66592 *m66592)
+{
+ u16 tmp;
+
+ if (!m66592->pdata->on_chip) {
+ tmp = m66592_read(m66592, M66592_SYSCFG);
+ if (!(tmp & M66592_XCKE))
+ m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+static void transfer_complete(struct m66592_ep *ep,
+ struct m66592_request *req, int status)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ int restart = 0;
+
+ if (unlikely(ep->pipenum == 0)) {
+ if (ep->internal_ccpl) {
+ ep->internal_ccpl = 0;
+ return;
+ }
+ }
+
+ list_del_init(&req->queue);
+ if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
+ req->req.status = -ESHUTDOWN;
+ else
+ req->req.status = status;
+
+ if (!list_empty(&ep->queue))
+ restart = 1;
+
+ spin_unlock(&ep->m66592->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->m66592->lock);
+
+ if (restart) {
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ if (ep->ep.desc)
+ start_packet(ep, req);
+ }
+}
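+
+/*
+ * Note the ordering above: 'restart' is latched before the lock is
+ * dropped for usb_gadget_giveback_request(), because the completion
+ * callback may queue or dequeue requests and reshape ep->queue while
+ * the lock is released.
+ */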
+
+static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ int i;
+ u16 tmp;
+ unsigned bufsize;
+ size_t size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct m66592 *m66592 = ep->m66592;
+
+ pipe_change(m66592, pipenum);
+ m66592_bset(m66592, M66592_ISEL, ep->fifosel);
+
+ i = 0;
+ do {
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (i++ > 100000) {
+			pr_err("pipe0 is busy, maybe a CPU I/O bus conflict; please power off this controller\n");
+ return;
+ }
+ ndelay(1);
+ } while ((tmp & M66592_FRDY) == 0);
+
+ /* prepare parameters */
+ bufsize = get_buffer_size(m66592, pipenum);
+ buf = req->req.buf + req->req.actual;
+ size = min(bufsize, req->req.length - req->req.actual);
+
+ /* write fifo */
+ if (req->req.buf) {
+ if (size > 0)
+ m66592_write_fifo(m66592, ep, buf, size);
+ if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
+ m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+ }
+
+ /* update parameters */
+ req->req.actual += size;
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ disable_irq_ready(m66592, pipenum);
+ disable_irq_empty(m66592, pipenum);
+ } else {
+ disable_irq_ready(m66592, pipenum);
+ enable_irq_empty(m66592, pipenum);
+ }
+ pipe_start(m66592, pipenum);
+}
+
+static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ u16 tmp;
+ unsigned bufsize;
+ size_t size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct m66592 *m66592 = ep->m66592;
+
+ pipe_change(m66592, pipenum);
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (unlikely((tmp & M66592_FRDY) == 0)) {
+ pipe_stop(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+		pr_err("write fifo not ready. pipenum=%d\n", pipenum);
+ return;
+ }
+
+ /* prepare parameters */
+ bufsize = get_buffer_size(m66592, pipenum);
+ buf = req->req.buf + req->req.actual;
+ size = min(bufsize, req->req.length - req->req.actual);
+
+ /* write fifo */
+ if (req->req.buf) {
+ m66592_write_fifo(m66592, ep, buf, size);
+ if ((size == 0)
+ || ((size % ep->ep.maxpacket) != 0)
+ || ((bufsize != ep->ep.maxpacket)
+ && (bufsize > size)))
+ m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+ }
+
+ /* update parameters */
+ req->req.actual += size;
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ disable_irq_ready(m66592, pipenum);
+ enable_irq_empty(m66592, pipenum);
+ } else {
+ disable_irq_empty(m66592, pipenum);
+ pipe_irq_enable(m66592, pipenum);
+ }
+}
+
+static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req)
+{
+ u16 tmp;
+ int rcv_len, bufsize, req_len;
+ int size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct m66592 *m66592 = ep->m66592;
+ int finish = 0;
+
+ pipe_change(m66592, pipenum);
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (unlikely((tmp & M66592_FRDY) == 0)) {
+ req->req.status = -EPIPE;
+ pipe_stop(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+		pr_err("read fifo not ready\n");
+ return;
+ }
+
+ /* prepare parameters */
+ rcv_len = tmp & M66592_DTLN;
+ bufsize = get_buffer_size(m66592, pipenum);
+
+ buf = req->req.buf + req->req.actual;
+ req_len = req->req.length - req->req.actual;
+ if (rcv_len < bufsize)
+ size = min(rcv_len, req_len);
+ else
+ size = min(bufsize, req_len);
+
+ /* update parameters */
+ req->req.actual += size;
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ pipe_stop(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+ finish = 1;
+ }
+
+ /* read fifo */
+ if (req->req.buf) {
+ if (size == 0)
+ m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+ else
+ m66592_read_fifo(m66592, ep->fifoaddr, buf, size);
+ }
+
+ if ((ep->pipenum != 0) && finish)
+ transfer_complete(ep, req, 0);
+}
+
+static void irq_pipe_ready(struct m66592 *m66592, u16 status, u16 enb)
+{
+ u16 check;
+ u16 pipenum;
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+
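+	/* pipe 0 is always a control-pipe read; other pipes dispatch on their direction */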
+ if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) {
+ m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS);
+ m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE,
+ M66592_CFIFOSEL);
+
+ ep = &m66592->ep[0];
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ irq_packet_read(ep, req);
+ } else {
+ for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if ((status & check) && (enb & check)) {
+ m66592_write(m66592, ~check, M66592_BRDYSTS);
+ ep = m66592->pipenum2ep[pipenum];
+ req = list_entry(ep->queue.next,
+ struct m66592_request, queue);
+ if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
+ irq_packet_write(ep, req);
+ else
+ irq_packet_read(ep, req);
+ }
+ }
+ }
+}
+
+static void irq_pipe_empty(struct m66592 *m66592, u16 status, u16 enb)
+{
+ u16 tmp;
+ u16 check;
+ u16 pipenum;
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+
+ if ((status & M66592_BEMP0) && (enb & M66592_BEMP0)) {
+ m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
+
+ ep = &m66592->ep[0];
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ irq_ep0_write(ep, req);
+ } else {
+ for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if ((status & check) && (enb & check)) {
+ m66592_write(m66592, ~check, M66592_BEMPSTS);
+ tmp = control_reg_get(m66592, pipenum);
+ if ((tmp & M66592_INBUFM) == 0) {
+ disable_irq_empty(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+ pipe_stop(m66592, pipenum);
+ ep = m66592->pipenum2ep[pipenum];
+ req = list_entry(ep->queue.next,
+ struct m66592_request,
+ queue);
+ if (!list_empty(&ep->queue))
+ transfer_complete(ep, req, 0);
+ }
+ }
+ }
+ }
+}
+
+static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ struct m66592_ep *ep;
+ u16 pid;
+ u16 status = 0;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ break;
+ case USB_RECIP_INTERFACE:
+ status = 0;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pid = control_reg_get_pid(m66592, ep->pipenum);
+ if (pid == M66592_PID_STALL)
+ status = 1 << USB_ENDPOINT_HALT;
+ else
+ status = 0;
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ return; /* exit */
+ }
+
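+	/* answer GET_STATUS ourselves by queueing the internal ep0 request */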
+ m66592->ep0_data = cpu_to_le16(status);
+ m66592->ep0_req->buf = &m66592->ep0_data;
+ m66592->ep0_req->length = 2;
+ /* AV: what happens if we get called again before that gets through? */
+ spin_unlock(&m66592->lock);
+ m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL);
+ spin_lock(&m66592->lock);
+}
+
+static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_INTERFACE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pipe_stop(m66592, ep->pipenum);
+ control_reg_sqclr(m66592, ep->pipenum);
+
+ control_end(m66592, 1);
+
+ req = list_entry(ep->queue.next,
+ struct m66592_request, queue);
+ if (ep->busy) {
+ ep->busy = 0;
+ if (list_empty(&ep->queue))
+ break;
+ start_packet(ep, req);
+ } else if (!list_empty(&ep->queue))
+ pipe_start(m66592, ep->pipenum);
+ }
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ break;
+ }
+}
+
+static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+ u16 tmp;
+ int timeout = 3000;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ control_end(m66592, 1);
+ /* Wait for the completion of status stage */
+ do {
+ tmp = m66592_read(m66592, M66592_INTSTS0) &
+ M66592_CTSQ;
+ udelay(1);
+ } while (tmp != M66592_CS_IDST && timeout-- > 0);
+
+ if (tmp == M66592_CS_IDST)
+				m66592_bset(m66592,
+					le16_to_cpu(ctrl->wIndex) >> 8,
+					M66592_TESTMODE);
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ break;
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ struct m66592_ep *ep;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pipe_stall(m66592, ep->pipenum);
+
+ control_end(m66592, 1);
+ }
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ break;
+ }
+}
+
+/* returns nonzero when the caller must invoke the gadget driver's setup() */
+static int setup_packet(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+ u16 *p = (u16 *)ctrl;
+ unsigned long offset = M66592_USBREQ;
+ int i, ret = 0;
+
+	/* the 8-byte SETUP packet is latched in four 16-bit registers */
+ m66592_write(m66592, ~M66592_VALID, M66592_INTSTS0);
+
+ for (i = 0; i < 4; i++)
+ p[i] = m66592_read(m66592, offset + i*2);
+
+ /* check request */
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ get_status(m66592, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ clear_feature(m66592, ctrl);
+ break;
+ case USB_REQ_SET_FEATURE:
+ set_feature(m66592, ctrl);
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ } else
+ ret = 1;
+ return ret;
+}
+
+static void m66592_update_usb_speed(struct m66592 *m66592)
+{
+ u16 speed = get_usb_speed(m66592);
+
+ switch (speed) {
+ case M66592_HSMODE:
+ m66592->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case M66592_FSMODE:
+ m66592->gadget.speed = USB_SPEED_FULL;
+ break;
+ default:
+ m66592->gadget.speed = USB_SPEED_UNKNOWN;
+ pr_err("USB speed unknown\n");
+ }
+}
+
+static void irq_device_state(struct m66592 *m66592)
+{
+ u16 dvsq;
+
+ dvsq = m66592_read(m66592, M66592_INTSTS0) & M66592_DVSQ;
+ m66592_write(m66592, ~M66592_DVST, M66592_INTSTS0);
+
+ if (dvsq == M66592_DS_DFLT) { /* bus reset */
+ usb_gadget_udc_reset(&m66592->gadget, m66592->driver);
+ m66592_update_usb_speed(m66592);
+ }
+ if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG)
+ m66592_update_usb_speed(m66592);
+ if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS)
+ && m66592->gadget.speed == USB_SPEED_UNKNOWN)
+ m66592_update_usb_speed(m66592);
+
+ m66592->old_dvsq = dvsq;
+}
+
+static void irq_control_stage(struct m66592 *m66592)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ struct usb_ctrlrequest ctrl;
+ u16 ctsq;
+
+ ctsq = m66592_read(m66592, M66592_INTSTS0) & M66592_CTSQ;
+ m66592_write(m66592, ~M66592_CTRT, M66592_INTSTS0);
+
+ switch (ctsq) {
+ case M66592_CS_IDST: {
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ ep = &m66592->ep[0];
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ transfer_complete(ep, req, 0);
+ }
+ break;
+
+ case M66592_CS_RDDS:
+ case M66592_CS_WRDS:
+ case M66592_CS_WRND:
+ if (setup_packet(m66592, &ctrl)) {
+ spin_unlock(&m66592->lock);
+ if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0)
+ pipe_stall(m66592, 0);
+ spin_lock(&m66592->lock);
+ }
+ break;
+ case M66592_CS_RDSS:
+ case M66592_CS_WRSS:
+ control_end(m66592, 0);
+ break;
+ default:
+		pr_err("ctrl_stage: unexpected ctsq (%x)\n", ctsq);
+ break;
+ }
+}
+
+static irqreturn_t m66592_irq(int irq, void *_m66592)
+{
+ struct m66592 *m66592 = _m66592;
+ u16 intsts0;
+ u16 intenb0;
+ u16 savepipe;
+ u16 mask0;
+
+ spin_lock(&m66592->lock);
+
+ intsts0 = m66592_read(m66592, M66592_INTSTS0);
+ intenb0 = m66592_read(m66592, M66592_INTENB0);
+
+ if (m66592->pdata->on_chip && !intsts0 && !intenb0) {
+		/*
+		 * While the USB clock is stopped the registers cannot be
+		 * read, yet the interrupt still fires. Turn the clock back
+		 * on here and re-read the registers.
+		 */
+ m66592_start_xclock(m66592);
+ intsts0 = m66592_read(m66592, M66592_INTSTS0);
+ intenb0 = m66592_read(m66592, M66592_INTENB0);
+ }
+
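+	/* FIFO accesses rewrite CFIFOSEL, so save it here and restore on exit */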
+ savepipe = m66592_read(m66592, M66592_CFIFOSEL);
+
+ mask0 = intsts0 & intenb0;
+ if (mask0) {
+ u16 brdysts = m66592_read(m66592, M66592_BRDYSTS);
+ u16 bempsts = m66592_read(m66592, M66592_BEMPSTS);
+ u16 brdyenb = m66592_read(m66592, M66592_BRDYENB);
+ u16 bempenb = m66592_read(m66592, M66592_BEMPENB);
+
+ if (mask0 & M66592_VBINT) {
+ m66592_write(m66592, 0xffff & ~M66592_VBINT,
+ M66592_INTSTS0);
+ m66592_start_xclock(m66592);
+
+ /* start vbus sampling */
+ m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0)
+ & M66592_VBSTS;
+ m66592->scount = M66592_MAX_SAMPLING;
+
+ mod_timer(&m66592->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ if (intsts0 & M66592_DVSQ)
+ irq_device_state(m66592);
+
+ if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE)
+ && (brdysts & brdyenb)) {
+ irq_pipe_ready(m66592, brdysts, brdyenb);
+ }
+ if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE)
+ && (bempsts & bempenb)) {
+ irq_pipe_empty(m66592, bempsts, bempenb);
+ }
+
+ if (intsts0 & M66592_CTRT)
+ irq_control_stage(m66592);
+ }
+
+ m66592_write(m66592, savepipe, M66592_CFIFOSEL);
+
+ spin_unlock(&m66592->lock);
+ return IRQ_HANDLED;
+}
+
+static void m66592_timer(struct timer_list *t)
+{
+ struct m66592 *m66592 = from_timer(m66592, t, timer);
+ unsigned long flags;
+ u16 tmp;
+
+ spin_lock_irqsave(&m66592->lock, flags);
+ tmp = m66592_read(m66592, M66592_SYSCFG);
+ if (!(tmp & M66592_RCKE)) {
+ m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
+ udelay(10);
+ m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
+ }
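+	/*
+	 * Debounce VBUS: resample every 50 ms and report a connect or
+	 * disconnect only after M66592_MAX_SAMPLING consecutive reads agree.
+	 */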
+ if (m66592->scount > 0) {
+ tmp = m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS;
+ if (tmp == m66592->old_vbus) {
+ m66592->scount--;
+ if (m66592->scount == 0) {
+ if (tmp == M66592_VBSTS)
+ m66592_usb_connect(m66592);
+ else
+ m66592_usb_disconnect(m66592);
+ } else {
+ mod_timer(&m66592->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ } else {
+ m66592->scount = M66592_MAX_SAMPLING;
+ m66592->old_vbus = tmp;
+ mod_timer(&m66592->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ }
+ spin_unlock_irqrestore(&m66592->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+static int m66592_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct m66592_ep *ep;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ return alloc_pipe_config(ep, desc);
+}
+
+static int m66592_disable(struct usb_ep *_ep)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ BUG_ON(!ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ transfer_complete(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+ }
+
+ pipe_irq_disable(ep->m66592, ep->pipenum);
+ return free_pipe_config(ep);
+}
+
+static struct usb_request *m66592_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct m66592_request *req;
+
+ req = kzalloc(sizeof(struct m66592_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void m66592_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct m66592_request *req;
+
+ req = container_of(_req, struct m66592_request, req);
+ kfree(req);
+}
+
+static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+ int request = 0;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ req = container_of(_req, struct m66592_request, req);
+
+ if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+
+ if (list_empty(&ep->queue))
+ request = 1;
+
+ list_add_tail(&req->queue, &ep->queue);
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+
+ if (ep->ep.desc == NULL) /* control */
+ start_ep0(ep, req);
+ else {
+ if (request && !ep->busy)
+ start_packet(ep, req);
+ }
+
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+
+ return 0;
+}
+
+static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ req = container_of(_req, struct m66592_request, req);
+
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ if (!list_empty(&ep->queue))
+ transfer_complete(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+
+ return 0;
+}
+
+static int m66592_set_halt(struct usb_ep *_ep, int value)
+{
+ struct m66592_ep *ep = container_of(_ep, struct m66592_ep, ep);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ } else if (value) {
+ ep->busy = 1;
+ pipe_stall(ep->m66592, ep->pipenum);
+ } else {
+ ep->busy = 0;
+ pipe_stop(ep->m66592, ep->pipenum);
+ }
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+ return ret;
+}
+
+static void m66592_fifo_flush(struct usb_ep *_ep)
+{
+ struct m66592_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ if (list_empty(&ep->queue) && !ep->busy) {
+ pipe_stop(ep->m66592, ep->pipenum);
+ m66592_bclr(ep->m66592, M66592_BCLR, ep->fifoctr);
+ }
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+}
+
+static const struct usb_ep_ops m66592_ep_ops = {
+ .enable = m66592_enable,
+ .disable = m66592_disable,
+
+ .alloc_request = m66592_alloc_request,
+ .free_request = m66592_free_request,
+
+ .queue = m66592_queue,
+ .dequeue = m66592_dequeue,
+
+ .set_halt = m66592_set_halt,
+ .fifo_flush = m66592_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+static int m66592_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct m66592 *m66592 = to_m66592(g);
+
+ /* hook up the driver */
+ m66592->driver = driver;
+
+ m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+ if (m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS) {
+ m66592_start_xclock(m66592);
+ /* start vbus sampling */
+ m66592->old_vbus = m66592_read(m66592,
+ M66592_INTSTS0) & M66592_VBSTS;
+ m66592->scount = M66592_MAX_SAMPLING;
+ mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50));
+ }
+
+ return 0;
+}
+
+static int m66592_udc_stop(struct usb_gadget *g)
+{
+ struct m66592 *m66592 = to_m66592(g);
+
+ m66592_bclr(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+
+ init_controller(m66592);
+ disable_controller(m66592);
+
+ m66592->driver = NULL;
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static int m66592_get_frame(struct usb_gadget *_gadget)
+{
+ struct m66592 *m66592 = gadget_to_m66592(_gadget);
+ return m66592_read(m66592, M66592_FRMNUM) & 0x03FF;
+}
+
+static int m66592_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct m66592 *m66592 = gadget_to_m66592(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&m66592->lock, flags);
+ if (is_on)
+ m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
+ else
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+ spin_unlock_irqrestore(&m66592->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops m66592_gadget_ops = {
+ .get_frame = m66592_get_frame,
+ .udc_start = m66592_udc_start,
+ .udc_stop = m66592_udc_stop,
+ .pullup = m66592_pullup,
+};
+
+static int m66592_remove(struct platform_device *pdev)
+{
+ struct m66592 *m66592 = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&m66592->gadget);
+
+ del_timer_sync(&m66592->timer);
+ iounmap(m66592->reg);
+ free_irq(platform_get_irq(pdev, 0), m66592);
+ m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+ if (m66592->pdata->on_chip) {
+ clk_disable(m66592->clk);
+ clk_put(m66592->clk);
+ }
+ kfree(m66592);
+ return 0;
+}
+
+static void nop_completion(struct usb_ep *ep, struct usb_request *r)
+{
+}
+
+static int m66592_probe(struct platform_device *pdev)
+{
+ struct resource *res, *ires;
+ void __iomem *reg = NULL;
+ struct m66592 *m66592 = NULL;
+ char clk_name[8];
+ int ret = 0;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ pr_err("platform_get_resource error.\n");
+ goto clean_up;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev,
+ "platform_get_resource IORESOURCE_IRQ error.\n");
+ goto clean_up;
+ }
+
+ reg = ioremap(res->start, resource_size(res));
+ if (reg == NULL) {
+ ret = -ENOMEM;
+ pr_err("ioremap error.\n");
+ goto clean_up;
+ }
+
+ if (dev_get_platdata(&pdev->dev) == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ ret = -ENODEV;
+ goto clean_up;
+ }
+
+	/* initialize udc */
+ m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
+ if (m66592 == NULL) {
+ ret = -ENOMEM;
+ goto clean_up;
+ }
+
+ m66592->pdata = dev_get_platdata(&pdev->dev);
+ m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+
+ spin_lock_init(&m66592->lock);
+ platform_set_drvdata(pdev, m66592);
+
+ m66592->gadget.ops = &m66592_gadget_ops;
+ m66592->gadget.max_speed = USB_SPEED_HIGH;
+ m66592->gadget.name = udc_name;
+
+ timer_setup(&m66592->timer, m66592_timer, 0);
+ m66592->reg = reg;
+
+ ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
+ udc_name, m66592);
+ if (ret < 0) {
+ pr_err("request_irq error (%d)\n", ret);
+ goto clean_up;
+ }
+
+ if (m66592->pdata->on_chip) {
+ snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id);
+ m66592->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(m66592->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
+ clk_name);
+ ret = PTR_ERR(m66592->clk);
+ goto clean_up2;
+ }
+ clk_enable(m66592->clk);
+ }
+
+ INIT_LIST_HEAD(&m66592->gadget.ep_list);
+ m66592->gadget.ep0 = &m66592->ep[0].ep;
+ INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list);
+ for (i = 0; i < M66592_MAX_NUM_PIPE; i++) {
+ struct m66592_ep *ep = &m66592->ep[i];
+
+ if (i != 0) {
+ INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list);
+ list_add_tail(&m66592->ep[i].ep.ep_list,
+ &m66592->gadget.ep_list);
+ }
+ ep->m66592 = m66592;
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.name = m66592_ep_name[i];
+ ep->ep.ops = &m66592_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, 512);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ }
+ usb_ep_set_maxpacket_limit(&m66592->ep[0].ep, 64);
+ m66592->ep[0].pipenum = 0;
+ m66592->ep[0].fifoaddr = M66592_CFIFO;
+ m66592->ep[0].fifosel = M66592_CFIFOSEL;
+ m66592->ep[0].fifoctr = M66592_CFIFOCTR;
+ m66592->ep[0].fifotrn = 0;
+ m66592->ep[0].pipectr = get_pipectr_addr(0);
+ m66592->pipenum2ep[0] = &m66592->ep[0];
+ m66592->epaddr2ep[0] = &m66592->ep[0];
+
+ m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
+ if (m66592->ep0_req == NULL) {
+ ret = -ENOMEM;
+ goto clean_up3;
+ }
+ m66592->ep0_req->complete = nop_completion;
+
+ init_controller(m66592);
+
+ ret = usb_add_gadget_udc(&pdev->dev, &m66592->gadget);
+ if (ret)
+ goto err_add_udc;
+
+ dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+ return 0;
+
+err_add_udc:
+ m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+ m66592->ep0_req = NULL;
+clean_up3:
+ if (m66592->pdata->on_chip) {
+ clk_disable(m66592->clk);
+ clk_put(m66592->clk);
+ }
+clean_up2:
+ free_irq(ires->start, m66592);
+clean_up:
+ if (m66592) {
+ if (m66592->ep0_req)
+ m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+ kfree(m66592);
+ }
+ if (reg)
+ iounmap(reg);
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver m66592_driver = {
+ .remove = m66592_remove,
+ .driver = {
+ .name = udc_name,
+ },
+};
+
+module_platform_driver_probe(m66592_driver, m66592_probe);
diff --git a/drivers/usb/gadget/udc/m66592-udc.h b/drivers/usb/gadget/udc/m66592-udc.h
new file mode 100644
index 000000000..01a64685b
--- /dev/null
+++ b/drivers/usb/gadget/udc/m66592-udc.h
@@ -0,0 +1,603 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * M66592 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ */
+
+#ifndef __M66592_UDC_H__
+#define __M66592_UDC_H__
+
+#include <linux/clk.h>
+#include <linux/usb/m66592.h>
+
+#define M66592_SYSCFG 0x00
+#define M66592_XTAL 0xC000 /* b15-14: Crystal selection */
+#define M66592_XTAL48 0x8000 /* 48MHz */
+#define M66592_XTAL24 0x4000 /* 24MHz */
+#define M66592_XTAL12 0x0000 /* 12MHz */
+#define M66592_XCKE 0x2000 /* b13: External clock enable */
+#define M66592_RCKE 0x1000 /* b12: Register clock enable */
+#define M66592_PLLC 0x0800 /* b11: PLL control */
+#define M66592_SCKE 0x0400 /* b10: USB clock enable */
+#define M66592_ATCKM 0x0100 /* b8: Automatic clock supply */
+#define M66592_HSE 0x0080 /* b7: Hi-speed enable */
+#define M66592_DCFM 0x0040 /* b6: Controller function select */
+#define M66592_DMRPD 0x0020 /* b5: D- pull down control */
+#define M66592_DPRPU 0x0010 /* b4: D+ pull up control */
+#define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */
+#define M66592_PCUT 0x0002 /* b1: Low power sleep enable */
+#define M66592_USBE 0x0001 /* b0: USB module operation enable */
+
+#define M66592_SYSSTS 0x02
+#define M66592_LNST 0x0003 /* b1-0: D+, D- line status */
+#define M66592_SE1 0x0003 /* SE1 */
+#define M66592_KSTS 0x0002 /* K State */
+#define M66592_JSTS 0x0001 /* J State */
+#define M66592_SE0 0x0000 /* SE0 */
+
+#define M66592_DVSTCTR 0x04
+#define M66592_WKUP 0x0100 /* b8: Remote wakeup */
+#define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */
+#define M66592_USBRST 0x0040 /* b6: USB reset enable */
+#define M66592_RESUME 0x0020 /* b5: Resume enable */
+#define M66592_UACT 0x0010 /* b4: USB bus enable */
+#define M66592_RHST 0x0003 /* b1-0: Reset handshake status */
+#define M66592_HSMODE 0x0003 /* Hi-Speed mode */
+#define M66592_FSMODE 0x0002 /* Full-Speed mode */
+#define M66592_HSPROC 0x0001 /* HS handshake is processing */
+
+#define M66592_TESTMODE 0x06
+#define M66592_UTST 0x000F /* b4-0: Test select */
+#define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */
+#define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */
+#define M66592_H_TST_K 0x000A /* HOST TEST K */
+#define M66592_H_TST_J 0x0009 /* HOST TEST J */
+#define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */
+#define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */
+#define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */
+#define M66592_P_TST_K 0x0002 /* PERI TEST K */
+#define M66592_P_TST_J 0x0001 /* PERI TEST J */
+#define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */
+
+/* built-in registers */
+#define M66592_CFBCFG 0x0A
+#define M66592_D0FBCFG 0x0C
+#define M66592_LITTLE 0x0100 /* b8: Little endian mode */
+/* external chip case */
+#define M66592_PINCFG 0x0A
+#define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */
+#define M66592_BIGEND 0x0100 /* b8: Big endian mode */
+
+#define M66592_DMA0CFG 0x0C
+#define M66592_DMA1CFG 0x0E
+#define M66592_DREQA 0x4000 /* b14: Dreq active select */
+#define M66592_BURST 0x2000 /* b13: Burst mode */
+#define M66592_DACKA 0x0400 /* b10: Dack active select */
+#define M66592_DFORM 0x0380 /* b9-7: DMA mode select */
+#define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */
+#define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */
+#define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */
+#define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */
+#define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */
+#define M66592_DENDA 0x0040 /* b6: Dend active select */
+#define M66592_PKTM 0x0020 /* b5: Packet mode */
+#define M66592_DENDE 0x0010 /* b4: Dend enable */
+#define M66592_OBUS 0x0004 /* b2: OUTbus mode */
+
+/* common case */
+#define M66592_CFIFO 0x10
+#define M66592_D0FIFO 0x14
+#define M66592_D1FIFO 0x18
+
+#define M66592_CFIFOSEL 0x1E
+#define M66592_D0FIFOSEL 0x24
+#define M66592_D1FIFOSEL 0x2A
+#define M66592_RCNT 0x8000 /* b15: Read count mode */
+#define M66592_REW 0x4000 /* b14: Buffer rewind */
+#define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */
+#define M66592_DREQE 0x1000 /* b12: DREQ output enable */
+#define M66592_MBW_8 0x0000 /* 8bit */
+#define M66592_MBW_16 0x0400 /* 16bit */
+#define M66592_MBW_32 0x0800 /* 32bit */
+#define M66592_TRENB 0x0200 /* b9: Transaction counter enable */
+#define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */
+#define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */
+#define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */
+#define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */
+
+#define M66592_CFIFOCTR 0x20
+#define M66592_D0FIFOCTR 0x26
+#define M66592_D1FIFOCTR 0x2c
+#define M66592_BVAL 0x8000 /* b15: Buffer valid flag */
+#define M66592_BCLR 0x4000 /* b14: Buffer clear */
+#define M66592_FRDY 0x2000 /* b13: FIFO ready */
+#define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */
+
+#define M66592_CFIFOSIE 0x22
+#define M66592_TGL 0x8000 /* b15: Buffer toggle */
+#define M66592_SCLR 0x4000 /* b14: Buffer clear */
+#define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */
+
+#define M66592_D0FIFOTRN 0x28
+#define M66592_D1FIFOTRN 0x2E
+#define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */
+
+#define M66592_INTENB0 0x30
+#define M66592_VBSE 0x8000 /* b15: VBUS interrupt */
+#define M66592_RSME 0x4000 /* b14: Resume interrupt */
+#define M66592_SOFE 0x2000 /* b13: Frame update interrupt */
+#define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */
+#define M66592_CTRE 0x0800 /* b11: Control transfer stage transition irq */
+#define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */
+#define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */
+#define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */
+#define M66592_URST 0x0080 /* b7: USB reset detected interrupt */
+#define M66592_SADR 0x0040 /* b6: Set address executed interrupt */
+#define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */
+#define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */
+#define M66592_WDST 0x0008 /* b3: Control write data stage completed irq */
+#define M66592_RDST 0x0004 /* b2: Control read data stage completed irq */
+#define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */
+#define M66592_SERR 0x0001 /* b0: Sequence error interrupt */
+
+#define M66592_INTENB1 0x32
+#define M66592_BCHGE	0x4000	/* b14: USB bus change interrupt */
+#define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */
+#define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */
+#define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */
+#define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */
+#define M66592_INTL 0x0002 /* b1: Interrupt sense select */
+#define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */
+
+#define M66592_BRDYENB 0x36
+#define M66592_BRDYSTS 0x46
+#define M66592_BRDY7 0x0080 /* b7: PIPE7 */
+#define M66592_BRDY6 0x0040 /* b6: PIPE6 */
+#define M66592_BRDY5 0x0020 /* b5: PIPE5 */
+#define M66592_BRDY4 0x0010 /* b4: PIPE4 */
+#define M66592_BRDY3 0x0008 /* b3: PIPE3 */
+#define M66592_BRDY2 0x0004 /* b2: PIPE2 */
+#define M66592_BRDY1 0x0002 /* b1: PIPE1 */
+#define M66592_BRDY0	0x0001	/* b0: PIPE0 */
+
+#define M66592_NRDYENB 0x38
+#define M66592_NRDYSTS 0x48
+#define M66592_NRDY7 0x0080 /* b7: PIPE7 */
+#define M66592_NRDY6 0x0040 /* b6: PIPE6 */
+#define M66592_NRDY5 0x0020 /* b5: PIPE5 */
+#define M66592_NRDY4 0x0010 /* b4: PIPE4 */
+#define M66592_NRDY3 0x0008 /* b3: PIPE3 */
+#define M66592_NRDY2 0x0004 /* b2: PIPE2 */
+#define M66592_NRDY1 0x0002 /* b1: PIPE1 */
+#define M66592_NRDY0	0x0001	/* b0: PIPE0 */
+
+#define M66592_BEMPENB 0x3A
+#define M66592_BEMPSTS 0x4A
+#define M66592_BEMP7 0x0080 /* b7: PIPE7 */
+#define M66592_BEMP6 0x0040 /* b6: PIPE6 */
+#define M66592_BEMP5 0x0020 /* b5: PIPE5 */
+#define M66592_BEMP4 0x0010 /* b4: PIPE4 */
+#define M66592_BEMP3 0x0008 /* b3: PIPE3 */
+#define M66592_BEMP2 0x0004 /* b2: PIPE2 */
+#define M66592_BEMP1 0x0002 /* b1: PIPE1 */
+#define M66592_BEMP0 0x0001 /* b0: PIPE0 */
+
+#define M66592_SOFCFG 0x3C
+#define M66592_SOFM	0x000C	/* b3-2: SOF pulse mode */
+#define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */
+#define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */
+#define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */
+
+#define M66592_INTSTS0 0x40
+#define M66592_VBINT 0x8000 /* b15: VBUS interrupt */
+#define M66592_RESM 0x4000 /* b14: Resume interrupt */
+#define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */
+#define M66592_DVST 0x1000 /* b12: Device state transition */
+#define M66592_CTRT 0x0800 /* b11: Control stage transition */
+#define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */
+#define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */
+#define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */
+#define M66592_VBSTS 0x0080 /* b7: VBUS input port */
+#define M66592_DVSQ 0x0070 /* b6-4: Device state */
+#define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */
+#define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */
+#define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */
+#define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */
+#define M66592_DS_SUSP 0x0040 /* Suspend */
+#define M66592_DS_CNFG 0x0030 /* Configured */
+#define M66592_DS_ADDS 0x0020 /* Address */
+#define M66592_DS_DFLT 0x0010 /* Default */
+#define M66592_DS_POWR 0x0000 /* Powered */
+#define M66592_DVSQS 0x0030 /* b5-4: Device state */
+#define M66592_VALID 0x0008 /* b3: Setup packet detected flag */
+#define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */
+#define M66592_CS_SQER 0x0006 /* Sequence error */
+#define M66592_CS_WRND 0x0005 /* Control write nodata status */
+#define M66592_CS_WRSS 0x0004 /* Control write status stage */
+#define M66592_CS_WRDS 0x0003 /* Control write data stage */
+#define M66592_CS_RDSS 0x0002 /* Control read status stage */
+#define M66592_CS_RDDS 0x0001 /* Control read data stage */
+#define M66592_CS_IDST 0x0000 /* Idle or setup stage */
+
+#define M66592_INTSTS1 0x42
+#define M66592_BCHG	0x4000	/* b14: USB bus change interrupt */
+#define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */
+#define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */
+#define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */
+
+#define M66592_FRMNUM 0x4C
+#define M66592_OVRN 0x8000 /* b15: Overrun error */
+#define M66592_CRCE 0x4000 /* b14: Received data error */
+#define M66592_SOFRM 0x0800 /* b11: SOF output mode */
+#define M66592_FRNM 0x07FF /* b10-0: Frame number */
+
+#define M66592_UFRMNUM 0x4E
+#define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */
+
+#define M66592_RECOVER 0x50
+#define M66592_STSRECOV 0x0700 /* Status recovery */
+#define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */
+#define M66592_STSR_DEFAULT 0x0100 /* Default state */
+#define M66592_STSR_ADDRESS 0x0200 /* Address state */
+#define M66592_STSR_CONFIG 0x0300 /* Configured state */
+#define M66592_USBADDR 0x007F /* b6-0: USB address */
+
+#define M66592_USBREQ 0x54
+#define M66592_bRequest 0xFF00 /* b15-8: bRequest */
+#define M66592_GET_STATUS 0x0000
+#define M66592_CLEAR_FEATURE 0x0100
+#define M66592_ReqRESERVED 0x0200
+#define M66592_SET_FEATURE 0x0300
+#define M66592_ReqRESERVED1 0x0400
+#define M66592_SET_ADDRESS 0x0500
+#define M66592_GET_DESCRIPTOR 0x0600
+#define M66592_SET_DESCRIPTOR 0x0700
+#define M66592_GET_CONFIGURATION 0x0800
+#define M66592_SET_CONFIGURATION 0x0900
+#define M66592_GET_INTERFACE 0x0A00
+#define M66592_SET_INTERFACE 0x0B00
+#define M66592_SYNCH_FRAME 0x0C00
+#define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */
+#define M66592_bmRequestTypeDir 0x0080 /* b7 : Data direction */
+#define M66592_HOST_TO_DEVICE 0x0000
+#define M66592_DEVICE_TO_HOST 0x0080
+#define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */
+#define M66592_STANDARD 0x0000
+#define M66592_CLASS 0x0020
+#define M66592_VENDOR 0x0040
+#define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */
+#define M66592_DEVICE 0x0000
+#define M66592_INTERFACE 0x0001
+#define M66592_ENDPOINT 0x0002
+
+#define M66592_USBVAL 0x56
+#define M66592_wValue 0xFFFF /* b15-0: wValue */
+/* Standard Feature Selector */
+#define M66592_ENDPOINT_HALT 0x0000
+#define M66592_DEVICE_REMOTE_WAKEUP 0x0001
+#define M66592_TEST_MODE 0x0002
+/* Descriptor Types */
+#define M66592_DT_TYPE 0xFF00
+#define M66592_GET_DT_TYPE(v)	(((v) & M66592_DT_TYPE) >> 8)
+#define M66592_DT_DEVICE 0x01
+#define M66592_DT_CONFIGURATION 0x02
+#define M66592_DT_STRING 0x03
+#define M66592_DT_INTERFACE 0x04
+#define M66592_DT_ENDPOINT 0x05
+#define M66592_DT_DEVICE_QUALIFIER 0x06
+#define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07
+#define M66592_DT_INTERFACE_POWER 0x08
+#define M66592_DT_INDEX 0x00FF
+#define M66592_CONF_NUM 0x00FF
+#define M66592_ALT_SET 0x00FF
+
+#define M66592_USBINDEX 0x58
+#define M66592_wIndex 0xFFFF /* b15-0: wIndex */
+#define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode */
+#define M66592_TEST_J 0x0100 /* Test_J */
+#define M66592_TEST_K 0x0200 /* Test_K */
+#define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */
+#define M66592_TEST_PACKET 0x0400 /* Test_Packet */
+#define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */
+#define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */
+#define M66592_TEST_Reserved 0x4000 /* Reserved */
+#define M66592_TEST_VSTModes 0xC000 /* Vendor-specific tests */
+#define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */
+#define M66592_EP_DIR_IN 0x0080
+#define M66592_EP_DIR_OUT 0x0000
+
+#define M66592_USBLENG 0x5A
+#define M66592_wLength 0xFFFF /* b15-0: wLength */
+
+#define M66592_DCPCFG 0x5C
+#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */
+#define M66592_DIR 0x0010 /* b4: Control transfer DIR select */
+
+#define M66592_DCPMAXP 0x5E
+#define M66592_DEVSEL 0xC000 /* b15-14: Device address select */
+#define M66592_DEVICE_0 0x0000 /* Device address 0 */
+#define M66592_DEVICE_1 0x4000 /* Device address 1 */
+#define M66592_DEVICE_2 0x8000 /* Device address 2 */
+#define M66592_DEVICE_3 0xC000 /* Device address 3 */
+#define M66592_MAXP 0x007F /* b6-0: Maxpacket size of ep0 */
+
+#define M66592_DCPCTR 0x60
+#define M66592_BSTS 0x8000 /* b15: Buffer status */
+#define M66592_SUREQ 0x4000 /* b14: Send USB request */
+#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define M66592_CCPL 0x0004 /* b2: control transfer complete */
+#define M66592_PID 0x0003 /* b1-0: Response PID */
+#define M66592_PID_STALL 0x0002 /* STALL */
+#define M66592_PID_BUF 0x0001 /* BUF */
+#define M66592_PID_NAK 0x0000 /* NAK */
+
+#define M66592_PIPESEL 0x64
+#define M66592_PIPENM 0x0007 /* b2-0: Pipe select */
+#define M66592_PIPE0 0x0000 /* PIPE 0 */
+#define M66592_PIPE1 0x0001 /* PIPE 1 */
+#define M66592_PIPE2 0x0002 /* PIPE 2 */
+#define M66592_PIPE3 0x0003 /* PIPE 3 */
+#define M66592_PIPE4 0x0004 /* PIPE 4 */
+#define M66592_PIPE5 0x0005 /* PIPE 5 */
+#define M66592_PIPE6 0x0006 /* PIPE 6 */
+#define M66592_PIPE7 0x0007 /* PIPE 7 */
+
+#define M66592_PIPECFG 0x66
+#define M66592_TYP 0xC000 /* b15-14: Transfer type */
+#define M66592_ISO 0xC000 /* Isochronous */
+#define M66592_INT 0x8000 /* Interrupt */
+#define M66592_BULK 0x4000 /* Bulk */
+#define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode */
+#define M66592_DBLB 0x0200 /* b9: Double buffer mode select */
+#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */
+#define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */
+#define M66592_DIR 0x0010 /* b4: Transfer direction select */
+#define M66592_DIR_H_OUT 0x0010 /* HOST OUT */
+#define M66592_DIR_P_IN 0x0010 /* PERI IN */
+#define M66592_DIR_H_IN 0x0000 /* HOST IN */
+#define M66592_DIR_P_OUT 0x0000 /* PERI OUT */
+#define M66592_EPNUM	0x000F	/* b3-0: Endpoint number select */
+#define M66592_EP1 0x0001
+#define M66592_EP2 0x0002
+#define M66592_EP3 0x0003
+#define M66592_EP4 0x0004
+#define M66592_EP5 0x0005
+#define M66592_EP6 0x0006
+#define M66592_EP7 0x0007
+#define M66592_EP8 0x0008
+#define M66592_EP9 0x0009
+#define M66592_EP10 0x000A
+#define M66592_EP11 0x000B
+#define M66592_EP12 0x000C
+#define M66592_EP13 0x000D
+#define M66592_EP14 0x000E
+#define M66592_EP15 0x000F
+
+#define M66592_PIPEBUF 0x68
+#define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */
+#define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10)
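+/* e.g. a 512-byte pipe buffer encodes as ((512 / 64) - 1) << 10 = 7 << 10 */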
+#define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */
+
+#define M66592_PIPEMAXP 0x6A
+#define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */
+
+#define M66592_PIPEPERI 0x6C
+#define M66592_IFIS 0x1000 /* b12: ISO in-buffer flush mode */
+#define M66592_IITV 0x0007 /* b2-0: ISO interval */
+
+#define M66592_PIPE1CTR 0x70
+#define M66592_PIPE2CTR 0x72
+#define M66592_PIPE3CTR 0x74
+#define M66592_PIPE4CTR 0x76
+#define M66592_PIPE5CTR 0x78
+#define M66592_PIPE6CTR 0x7A
+#define M66592_PIPE7CTR 0x7C
+#define M66592_BSTS 0x8000 /* b15: Buffer status */
+#define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (PIPE 1-5) */
+#define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */
+#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define M66592_PID 0x0003 /* b1-0: Response PID */
+
+#define M66592_INVALID_REG 0x7E
+
+
+#define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + (pipenum - 1) * 2)
+
+#define M66592_MAX_SAMPLING 10
+
+#define M66592_MAX_NUM_PIPE 8
+#define M66592_MAX_NUM_BULK 3
+#define M66592_MAX_NUM_ISOC 2
+#define M66592_MAX_NUM_INT 2
+
+#define M66592_BASE_PIPENUM_BULK 3
+#define M66592_BASE_PIPENUM_ISOC 1
+#define M66592_BASE_PIPENUM_INT 6
+
+#define M66592_BASE_BUFNUM 6
+#define M66592_MAX_BUFNUM 0x4F
+
+struct m66592_pipe_info {
+ u16 pipe;
+ u16 epnum;
+ u16 maxpacket;
+ u16 type;
+ u16 interval;
+ u16 dir_in;
+};
+
+struct m66592_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct m66592_ep {
+ struct usb_ep ep;
+ struct m66592 *m66592;
+
+ struct list_head queue;
+ unsigned busy:1;
+	unsigned internal_ccpl:1;	/* used only by the control pipe */
+
+	/* these fields are valid only after m66592_enable() */
+ unsigned use_dma:1;
+ u16 pipenum;
+ u16 type;
+
+ /* register address */
+ unsigned long fifoaddr;
+ unsigned long fifosel;
+ unsigned long fifoctr;
+ unsigned long fifotrn;
+ unsigned long pipectr;
+};
+
+struct m66592 {
+ spinlock_t lock;
+ void __iomem *reg;
+ struct clk *clk;
+ struct m66592_platdata *pdata;
+ unsigned long irq_trigger;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct m66592_ep ep[M66592_MAX_NUM_PIPE];
+ struct m66592_ep *pipenum2ep[M66592_MAX_NUM_PIPE];
+ struct m66592_ep *epaddr2ep[16];
+
+ struct usb_request *ep0_req; /* for internal request */
+ __le16 ep0_data; /* for internal request */
+ u16 old_vbus;
+
+ struct timer_list timer;
+
+ int scount;
+
+ int old_dvsq;
+
+ /* pipe config */
+ int bulk;
+ int interrupt;
+ int isochronous;
+ int num_dma;
+};
+#define to_m66592(g) (container_of((g), struct m66592, gadget))
+
+#define gadget_to_m66592(_gadget) container_of(_gadget, struct m66592, gadget)
+#define m66592_to_gadget(m66592) (&m66592->gadget)
+
+#define is_bulk_pipe(pipenum) \
+ ((pipenum >= M66592_BASE_PIPENUM_BULK) && \
+ (pipenum < (M66592_BASE_PIPENUM_BULK + M66592_MAX_NUM_BULK)))
+#define is_interrupt_pipe(pipenum) \
+ ((pipenum >= M66592_BASE_PIPENUM_INT) && \
+ (pipenum < (M66592_BASE_PIPENUM_INT + M66592_MAX_NUM_INT)))
+#define is_isoc_pipe(pipenum) \
+ ((pipenum >= M66592_BASE_PIPENUM_ISOC) && \
+ (pipenum < (M66592_BASE_PIPENUM_ISOC + M66592_MAX_NUM_ISOC)))
+
+#define enable_irq_ready(m66592, pipenum) \
+ enable_pipe_irq(m66592, pipenum, M66592_BRDYENB)
+#define disable_irq_ready(m66592, pipenum) \
+ disable_pipe_irq(m66592, pipenum, M66592_BRDYENB)
+#define enable_irq_empty(m66592, pipenum) \
+ enable_pipe_irq(m66592, pipenum, M66592_BEMPENB)
+#define disable_irq_empty(m66592, pipenum) \
+ disable_pipe_irq(m66592, pipenum, M66592_BEMPENB)
+#define enable_irq_nrdy(m66592, pipenum) \
+ enable_pipe_irq(m66592, pipenum, M66592_NRDYENB)
+#define disable_irq_nrdy(m66592, pipenum) \
+ disable_pipe_irq(m66592, pipenum, M66592_NRDYENB)
+
+/*-------------------------------------------------------------------------*/
+static inline u16 m66592_read(struct m66592 *m66592, unsigned long offset)
+{
+ return ioread16(m66592->reg + offset);
+}
+
+static inline void m66592_read_fifo(struct m66592 *m66592,
+ unsigned long offset,
+ void *buf, unsigned long len)
+{
+ void __iomem *fifoaddr = m66592->reg + offset;
+
+ if (m66592->pdata->on_chip) {
+ len = (len + 3) / 4;
+ ioread32_rep(fifoaddr, buf, len);
+ } else {
+ len = (len + 1) / 2;
+ ioread16_rep(fifoaddr, buf, len);
+ }
+}
+
+static inline void m66592_write(struct m66592 *m66592, u16 val,
+ unsigned long offset)
+{
+ iowrite16(val, m66592->reg + offset);
+}
+
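+/* read-modify-write: clear the bits in 'pat', then set the bits in 'val' */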
+static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat,
+ unsigned long offset)
+{
+ u16 tmp;
+ tmp = m66592_read(m66592, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ m66592_write(m66592, tmp, offset);
+}
+
+#define m66592_bclr(m66592, val, offset) \
+ m66592_mdfy(m66592, 0, val, offset)
+#define m66592_bset(m66592, val, offset) \
+ m66592_mdfy(m66592, val, 0, offset)
+
+static inline void m66592_write_fifo(struct m66592 *m66592,
+ struct m66592_ep *ep,
+ void *buf, unsigned long len)
+{
+ void __iomem *fifoaddr = m66592->reg + ep->fifoaddr;
+
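+	/*
+	 * The on-chip controller sits on a 32-bit bus and the external chip
+	 * on a 16-bit bus; any unaligned tail bytes are written one at a
+	 * time, honouring the configured endianness.
+	 */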
+ if (m66592->pdata->on_chip) {
+ unsigned long count;
+ unsigned char *pb;
+ int i;
+
+ count = len / 4;
+ iowrite32_rep(fifoaddr, buf, count);
+
+ if (len & 0x00000003) {
+ pb = buf + count * 4;
+ for (i = 0; i < (len & 0x00000003); i++) {
+				if (m66592_read(m66592, M66592_CFBCFG)) /* little endian */
+ iowrite8(pb[i], fifoaddr + (3 - i));
+ else
+ iowrite8(pb[i], fifoaddr + i);
+ }
+ }
+ } else {
+ unsigned long odd = len & 0x0001;
+
+ len = len / 2;
+ iowrite16_rep(fifoaddr, buf, len);
+ if (odd) {
+ unsigned char *p = buf + len*2;
+ if (m66592->pdata->wr0_shorted_to_wr1)
+ m66592_bclr(m66592, M66592_MBW_16, ep->fifosel);
+ iowrite8(*p, fifoaddr);
+ if (m66592->pdata->wr0_shorted_to_wr1)
+ m66592_bset(m66592, M66592_MBW_16, ep->fifosel);
+ }
+ }
+}
+
+#endif /* ifndef __M66592_UDC_H__ */
+
+
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
new file mode 100644
index 000000000..ddf0ed3eb
--- /dev/null
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -0,0 +1,1332 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MAX3420 Device Controller driver for USB.
+ *
+ * Author: Jaswinder Singh Brar <jaswinder.singh@linaro.org>
+ * (C) Copyright 2019-2020 Linaro Ltd
+ *
+ * Based on:
+ * o MAX3420E datasheet
+ * https://datasheets.maximintegrated.com/en/ds/MAX3420E.pdf
+ * o MAX342{0,1}E Programming Guides
+ * https://pdfserv.maximintegrated.com/en/an/AN3598.pdf
+ * https://pdfserv.maximintegrated.com/en/an/AN3785.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/prefetch.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio/consumer.h>
+
+#define MAX3420_MAX_EPS 4
+#define MAX3420_EP_MAX_PACKET 64 /* Same for all Endpoints */
+#define MAX3420_EPNAME_SIZE 16 /* Buffer size for endpoint name */
+
+#define MAX3420_ACKSTAT BIT(0)
+
+#define MAX3420_SPI_DIR_RD 0 /* read register from MAX3420 */
+#define MAX3420_SPI_DIR_WR 1 /* write register to MAX3420 */
+
+/* SPI commands: */
+#define MAX3420_SPI_DIR_SHIFT 1
+#define MAX3420_SPI_REG_SHIFT 3
+
+#define MAX3420_REG_EP0FIFO 0
+#define MAX3420_REG_EP1FIFO 1
+#define MAX3420_REG_EP2FIFO 2
+#define MAX3420_REG_EP3FIFO 3
+#define MAX3420_REG_SUDFIFO 4
+#define MAX3420_REG_EP0BC 5
+#define MAX3420_REG_EP1BC 6
+#define MAX3420_REG_EP2BC 7
+#define MAX3420_REG_EP3BC 8
+
+#define MAX3420_REG_EPSTALLS 9
+ #define ACKSTAT BIT(6)
+ #define STLSTAT BIT(5)
+ #define STLEP3IN BIT(4)
+ #define STLEP2IN BIT(3)
+ #define STLEP1OUT BIT(2)
+ #define STLEP0OUT BIT(1)
+ #define STLEP0IN BIT(0)
+
+#define MAX3420_REG_CLRTOGS 10
+ #define EP3DISAB BIT(7)
+ #define EP2DISAB BIT(6)
+ #define EP1DISAB BIT(5)
+ #define CTGEP3IN BIT(4)
+ #define CTGEP2IN BIT(3)
+ #define CTGEP1OUT BIT(2)
+
+#define MAX3420_REG_EPIRQ 11
+#define MAX3420_REG_EPIEN 12
+ #define SUDAVIRQ BIT(5)
+ #define IN3BAVIRQ BIT(4)
+ #define IN2BAVIRQ BIT(3)
+ #define OUT1DAVIRQ BIT(2)
+ #define OUT0DAVIRQ BIT(1)
+ #define IN0BAVIRQ BIT(0)
+
+#define MAX3420_REG_USBIRQ 13
+#define MAX3420_REG_USBIEN 14
+ #define OSCOKIRQ BIT(0)
+ #define RWUDNIRQ BIT(1)
+ #define BUSACTIRQ BIT(2)
+ #define URESIRQ BIT(3)
+ #define SUSPIRQ BIT(4)
+ #define NOVBUSIRQ BIT(5)
+ #define VBUSIRQ BIT(6)
+ #define URESDNIRQ BIT(7)
+
+#define MAX3420_REG_USBCTL 15
+ #define HOSCSTEN BIT(7)
+ #define VBGATE BIT(6)
+ #define CHIPRES BIT(5)
+ #define PWRDOWN BIT(4)
+ #define CONNECT BIT(3)
+ #define SIGRWU BIT(2)
+
+#define MAX3420_REG_CPUCTL 16
+ #define IE BIT(0)
+
+#define MAX3420_REG_PINCTL 17
+ #define EP3INAK BIT(7)
+ #define EP2INAK BIT(6)
+ #define EP0INAK BIT(5)
+ #define FDUPSPI BIT(4)
+ #define INTLEVEL BIT(3)
+ #define POSINT BIT(2)
+ #define GPXB BIT(1)
+ #define GPXA BIT(0)
+
+#define MAX3420_REG_REVISION 18
+
+#define MAX3420_REG_FNADDR 19
+ #define FNADDR_MASK 0x7f
+
+#define MAX3420_REG_IOPINS 20
+#define MAX3420_REG_IOPINS2 21
+#define MAX3420_REG_GPINIRQ 22
+#define MAX3420_REG_GPINIEN 23
+#define MAX3420_REG_GPINPOL 24
+#define MAX3420_REG_HIRQ 25
+#define MAX3420_REG_HIEN 26
+#define MAX3420_REG_MODE 27
+#define MAX3420_REG_PERADDR 28
+#define MAX3420_REG_HCTL 29
+#define MAX3420_REG_HXFR 30
+#define MAX3420_REG_HRSL 31
+
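+/*
+ * Deferred-work flags kept in the 'todo' words: the GENMASK entries are
+ * 2-bit fields whose low bit marks a request as pending and whose high
+ * bit selects the variant (e.g. ENABLE vs DISABLE).
+ */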
+#define ENABLE_IRQ BIT(0)
+#define IOPIN_UPDATE BIT(1)
+#define REMOTE_WAKEUP BIT(2)
+#define CONNECT_HOST GENMASK(4, 3)
+#define HCONNECT (1 << 3)
+#define HDISCONNECT (3 << 3)
+#define UDC_START GENMASK(6, 5)
+#define START (1 << 5)
+#define STOP (3 << 5)
+#define ENABLE_EP GENMASK(8, 7)
+#define ENABLE (1 << 7)
+#define DISABLE (3 << 7)
+#define STALL_EP GENMASK(10, 9)
+#define STALL (1 << 9)
+#define UNSTALL (3 << 9)
+
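+/*
+ * An SPI command byte carries the register address in bits 7:3 and the
+ * direction in bit 1 (0 = read, 1 = write); bit 0 can carry ACKSTAT.
+ */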
+#define MAX3420_CMD(c) FIELD_PREP(GENMASK(7, 3), c)
+#define MAX3420_SPI_CMD_RD(c) (MAX3420_CMD(c) | (0 << 1))
+#define MAX3420_SPI_CMD_WR(c) (MAX3420_CMD(c) | (1 << 1))
+
+struct max3420_req {
+ struct usb_request usb_req;
+ struct list_head queue;
+ struct max3420_ep *ep;
+};
+
+struct max3420_ep {
+ struct usb_ep ep_usb;
+ struct max3420_udc *udc;
+ struct list_head queue;
+ char name[MAX3420_EPNAME_SIZE];
+ unsigned int maxpacket;
+ spinlock_t lock;
+ int halted;
+ u32 todo;
+ int id;
+};
+
+struct max3420_udc {
+ struct usb_gadget gadget;
+ struct max3420_ep ep[MAX3420_MAX_EPS];
+ struct usb_gadget_driver *driver;
+ struct task_struct *thread_task;
+ int remote_wkp, is_selfpowered;
+ bool vbus_active, softconnect;
+ struct usb_ctrlrequest setup;
+ struct mutex spi_bus_mutex;
+ struct max3420_req ep0req;
+ struct spi_device *spi;
+ struct device *dev;
+ spinlock_t lock;
+ bool suspended;
+ u8 ep0buf[64];
+ u32 todo;
+};
+
+#define to_max3420_req(r) container_of((r), struct max3420_req, usb_req)
+#define to_max3420_ep(e) container_of((e), struct max3420_ep, ep_usb)
+#define to_udc(g) container_of((g), struct max3420_udc, gadget)
+
+#define DRIVER_DESC "MAX3420 USB Device-Mode Driver"
+static const char driver_name[] = "max3420-udc";
+
+/* Control endpoint configuration. */
+static const struct usb_endpoint_descriptor ep0_desc = {
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(MAX3420_EP_MAX_PACKET),
+};
+
+static void spi_ack_ctrl(struct max3420_udc *udc)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 txdata[1];
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ txdata[0] = MAX3420_ACKSTAT;
+ transfer.tx_buf = txdata;
+ transfer.len = 1;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static u8 spi_rd8_ack(struct max3420_udc *udc, u8 reg, int actstat)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 txdata[2], rxdata[2];
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
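+	/*
+	 * Full-duplex, two-byte transfer: the command goes out in byte 0 and
+	 * the register contents are clocked back in byte 1.
+	 */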
+ txdata[0] = MAX3420_SPI_CMD_RD(reg) | (actstat ? MAX3420_ACKSTAT : 0);
+ transfer.tx_buf = txdata;
+ transfer.rx_buf = rxdata;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
+ return rxdata[1];
+}
+
+static u8 spi_rd8(struct max3420_udc *udc, u8 reg)
+{
+ return spi_rd8_ack(udc, reg, 0);
+}
+
+static void spi_wr8_ack(struct max3420_udc *udc, u8 reg, u8 val, int actstat)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 txdata[2];
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ txdata[0] = MAX3420_SPI_CMD_WR(reg) | (actstat ? MAX3420_ACKSTAT : 0);
+ txdata[1] = val;
+
+ transfer.tx_buf = txdata;
+ transfer.len = 2;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static void spi_wr8(struct max3420_udc *udc, u8 reg, u8 val)
+{
+ spi_wr8_ack(udc, reg, val, 0);
+}
+
+static void spi_rd_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 local_buf[MAX3420_EP_MAX_PACKET + 1] = {};
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ local_buf[0] = MAX3420_SPI_CMD_RD(reg);
+ transfer.tx_buf = &local_buf[0];
+ transfer.rx_buf = &local_buf[0];
+ transfer.len = len + 1;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+
+ memcpy(buf, &local_buf[1], len);
+}
+
+static void spi_wr_buf(struct max3420_udc *udc, u8 reg, void *buf, u8 len)
+{
+ struct spi_device *spi = udc->spi;
+ struct spi_transfer transfer;
+ struct spi_message msg;
+ u8 local_buf[MAX3420_EP_MAX_PACKET + 1] = {};
+
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
+ local_buf[0] = MAX3420_SPI_CMD_WR(reg);
+ memcpy(&local_buf[1], buf, len);
+
+ transfer.tx_buf = local_buf;
+ transfer.len = len + 1;
+
+ spi_message_add_tail(&transfer, &msg);
+ spi_sync(spi, &msg);
+}
+
+static int spi_max3420_enable(struct max3420_ep *ep)
+{
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+ u8 epdis, epien;
+ int todo;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ todo = ep->todo & ENABLE_EP;
+ ep->todo &= ~ENABLE_EP;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (!todo || ep->id == 0)
+ return false;
+
+ epien = spi_rd8(udc, MAX3420_REG_EPIEN);
+ epdis = spi_rd8(udc, MAX3420_REG_CLRTOGS);
+
+ if (todo == ENABLE) {
+ epdis &= ~BIT(ep->id + 4);
+ epien |= BIT(ep->id + 1);
+ } else {
+ epdis |= BIT(ep->id + 4);
+ epien &= ~BIT(ep->id + 1);
+ }
+
+ spi_wr8(udc, MAX3420_REG_CLRTOGS, epdis);
+ spi_wr8(udc, MAX3420_REG_EPIEN, epien);
+
+ return true;
+}
+
+static int spi_max3420_stall(struct max3420_ep *ep)
+{
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+ u8 epstalls;
+ int todo;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ todo = ep->todo & STALL_EP;
+ ep->todo &= ~STALL_EP;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (!todo || ep->id == 0)
+ return false;
+
+ epstalls = spi_rd8(udc, MAX3420_REG_EPSTALLS);
+ if (todo == STALL) {
+ ep->halted = 1;
+ epstalls |= BIT(ep->id + 1);
+ } else {
+ u8 clrtogs;
+
+ ep->halted = 0;
+ epstalls &= ~BIT(ep->id + 1);
+ clrtogs = spi_rd8(udc, MAX3420_REG_CLRTOGS);
+ clrtogs |= BIT(ep->id + 1);
+ spi_wr8(udc, MAX3420_REG_CLRTOGS, clrtogs);
+ }
+ spi_wr8(udc, MAX3420_REG_EPSTALLS, epstalls | ACKSTAT);
+
+ return true;
+}
+
+static int spi_max3420_rwkup(struct max3420_udc *udc)
+{
+ unsigned long flags;
+ int wake_remote;
+ u8 usbctl;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ wake_remote = udc->todo & REMOTE_WAKEUP;
+ udc->todo &= ~REMOTE_WAKEUP;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (!wake_remote || !udc->suspended)
+ return false;
+
+	/* Set remote-wakeup signal */
+ usbctl = spi_rd8(udc, MAX3420_REG_USBCTL);
+ usbctl |= SIGRWU;
+ spi_wr8(udc, MAX3420_REG_USBCTL, usbctl);
+
+ msleep_interruptible(5);
+
+	/* Clear remote-wakeup signal */
+ usbctl = spi_rd8(udc, MAX3420_REG_USBCTL);
+ usbctl &= ~SIGRWU;
+ spi_wr8(udc, MAX3420_REG_USBCTL, usbctl);
+
+ udc->suspended = false;
+
+ return true;
+}
+
+static void max3420_nuke(struct max3420_ep *ep, int status);
+static void __max3420_stop(struct max3420_udc *udc)
+{
+ u8 val;
+ int i;
+
+ /* clear all pending requests */
+ for (i = 1; i < MAX3420_MAX_EPS; i++)
+ max3420_nuke(&udc->ep[i], -ECONNRESET);
+
+ /* Disable IRQ to CPU */
+ spi_wr8(udc, MAX3420_REG_CPUCTL, 0);
+
+ val = spi_rd8(udc, MAX3420_REG_USBCTL);
+ val |= PWRDOWN;
+ if (udc->is_selfpowered)
+ val &= ~HOSCSTEN;
+ else
+ val |= HOSCSTEN;
+ spi_wr8(udc, MAX3420_REG_USBCTL, val);
+}
+
+static void __max3420_start(struct max3420_udc *udc)
+{
+ u8 val;
+
+	/*
+	 * This delay is needed if bus-powered, and even for self-powered
+	 * operation it helps stability.
+	 */
+ msleep_interruptible(250);
+
+ /* configure SPI */
+ spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI);
+
+ /* Chip Reset */
+ spi_wr8(udc, MAX3420_REG_USBCTL, CHIPRES);
+ msleep_interruptible(5);
+ spi_wr8(udc, MAX3420_REG_USBCTL, 0);
+
+ /* Poll for OSC to stabilize */
+ while (1) {
+ val = spi_rd8(udc, MAX3420_REG_USBIRQ);
+ if (val & OSCOKIRQ)
+ break;
+ cond_resched();
+ }
+
+ /* Enable PULL-UP only when Vbus detected */
+ val = spi_rd8(udc, MAX3420_REG_USBCTL);
+ val |= VBGATE | CONNECT;
+ spi_wr8(udc, MAX3420_REG_USBCTL, val);
+
+ val = URESDNIRQ | URESIRQ;
+ if (udc->is_selfpowered)
+ val |= NOVBUSIRQ;
+ spi_wr8(udc, MAX3420_REG_USBIEN, val);
+
+ /* Enable only EP0 interrupts */
+ val = IN0BAVIRQ | OUT0DAVIRQ | SUDAVIRQ;
+ spi_wr8(udc, MAX3420_REG_EPIEN, val);
+
+ /* Enable IRQ to CPU */
+ spi_wr8(udc, MAX3420_REG_CPUCTL, IE);
+}
+
+static int max3420_start(struct max3420_udc *udc)
+{
+ unsigned long flags;
+ int todo;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ todo = udc->todo & UDC_START;
+ udc->todo &= ~UDC_START;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (!todo)
+ return false;
+
+ if (udc->vbus_active && udc->softconnect)
+ __max3420_start(udc);
+ else
+ __max3420_stop(udc);
+
+ return true;
+}
+
+static irqreturn_t max3420_vbus_handler(int irq, void *dev_id)
+{
+ struct max3420_udc *udc = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+	/* it's a VBUS change interrupt */
+ udc->vbus_active = !udc->vbus_active;
+ udc->todo |= UDC_START;
+ usb_udc_vbus_handler(&udc->gadget, udc->vbus_active);
+ usb_gadget_set_state(&udc->gadget, udc->vbus_active
+ ? USB_STATE_POWERED : USB_STATE_NOTATTACHED);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task)
+ wake_up_process(udc->thread_task);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max3420_irq_handler(int irq, void *dev_id)
+{
+ struct max3420_udc *udc = dev_id;
+ struct spi_device *spi = udc->spi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if ((udc->todo & ENABLE_IRQ) == 0) {
+ disable_irq_nosync(spi->irq);
+ udc->todo |= ENABLE_IRQ;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task)
+ wake_up_process(udc->thread_task);
+
+ return IRQ_HANDLED;
+}
+
+static void max3420_getstatus(struct max3420_udc *udc)
+{
+ struct max3420_ep *ep;
+ u16 status = 0;
+
+ switch (udc->setup.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ /* Get device status */
+ status = udc->gadget.is_selfpowered << USB_DEVICE_SELF_POWERED;
+ status |= (udc->remote_wkp << USB_DEVICE_REMOTE_WAKEUP);
+ break;
+ case USB_RECIP_INTERFACE:
+ if (udc->driver->setup(&udc->gadget, &udc->setup) < 0)
+ goto stall;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = &udc->ep[udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK];
+ if (udc->setup.wIndex & USB_DIR_IN) {
+ if (!ep->ep_usb.caps.dir_in)
+ goto stall;
+ } else {
+ if (!ep->ep_usb.caps.dir_out)
+ goto stall;
+ }
+ if (ep->halted)
+ status = 1 << USB_ENDPOINT_HALT;
+ break;
+ default:
+ goto stall;
+ }
+
+ status = cpu_to_le16(status);
+ spi_wr_buf(udc, MAX3420_REG_EP0FIFO, &status, 2);
+ spi_wr8_ack(udc, MAX3420_REG_EP0BC, 2, 1);
+ return;
+stall:
+ dev_err(udc->dev, "Can't respond to getstatus request\n");
+ spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT);
+}
+
+static void max3420_set_clear_feature(struct max3420_udc *udc)
+{
+ struct max3420_ep *ep;
+ int set = udc->setup.bRequest == USB_REQ_SET_FEATURE;
+ unsigned long flags;
+ int id;
+
+ switch (udc->setup.bRequestType) {
+ case USB_RECIP_DEVICE:
+ if (udc->setup.wValue != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+
+ if (udc->setup.bRequest == USB_REQ_SET_FEATURE)
+ udc->remote_wkp = 1;
+ else
+ udc->remote_wkp = 0;
+
+ return spi_ack_ctrl(udc);
+
+ case USB_RECIP_ENDPOINT:
+ if (udc->setup.wValue != USB_ENDPOINT_HALT)
+ break;
+
+ id = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ ep = &udc->ep[id];
+
+ spin_lock_irqsave(&ep->lock, flags);
+ ep->todo &= ~STALL_EP;
+ if (set)
+ ep->todo |= STALL;
+ else
+ ep->todo |= UNSTALL;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ spi_max3420_stall(ep);
+ return;
+ default:
+ break;
+ }
+
+ dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n");
+ spi_wr8(udc, MAX3420_REG_EPSTALLS, STLEP0IN | STLEP0OUT | STLSTAT);
+}
+
+static void max3420_handle_setup(struct max3420_udc *udc)
+{
+ struct usb_ctrlrequest setup;
+
+ spi_rd_buf(udc, MAX3420_REG_SUDFIFO, (void *)&setup, 8);
+
+ udc->setup = setup;
+ udc->setup.wValue = cpu_to_le16(setup.wValue);
+ udc->setup.wIndex = cpu_to_le16(setup.wIndex);
+ udc->setup.wLength = cpu_to_le16(setup.wLength);
+
+ switch (udc->setup.bRequest) {
+ case USB_REQ_GET_STATUS:
+ /* Data+Status phase from udc */
+ if ((udc->setup.bRequestType &
+ (USB_DIR_IN | USB_TYPE_MASK)) !=
+ (USB_DIR_IN | USB_TYPE_STANDARD)) {
+ break;
+ }
+ return max3420_getstatus(udc);
+ case USB_REQ_SET_ADDRESS:
+ /* Status phase from udc */
+ if (udc->setup.bRequestType != (USB_DIR_OUT |
+ USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
+ break;
+ }
+ spi_rd8_ack(udc, MAX3420_REG_FNADDR, 1);
+ dev_dbg(udc->dev, "Assigned Address=%d\n", udc->setup.wValue);
+ return;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ /* Requests with no data phase, status phase from udc */
+ if ((udc->setup.bRequestType & USB_TYPE_MASK)
+ != USB_TYPE_STANDARD)
+ break;
+ return max3420_set_clear_feature(udc);
+ default:
+ break;
+ }
+
+ if (udc->driver->setup(&udc->gadget, &setup) < 0) {
+ /* Stall EP0 */
+ spi_wr8(udc, MAX3420_REG_EPSTALLS,
+ STLEP0IN | STLEP0OUT | STLSTAT);
+ }
+}
+
+static void max3420_req_done(struct max3420_req *req, int status)
+{
+ struct max3420_ep *ep = req->ep;
+ struct max3420_udc *udc = ep->udc;
+
+ if (req->usb_req.status == -EINPROGRESS)
+ req->usb_req.status = status;
+ else
+ status = req->usb_req.status;
+
+ if (status && status != -ESHUTDOWN)
+ dev_err(udc->dev, "%s done %p, status %d\n",
+ ep->ep_usb.name, req, status);
+
+ if (req->usb_req.complete)
+ req->usb_req.complete(&ep->ep_usb, &req->usb_req);
+}
+
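+/*
+ * Move at most one packet per call between the request buffer and the
+ * endpoint FIFO. A short packet, or reaching the request length,
+ * completes the request; returns false if nothing was queued.
+ */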
+static int max3420_do_data(struct max3420_udc *udc, int ep_id, int in)
+{
+ struct max3420_ep *ep = &udc->ep[ep_id];
+ struct max3420_req *req;
+ int done, length, psz;
+ void *buf;
+
+ if (list_empty(&ep->queue))
+ return false;
+
+ req = list_first_entry(&ep->queue, struct max3420_req, queue);
+ buf = req->usb_req.buf + req->usb_req.actual;
+
+ psz = ep->ep_usb.maxpacket;
+ length = req->usb_req.length - req->usb_req.actual;
+ length = min(length, psz);
+
+ if (length == 0) {
+ done = 1;
+ goto xfer_done;
+ }
+
+ done = 0;
+ if (in) {
+ prefetch(buf);
+ spi_wr_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length);
+ spi_wr8(udc, MAX3420_REG_EP0BC + ep_id, length);
+ if (length < psz)
+ done = 1;
+ } else {
+ psz = spi_rd8(udc, MAX3420_REG_EP0BC + ep_id);
+ length = min(length, psz);
+ prefetchw(buf);
+ spi_rd_buf(udc, MAX3420_REG_EP0FIFO + ep_id, buf, length);
+ if (length < ep->ep_usb.maxpacket)
+ done = 1;
+ }
+
+ req->usb_req.actual += length;
+
+ if (req->usb_req.actual == req->usb_req.length)
+ done = 1;
+
+xfer_done:
+ if (done) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ list_del_init(&req->queue);
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (ep_id == 0)
+ spi_ack_ctrl(udc);
+
+ max3420_req_done(req, 0);
+ }
+
+ return true;
+}
+
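+/*
+ * Read EPIRQ/EPIEN/USBIRQ/USBIEN in one four-byte burst and mask the
+ * pending bits against their enables. Setup packets and bus events
+ * are handled one per call; endpoint data events are then serviced
+ * together. Returns true if any work was done.
+ */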
+static int max3420_handle_irqs(struct max3420_udc *udc)
+{
+ u8 epien, epirq, usbirq, usbien, reg[4];
+ bool ret = false;
+
+ spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 4);
+ epirq = reg[0];
+ epien = reg[1];
+ usbirq = reg[2];
+ usbien = reg[3];
+
+ usbirq &= usbien;
+ epirq &= epien;
+
+ if (epirq & SUDAVIRQ) {
+ spi_wr8(udc, MAX3420_REG_EPIRQ, SUDAVIRQ);
+ max3420_handle_setup(udc);
+ return true;
+ }
+
+ if (usbirq & VBUSIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, VBUSIRQ);
+ dev_dbg(udc->dev, "Cable plugged in\n");
+ return true;
+ }
+
+ if (usbirq & NOVBUSIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, NOVBUSIRQ);
+ dev_dbg(udc->dev, "Cable pulled out\n");
+ return true;
+ }
+
+ if (usbirq & URESIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, URESIRQ);
+ dev_dbg(udc->dev, "USB Reset - Start\n");
+ return true;
+ }
+
+ if (usbirq & URESDNIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, URESDNIRQ);
+ dev_dbg(udc->dev, "USB Reset - END\n");
+ spi_wr8(udc, MAX3420_REG_USBIEN, URESDNIRQ | URESIRQ);
+ spi_wr8(udc, MAX3420_REG_EPIEN, SUDAVIRQ | IN0BAVIRQ
+ | OUT0DAVIRQ);
+ return true;
+ }
+
+ if (usbirq & SUSPIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, SUSPIRQ);
+ dev_dbg(udc->dev, "USB Suspend - Enter\n");
+ udc->suspended = true;
+ return true;
+ }
+
+ if (usbirq & BUSACTIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, BUSACTIRQ);
+ dev_dbg(udc->dev, "USB Suspend - Exit\n");
+ udc->suspended = false;
+ return true;
+ }
+
+ if (usbirq & RWUDNIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, RWUDNIRQ);
+ dev_dbg(udc->dev, "Asked Host to wakeup\n");
+ return true;
+ }
+
+ if (usbirq & OSCOKIRQ) {
+ spi_wr8(udc, MAX3420_REG_USBIRQ, OSCOKIRQ);
+ dev_dbg(udc->dev, "Osc stabilized, start work\n");
+ return true;
+ }
+
+ if (epirq & OUT0DAVIRQ && max3420_do_data(udc, 0, 0)) {
+ spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT0DAVIRQ, 1);
+ ret = true;
+ }
+
+ if (epirq & IN0BAVIRQ && max3420_do_data(udc, 0, 1))
+ ret = true;
+
+ if (epirq & OUT1DAVIRQ && max3420_do_data(udc, 1, 0)) {
+ spi_wr8_ack(udc, MAX3420_REG_EPIRQ, OUT1DAVIRQ, 1);
+ ret = true;
+ }
+
+ if (epirq & IN2BAVIRQ && max3420_do_data(udc, 2, 1))
+ ret = true;
+
+ if (epirq & IN3BAVIRQ && max3420_do_data(udc, 3, 1))
+ ret = true;
+
+ return ret;
+}
+
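+/*
+ * All SPI traffic is funnelled through this worker thread, serialized
+ * by spi_bus_mutex. It loops while there is work to do (state change,
+ * chip IRQs, remote wakeup, endpoint bookkeeping); once idle it
+ * re-enables the chip IRQ and sleeps, with a 250ms timeout as a
+ * safety net.
+ */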
+static int max3420_thread(void *dev_id)
+{
+ struct max3420_udc *udc = dev_id;
+ struct spi_device *spi = udc->spi;
+ int i, loop_again = 1;
+ unsigned long flags;
+
+ while (!kthread_should_stop()) {
+ if (!loop_again) {
+ ktime_t kt = ns_to_ktime(1000 * 1000 * 250); /* 250ms */
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (udc->todo & ENABLE_IRQ) {
+ enable_irq(spi->irq);
+ udc->todo &= ~ENABLE_IRQ;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
+ }
+ loop_again = 0;
+
+ mutex_lock(&udc->spi_bus_mutex);
+
+ /* Nothing to do unless VBUS is active and we are soft-connected */
+ if (!udc->vbus_active || !udc->softconnect)
+ goto loop;
+
+ if (max3420_start(udc)) {
+ loop_again = 1;
+ goto loop;
+ }
+
+ if (max3420_handle_irqs(udc)) {
+ loop_again = 1;
+ goto loop;
+ }
+
+ if (spi_max3420_rwkup(udc)) {
+ loop_again = 1;
+ goto loop;
+ }
+
+ max3420_do_data(udc, 0, 1); /* get done with the EP0 ZLP */
+
+ for (i = 1; i < MAX3420_MAX_EPS; i++) {
+ struct max3420_ep *ep = &udc->ep[i];
+
+ if (spi_max3420_enable(ep))
+ loop_again = 1;
+ if (spi_max3420_stall(ep))
+ loop_again = 1;
+ }
+loop:
+ mutex_unlock(&udc->spi_bus_mutex);
+ }
+
+ set_current_state(TASK_RUNNING);
+ dev_info(udc->dev, "SPI thread exiting\n");
+ return 0;
+}
+
+static int max3420_ep_set_halt(struct usb_ep *_ep, int stall)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ ep->todo &= ~STALL_EP;
+ if (stall)
+ ep->todo |= STALL;
+ else
+ ep->todo |= UNSTALL;
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ wake_up_process(udc->thread_task);
+
+ dev_dbg(udc->dev, "%sStall %s\n", stall ? "" : "Un", ep->name);
+ return 0;
+}
+
+static int __max3420_ep_enable(struct max3420_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned int maxp = usb_endpoint_maxp(desc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ ep->ep_usb.desc = desc;
+ ep->ep_usb.maxpacket = maxp;
+
+ ep->todo &= ~ENABLE_EP;
+ ep->todo |= ENABLE;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ return 0;
+}
+
+static int max3420_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+
+ __max3420_ep_enable(ep, desc);
+
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static void max3420_nuke(struct max3420_ep *ep, int status)
+{
+ struct max3420_req *req, *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ list_for_each_entry_safe(req, r, &ep->queue, queue) {
+ list_del_init(&req->queue);
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ max3420_req_done(req, status);
+ spin_lock_irqsave(&ep->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+static void __max3420_ep_disable(struct max3420_ep *ep)
+{
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ ep->ep_usb.desc = NULL;
+
+ ep->todo &= ~ENABLE_EP;
+ ep->todo |= DISABLE;
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ dev_dbg(udc->dev, "Disabled %s\n", ep->name);
+}
+
+static int max3420_ep_disable(struct usb_ep *_ep)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+
+ max3420_nuke(ep, -ESHUTDOWN);
+
+ __max3420_ep_disable(ep);
+
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static struct usb_request *max3420_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_req *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->ep = ep;
+
+ return &req->usb_req;
+}
+
+static void max3420_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ kfree(to_max3420_req(_req));
+}
+
+static int max3420_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t ignored)
+{
+ struct max3420_req *req = to_max3420_req(_req);
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ struct max3420_udc *udc = ep->udc;
+ unsigned long flags;
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ wake_up_process(udc->thread_task);
+ return 0;
+}
+
+static int max3420_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct max3420_req *t = NULL;
+ struct max3420_req *req = to_max3420_req(_req);
+ struct max3420_req *iter;
+ struct max3420_ep *ep = to_max3420_ep(_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ /* Pluck the request from the queue */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (iter != req)
+ continue;
+ list_del_init(&req->queue);
+ t = iter;
+ break;
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (t)
+ max3420_req_done(req, -ECONNRESET);
+
+ return 0;
+}
+
+static const struct usb_ep_ops max3420_ep_ops = {
+ .enable = max3420_ep_enable,
+ .disable = max3420_ep_disable,
+ .alloc_request = max3420_alloc_request,
+ .free_request = max3420_free_request,
+ .queue = max3420_ep_queue,
+ .dequeue = max3420_ep_dequeue,
+ .set_halt = max3420_ep_set_halt,
+};
+
+static int max3420_wakeup(struct usb_gadget *gadget)
+{
+ struct max3420_udc *udc = to_udc(gadget);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Only if wakeup allowed by host */
+ if (udc->remote_wkp) {
+ udc->todo |= REMOTE_WAKEUP;
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task)
+ wake_up_process(udc->thread_task);
+ return ret;
+}
+
+static int max3420_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct max3420_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* hook up the driver */
+ udc->driver = driver;
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ udc->gadget.is_selfpowered = udc->is_selfpowered;
+ udc->remote_wkp = 0;
+ udc->softconnect = true;
+ udc->todo |= UDC_START;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task)
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static int max3420_udc_stop(struct usb_gadget *gadget)
+{
+ struct max3420_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->is_selfpowered = udc->gadget.is_selfpowered;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->driver = NULL;
+ udc->softconnect = false;
+ udc->todo |= UDC_START;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->thread_task)
+ wake_up_process(udc->thread_task);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops max3420_udc_ops = {
+ .udc_start = max3420_udc_start,
+ .udc_stop = max3420_udc_stop,
+ .wakeup = max3420_wakeup,
+};
+
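+/*
+ * The MAX3420 endpoint layout is fixed in hardware: EP0 is the
+ * bidirectional control endpoint, EP1 is bulk-OUT and EP2/EP3 are
+ * bulk-IN; only the non-control endpoints go on the gadget's ep_list.
+ */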
+static void max3420_eps_init(struct max3420_udc *udc)
+{
+ int idx;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ for (idx = 0; idx < MAX3420_MAX_EPS; idx++) {
+ struct max3420_ep *ep = &udc->ep[idx];
+
+ spin_lock_init(&ep->lock);
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->udc = udc;
+ ep->id = idx;
+ ep->halted = 0;
+ ep->maxpacket = 0;
+ ep->ep_usb.name = ep->name;
+ ep->ep_usb.ops = &max3420_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep_usb, MAX3420_EP_MAX_PACKET);
+
+ if (idx == 0) { /* For EP0 */
+ ep->ep_usb.desc = &ep0_desc;
+ ep->ep_usb.maxpacket = usb_endpoint_maxp(&ep0_desc);
+ ep->ep_usb.caps.type_control = true;
+ ep->ep_usb.caps.dir_in = true;
+ ep->ep_usb.caps.dir_out = true;
+ snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep0");
+ continue;
+ }
+
+ if (idx == 1) { /* EP1 is OUT */
+ ep->ep_usb.caps.dir_in = false;
+ ep->ep_usb.caps.dir_out = true;
+ snprintf(ep->name, MAX3420_EPNAME_SIZE, "ep1-bulk-out");
+ } else { /* EP2 & EP3 are IN */
+ ep->ep_usb.caps.dir_in = true;
+ ep->ep_usb.caps.dir_out = false;
+ snprintf(ep->name, MAX3420_EPNAME_SIZE,
+ "ep%d-bulk-in", idx);
+ }
+ ep->ep_usb.caps.type_iso = false;
+ ep->ep_usb.caps.type_int = false;
+ ep->ep_usb.caps.type_bulk = true;
+
+ list_add_tail(&ep->ep_usb.ep_list,
+ &udc->gadget.ep_list);
+ }
+}
+
+static int max3420_probe(struct spi_device *spi)
+{
+ struct max3420_udc *udc;
+ int err, irq;
+ u8 reg[8];
+
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ dev_err(&spi->dev, "UDC needs full duplex to work\n");
+ return -EINVAL;
+ }
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "Unable to setup SPI bus\n");
+ return -EFAULT;
+ }
+
+ udc = devm_kzalloc(&spi->dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ udc->spi = spi;
+
+ udc->remote_wkp = 0;
+
+ /* Setup gadget structure */
+ udc->gadget.ops = &max3420_udc_ops;
+ udc->gadget.max_speed = USB_SPEED_FULL;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.ep0 = &udc->ep[0].ep_usb;
+ udc->gadget.name = driver_name;
+
+ spin_lock_init(&udc->lock);
+ mutex_init(&udc->spi_bus_mutex);
+
+ udc->ep0req.ep = &udc->ep[0];
+ udc->ep0req.usb_req.buf = udc->ep0buf;
+ INIT_LIST_HEAD(&udc->ep0req.queue);
+
+ /* setup Endpoints */
+ max3420_eps_init(udc);
+
+ /* configure SPI */
+ spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8);
+ spi_wr8(udc, MAX3420_REG_PINCTL, FDUPSPI);
+
+ err = usb_add_gadget_udc(&spi->dev, &udc->gadget);
+ if (err)
+ return err;
+
+ udc->dev = &udc->gadget.dev;
+
+ spi_set_drvdata(spi, udc);
+
+ irq = of_irq_get_byname(spi->dev.of_node, "udc");
+ err = devm_request_irq(&spi->dev, irq, max3420_irq_handler, 0,
+ "max3420", udc);
+ if (err < 0)
+ goto del_gadget;
+
+ udc->thread_task = kthread_create(max3420_thread, udc,
+ "max3420-thread");
+ if (IS_ERR(udc->thread_task)) {
+ err = PTR_ERR(udc->thread_task);
+ goto del_gadget;
+ }
+
+ irq = of_irq_get_byname(spi->dev.of_node, "vbus");
+ if (irq <= 0) { /* no vbus irq implies self-powered design */
+ udc->is_selfpowered = 1;
+ udc->vbus_active = true;
+ udc->todo |= UDC_START;
+ usb_udc_vbus_handler(&udc->gadget, udc->vbus_active);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
+ max3420_start(udc);
+ } else {
+ udc->is_selfpowered = 0;
+ /* Detect current vbus status */
+ spi_rd_buf(udc, MAX3420_REG_EPIRQ, reg, 8);
+ if (reg[7] != 0xff)
+ udc->vbus_active = true;
+
+ err = devm_request_irq(&spi->dev, irq,
+ max3420_vbus_handler, 0, "vbus", udc);
+ if (err < 0)
+ goto del_gadget;
+ }
+
+ return 0;
+
+del_gadget:
+ usb_del_gadget_udc(&udc->gadget);
+ return err;
+}
+
+static void max3420_remove(struct spi_device *spi)
+{
+ struct max3420_udc *udc = spi_get_drvdata(spi);
+ unsigned long flags;
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ kthread_stop(udc->thread_task);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static const struct of_device_id max3420_udc_of_match[] = {
+ { .compatible = "maxim,max3420-udc"},
+ { .compatible = "maxim,max3421-udc"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, max3420_udc_of_match);
+
+static struct spi_driver max3420_driver = {
+ .driver = {
+ .name = "max3420-udc",
+ .of_match_table = of_match_ptr(max3420_udc_of_match),
+ },
+ .probe = max3420_probe,
+ .remove = max3420_remove,
+};
+
+module_spi_driver(max3420_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
new file mode 100644
index 000000000..66b84f792
--- /dev/null
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ */
+
+#ifndef __MV_U3D_H
+#define __MV_U3D_H
+
+#define MV_U3D_EP_CONTEXT_ALIGNMENT 32
+#define MV_U3D_TRB_ALIGNMENT 16
+#define MV_U3D_DMA_BOUNDARY 4096
+#define MV_U3D_EP0_MAX_PKT_SIZE 512
+
+/* ep0 transfer state */
+#define MV_U3D_WAIT_FOR_SETUP 0
+#define MV_U3D_DATA_STATE_XMIT 1
+#define MV_U3D_DATA_STATE_NEED_ZLP 2
+#define MV_U3D_WAIT_FOR_OUT_STATUS 3
+#define MV_U3D_DATA_STATE_RECV 4
+#define MV_U3D_STATUS_STAGE 5
+
+#define MV_U3D_EP_MAX_LENGTH_TRANSFER 0x10000
+
+/* USB3 Interrupt Status */
+#define MV_U3D_USBINT_SETUP 0x00000001
+#define MV_U3D_USBINT_RX_COMPLETE 0x00000002
+#define MV_U3D_USBINT_TX_COMPLETE 0x00000004
+#define MV_U3D_USBINT_UNDER_RUN 0x00000008
+#define MV_U3D_USBINT_RXDESC_ERR 0x00000010
+#define MV_U3D_USBINT_TXDESC_ERR 0x00000020
+#define MV_U3D_USBINT_RX_TRB_COMPLETE 0x00000040
+#define MV_U3D_USBINT_TX_TRB_COMPLETE 0x00000080
+#define MV_U3D_USBINT_VBUS_VALID 0x00010000
+#define MV_U3D_USBINT_STORAGE_CMD_FULL 0x00020000
+#define MV_U3D_USBINT_LINK_CHG 0x01000000
+
+/* USB3 Interrupt Enable */
+#define MV_U3D_INTR_ENABLE_SETUP 0x00000001
+#define MV_U3D_INTR_ENABLE_RX_COMPLETE 0x00000002
+#define MV_U3D_INTR_ENABLE_TX_COMPLETE 0x00000004
+#define MV_U3D_INTR_ENABLE_UNDER_RUN 0x00000008
+#define MV_U3D_INTR_ENABLE_RXDESC_ERR 0x00000010
+#define MV_U3D_INTR_ENABLE_TXDESC_ERR 0x00000020
+#define MV_U3D_INTR_ENABLE_RX_TRB_COMPLETE 0x00000040
+#define MV_U3D_INTR_ENABLE_TX_TRB_COMPLETE 0x00000080
+#define MV_U3D_INTR_ENABLE_RX_BUFFER_ERR 0x00000100
+#define MV_U3D_INTR_ENABLE_VBUS_VALID 0x00010000
+#define MV_U3D_INTR_ENABLE_STORAGE_CMD_FULL 0x00020000
+#define MV_U3D_INTR_ENABLE_LINK_CHG 0x01000000
+#define MV_U3D_INTR_ENABLE_PRIME_STATUS 0x02000000
+
+/* USB3 Link Change */
+#define MV_U3D_LINK_CHANGE_LINK_UP 0x00000001
+#define MV_U3D_LINK_CHANGE_SUSPEND 0x00000002
+#define MV_U3D_LINK_CHANGE_RESUME 0x00000004
+#define MV_U3D_LINK_CHANGE_WRESET 0x00000008
+#define MV_U3D_LINK_CHANGE_HRESET 0x00000010
+#define MV_U3D_LINK_CHANGE_VBUS_INVALID 0x00000020
+#define MV_U3D_LINK_CHANGE_INACT 0x00000040
+#define MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0 0x00000080
+#define MV_U3D_LINK_CHANGE_U1 0x00000100
+#define MV_U3D_LINK_CHANGE_U2 0x00000200
+#define MV_U3D_LINK_CHANGE_U3 0x00000400
+
+/* bridge setting */
+#define MV_U3D_BRIDGE_SETTING_VBUS_VALID (1 << 16)
+
+/* Command Register Bit Masks */
+#define MV_U3D_CMD_RUN_STOP 0x00000001
+#define MV_U3D_CMD_CTRL_RESET 0x00000002
+
+/* ep control register */
+#define MV_U3D_EPXCR_EP_TYPE_CONTROL 0
+#define MV_U3D_EPXCR_EP_TYPE_ISOC 1
+#define MV_U3D_EPXCR_EP_TYPE_BULK 2
+#define MV_U3D_EPXCR_EP_TYPE_INT 3
+#define MV_U3D_EPXCR_EP_ENABLE_SHIFT 4
+#define MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT 12
+#define MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT 16
+#define MV_U3D_USB_BULK_BURST_OUT 6
+#define MV_U3D_USB_BULK_BURST_IN 14
+
+#define MV_U3D_EPXCR_EP_FLUSH (1 << 7)
+#define MV_U3D_EPXCR_EP_HALT (1 << 1)
+#define MV_U3D_EPXCR_EP_INIT (1)
+
+/* TX/RX Status Register */
+#define MV_U3D_XFERSTATUS_COMPLETE_SHIFT 24
+#define MV_U3D_COMPLETE_INVALID 0
+#define MV_U3D_COMPLETE_SUCCESS 1
+#define MV_U3D_COMPLETE_BUFF_ERR 2
+#define MV_U3D_COMPLETE_SHORT_PACKET 3
+#define MV_U3D_COMPLETE_TRB_ERR 5
+#define MV_U3D_XFERSTATUS_TRB_LENGTH_MASK (0xFFFFFF)
+
+#define MV_U3D_USB_LINK_BYPASS_VBUS 0x8
+
+#define MV_U3D_LTSSM_PHY_INIT_DONE 0x80000000
+#define MV_U3D_LTSSM_NEVER_GO_COMPLIANCE 0x40000000
+
+#define MV_U3D_USB3_OP_REGS_OFFSET 0x100
+#define MV_U3D_USB3_PHY_OFFSET 0xB800
+
+#define DCS_ENABLE 0x1
+
+/* timeout */
+#define MV_U3D_RESET_TIMEOUT 10000
+#define MV_U3D_FLUSH_TIMEOUT 100000
+#define MV_U3D_OWN_TIMEOUT 10000
+#define LOOPS_USEC_SHIFT 4
+#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
+#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
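+/*
+ * The timeouts above are in microseconds; LOOPS() converts one into a
+ * polling-iteration count, each iteration delaying LOOPS_USEC (16us).
+ */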
+
+/* ep direction */
+#define MV_U3D_EP_DIR_IN 1
+#define MV_U3D_EP_DIR_OUT 0
+#define mv_u3d_ep_dir(ep) (((ep)->ep_num == 0) ? \
+ ((ep)->u3d->ep0_dir) : ((ep)->direction))
+
+/* usb capability registers */
+struct mv_u3d_cap_regs {
+ u32 rsvd[5];
+ u32 dboff; /* doorbell register offset */
+ u32 rtsoff; /* runtime register offset */
+ u32 vuoff; /* vendor unique register offset */
+};
+
+/* operation registers */
+struct mv_u3d_op_regs {
+ u32 usbcmd; /* Command register */
+ u32 rsvd1[11];
+ u32 dcbaapl; /* Device Context Base Address low register */
+ u32 dcbaaph; /* Device Context Base Address high register */
+ u32 rsvd2[243];
+ u32 portsc; /* port status and control register*/
+ u32 portlinkinfo; /* port link info register*/
+ u32 rsvd3[9917];
+ u32 doorbell; /* doorbell register */
+};
+
+/* control endpoint enable registers */
+struct epxcr {
+ u32 epxoutcr0; /* ep out control 0 register */
+ u32 epxoutcr1; /* ep out control 1 register */
+ u32 epxincr0; /* ep in control 0 register */
+ u32 epxincr1; /* ep in control 1 register */
+};
+
+/* transfer status registers */
+struct xferstatus {
+ u32 curdeqlo; /* current TRB pointer low */
+ u32 curdeqhi; /* current TRB pointer high */
+ u32 statuslo; /* transfer status low */
+ u32 statushi; /* transfer status high */
+};
+
+/* vendor unique control registers */
+struct mv_u3d_vuc_regs {
+ u32 ctrlepenable; /* control endpoint enable register */
+ u32 setuplock; /* setup lock register */
+ u32 endcomplete; /* endpoint transfer complete register */
+ u32 intrcause; /* interrupt cause register */
+ u32 intrenable; /* interrupt enable register */
+ u32 trbcomplete; /* TRB complete register */
+ u32 linkchange; /* link change register */
+ u32 rsvd1[5];
+ u32 trbunderrun; /* TRB underrun register */
+ u32 rsvd2[43];
+ u32 bridgesetting; /* bridge setting register */
+ u32 rsvd3[7];
+ struct xferstatus txst[16]; /* TX status register */
+ struct xferstatus rxst[16]; /* RX status register */
+ u32 ltssm; /* LTSSM control register */
+ u32 pipe; /* PIPE control register */
+ u32 linkcr0; /* link control 0 register */
+ u32 linkcr1; /* link control 1 register */
+ u32 rsvd6[60];
+ u32 mib0; /* MIB0 counter register */
+ u32 usblink; /* usb link control register */
+ u32 ltssmstate; /* LTSSM state register */
+ u32 linkerrorcause; /* link error cause register */
+ u32 rsvd7[60];
+ u32 devaddrtiebrkr; /* device address and tie breaker */
+ u32 itpinfo0; /* ITP info 0 register */
+ u32 itpinfo1; /* ITP info 1 register */
+ u32 rsvd8[61];
+ struct epxcr epcr[16]; /* ep control register */
+ u32 rsvd9[64];
+ u32 phyaddr; /* PHY address register */
+ u32 phydata; /* PHY data register */
+};
+
+/* Endpoint context structure */
+struct mv_u3d_ep_context {
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 trb_addr_lo; /* TRB address low 32 bit */
+ u32 trb_addr_hi; /* TRB address high 32 bit */
+ u32 rsvd2;
+ u32 rsvd3;
+ struct usb_ctrlrequest setup_buffer; /* setup data buffer */
+};
+
+/* TRB control data structure */
+struct mv_u3d_trb_ctrl {
+ u32 own:1; /* owner of TRB */
+ u32 rsvd1:3;
+ u32 chain:1; /* associate this TRB with the
+ * next TRB on the ring */
+ u32 ioc:1; /* interrupt on complete */
+ u32 rsvd2:4;
+ u32 type:6; /* TRB type */
+#define TYPE_NORMAL 1
+#define TYPE_DATA 3
+#define TYPE_LINK 6
+ u32 dir:1; /* direction for the data stage of a control
+ * endpoint operation: 0 is OUT, 1 is IN */
+ u32 rsvd3:15;
+};
+
+/* TRB data structure
+ * For a multi-TRB transfer, all the TRBs' physical addresses must be
+ * contiguous.
+ */
+struct mv_u3d_trb_hw {
+ u32 buf_addr_lo; /* data buffer address low 32 bit */
+ u32 buf_addr_hi; /* data buffer address high 32 bit */
+ u32 trb_len; /* transfer length */
+ struct mv_u3d_trb_ctrl ctrl; /* TRB control data */
+};
+
+/* TRB structure */
+struct mv_u3d_trb {
+ struct mv_u3d_trb_hw *trb_hw; /* point to the trb_hw structure */
+ dma_addr_t trb_dma; /* dma address for this trb_hw */
+ struct list_head trb_list; /* trb list */
+};
+
+/* device data structure */
+struct mv_u3d {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ spinlock_t lock; /* device lock */
+ struct completion *done;
+ struct device *dev;
+ int irq;
+
+ /* usb controller registers */
+ struct mv_u3d_cap_regs __iomem *cap_regs;
+ struct mv_u3d_op_regs __iomem *op_regs;
+ struct mv_u3d_vuc_regs __iomem *vuc_regs;
+ void __iomem *phy_regs;
+
+ unsigned int max_eps;
+ struct mv_u3d_ep_context *ep_context;
+ size_t ep_context_size;
+ dma_addr_t ep_context_dma;
+
+ struct dma_pool *trb_pool; /* for TRB data structure */
+ struct mv_u3d_ep *eps;
+
+ struct mv_u3d_req *status_req; /* ep0 status request */
+ struct usb_ctrlrequest local_setup_buff; /* store setup data*/
+
+ unsigned int resume_state; /* USB state to resume */
+ unsigned int usb_state; /* USB current state */
+ unsigned int ep0_state; /* Endpoint zero state */
+ unsigned int ep0_dir;
+
+ unsigned int dev_addr; /* device address */
+
+ unsigned int errors;
+
+ unsigned softconnect:1;
+ unsigned vbus_active:1; /* vbus is active or not */
+ unsigned remote_wakeup:1; /* support remote wakeup */
+ unsigned clock_gating:1; /* clock gating or not */
+ unsigned active:1; /* udc is active or not */
+ unsigned vbus_valid_detect:1; /* udc vbus detection */
+
+ struct mv_usb_addon_irq *vbus;
+ unsigned int power;
+
+ struct clk *clk;
+};
+
+/* endpoint data structure */
+struct mv_u3d_ep {
+ struct usb_ep ep;
+ struct mv_u3d *u3d;
+ struct list_head queue; /* ep requests queued on hardware */
+ struct list_head req_list; /* list of ep request */
+ struct mv_u3d_ep_context *ep_context; /* ep context */
+ u32 direction;
+ char name[14];
+ u32 processing; /* there is an ep request
+ * queued on hardware */
+ spinlock_t req_lock; /* ep lock */
+ unsigned wedge:1;
+ unsigned enabled:1;
+ unsigned ep_type:2;
+ unsigned ep_num:8;
+};
+
+/* request data structure */
+struct mv_u3d_req {
+ struct usb_request req;
+ struct mv_u3d_ep *ep;
+ struct list_head queue; /* ep request queued on hardware */
+ struct list_head list; /* ep request list */
+ struct list_head trb_list; /* trb list of a request */
+
+ struct mv_u3d_trb *trb_head; /* point to first trb of a request */
+ unsigned trb_count; /* TRB number in the chain */
+ unsigned chain; /* TRB chain or not */
+};
+
+#endif
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
new file mode 100644
index 000000000..411b61797
--- /dev/null
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -0,0 +1,2064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/mv_usb.h>
+#include <linux/clk.h>
+
+#include "mv_u3d.h"
+
+#define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
+
+static const char driver_name[] = "mv_u3d";
+
+static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
+static void mv_u3d_stop_activity(struct mv_u3d *u3d,
+ struct usb_gadget_driver *driver);
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE,
+};
+
+static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
+{
+ struct mv_u3d_ep *ep;
+ u32 epxcr;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ep = &u3d->eps[i];
+ ep->u3d = u3d;
+
+ /* ep0 ep context, ep0 in and out share the same ep context */
+ ep->ep_context = &u3d->ep_context[1];
+ }
+
+ /* reset ep state machine */
+ /* reset ep0 out */
+ epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
+ epxcr |= MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
+ udelay(5);
+ epxcr &= ~MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
+
+ epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
+ << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
+ | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
+ | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
+ | MV_U3D_EPXCR_EP_TYPE_CONTROL);
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
+
+ /* reset ep0 in */
+ epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
+ epxcr |= MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
+ udelay(5);
+ epxcr &= ~MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
+
+ epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
+ << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
+ | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
+ | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
+ | MV_U3D_EPXCR_EP_TYPE_CONTROL);
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
+}
+
+static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
+{
+ u32 tmp;
+ dev_dbg(u3d->dev, "%s\n", __func__);
+
+ /* set TX and RX to stall */
+ tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
+ tmp |= MV_U3D_EPXCR_EP_HALT;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+
+ tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
+ tmp |= MV_U3D_EPXCR_EP_HALT;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+
+ /* update ep0 state */
+ u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
+ u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
+}
+
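+/*
+ * Walk the TRB list of a completed request, checking per-TRB ownership
+ * and the endpoint's status register; remaining byte counts are
+ * subtracted from the request length to compute req.actual.
+ */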
+static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
+ struct mv_u3d_req *curr_req)
+{
+ struct mv_u3d_trb *curr_trb;
+ int actual, remaining_length = 0;
+ int direction, ep_num;
+ int retval = 0;
+ u32 tmp, status, length;
+
+ direction = index % 2;
+ ep_num = index / 2;
+
+ actual = curr_req->req.length;
+
+ while (!list_empty(&curr_req->trb_list)) {
+ curr_trb = list_entry(curr_req->trb_list.next,
+ struct mv_u3d_trb, trb_list);
+ if (!curr_trb->trb_hw->ctrl.own) {
+ dev_err(u3d->dev, "%s, TRB own error!\n",
+ u3d->eps[index].name);
+ return 1;
+ }
+
+ curr_trb->trb_hw->ctrl.own = 0;
+ if (direction == MV_U3D_EP_DIR_OUT)
+ tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
+ else
+ tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
+
+ status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
+ length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
+
+ if (status == MV_U3D_COMPLETE_SUCCESS ||
+ (status == MV_U3D_COMPLETE_SHORT_PACKET &&
+ direction == MV_U3D_EP_DIR_OUT)) {
+ remaining_length += length;
+ actual -= remaining_length;
+ } else {
+ dev_err(u3d->dev,
+ "complete_tr error: ep=%d %s: error = 0x%x\n",
+ index >> 1, direction ? "SEND" : "RECV",
+ status);
+ retval = -EPROTO;
+ }
+
+ list_del_init(&curr_trb->trb_list);
+ }
+ if (retval)
+ return retval;
+
+ curr_req->req.actual = actual;
+ return 0;
+}
+
+/*
+ * mv_u3d_done() - retire a request; caller has blocked irqs
+ * @status : request status to be set, only works when
+ * request is still in progress.
+ */
+static
+void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
+ __releases(&ep->udc->lock)
+ __acquires(&ep->udc->lock)
+{
+ struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
+
+ dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
+ /* Remove the req from the ep queue */
+ list_del_init(&req->queue);
+
+ /* req.status should be set as -EINPROGRESS in ep_queue() */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ /* Free trb for the request */
+ if (!req->chain)
+ dma_pool_free(u3d->trb_pool,
+ req->trb_head->trb_hw, req->trb_head->trb_dma);
+ else {
+ dma_unmap_single(ep->u3d->gadget.dev.parent,
+ (dma_addr_t)req->trb_head->trb_dma,
+ req->trb_count * sizeof(struct mv_u3d_trb_hw),
+ DMA_BIDIRECTIONAL);
+ kfree(req->trb_head->trb_hw);
+ }
+ kfree(req->trb_head);
+
+ usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
+
+ if (status && (status != -ESHUTDOWN)) {
+ dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+ }
+
+ spin_unlock(&ep->u3d->lock);
+
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+
+ spin_lock(&ep->u3d->lock);
+}
+
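+/*
+ * Hand a request's TRB chain to the hardware: load the endpoint
+ * context with the first TRB's DMA address (DCS bit set) and ring the
+ * endpoint's doorbell. ep0 shares context slot 1 and doorbell 1 for
+ * both directions.
+ */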
+static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
+{
+ u32 tmp, direction;
+ struct mv_u3d *u3d;
+ struct mv_u3d_ep_context *ep_context;
+ int retval = 0;
+
+ u3d = ep->u3d;
+ direction = mv_u3d_ep_dir(ep);
+
+ /* ep0 in and out share the same ep context, slot 1 */
+ if (ep->ep_num == 0)
+ ep_context = &(u3d->ep_context[1]);
+ else
+ ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
+
+ /* check if the pipe is empty or not */
+ if (!list_empty(&ep->queue)) {
+ dev_err(u3d->dev, "add trb to non-empty queue!\n");
+ retval = -ENOMEM;
+ WARN_ON(1);
+ } else {
+ ep_context->rsvd0 = cpu_to_le32(1);
+ ep_context->rsvd1 = 0;
+
+ /* Configure the trb address and set the DCS bit.
+ * Both DCS bit and own bit in trb should be set.
+ */
+ ep_context->trb_addr_lo =
+ cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
+ ep_context->trb_addr_hi = 0;
+
+ /* Ensure that updates to the EP Context will
+ * occur before ringing the doorbell.
+ */
+ wmb();
+
+ /* ring bell the ep */
+ if (ep->ep_num == 0)
+ tmp = 0x1;
+ else
+ tmp = ep->ep_num * 2
+ + ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
+
+ iowrite32(tmp, &u3d->op_regs->doorbell);
+ }
+ return retval;
+}
+
+static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
+ unsigned *length, dma_addr_t *dma)
+{
+ u32 temp;
+ unsigned int direction;
+ struct mv_u3d_trb *trb;
+ struct mv_u3d_trb_hw *trb_hw;
+ struct mv_u3d *u3d;
+
+ /* how big will this transfer be? */
+ *length = req->req.length - req->req.actual;
+ BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
+
+ u3d = req->ep->u3d;
+
+ trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
+ if (!trb)
+ return NULL;
+
+ /*
+ * Be careful that no __GFP_HIGHMEM is set, or we cannot use
+ * dma_to_virt; GFP_KERNEL must not be used while holding a
+ * spinlock, hence GFP_ATOMIC here.
+ */
+ trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
+ if (!trb_hw) {
+ kfree(trb);
+ dev_err(u3d->dev,
+ "%s, dma_pool_alloc fail\n", __func__);
+ return NULL;
+ }
+ trb->trb_dma = *dma;
+ trb->trb_hw = trb_hw;
+
+ /* initialize buffer page pointers */
+ temp = (u32)(req->req.dma + req->req.actual);
+
+ trb_hw->buf_addr_lo = cpu_to_le32(temp);
+ trb_hw->buf_addr_hi = 0;
+ trb_hw->trb_len = cpu_to_le32(*length);
+ trb_hw->ctrl.own = 1;
+
+ if (req->ep->ep_num == 0)
+ trb_hw->ctrl.type = TYPE_DATA;
+ else
+ trb_hw->ctrl.type = TYPE_NORMAL;
+
+ req->req.actual += *length;
+
+ direction = mv_u3d_ep_dir(req->ep);
+ if (direction == MV_U3D_EP_DIR_IN)
+ trb_hw->ctrl.dir = 1;
+ else
+ trb_hw->ctrl.dir = 0;
+
+ /* Enable interrupt for the last trb of a request */
+ if (!req->req.no_interrupt)
+ trb_hw->ctrl.ioc = 1;
+
+ trb_hw->ctrl.chain = 0;
+
+ wmb();
+ return trb;
+}
+
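+/*
+ * Fill one pre-allocated TRB of a chain: each call consumes up to
+ * MV_U3D_EP_MAX_LENGTH_TRANSFER (64KB) of the request and sets the
+ * chain bit on all but the last TRB; *is_last also accounts for the
+ * trailing ZLP when req.zero is set.
+ */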
+static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
+ struct mv_u3d_trb *trb, int *is_last)
+{
+ u32 temp;
+ unsigned int direction;
+ struct mv_u3d *u3d;
+
+ /* how big will this transfer be? */
+ *length = min(req->req.length - req->req.actual,
+ (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
+
+ u3d = req->ep->u3d;
+
+ trb->trb_dma = 0;
+
+ /* initialize buffer page pointers */
+ temp = (u32)(req->req.dma + req->req.actual);
+
+ trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
+ trb->trb_hw->buf_addr_hi = 0;
+ trb->trb_hw->trb_len = cpu_to_le32(*length);
+ trb->trb_hw->ctrl.own = 1;
+
+ if (req->ep->ep_num == 0)
+ trb->trb_hw->ctrl.type = TYPE_DATA;
+ else
+ trb->trb_hw->ctrl.type = TYPE_NORMAL;
+
+ req->req.actual += *length;
+
+ direction = mv_u3d_ep_dir(req->ep);
+ if (direction == MV_U3D_EP_DIR_IN)
+ trb->trb_hw->ctrl.dir = 1;
+ else
+ trb->trb_hw->ctrl.dir = 0;
+
+ /* zlp is needed if req->req.zero is set */
+ if (req->req.zero) {
+ if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+ *is_last = 1;
+ else
+ *is_last = 0;
+ } else if (req->req.length == req->req.actual)
+ *is_last = 1;
+ else
+ *is_last = 0;
+
+ /* Enable interrupt for the last trb of a request */
+ if (*is_last && !req->req.no_interrupt)
+ trb->trb_hw->ctrl.ioc = 1;
+
+ if (*is_last)
+ trb->trb_hw->ctrl.chain = 0;
+ else {
+ trb->trb_hw->ctrl.chain = 1;
+ dev_dbg(u3d->dev, "chain trb\n");
+ }
+
+ wmb();
+
+ return 0;
+}
+
+/* generate TRB linked list for a request
+ * the usb controller only supports contiguous trb chains, i.e.
+ * the trb structures' physical addresses must be contiguous.
+ */
+static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
+{
+ unsigned count;
+ int is_last;
+ struct mv_u3d_trb *trb;
+ struct mv_u3d_trb_hw *trb_hw;
+ struct mv_u3d *u3d;
+ dma_addr_t dma;
+ unsigned length;
+ unsigned trb_num;
+
+ u3d = req->ep->u3d;
+
+ INIT_LIST_HEAD(&req->trb_list);
+
+ length = req->req.length - req->req.actual;
+ /* normally the request transfer length is less than 64KB;
+ * we use mv_u3d_build_trb_one() to optimize that case.
+ */
+ if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
+ trb = mv_u3d_build_trb_one(req, &count, &dma);
+ list_add_tail(&trb->trb_list, &req->trb_list);
+ req->trb_head = trb;
+ req->trb_count = 1;
+ req->chain = 0;
+ } else {
+ trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
+ if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
+ trb_num++;
+
+ trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
+ if (!trb)
+ return -ENOMEM;
+
+ trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
+ if (!trb_hw) {
+ kfree(trb);
+ return -ENOMEM;
+ }
+
+ do {
+ trb->trb_hw = trb_hw;
+ if (mv_u3d_build_trb_chain(req, &count,
+ trb, &is_last)) {
+ dev_err(u3d->dev,
+ "%s, mv_u3d_build_trb_chain fail\n",
+ __func__);
+ return -EIO;
+ }
+
+ list_add_tail(&trb->trb_list, &req->trb_list);
+ req->trb_count++;
+ trb++;
+ trb_hw++;
+ } while (!is_last);
+
+ req->trb_head = list_entry(req->trb_list.next,
+ struct mv_u3d_trb, trb_list);
+ req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
+ req->trb_head->trb_hw,
+ trb_num * sizeof(*trb_hw),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(u3d->gadget.dev.parent,
+ req->trb_head->trb_dma)) {
+ kfree(req->trb_head->trb_hw);
+ kfree(req->trb_head);
+ return -EFAULT;
+ }
+
+ req->chain = 1;
+ }
+
+ return 0;
+}
+
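+/*
+ * Kick the head of the software request list into the hardware queue.
+ * Only one request per endpoint is in flight at a time, tracked by
+ * ep->processing; the completion interrupt advances the list.
+ */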
+static int
+mv_u3d_start_queue(struct mv_u3d_ep *ep)
+{
+ struct mv_u3d *u3d = ep->u3d;
+ struct mv_u3d_req *req;
+ int ret;
+
+ if (!list_empty(&ep->req_list) && !ep->processing)
+ req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
+ else
+ return 0;
+
+ ep->processing = 1;
+
+ /* set up dma mapping */
+ ret = usb_gadget_map_request(&u3d->gadget, &req->req,
+ mv_u3d_ep_dir(ep));
+ if (ret)
+ goto break_processing;
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->trb_count = 0;
+
+ /* build trbs */
+ ret = mv_u3d_req_to_trb(req);
+ if (ret) {
+ dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
+ goto break_processing;
+ }
+
+ /* and push them to device queue */
+ ret = mv_u3d_queue_trb(ep, req);
+ if (ret)
+ goto break_processing;
+
+ /* irq handler advances the queue */
+ list_add_tail(&req->queue, &ep->queue);
+
+ return 0;
+
+break_processing:
+ ep->processing = 0;
+ return ret;
+}
+
+static int mv_u3d_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct mv_u3d *u3d;
+ struct mv_u3d_ep *ep;
+ u16 max = 0;
+ unsigned maxburst = 0;
+ u32 epxcr, direction;
+
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ ep = container_of(_ep, struct mv_u3d_ep, ep);
+ u3d = ep->u3d;
+
+ if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ direction = mv_u3d_ep_dir(ep);
+ max = le16_to_cpu(desc->wMaxPacketSize);
+
+ if (!_ep->maxburst)
+ _ep->maxburst = 1;
+ maxburst = _ep->maxburst;
+
+ /* Set the max burst size */
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (maxburst > 16) {
+ dev_dbg(u3d->dev,
+ "max burst should not be greater than 16 on bulk ep\n");
+ maxburst = 1;
+ _ep->maxburst = maxburst;
+ }
+ dev_dbg(u3d->dev,
+ "maxburst: %d on bulk %s\n", maxburst, ep->name);
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfer only supports maxburst as one */
+ maxburst = 1;
+ _ep->maxburst = maxburst;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (maxburst != 1) {
+ dev_dbg(u3d->dev,
+ "max burst should be 1 on int ep if transfer size is not 1024\n");
+ maxburst = 1;
+ _ep->maxburst = maxburst;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (maxburst != 1) {
+ dev_dbg(u3d->dev,
+ "max burst should be 1 on isoc ep if transfer size is not 1024\n");
+ maxburst = 1;
+ _ep->maxburst = maxburst;
+ }
+ break;
+ default:
+ goto en_done;
+ }
+
+ ep->ep.maxpacket = max;
+ ep->ep.desc = desc;
+ ep->enabled = 1;
+
+ /* Enable the endpoint for Rx or Tx and set the endpoint type */
+ if (direction == MV_U3D_EP_DIR_OUT) {
+ epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+ epxcr |= MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+ udelay(5);
+ epxcr &= ~MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+
+ epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
+ | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
+ | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
+ | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+ } else {
+ epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+ epxcr |= MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+ udelay(5);
+ epxcr &= ~MV_U3D_EPXCR_EP_INIT;
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+
+ epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
+ | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
+ | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
+ | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+ }
+
+ return 0;
+en_done:
+ return -EINVAL;
+}
+
+static int mv_u3d_ep_disable(struct usb_ep *_ep)
+{
+ struct mv_u3d *u3d;
+ struct mv_u3d_ep *ep;
+ u32 epxcr, direction;
+ unsigned long flags;
+
+ if (!_ep)
+ return -EINVAL;
+
+ ep = container_of(_ep, struct mv_u3d_ep, ep);
+ if (!ep->ep.desc)
+ return -EINVAL;
+
+ u3d = ep->u3d;
+
+ direction = mv_u3d_ep_dir(ep);
+
+ /* nuke all pending requests (does flush) */
+ spin_lock_irqsave(&u3d->lock, flags);
+ mv_u3d_nuke(ep, -ESHUTDOWN);
+ spin_unlock_irqrestore(&u3d->lock, flags);
+
+ /* Disable the endpoint for Rx or Tx and reset the endpoint type */
+ if (direction == MV_U3D_EP_DIR_OUT) {
+ epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+ epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
+ | USB_ENDPOINT_XFERTYPE_MASK);
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
+ } else {
+ epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+ epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
+ | USB_ENDPOINT_XFERTYPE_MASK);
+ iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
+ }
+
+ ep->enabled = 0;
+
+ ep->ep.desc = NULL;
+ return 0;
+}
+
+static struct usb_request *
+mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct mv_u3d_req *req = NULL;
+
+ req = kzalloc(sizeof *req, gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
+
+ kfree(req);
+}
+
+static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct mv_u3d *u3d;
+ u32 direction;
+ struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
+ unsigned int loops;
+ u32 tmp;
+
+ /* if endpoint is not enabled, cannot flush endpoint */
+ if (!ep->enabled)
+ return;
+
+ u3d = ep->u3d;
+ direction = mv_u3d_ep_dir(ep);
+
+ /* ep0 needs the flush bit cleared after flushing the fifo */
+ if (!ep->ep_num) {
+ if (direction == MV_U3D_EP_DIR_OUT) {
+ tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
+ tmp |= MV_U3D_EPXCR_EP_FLUSH;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+ udelay(10);
+ tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
+ } else {
+ tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
+ tmp |= MV_U3D_EPXCR_EP_FLUSH;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+ udelay(10);
+ tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
+ }
+ return;
+ }
+
+ if (direction == MV_U3D_EP_DIR_OUT) {
+ tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+ tmp |= MV_U3D_EPXCR_EP_FLUSH;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+
+ /* Wait until flushing completed */
+ loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
+ while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
+ MV_U3D_EPXCR_EP_FLUSH) {
+ /*
+ * EP_FLUSH bit should be cleared to indicate this
+ * operation is complete
+ */
+ if (loops == 0) {
+ dev_dbg(u3d->dev,
+ "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
+ direction ? "in" : "out");
+ return;
+ }
+ loops--;
+ udelay(LOOPS_USEC);
+ }
+ } else { /* EP_DIR_IN */
+ tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+ tmp |= MV_U3D_EPXCR_EP_FLUSH;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+
+ /* Wait until flushing completed */
+ loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
+ while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
+ MV_U3D_EPXCR_EP_FLUSH) {
+ /*
+ * EP_FLUSH bit should be cleared to indicate this
+ * operation is complete
+ */
+ if (loops == 0) {
+ dev_dbg(u3d->dev,
+ "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
+ direction ? "in" : "out");
+ return;
+ }
+ loops--;
+ udelay(LOOPS_USEC);
+ }
+ }
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct mv_u3d_ep *ep;
+ struct mv_u3d_req *req;
+ struct mv_u3d *u3d;
+ unsigned long flags;
+ int is_first_req = 0;
+
+ if (unlikely(!_ep || !_req))
+ return -EINVAL;
+
+ ep = container_of(_ep, struct mv_u3d_ep, ep);
+ u3d = ep->u3d;
+
+ req = container_of(_req, struct mv_u3d_req, req);
+
+ if (!ep->ep_num
+ && u3d->ep0_state == MV_U3D_STATUS_STAGE
+ && !_req->length) {
+ dev_dbg(u3d->dev, "ep0 status stage\n");
+ u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
+ return 0;
+ }
+
+ dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
+ __func__, _ep->name, req);
+
+ /* catch various bogus parameters */
+ if (!req->req.complete || !req->req.buf
+ || !list_empty(&req->queue)) {
+ dev_err(u3d->dev,
+ "%s, bad params, _req: 0x%p,"
+ "req->req.complete: 0x%p, req->req.buf: 0x%p,"
+ "list_empty: 0x%x\n",
+ __func__, _req,
+ req->req.complete, req->req.buf,
+ list_empty(&req->queue));
+ return -EINVAL;
+ }
+ if (unlikely(!ep->ep.desc)) {
+ dev_err(u3d->dev, "%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (req->req.length > ep->ep.maxpacket)
+ return -EMSGSIZE;
+ }
+
+ if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_err(u3d->dev,
+ "bad params of driver/speed\n");
+ return -ESHUTDOWN;
+ }
+
+ req->ep = ep;
+
+ /* Software list handles usb request. */
+ spin_lock_irqsave(&ep->req_lock, flags);
+ is_first_req = list_empty(&ep->req_list);
+ list_add_tail(&req->list, &ep->req_list);
+ spin_unlock_irqrestore(&ep->req_lock, flags);
+ if (!is_first_req) {
+ dev_dbg(u3d->dev, "list is not empty\n");
+ return 0;
+ }
+
+ dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
+ spin_lock_irqsave(&u3d->lock, flags);
+ mv_u3d_start_queue(ep);
+ spin_unlock_irqrestore(&u3d->lock, flags);
+ return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct mv_u3d_ep *ep;
+ struct mv_u3d_req *req = NULL, *iter;
+ struct mv_u3d *u3d;
+ struct mv_u3d_ep_context *ep_context;
+ struct mv_u3d_req *next_req;
+
+ unsigned long flags;
+ int ret = 0;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ ep = container_of(_ep, struct mv_u3d_ep, ep);
+ u3d = ep->u3d;
+
+ spin_lock_irqsave(&ep->u3d->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* The request is in progress, or completed but not dequeued */
+ if (ep->queue.next == &req->queue) {
+ _req->status = -ECONNRESET;
+ mv_u3d_ep_fifo_flush(_ep);
+
+ /* The request isn't the last request in this ep queue */
+ if (req->queue.next != &ep->queue) {
+ dev_dbg(u3d->dev,
+ "it is the last request in this ep queue\n");
+ ep_context = ep->ep_context;
+ next_req = list_entry(req->queue.next,
+ struct mv_u3d_req, queue);
+
+ /* Point first TRB of next request to the EP context. */
+ iowrite32((unsigned long) next_req->trb_head,
+ &ep_context->trb_addr_lo);
+ } else {
+ struct mv_u3d_ep_context *ep_context;
+ ep_context = ep->ep_context;
+ ep_context->trb_addr_lo = 0;
+ ep_context->trb_addr_hi = 0;
+ }
+
+ } else
+ WARN_ON(1);
+
+ mv_u3d_done(ep, req, -ECONNRESET);
+
+ /* remove the req from the ep req list */
+ if (!list_empty(&ep->req_list)) {
+ struct mv_u3d_req *curr_req;
+ curr_req = list_entry(ep->req_list.next,
+ struct mv_u3d_req, list);
+ if (curr_req == req) {
+ list_del_init(&req->list);
+ ep->processing = 0;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&ep->u3d->lock, flags);
+ return ret;
+}
+
+static void
+mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
+{
+ u32 tmp;
+ struct mv_u3d_ep *ep = u3d->eps;
+
+ dev_dbg(u3d->dev, "%s\n", __func__);
+ if (direction == MV_U3D_EP_DIR_OUT) {
+ tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+ if (stall)
+ tmp |= MV_U3D_EPXCR_EP_HALT;
+ else
+ tmp &= ~MV_U3D_EPXCR_EP_HALT;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
+ } else {
+ tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+ if (stall)
+ tmp |= MV_U3D_EPXCR_EP_HALT;
+ else
+ tmp &= ~MV_U3D_EPXCR_EP_HALT;
+ iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
+ }
+}
+
+static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+ struct mv_u3d_ep *ep;
+ unsigned long flags;
+ int status = 0;
+ struct mv_u3d *u3d;
+
+ ep = container_of(_ep, struct mv_u3d_ep, ep);
+ u3d = ep->u3d;
+ if (!ep->ep.desc) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ status = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /*
+ * An attempt to halt an IN ep will fail if any transfer
+ * requests are still queued.
+ */
+ if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
+ && !list_empty(&ep->queue)) {
+ status = -EAGAIN;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ep->u3d->lock, flags);
+ mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
+ if (halt && wedge)
+ ep->wedge = 1;
+ else if (!halt)
+ ep->wedge = 0;
+ spin_unlock_irqrestore(&ep->u3d->lock, flags);
+
+ if (ep->ep_num == 0)
+ u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
+out:
+ return status;
+}
+
+static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+ return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
+}
+
+static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
+{
+ return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
+}
+
+static const struct usb_ep_ops mv_u3d_ep_ops = {
+ .enable = mv_u3d_ep_enable,
+ .disable = mv_u3d_ep_disable,
+
+ .alloc_request = mv_u3d_alloc_request,
+ .free_request = mv_u3d_free_request,
+
+ .queue = mv_u3d_ep_queue,
+ .dequeue = mv_u3d_ep_dequeue,
+
+ .set_wedge = mv_u3d_ep_set_wedge,
+ .set_halt = mv_u3d_ep_set_halt,
+ .fifo_flush = mv_u3d_ep_fifo_flush,
+};
+
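+/*
+ * Stop the controller: mask interrupts (keeping VBUS-valid detection
+ * alive when the clock cannot be gated), acknowledge pending status
+ * and clear the RUN/STOP bit.
+ */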
+static void mv_u3d_controller_stop(struct mv_u3d *u3d)
+{
+ u32 tmp;
+
+ if (!u3d->clock_gating && u3d->vbus_valid_detect)
+ iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
+ &u3d->vuc_regs->intrenable);
+ else
+ iowrite32(0, &u3d->vuc_regs->intrenable);
+ iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
+ iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
+ iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
+ iowrite32(~0x0, &u3d->vuc_regs->linkchange);
+ iowrite32(0x1, &u3d->vuc_regs->setuplock);
+
+ /* Reset the RUN bit in the command register to stop USB */
+ tmp = ioread32(&u3d->op_regs->usbcmd);
+ tmp &= ~MV_U3D_CMD_RUN_STOP;
+ iowrite32(tmp, &u3d->op_regs->usbcmd);
+ dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
+ ioread32(&u3d->op_regs->usbcmd));
+}
+
+static void mv_u3d_controller_start(struct mv_u3d *u3d)
+{
+ u32 usbintr;
+ u32 temp;
+
+ /* enable link LTSSM state machine */
+ temp = ioread32(&u3d->vuc_regs->ltssm);
+ temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
+ iowrite32(temp, &u3d->vuc_regs->ltssm);
+
+ /* Enable interrupts */
+ usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
+ MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
+ MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
+ (u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
+ iowrite32(usbintr, &u3d->vuc_regs->intrenable);
+
+ /* Enable ctrl ep */
+ iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
+
+ /* Set the Run bit in the command register */
+ iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
+ dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
+ ioread32(&u3d->op_regs->usbcmd));
+}
+
+static int mv_u3d_controller_reset(struct mv_u3d *u3d)
+{
+ unsigned int loops;
+ u32 tmp;
+
+ /* Stop the controller */
+ tmp = ioread32(&u3d->op_regs->usbcmd);
+ tmp &= ~MV_U3D_CMD_RUN_STOP;
+ iowrite32(tmp, &u3d->op_regs->usbcmd);
+
+ /* Reset the controller to get default values */
+ iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
+
+ /* wait for reset to complete */
+ loops = LOOPS(MV_U3D_RESET_TIMEOUT);
+ while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
+ if (loops == 0) {
+ dev_err(u3d->dev,
+ "Wait for RESET completed TIMEOUT\n");
+ return -ETIMEDOUT;
+ }
+ loops--;
+ udelay(LOOPS_USEC);
+ }
+
+ /* Configure the Endpoint Context Address */
+ iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
+ iowrite32(0, &u3d->op_regs->dcbaaph);
+
+ return 0;
+}
+
+static int mv_u3d_enable(struct mv_u3d *u3d)
+{
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
+ int retval;
+
+ if (u3d->active)
+ return 0;
+
+ if (!u3d->clock_gating) {
+ u3d->active = 1;
+ return 0;
+ }
+
+ dev_dbg(u3d->dev, "enable u3d\n");
+ clk_enable(u3d->clk);
+ if (pdata->phy_init) {
+ retval = pdata->phy_init(u3d->phy_regs);
+ if (retval) {
+ dev_err(u3d->dev,
+ "init phy error %d\n", retval);
+ clk_disable(u3d->clk);
+ return retval;
+ }
+ }
+ u3d->active = 1;
+
+ return 0;
+}
+
+static void mv_u3d_disable(struct mv_u3d *u3d)
+{
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
+ if (u3d->clock_gating && u3d->active) {
+ dev_dbg(u3d->dev, "disable u3d\n");
+ if (pdata->phy_deinit)
+ pdata->phy_deinit(u3d->phy_regs);
+ clk_disable(u3d->clk);
+ u3d->active = 0;
+ }
+}
+
+static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct mv_u3d *u3d;
+ unsigned long flags;
+ int retval = 0;
+
+ u3d = container_of(gadget, struct mv_u3d, gadget);
+
+ spin_lock_irqsave(&u3d->lock, flags);
+
+ u3d->vbus_active = (is_active != 0);
+ dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
+ __func__, u3d->softconnect, u3d->vbus_active);
+ /*
+ * 1. external VBUS detect: we can disable/enable clock on demand.
+ * 2. UDC VBUS detect: we have to enable clock all the time.
+ * 3. No VBUS detect: we have to enable clock all the time.
+ */
+ if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
+ retval = mv_u3d_enable(u3d);
+ if (retval == 0) {
+			/*
+			 * after the clock is disabled, all register context
+			 * is lost; we have to re-initialize the registers
+			 */
+ mv_u3d_controller_reset(u3d);
+ mv_u3d_ep0_reset(u3d);
+ mv_u3d_controller_start(u3d);
+ }
+ } else if (u3d->driver && u3d->softconnect) {
+ if (!u3d->active)
+ goto out;
+
+		/* stop all the transfers in the queue */
+ mv_u3d_stop_activity(u3d, u3d->driver);
+ mv_u3d_controller_stop(u3d);
+ mv_u3d_disable(u3d);
+ }
+
+out:
+ spin_unlock_irqrestore(&u3d->lock, flags);
+ return retval;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume. For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
+
+ u3d->power = mA;
+
+ return 0;
+}
+
+static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&u3d->lock, flags);
+
+ dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
+ __func__, u3d->softconnect, u3d->vbus_active);
+ u3d->softconnect = (is_on != 0);
+ if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
+ retval = mv_u3d_enable(u3d);
+ if (retval == 0) {
+			/*
+			 * after the clock is disabled, all register context
+			 * is lost; we have to re-initialize the registers
+			 */
+ mv_u3d_controller_reset(u3d);
+ mv_u3d_ep0_reset(u3d);
+ mv_u3d_controller_start(u3d);
+ }
+ } else if (u3d->driver && u3d->vbus_active) {
+		/* stop all the transfers in the queue */
+ mv_u3d_stop_activity(u3d, u3d->driver);
+ mv_u3d_controller_stop(u3d);
+ mv_u3d_disable(u3d);
+ }
+
+ spin_unlock_irqrestore(&u3d->lock, flags);
+
+ return retval;
+}
+
+static int mv_u3d_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
+ unsigned long flags;
+
+ if (u3d->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&u3d->lock, flags);
+
+ if (!u3d->clock_gating) {
+ clk_enable(u3d->clk);
+ if (pdata->phy_init)
+ pdata->phy_init(u3d->phy_regs);
+ }
+
+ /* hook up the driver ... */
+ u3d->driver = driver;
+
+ u3d->ep0_dir = USB_DIR_OUT;
+
+ spin_unlock_irqrestore(&u3d->lock, flags);
+
+ u3d->vbus_valid_detect = 1;
+
+ return 0;
+}
+
+static int mv_u3d_stop(struct usb_gadget *g)
+{
+ struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
+ unsigned long flags;
+
+ u3d->vbus_valid_detect = 0;
+ spin_lock_irqsave(&u3d->lock, flags);
+
+ /* enable clock to access controller register */
+ clk_enable(u3d->clk);
+ if (pdata->phy_init)
+ pdata->phy_init(u3d->phy_regs);
+
+ mv_u3d_controller_stop(u3d);
+ /* stop all usb activities */
+ u3d->gadget.speed = USB_SPEED_UNKNOWN;
+ mv_u3d_stop_activity(u3d, NULL);
+ mv_u3d_disable(u3d);
+
+ if (pdata->phy_deinit)
+ pdata->phy_deinit(u3d->phy_regs);
+ clk_disable(u3d->clk);
+
+ spin_unlock_irqrestore(&u3d->lock, flags);
+
+ u3d->driver = NULL;
+
+ return 0;
+}
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops mv_u3d_ops = {
+ /* notify controller that VBUS is powered or not */
+ .vbus_session = mv_u3d_vbus_session,
+
+ /* constrain controller's VBUS power usage */
+ .vbus_draw = mv_u3d_vbus_draw,
+
+ .pullup = mv_u3d_pullup,
+ .udc_start = mv_u3d_start,
+ .udc_stop = mv_u3d_stop,
+};
+
+static int mv_u3d_eps_init(struct mv_u3d *u3d)
+{
+ struct mv_u3d_ep *ep;
+ char name[14];
+ int i;
+
+ /* initialize ep0, ep0 in/out use eps[1] */
+ ep = &u3d->eps[1];
+ ep->u3d = u3d;
+ strncpy(ep->name, "ep0", sizeof(ep->name));
+ ep->ep.name = ep->name;
+ ep->ep.ops = &mv_u3d_ep_ops;
+ ep->wedge = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ ep->ep_num = 0;
+ ep->ep.desc = &mv_u3d_ep0_desc;
+ INIT_LIST_HEAD(&ep->queue);
+ INIT_LIST_HEAD(&ep->req_list);
+ ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+ /* add ep0 ep_context */
+ ep->ep_context = &u3d->ep_context[1];
+
+ /* initialize other endpoints */
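+	/* eps[] layout: even index = OUT half, odd index = IN half of ep i >> 1 */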
+ for (i = 2; i < u3d->max_eps * 2; i++) {
+ ep = &u3d->eps[i];
+ if (i & 1) {
+ snprintf(name, sizeof(name), "ep%din", i >> 1);
+ ep->direction = MV_U3D_EP_DIR_IN;
+ ep->ep.caps.dir_in = true;
+ } else {
+ snprintf(name, sizeof(name), "ep%dout", i >> 1);
+ ep->direction = MV_U3D_EP_DIR_OUT;
+ ep->ep.caps.dir_out = true;
+ }
+ ep->u3d = u3d;
+ strncpy(ep->name, name, sizeof(ep->name));
+ ep->ep.name = ep->name;
+
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+
+ ep->ep.ops = &mv_u3d_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+ ep->ep_num = i / 2;
+
+ INIT_LIST_HEAD(&ep->queue);
+ list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
+
+ INIT_LIST_HEAD(&ep->req_list);
+ spin_lock_init(&ep->req_lock);
+ ep->ep_context = &u3d->ep_context[i];
+ }
+
+ return 0;
+}
+
+/* delete all endpoint requests, called with spinlock held */
+static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
+{
+ /* endpoint fifo flush */
+ mv_u3d_ep_fifo_flush(&ep->ep);
+
+ while (!list_empty(&ep->queue)) {
+ struct mv_u3d_req *req = NULL;
+ req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
+ mv_u3d_done(ep, req, status);
+ }
+}
+
+/* stop all USB activities */
+static
+void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
+{
+ struct mv_u3d_ep *ep;
+
+ mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
+
+ list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
+ mv_u3d_nuke(ep, -ESHUTDOWN);
+ }
+
+ /* report disconnect; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&u3d->lock);
+ driver->disconnect(&u3d->gadget);
+ spin_lock(&u3d->lock);
+ }
+}
+
+static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
+{
+ /* Increment the error count */
+ u3d->errors++;
+ dev_err(u3d->dev, "%s\n", __func__);
+}
+
+static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
+{
+ u32 linkchange;
+
+ linkchange = ioread32(&u3d->vuc_regs->linkchange);
+ iowrite32(linkchange, &u3d->vuc_regs->linkchange);
+
+ dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
+
+ if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
+ dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
+ ioread32(&u3d->vuc_regs->ltssmstate));
+
+ u3d->usb_state = USB_STATE_DEFAULT;
+ u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
+ u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
+
+ /* set speed */
+ u3d->gadget.speed = USB_SPEED_SUPER;
+ }
+
+ if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
+ dev_dbg(u3d->dev, "link suspend\n");
+ u3d->resume_state = u3d->usb_state;
+ u3d->usb_state = USB_STATE_SUSPENDED;
+ }
+
+ if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
+ dev_dbg(u3d->dev, "link resume\n");
+ u3d->usb_state = u3d->resume_state;
+ u3d->resume_state = 0;
+ }
+
+ if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
+ dev_dbg(u3d->dev, "warm reset\n");
+ u3d->usb_state = USB_STATE_POWERED;
+ }
+
+ if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
+ dev_dbg(u3d->dev, "hot reset\n");
+ u3d->usb_state = USB_STATE_DEFAULT;
+ }
+
+ if (linkchange & MV_U3D_LINK_CHANGE_INACT)
+ dev_dbg(u3d->dev, "inactive\n");
+
+ if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
+ dev_dbg(u3d->dev, "ss.disabled\n");
+
+ if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
+ dev_dbg(u3d->dev, "vbus invalid\n");
+ u3d->usb_state = USB_STATE_ATTACHED;
+ u3d->vbus_valid_detect = 1;
+ /* if external vbus detect is not supported,
+ * we handle it here.
+ */
+ if (!u3d->vbus) {
+ spin_unlock(&u3d->lock);
+ mv_u3d_vbus_session(&u3d->gadget, 0);
+ spin_lock(&u3d->lock);
+ }
+ }
+}
+
+static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
+ struct usb_ctrlrequest *setup)
+{
+ u32 tmp;
+
+ if (u3d->usb_state != USB_STATE_DEFAULT) {
+ dev_err(u3d->dev,
+ "%s, cannot setaddr in this state (%d)\n",
+ __func__, u3d->usb_state);
+ goto err;
+ }
+
+ u3d->dev_addr = (u8)setup->wValue;
+
+ dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
+
+ if (u3d->dev_addr > 127) {
+ dev_err(u3d->dev,
+ "%s, u3d address is wrong (out of range)\n", __func__);
+ u3d->dev_addr = 0;
+ goto err;
+ }
+
+ /* update usb state */
+ u3d->usb_state = USB_STATE_ADDRESS;
+
+ /* set the new address */
+ tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
+ tmp &= ~0x7F;
+ tmp |= (u32)u3d->dev_addr;
+ iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
+
+ return;
+err:
+ mv_u3d_ep0_stall(u3d);
+}
+
+static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
+{
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
+ return 1;
+
+ return 0;
+}
+
+static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
+ struct usb_ctrlrequest *setup)
+	__releases(&u3d->lock)
+	__acquires(&u3d->lock)
+{
+ bool delegate = false;
+
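+	/* cancel anything still pending on this control ep's IN half before handling the new SETUP */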
+ mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
+
+ dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ setup->bRequestType, setup->bRequest,
+ setup->wValue, setup->wIndex, setup->wLength);
+
+	/* We process some standard setup requests here */
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup->bRequest) {
+ case USB_REQ_GET_STATUS:
+ delegate = true;
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ mv_u3d_ch9setaddress(u3d, setup);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ delegate = true;
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ delegate = true;
+ break;
+
+ default:
+ delegate = true;
+ }
+ } else
+ delegate = true;
+
+ /* delegate USB standard requests to the gadget driver */
+ if (delegate) {
+ /* USB requests handled by gadget */
+ if (setup->wLength) {
+ /* DATA phase from gadget, STATUS phase from u3d */
+ u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+ ? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
+ spin_unlock(&u3d->lock);
+ if (u3d->driver->setup(&u3d->gadget,
+ &u3d->local_setup_buff) < 0) {
+ dev_err(u3d->dev, "setup error!\n");
+ mv_u3d_ep0_stall(u3d);
+ }
+ spin_lock(&u3d->lock);
+ } else {
+ /* no DATA phase, STATUS phase from gadget */
+ u3d->ep0_dir = MV_U3D_EP_DIR_IN;
+ u3d->ep0_state = MV_U3D_STATUS_STAGE;
+ spin_unlock(&u3d->lock);
+ if (u3d->driver->setup(&u3d->gadget,
+ &u3d->local_setup_buff) < 0)
+ mv_u3d_ep0_stall(u3d);
+ spin_lock(&u3d->lock);
+ }
+
+ if (mv_u3d_is_set_configuration(setup)) {
+ dev_dbg(u3d->dev, "u3d configured\n");
+ u3d->usb_state = USB_STATE_CONFIGURED;
+ }
+ }
+}
+
+static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
+{
+ struct mv_u3d_ep_context *epcontext;
+
+ epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
+
+ /* Copy the setup packet to local buffer */
+ memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
+}
+
+static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
+{
+ u32 tmp, i;
+ /* Process all Setup packet received interrupts */
+ tmp = ioread32(&u3d->vuc_regs->setuplock);
+ if (tmp) {
+ for (i = 0; i < u3d->max_eps; i++) {
+ if (tmp & (1 << i)) {
+ mv_u3d_get_setup_data(u3d, i,
+ (u8 *)(&u3d->local_setup_buff));
+ mv_u3d_handle_setup_packet(u3d, i,
+ &u3d->local_setup_buff);
+ }
+ }
+ }
+
+ iowrite32(tmp, &u3d->vuc_regs->setuplock);
+}
+
+static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
+{
+ u32 tmp, bit_pos;
+ int i, ep_num = 0, direction = 0;
+ struct mv_u3d_ep *curr_ep;
+ struct mv_u3d_req *curr_req, *temp_req;
+ int status;
+
+ tmp = ioread32(&u3d->vuc_regs->endcomplete);
+
+ dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
+ if (!tmp)
+ return;
+ iowrite32(tmp, &u3d->vuc_regs->endcomplete);
+
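+	/* endcomplete packs OUT completions in bits 0..15 and IN completions in bits 16..31 */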
+ for (i = 0; i < u3d->max_eps * 2; i++) {
+ ep_num = i >> 1;
+ direction = i % 2;
+
+ bit_pos = 1 << (ep_num + 16 * direction);
+
+ if (!(bit_pos & tmp))
+ continue;
+
+ if (i == 0)
+ curr_ep = &u3d->eps[1];
+ else
+ curr_ep = &u3d->eps[i];
+
+ /* remove req out of ep request list after completion */
+ dev_dbg(u3d->dev, "tr comp: check req_list\n");
+ spin_lock(&curr_ep->req_lock);
+ if (!list_empty(&curr_ep->req_list)) {
+ struct mv_u3d_req *req;
+ req = list_entry(curr_ep->req_list.next,
+ struct mv_u3d_req, list);
+ list_del_init(&req->list);
+ curr_ep->processing = 0;
+ }
+ spin_unlock(&curr_ep->req_lock);
+
+		/* process the request queue until we hit an incomplete request */
+ list_for_each_entry_safe(curr_req, temp_req,
+ &curr_ep->queue, queue) {
+ status = mv_u3d_process_ep_req(u3d, i, curr_req);
+ if (status)
+ break;
+ /* write back status to req */
+ curr_req->req.status = status;
+
+ /* ep0 request completion */
+ if (ep_num == 0) {
+ mv_u3d_done(curr_ep, curr_req, 0);
+ break;
+ } else {
+ mv_u3d_done(curr_ep, curr_req, status);
+ }
+ }
+
+ dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
+ mv_u3d_start_queue(curr_ep);
+ }
+}
+
+static irqreturn_t mv_u3d_irq(int irq, void *dev)
+{
+ struct mv_u3d *u3d = (struct mv_u3d *)dev;
+ u32 status, intr;
+ u32 bridgesetting;
+ u32 trbunderrun;
+
+ spin_lock(&u3d->lock);
+
+ status = ioread32(&u3d->vuc_regs->intrcause);
+ intr = ioread32(&u3d->vuc_regs->intrenable);
+ status &= intr;
+
+ if (status == 0) {
+ spin_unlock(&u3d->lock);
+ dev_err(u3d->dev, "irq error!\n");
+ return IRQ_NONE;
+ }
+
+ if (status & MV_U3D_USBINT_VBUS_VALID) {
+ bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
+ if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
+ /* write vbus valid bit of bridge setting to clear */
+ bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
+ iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
+ dev_dbg(u3d->dev, "vbus valid\n");
+
+ u3d->usb_state = USB_STATE_POWERED;
+ u3d->vbus_valid_detect = 0;
+ /* if external vbus detect is not supported,
+ * we handle it here.
+ */
+ if (!u3d->vbus) {
+ spin_unlock(&u3d->lock);
+ mv_u3d_vbus_session(&u3d->gadget, 1);
+ spin_lock(&u3d->lock);
+ }
+ } else
+ dev_err(u3d->dev, "vbus bit is not set\n");
+ }
+
+	/* RX data is already in the 16KB FIFO. */
+ if (status & MV_U3D_USBINT_UNDER_RUN) {
+ trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
+ dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
+ iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
+ mv_u3d_irq_process_error(u3d);
+ }
+
+ if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
+ /* write one to clear */
+ iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
+ | MV_U3D_USBINT_TXDESC_ERR),
+ &u3d->vuc_regs->intrcause);
+ dev_err(u3d->dev, "desc err 0x%x\n", status);
+ mv_u3d_irq_process_error(u3d);
+ }
+
+ if (status & MV_U3D_USBINT_LINK_CHG)
+ mv_u3d_irq_process_link_change(u3d);
+
+ if (status & MV_U3D_USBINT_TX_COMPLETE)
+ mv_u3d_irq_process_tr_complete(u3d);
+
+ if (status & MV_U3D_USBINT_RX_COMPLETE)
+ mv_u3d_irq_process_tr_complete(u3d);
+
+ if (status & MV_U3D_USBINT_SETUP)
+ mv_u3d_irq_process_setup(u3d);
+
+ spin_unlock(&u3d->lock);
+ return IRQ_HANDLED;
+}
+
+static int mv_u3d_remove(struct platform_device *dev)
+{
+ struct mv_u3d *u3d = platform_get_drvdata(dev);
+
+ BUG_ON(u3d == NULL);
+
+ usb_del_gadget_udc(&u3d->gadget);
+
+ /* free memory allocated in probe */
+ dma_pool_destroy(u3d->trb_pool);
+
+ if (u3d->ep_context)
+ dma_free_coherent(&dev->dev, u3d->ep_context_size,
+ u3d->ep_context, u3d->ep_context_dma);
+
+ kfree(u3d->eps);
+
+ if (u3d->irq)
+ free_irq(u3d->irq, u3d);
+
+ if (u3d->cap_regs)
+ iounmap(u3d->cap_regs);
+ u3d->cap_regs = NULL;
+
+ kfree(u3d->status_req);
+
+ clk_put(u3d->clk);
+
+ kfree(u3d);
+
+ return 0;
+}
+
+static int mv_u3d_probe(struct platform_device *dev)
+{
+ struct mv_u3d *u3d = NULL;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
+ int retval = 0;
+ struct resource *r;
+ size_t size;
+
+ if (!dev_get_platdata(&dev->dev)) {
+ dev_err(&dev->dev, "missing platform_data\n");
+ retval = -ENODEV;
+ goto err_pdata;
+ }
+
+ u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
+ if (!u3d) {
+ retval = -ENOMEM;
+ goto err_alloc_private;
+ }
+
+ spin_lock_init(&u3d->lock);
+
+ platform_set_drvdata(dev, u3d);
+
+ u3d->dev = &dev->dev;
+ u3d->vbus = pdata->vbus;
+
+ u3d->clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(u3d->clk)) {
+ retval = PTR_ERR(u3d->clk);
+ goto err_get_clk;
+ }
+
+ r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
+ if (!r) {
+ dev_err(&dev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_get_cap_regs;
+ }
+
+ u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
+ ioremap(r->start, resource_size(r));
+ if (!u3d->cap_regs) {
+ dev_err(&dev->dev, "failed to map I/O memory\n");
+ retval = -EBUSY;
+ goto err_map_cap_regs;
+ } else {
+ dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
+ (unsigned long) r->start,
+ (unsigned long) u3d->cap_regs);
+ }
+
+ /* we will access controller register, so enable the u3d controller */
+ retval = clk_enable(u3d->clk);
+ if (retval) {
+ dev_err(&dev->dev, "clk_enable error %d\n", retval);
+ goto err_u3d_enable;
+ }
+
+ if (pdata->phy_init) {
+ retval = pdata->phy_init(u3d->phy_regs);
+ if (retval) {
+ dev_err(&dev->dev, "init phy error %d\n", retval);
+ clk_disable(u3d->clk);
+ goto err_phy_init;
+ }
+ }
+
+ u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
+ + MV_U3D_USB3_OP_REGS_OFFSET);
+
+ u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
+ + ioread32(&u3d->cap_regs->vuoff));
+
+ u3d->max_eps = 16;
+
+	/*
+	 * Some platforms use USB to download the boot image and may not
+	 * disconnect the USB gadget before loading the kernel, so stop
+	 * the u3d controller here first.
+	 */
+ mv_u3d_controller_stop(u3d);
+ iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
+
+ if (pdata->phy_deinit)
+ pdata->phy_deinit(u3d->phy_regs);
+ clk_disable(u3d->clk);
+
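+	/* two contexts (OUT and IN) per endpoint, rounded up to the required alignment */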
+ size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
+ size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
+ & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
+ u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
+ &u3d->ep_context_dma, GFP_KERNEL);
+ if (!u3d->ep_context) {
+ dev_err(&dev->dev, "allocate ep context memory failed\n");
+ retval = -ENOMEM;
+ goto err_alloc_ep_context;
+ }
+ u3d->ep_context_size = size;
+
+ /* create TRB dma_pool resource */
+ u3d->trb_pool = dma_pool_create("u3d_trb",
+ &dev->dev,
+ sizeof(struct mv_u3d_trb_hw),
+ MV_U3D_TRB_ALIGNMENT,
+ MV_U3D_DMA_BOUNDARY);
+
+ if (!u3d->trb_pool) {
+ retval = -ENOMEM;
+ goto err_alloc_trb_pool;
+ }
+
+ size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
+ u3d->eps = kzalloc(size, GFP_KERNEL);
+ if (!u3d->eps) {
+ retval = -ENOMEM;
+ goto err_alloc_eps;
+ }
+
+ /* initialize ep0 status request structure */
+ u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
+ if (!u3d->status_req) {
+ retval = -ENOMEM;
+ goto err_alloc_status_req;
+ }
+ INIT_LIST_HEAD(&u3d->status_req->queue);
+
+ /* allocate a small amount of memory to get valid address */
+ u3d->status_req->req.buf = (char *)u3d->status_req
+ + sizeof(struct mv_u3d_req);
+ u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
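+	/* the buffer sits in kzalloc()ed memory, so virt_to_phys() yields a physical address usable as req.dma */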
+
+ u3d->resume_state = USB_STATE_NOTATTACHED;
+ u3d->usb_state = USB_STATE_ATTACHED;
+ u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
+ u3d->remote_wakeup = 0;
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+ if (!r) {
+ dev_err(&dev->dev, "no IRQ resource defined\n");
+ retval = -ENODEV;
+ goto err_get_irq;
+ }
+ u3d->irq = r->start;
+
+ /* initialize gadget structure */
+ u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
+ u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */
+ INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */
+ u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ u3d->gadget.name = driver_name; /* gadget name */
+
+ mv_u3d_eps_init(u3d);
+
+	if (request_irq(u3d->irq, mv_u3d_irq,
+		IRQF_SHARED, driver_name, u3d)) {
+		dev_err(&dev->dev, "Request irq %d for u3d failed\n",
+			u3d->irq);
+		u3d->irq = 0;
+		retval = -ENODEV;
+		goto err_request_irq;
+	}
+
+ /* external vbus detection */
+ if (u3d->vbus) {
+ u3d->clock_gating = 1;
+ dev_err(&dev->dev, "external vbus detection\n");
+ }
+
+ if (!u3d->clock_gating)
+ u3d->vbus_active = 1;
+
+ /* enable usb3 controller vbus detection */
+ u3d->vbus_valid_detect = 1;
+
+ retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
+ if (retval)
+ goto err_unregister;
+
+ dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
+ u3d->clock_gating ? "with" : "without");
+
+ return 0;
+
+err_unregister:
+ free_irq(u3d->irq, u3d);
+err_get_irq:
+err_request_irq:
+ kfree(u3d->status_req);
+err_alloc_status_req:
+ kfree(u3d->eps);
+err_alloc_eps:
+ dma_pool_destroy(u3d->trb_pool);
+err_alloc_trb_pool:
+ dma_free_coherent(&dev->dev, u3d->ep_context_size,
+ u3d->ep_context, u3d->ep_context_dma);
+err_alloc_ep_context:
+err_phy_init:
+err_u3d_enable:
+ iounmap(u3d->cap_regs);
+err_map_cap_regs:
+err_get_cap_regs:
+ clk_put(u3d->clk);
+err_get_clk:
+ kfree(u3d);
+err_alloc_private:
+err_pdata:
+ return retval;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mv_u3d_suspend(struct device *dev)
+{
+ struct mv_u3d *u3d = dev_get_drvdata(dev);
+
+	/*
+	 * USB can only be suspended when the cable is unplugged, so we do
+	 * not need to handle clock_gating == 1 here; that case is covered
+	 * by the vbus session.
+	 */
+ if (!u3d->clock_gating) {
+ mv_u3d_controller_stop(u3d);
+
+ spin_lock_irq(&u3d->lock);
+ /* stop all usb activities */
+ mv_u3d_stop_activity(u3d, u3d->driver);
+ spin_unlock_irq(&u3d->lock);
+
+ mv_u3d_disable(u3d);
+ }
+
+ return 0;
+}
+
+static int mv_u3d_resume(struct device *dev)
+{
+ struct mv_u3d *u3d = dev_get_drvdata(dev);
+ int retval;
+
+ if (!u3d->clock_gating) {
+ retval = mv_u3d_enable(u3d);
+ if (retval)
+ return retval;
+
+ if (u3d->driver && u3d->softconnect) {
+ mv_u3d_controller_reset(u3d);
+ mv_u3d_ep0_reset(u3d);
+ mv_u3d_controller_start(u3d);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
+
+static void mv_u3d_shutdown(struct platform_device *dev)
+{
+ struct mv_u3d *u3d = platform_get_drvdata(dev);
+ u32 tmp;
+
+ tmp = ioread32(&u3d->op_regs->usbcmd);
+ tmp &= ~MV_U3D_CMD_RUN_STOP;
+ iowrite32(tmp, &u3d->op_regs->usbcmd);
+}
+
+static struct platform_driver mv_u3d_driver = {
+ .probe = mv_u3d_probe,
+ .remove = mv_u3d_remove,
+ .shutdown = mv_u3d_shutdown,
+ .driver = {
+ .name = "mv-u3d",
+ .pm = &mv_u3d_pm_ops,
+ },
+};
+
+module_platform_driver(mv_u3d_driver);
+MODULE_ALIAS("platform:mv-u3d");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/mv_udc.h b/drivers/usb/gadget/udc/mv_udc.h
new file mode 100644
index 000000000..b3f759c09
--- /dev/null
+++ b/drivers/usb/gadget/udc/mv_udc.h
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ */
+
+#ifndef __MV_UDC_H
+#define __MV_UDC_H
+
+#define VUSBHS_MAX_PORTS 8
+
+#define DQH_ALIGNMENT 2048
+#define DTD_ALIGNMENT 64
+#define DMA_BOUNDARY 4096
+
+#define EP_DIR_IN 1
+#define EP_DIR_OUT 0
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define EP0_MAX_PKT_SIZE 64
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP 0
+#define DATA_STATE_XMIT 1
+#define DATA_STATE_NEED_ZLP 2
+#define WAIT_FOR_OUT_STATUS 3
+#define DATA_STATE_RECV 4
+
+#define CAPLENGTH_MASK (0xff)
+#define DCCPARAMS_DEN_MASK (0x1f)
+
+#define HCSPARAMS_PPC (0x10)
+
+/* Frame Index Register Bit Masks */
+#define USB_FRINDEX_MASKS 0x3fff
+
+/* Command Register Bit Masks */
+#define USBCMD_RUN_STOP (0x00000001)
+#define USBCMD_CTRL_RESET (0x00000002)
+#define USBCMD_SETUP_TRIPWIRE_SET (0x00002000)
+#define USBCMD_SETUP_TRIPWIRE_CLEAR (~USBCMD_SETUP_TRIPWIRE_SET)
+
+#define USBCMD_ATDTW_TRIPWIRE_SET (0x00004000)
+#define USBCMD_ATDTW_TRIPWIRE_CLEAR (~USBCMD_ATDTW_TRIPWIRE_SET)
+
+/* bit 15,3,2 are for frame list size */
+#define USBCMD_FRAME_SIZE_1024 (0x00000000) /* 000 */
+#define USBCMD_FRAME_SIZE_512 (0x00000004) /* 001 */
+#define USBCMD_FRAME_SIZE_256 (0x00000008) /* 010 */
+#define USBCMD_FRAME_SIZE_128 (0x0000000C) /* 011 */
+#define USBCMD_FRAME_SIZE_64 (0x00008000) /* 100 */
+#define USBCMD_FRAME_SIZE_32 (0x00008004) /* 101 */
+#define USBCMD_FRAME_SIZE_16 (0x00008008) /* 110 */
+#define USBCMD_FRAME_SIZE_8 (0x0000800C) /* 111 */
+
+#define EPCTRL_TX_ALL_MASK (0xFFFF0000)
+#define EPCTRL_RX_ALL_MASK (0x0000FFFF)
+
+#define EPCTRL_TX_DATA_TOGGLE_RST (0x00400000)
+#define EPCTRL_TX_EP_STALL (0x00010000)
+#define EPCTRL_RX_EP_STALL (0x00000001)
+#define EPCTRL_RX_DATA_TOGGLE_RST (0x00000040)
+#define EPCTRL_RX_ENABLE (0x00000080)
+#define EPCTRL_TX_ENABLE (0x00800000)
+#define EPCTRL_CONTROL (0x00000000)
+#define EPCTRL_ISOCHRONOUS (0x00040000)
+#define EPCTRL_BULK (0x00080000)
+#define EPCTRL_INT (0x000C0000)
+#define EPCTRL_TX_TYPE (0x000C0000)
+#define EPCTRL_RX_TYPE (0x0000000C)
+#define EPCTRL_DATA_TOGGLE_INHIBIT (0x00000020)
+#define EPCTRL_TX_EP_TYPE_SHIFT (18)
+#define EPCTRL_RX_EP_TYPE_SHIFT (2)
+
+#define EPCOMPLETE_MAX_ENDPOINTS (16)
+
+/* endpoint list address bit masks */
+#define USB_EP_LIST_ADDRESS_MASK 0xfffff800
+
+#define PORTSCX_W1C_BITS 0x2a
+#define PORTSCX_PORT_RESET 0x00000100
+#define PORTSCX_PORT_POWER 0x00001000
+#define PORTSCX_FORCE_FULL_SPEED_CONNECT 0x01000000
+#define PORTSCX_PAR_XCVR_SELECT 0xC0000000
+#define PORTSCX_PORT_FORCE_RESUME 0x00000040
+#define PORTSCX_PORT_SUSPEND 0x00000080
+#define PORTSCX_PORT_SPEED_FULL 0x00000000
+#define PORTSCX_PORT_SPEED_LOW 0x04000000
+#define PORTSCX_PORT_SPEED_HIGH 0x08000000
+#define PORTSCX_PORT_SPEED_MASK 0x0C000000
+
+/* USB MODE Register Bit Masks */
+#define USBMODE_CTRL_MODE_IDLE 0x00000000
+#define USBMODE_CTRL_MODE_DEVICE 0x00000002
+#define USBMODE_CTRL_MODE_HOST 0x00000003
+#define USBMODE_CTRL_MODE_RSV 0x00000001
+#define USBMODE_SETUP_LOCK_OFF 0x00000008
+#define USBMODE_STREAM_DISABLE 0x00000010
+
+/* USB STS Register Bit Masks */
+#define USBSTS_INT 0x00000001
+#define USBSTS_ERR 0x00000002
+#define USBSTS_PORT_CHANGE 0x00000004
+#define USBSTS_FRM_LST_ROLL 0x00000008
+#define USBSTS_SYS_ERR 0x00000010
+#define USBSTS_IAA 0x00000020
+#define USBSTS_RESET 0x00000040
+#define USBSTS_SOF 0x00000080
+#define USBSTS_SUSPEND 0x00000100
+#define USBSTS_HC_HALTED 0x00001000
+#define USBSTS_RCL 0x00002000
+#define USBSTS_PERIODIC_SCHEDULE 0x00004000
+#define USBSTS_ASYNC_SCHEDULE 0x00008000
+
+
+/* Interrupt Enable Register Bit Masks */
+#define USBINTR_INT_EN (0x00000001)
+#define USBINTR_ERR_INT_EN (0x00000002)
+#define USBINTR_PORT_CHANGE_DETECT_EN (0x00000004)
+
+#define USBINTR_ASYNC_ADV_AAE (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_ENABLE (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_DISABLE (0xFFFFFFDF)
+
+#define USBINTR_RESET_EN (0x00000040)
+#define USBINTR_SOF_UFRAME_EN (0x00000080)
+#define USBINTR_DEVICE_SUSPEND (0x00000100)
+
+#define USB_DEVICE_ADDRESS_MASK (0xfe000000)
+#define USB_DEVICE_ADDRESS_BIT_SHIFT (25)
+
+struct mv_cap_regs {
+ u32 caplength_hciversion;
+ u32 hcsparams; /* HC structural parameters */
+ u32 hccparams; /* HC Capability Parameters*/
+ u32 reserved[5];
+ u32 dciversion; /* DC version number and reserved 16 bits */
+ u32 dccparams; /* DC Capability Parameters */
+};
+
+struct mv_op_regs {
+ u32 usbcmd; /* Command register */
+ u32 usbsts; /* Status register */
+ u32 usbintr; /* Interrupt enable */
+ u32 frindex; /* Frame index */
+ u32 reserved1[1];
+ u32 deviceaddr; /* Device Address */
+ u32 eplistaddr; /* Endpoint List Address */
+ u32 ttctrl; /* HOST TT status and control */
+ u32 burstsize; /* Programmable Burst Size */
+ u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
+ u32 reserved[4];
+ u32 epnak; /* Endpoint NAK */
+ u32 epnaken; /* Endpoint NAK Enable */
+ u32 configflag; /* Configured Flag register */
+ u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
+ u32 otgsc;
+ u32 usbmode; /* USB Host/Device mode */
+ u32 epsetupstat; /* Endpoint Setup Status */
+ u32 epprime; /* Endpoint Initialize */
+ u32 epflush; /* Endpoint De-initialize */
+ u32 epstatus; /* Endpoint Status */
+ u32 epcomplete; /* Endpoint Interrupt On Complete */
+ u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
+ u32 mcr; /* Mux Control */
+ u32 isr; /* Interrupt Status */
+ u32 ier; /* Interrupt Enable */
+};
+
+struct mv_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ spinlock_t lock;
+ struct completion *done;
+ struct platform_device *dev;
+ int irq;
+
+ struct mv_cap_regs __iomem *cap_regs;
+ struct mv_op_regs __iomem *op_regs;
+ void __iomem *phy_regs;
+ unsigned int max_eps;
+ struct mv_dqh *ep_dqh;
+ size_t ep_dqh_size;
+ dma_addr_t ep_dqh_dma;
+
+ struct dma_pool *dtd_pool;
+ struct mv_ep *eps;
+
+ struct mv_dtd *dtd_head;
+ struct mv_dtd *dtd_tail;
+ unsigned int dtd_entries;
+
+ struct mv_req *status_req;
+ struct usb_ctrlrequest local_setup_buff;
+
+ unsigned int resume_state; /* USB state to resume */
+ unsigned int usb_state; /* USB current state */
+ unsigned int ep0_state; /* Endpoint zero state */
+ unsigned int ep0_dir;
+
+ unsigned int dev_addr;
+ unsigned int test_mode;
+
+ int errors;
+ unsigned softconnect:1,
+ vbus_active:1,
+ remote_wakeup:1,
+ softconnected:1,
+ force_fs:1,
+ clock_gating:1,
+ active:1,
+		stopped:1;	/* stop bit is set */
+
+ struct work_struct vbus_work;
+ struct workqueue_struct *qwork;
+
+ struct usb_phy *transceiver;
+
+ struct mv_usb_platform_data *pdata;
+
+	/* some SoCs have multiple clock sources for USB */
+ struct clk *clk;
+};
+
+/* endpoint data structure */
+struct mv_ep {
+ struct usb_ep ep;
+ struct mv_udc *udc;
+ struct list_head queue;
+ struct mv_dqh *dqh;
+ u32 direction;
+ char name[14];
+ unsigned stopped:1,
+ wedge:1,
+ ep_type:2,
+ ep_num:8;
+};
+
+/* request data structure */
+struct mv_req {
+ struct usb_request req;
+ struct mv_dtd *dtd, *head, *tail;
+ struct mv_ep *ep;
+ struct list_head queue;
+ unsigned int test_mode;
+ unsigned dtd_count;
+ unsigned mapped:1;
+};
+
+#define EP_QUEUE_HEAD_MULT_POS 30
+#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000
+#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16
+#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff)
+#define EP_QUEUE_HEAD_IOS 0x00008000
+#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001
+#define EP_QUEUE_HEAD_IOC 0x00008000
+#define EP_QUEUE_HEAD_MULTO 0x00000C00
+#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040
+#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080
+#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF
+#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0
+#define EP_QUEUE_FRINDEX_MASK 0x000007FF
+#define EP_MAX_LENGTH_TRANSFER 0x4000
+
+struct mv_dqh {
+ /* Bits 16..26 Bit 15 is Interrupt On Setup */
+ u32 max_packet_length;
+ u32 curr_dtd_ptr; /* Current dTD Pointer */
+ u32 next_dtd_ptr; /* Next dTD Pointer */
+ /* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */
+ u32 size_ioc_int_sts;
+ u32 buff_ptr0; /* Buffer pointer Page 0 (12-31) */
+ u32 buff_ptr1; /* Buffer pointer Page 1 (12-31) */
+ u32 buff_ptr2; /* Buffer pointer Page 2 (12-31) */
+ u32 buff_ptr3; /* Buffer pointer Page 3 (12-31) */
+ u32 buff_ptr4; /* Buffer pointer Page 4 (12-31) */
+ u32 reserved1;
+ /* 8 bytes of setup data that follows the Setup PID */
+ u8 setup_buffer[8];
+ u32 reserved2[4];
+};
+
+
+#define DTD_NEXT_TERMINATE (0x00000001)
+#define DTD_IOC (0x00008000)
+#define DTD_STATUS_ACTIVE (0x00000080)
+#define DTD_STATUS_HALTED (0x00000040)
+#define DTD_STATUS_DATA_BUFF_ERR (0x00000020)
+#define DTD_STATUS_TRANSACTION_ERR (0x00000008)
+#define DTD_RESERVED_FIELDS (0x00007F00)
+#define DTD_ERROR_MASK (0x68)
+#define DTD_ADDR_MASK (0xFFFFFFE0)
+#define DTD_PACKET_SIZE 0x7FFF0000
+#define DTD_LENGTH_BIT_POS (16)
+
+struct mv_dtd {
+ u32 dtd_next;
+ u32 size_ioc_sts;
+ u32 buff_ptr0; /* Buffer pointer Page 0 */
+ u32 buff_ptr1; /* Buffer pointer Page 1 */
+ u32 buff_ptr2; /* Buffer pointer Page 2 */
+ u32 buff_ptr3; /* Buffer pointer Page 3 */
+ u32 buff_ptr4; /* Buffer pointer Page 4 */
+ u32 scratch_ptr;
+ /* 32 bytes */
+ dma_addr_t td_dma; /* dma address for this td */
+ struct mv_dtd *next_dtd_virt;
+};
+
+#endif
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
new file mode 100644
index 000000000..b397f3a84
--- /dev/null
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -0,0 +1,2424 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/platform_data/mv_usb.h>
+#include <asm/unaligned.h>
+
+#include "mv_udc.h"
+
+#define DRIVER_DESC "Marvell PXA USB Device Controller driver"
+
+#define ep_dir(ep) (((ep)->ep_num == 0) ? \
+ ((ep)->udc->ep0_dir) : ((ep)->direction))
+
+/* timeout value -- usec */
+#define RESET_TIMEOUT 10000
+#define FLUSH_TIMEOUT 10000
+#define EPSTATUS_TIMEOUT 10000
+#define PRIME_TIMEOUT 10000
+#define READSAFE_TIMEOUT 1000
+
+#define LOOPS_USEC_SHIFT 1
+#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
+#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
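+
+/*
+ * The timeouts above are in microseconds; each polling iteration delays
+ * LOOPS_USEC (2 us), so LOOPS() converts a usec timeout into a loop count.
+ */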
+
+static DECLARE_COMPLETION(release_done);
+
+static const char driver_name[] = "mv_udc";
+
+static void nuke(struct mv_ep *ep, int status);
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor mv_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = EP0_MAX_PKT_SIZE,
+};
+
+static void ep0_reset(struct mv_udc *udc)
+{
+ struct mv_ep *ep;
+ u32 epctrlx;
+ int i = 0;
+
+ /* ep0 in and out */
+ for (i = 0; i < 2; i++) {
+ ep = &udc->eps[i];
+ ep->udc = udc;
+
+ /* ep0 dQH */
+ ep->dqh = &udc->ep_dqh[i];
+
+ /* configure ep0 endpoint capabilities in dQH */
+ ep->dqh->max_packet_length =
+ (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+ | EP_QUEUE_HEAD_IOS;
+
+ ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
+
+ epctrlx = readl(&udc->op_regs->epctrlx[0]);
+ if (i) { /* TX */
+ epctrlx |= EPCTRL_TX_ENABLE
+ | (USB_ENDPOINT_XFER_CONTROL
+ << EPCTRL_TX_EP_TYPE_SHIFT);
+
+ } else { /* RX */
+ epctrlx |= EPCTRL_RX_ENABLE
+ | (USB_ENDPOINT_XFER_CONTROL
+ << EPCTRL_RX_EP_TYPE_SHIFT);
+ }
+
+ writel(epctrlx, &udc->op_regs->epctrlx[0]);
+ }
+}
+
+/* protocol ep0 stall, will automatically be cleared on new transaction */
+static void ep0_stall(struct mv_udc *udc)
+{
+ u32 epctrlx;
+
+ /* set TX and RX to stall */
+ epctrlx = readl(&udc->op_regs->epctrlx[0]);
+ epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
+ writel(epctrlx, &udc->op_regs->epctrlx[0]);
+
+ /* update ep0 state */
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = EP_DIR_OUT;
+}
+
+static int process_ep_req(struct mv_udc *udc, int index,
+ struct mv_req *curr_req)
+{
+ struct mv_dtd *curr_dtd;
+ struct mv_dqh *curr_dqh;
+ int actual, remaining_length;
+ int i, direction;
+ int retval = 0;
+ u32 errors;
+ u32 bit_pos;
+
+ curr_dqh = &udc->ep_dqh[index];
+ direction = index % 2;
+
+ curr_dtd = curr_req->head;
+ actual = curr_req->req.length;
+
+ for (i = 0; i < curr_req->dtd_count; i++) {
+ if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
+ dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
+ udc->eps[index].name);
+ return 1;
+ }
+
+ errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
+ if (!errors) {
+ remaining_length =
+ (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
+ >> DTD_LENGTH_BIT_POS;
+ actual -= remaining_length;
+
+ if (remaining_length) {
+ if (direction) {
+ dev_dbg(&udc->dev->dev,
+ "TX dTD remains data\n");
+ retval = -EPROTO;
+ break;
+ } else
+ break;
+ }
+ } else {
+ dev_info(&udc->dev->dev,
+ "complete_tr error: ep=%d %s: error = 0x%x\n",
+ index >> 1, direction ? "SEND" : "RECV",
+ errors);
+ if (errors & DTD_STATUS_HALTED) {
+ /* Clear the errors and Halt condition */
+ curr_dqh->size_ioc_int_sts &= ~errors;
+ retval = -EPIPE;
+ } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+ retval = -EPROTO;
+ } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+ retval = -EILSEQ;
+ }
+ }
+ if (i != curr_req->dtd_count - 1)
+ curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
+ }
+ if (retval)
+ return retval;
+
+ if (direction == EP_DIR_OUT)
+ bit_pos = 1 << curr_req->ep->ep_num;
+ else
+ bit_pos = 1 << (16 + curr_req->ep->ep_num);
+
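+	/*
+	 * If the dQH still points at this dTD, the controller may still be
+	 * processing it; wait until it advances (and, on the last dTD, until
+	 * the endpoint status bit clears) before retiring the request.
+	 */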
+ while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
+ if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
+ while (readl(&udc->op_regs->epstatus) & bit_pos)
+ udelay(1);
+ break;
+ }
+ udelay(1);
+ }
+
+ curr_req->req.actual = actual;
+
+ return 0;
+}
+
+/*
+ * done() - retire a request; caller must hold the lock with IRQs disabled
+ * @status : request status to be set, only works when
+ * request is still in progress.
+ */
+static void done(struct mv_ep *ep, struct mv_req *req, int status)
+ __releases(&ep->udc->lock)
+ __acquires(&ep->udc->lock)
+{
+ struct mv_udc *udc = NULL;
+ unsigned char stopped = ep->stopped;
+ struct mv_dtd *curr_td, *next_td;
+ int j;
+
+ udc = (struct mv_udc *)ep->udc;
+	/* Remove the req from ep->queue */
+ list_del_init(&req->queue);
+
+ /* req.status should be set as -EINPROGRESS in ep_queue() */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ /* Free dtd for the request */
+ next_td = req->head;
+ for (j = 0; j < req->dtd_count; j++) {
+ curr_td = next_td;
+ if (j != req->dtd_count - 1)
+ next_td = curr_td->next_dtd_virt;
+ dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
+ }
+
+ usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
+
+ if (status && (status != -ESHUTDOWN))
+ dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ ep->stopped = 1;
+
+ spin_unlock(&ep->udc->lock);
+
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+
+ spin_lock(&ep->udc->lock);
+ ep->stopped = stopped;
+}
+
+static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
+{
+ struct mv_udc *udc;
+ struct mv_dqh *dqh;
+ u32 bit_pos, direction;
+ u32 usbcmd, epstatus;
+ unsigned int loops;
+ int retval = 0;
+
+ udc = ep->udc;
+ direction = ep_dir(ep);
+ dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
+ bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+ /* check if the pipe is empty */
+ if (!(list_empty(&ep->queue))) {
+ struct mv_req *lastreq;
+ lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
+ lastreq->tail->dtd_next =
+ req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+ wmb();
+
+ if (readl(&udc->op_regs->epprime) & bit_pos)
+ goto done;
+
+ loops = LOOPS(READSAFE_TIMEOUT);
+ while (1) {
+ /* start with setting the semaphores */
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
+ writel(usbcmd, &udc->op_regs->usbcmd);
+
+ /* read the endpoint status */
+ epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
+
+ /*
+			 * Re-read the ATDTW semaphore bit to check if it was
+			 * cleared. When the hardware sees a hazard it clears
+			 * the bit; otherwise the bit stays set and we can
+			 * proceed with priming the endpoint if it is not
+			 * already primed.
+ */
+ if (readl(&udc->op_regs->usbcmd)
+ & USBCMD_ATDTW_TRIPWIRE_SET)
+ break;
+
+ loops--;
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+ "Timeout for ATDTW_TRIPWIRE...\n");
+ retval = -ETIME;
+ goto done;
+ }
+ udelay(LOOPS_USEC);
+ }
+
+ /* Clear the semaphore */
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+ writel(usbcmd, &udc->op_regs->usbcmd);
+
+ if (epstatus)
+ goto done;
+ }
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime the Endpoint */
+ writel(bit_pos, &udc->op_regs->epprime);
+
+done:
+ return retval;
+}
+
+static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
+ dma_addr_t *dma, int *is_last)
+{
+ struct mv_dtd *dtd;
+ struct mv_udc *udc;
+ struct mv_dqh *dqh;
+ u32 temp, mult = 0;
+
+ /* how big will this transfer be? */
+ if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
+ dqh = req->ep->dqh;
+ mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
+ & 0x3;
+ *length = min(req->req.length - req->req.actual,
+ (unsigned)(mult * req->ep->ep.maxpacket));
+ } else
+ *length = min(req->req.length - req->req.actual,
+ (unsigned)EP_MAX_LENGTH_TRANSFER);
+
+ udc = req->ep->udc;
+
+ /*
+	 * Be careful that no _GFP_HIGHMEM is set,
+	 * or we cannot use dma_to_virt
+ */
+ dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
+ if (dtd == NULL)
+ return dtd;
+
+ dtd->td_dma = *dma;
+ /* initialize buffer page pointers */
+ temp = (u32)(req->req.dma + req->req.actual);
+ dtd->buff_ptr0 = cpu_to_le32(temp);
+ temp &= ~0xFFF;
+ dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
+ dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
+ dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
+ dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
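+	/*
+	 * Five 4 KiB page pointers cover EP_MAX_LENGTH_TRANSFER (16 KiB)
+	 * even when the buffer start is not page-aligned.
+	 */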
+
+ req->req.actual += *length;
+
+ /* zlp is needed if req->req.zero is set */
+ if (req->req.zero) {
+ if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+ *is_last = 1;
+ else
+ *is_last = 0;
+ } else if (req->req.length == req->req.actual)
+ *is_last = 1;
+ else
+ *is_last = 0;
+
+ /* Fill in the transfer size; set active bit */
+ temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+
+ /* Enable interrupt for the last dtd of a request */
+ if (*is_last && !req->req.no_interrupt)
+ temp |= DTD_IOC;
+
+ temp |= mult << 10;
+
+ dtd->size_ioc_sts = temp;
+
+ mb();
+
+ return dtd;
+}
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct mv_req *req)
+{
+ unsigned count;
+ int is_last, is_first = 1;
+ struct mv_dtd *dtd, *last_dtd = NULL;
+ dma_addr_t dma;
+
+ do {
+ dtd = build_dtd(req, &count, &dma, &is_last);
+ if (dtd == NULL)
+ return -ENOMEM;
+
+ if (is_first) {
+ is_first = 0;
+ req->head = dtd;
+ } else {
+ last_dtd->dtd_next = dma;
+ last_dtd->next_dtd_virt = dtd;
+ }
+ last_dtd = dtd;
+ req->dtd_count++;
+ } while (!is_last);
+
+ /* set terminate bit to 1 for the last dTD */
+ dtd->dtd_next = DTD_NEXT_TERMINATE;
+
+ req->tail = dtd;
+
+ return 0;
+}
+
+static int mv_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct mv_udc *udc;
+ struct mv_ep *ep;
+ struct mv_dqh *dqh;
+ u16 max = 0;
+ u32 bit_pos, epctrlx, direction;
+ const unsigned char zlt = 1;
+ unsigned char ios, mult;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct mv_ep, ep);
+ udc = ep->udc;
+
+ if (!_ep || !desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ direction = ep_dir(ep);
+ max = usb_endpoint_maxp(desc);
+
+ /*
+ * disable HW zero length termination select
+ * driver handles zero length packet through req->req.zero
+ */
+ bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+
+ /* Check if the Endpoint is Primed */
+ if ((readl(&udc->op_regs->epprime) & bit_pos)
+ || (readl(&udc->op_regs->epstatus) & bit_pos)) {
+ dev_info(&udc->dev->dev,
+ "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
+ " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+ (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
+ (unsigned)readl(&udc->op_regs->epprime),
+ (unsigned)readl(&udc->op_regs->epstatus),
+ (unsigned)bit_pos);
+ goto en_done;
+ }
+
+ /* Set the max packet length, interrupt on Setup and Mult fields */
+ ios = 0;
+ mult = 0;
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ ios = 1;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* Calculate transactions needed for high bandwidth iso */
+ mult = usb_endpoint_maxp_mult(desc);
+ /* 3 transactions at most */
+ if (mult > 3)
+ goto en_done;
+ break;
+ default:
+ goto en_done;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* Get the endpoint queue head address */
+ dqh = ep->dqh;
+ dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+ | (mult << EP_QUEUE_HEAD_MULT_POS)
+ | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
+ | (ios ? EP_QUEUE_HEAD_IOS : 0);
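+	/* writing 1 sets the Terminate bit (EP_QUEUE_HEAD_NEXT_TERMINATE): no dTD linked yet */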
+ dqh->next_dtd_ptr = 1;
+ dqh->size_ioc_int_sts = 0;
+
+ ep->ep.maxpacket = max;
+ ep->ep.desc = desc;
+ ep->stopped = 0;
+
+ /* Enable the endpoint for Rx or Tx and set the endpoint type */
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+ if (direction == EP_DIR_IN) {
+ epctrlx &= ~EPCTRL_TX_ALL_MASK;
+ epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
+ | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ << EPCTRL_TX_EP_TYPE_SHIFT);
+ } else {
+ epctrlx &= ~EPCTRL_RX_ALL_MASK;
+ epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
+ | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ << EPCTRL_RX_EP_TYPE_SHIFT);
+ }
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+ /*
+ * Implement Guideline (GL# USB-7) The unused endpoint type must
+ * be programmed to bulk.
+ */
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+ if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
+ epctrlx |= (USB_ENDPOINT_XFER_BULK
+ << EPCTRL_RX_EP_TYPE_SHIFT);
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+ }
+
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+ if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
+ epctrlx |= (USB_ENDPOINT_XFER_BULK
+ << EPCTRL_TX_EP_TYPE_SHIFT);
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+en_done:
+ return -EINVAL;
+}
+
+static int mv_ep_disable(struct usb_ep *_ep)
+{
+ struct mv_udc *udc;
+ struct mv_ep *ep;
+ struct mv_dqh *dqh;
+ u32 epctrlx, direction;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct mv_ep, ep);
+ if ((_ep == NULL) || !ep->ep.desc)
+ return -EINVAL;
+
+ udc = ep->udc;
+
+ /* Get the endpoint queue head address */
+ dqh = ep->dqh;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ direction = ep_dir(ep);
+
+ /* Reset the max packet length and the interrupt on Setup */
+ dqh->max_packet_length = 0;
+
+ /* Disable the endpoint for Rx or Tx and reset the endpoint type */
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+ epctrlx &= ~((direction == EP_DIR_IN)
+ ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
+ : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+ /* nuke all pending requests (does flush) */
+ nuke(ep, -ESHUTDOWN);
+
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static struct usb_request *
+mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct mv_req *req = NULL;
+
+ req = kzalloc(sizeof *req, gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct mv_req *req = NULL;
+
+ req = container_of(_req, struct mv_req, req);
+
+ if (_req)
+ kfree(req);
+}
+
+static void mv_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct mv_udc *udc;
+ u32 bit_pos, direction;
+ struct mv_ep *ep;
+ unsigned int loops;
+
+ if (!_ep)
+ return;
+
+ ep = container_of(_ep, struct mv_ep, ep);
+ if (!ep->ep.desc)
+ return;
+
+ udc = ep->udc;
+ direction = ep_dir(ep);
+
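+	/* ep0 flushes both directions at once: bit 0 (OUT) and bit 16 (IN) */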
+ if (ep->ep_num == 0)
+ bit_pos = (1 << 16) | 1;
+ else if (direction == EP_DIR_OUT)
+ bit_pos = 1 << ep->ep_num;
+ else
+ bit_pos = 1 << (16 + ep->ep_num);
+
+ loops = LOOPS(EPSTATUS_TIMEOUT);
+ do {
+ unsigned int inter_loops;
+
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+ "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+ (unsigned)readl(&udc->op_regs->epstatus),
+ (unsigned)bit_pos);
+ return;
+ }
+ /* Write 1 to the Flush register */
+ writel(bit_pos, &udc->op_regs->epflush);
+
+ /* Wait until flushing completed */
+ inter_loops = LOOPS(FLUSH_TIMEOUT);
+ while (readl(&udc->op_regs->epflush)) {
+ /*
+ * ENDPTFLUSH bit should be cleared to indicate this
+ * operation is complete
+ */
+ if (inter_loops == 0) {
+ dev_err(&udc->dev->dev,
+ "TIMEOUT for ENDPTFLUSH=0x%x,"
+ "bit_pos=0x%x\n",
+ (unsigned)readl(&udc->op_regs->epflush),
+ (unsigned)bit_pos);
+ return;
+ }
+ inter_loops--;
+ udelay(LOOPS_USEC);
+ }
+ loops--;
+ } while (readl(&udc->op_regs->epstatus) & bit_pos);
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+ struct mv_req *req = container_of(_req, struct mv_req, req);
+ struct mv_udc *udc = ep->udc;
+ unsigned long flags;
+ int retval;
+
+ /* catch various bogus parameters */
+ if (!_req || !req->req.complete || !req->req.buf
+ || !list_empty(&req->queue)) {
+ dev_err(&udc->dev->dev, "%s, bad params", __func__);
+ return -EINVAL;
+ }
+ if (unlikely(!_ep || !ep->ep.desc)) {
+ dev_err(&udc->dev->dev, "%s, bad ep", __func__);
+ return -EINVAL;
+ }
+
+ udc = ep->udc;
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ req->ep = ep;
+
+ /* map virtual address to hardware */
+ retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
+ if (retval)
+ return retval;
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ req->dtd_count = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* build dtds and push them to device queue */
+ if (!req_to_dtd(req)) {
+ retval = queue_dtd(ep, req);
+ if (retval) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ dev_err(&udc->dev->dev, "Failed to queue dtd\n");
+ goto err_unmap_dma;
+ }
+ } else {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
+ retval = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ /* Update ep0 state */
+ if (ep->ep_num == 0)
+ udc->ep0_state = DATA_STATE_XMIT;
+
+ /* irq handler advances the queue */
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+
+err_unmap_dma:
+ usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
+
+ return retval;
+}
+
+static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
+{
+ struct mv_dqh *dqh = ep->dqh;
+ u32 bit_pos;
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+	/* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+ /* Prime the Endpoint */
+ writel(bit_pos, &ep->udc->op_regs->epprime);
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+ struct mv_req *req = NULL, *iter;
+ struct mv_udc *udc = ep->udc;
+ unsigned long flags;
+ int stopped, ret = 0;
+ u32 epctrlx;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ stopped = ep->stopped;
+
+ /* Stop the ep before we deal with the queue */
+ ep->stopped = 1;
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+ if (ep_dir(ep) == EP_DIR_IN)
+ epctrlx &= ~EPCTRL_TX_ENABLE;
+ else
+ epctrlx &= ~EPCTRL_RX_ENABLE;
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* The request is in progress, or completed but not dequeued */
+ if (ep->queue.next == &req->queue) {
+ _req->status = -ECONNRESET;
+ mv_ep_fifo_flush(_ep); /* flush current transfer */
+
+ /* The request isn't the last request in this ep queue */
+ if (req->queue.next != &ep->queue) {
+ struct mv_req *next_req;
+
+ next_req = list_entry(req->queue.next,
+ struct mv_req, queue);
+
+ /* Point the QH to the first TD of next request */
+ mv_prime_ep(ep, next_req);
+ } else {
+ struct mv_dqh *qh;
+
+ qh = ep->dqh;
+ qh->next_dtd_ptr = 1;
+ qh->size_ioc_int_sts = 0;
+ }
+
+ /* The request hasn't been processed, patch up the TD chain */
+ } else {
+ struct mv_req *prev_req;
+
+ prev_req = list_entry(req->queue.prev, struct mv_req, queue);
+ writel(readl(&req->tail->dtd_next),
+ &prev_req->tail->dtd_next);
+
+ }
+
+ done(ep, req, -ECONNRESET);
+
+ /* Enable EP */
+out:
+ epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+ if (ep_dir(ep) == EP_DIR_IN)
+ epctrlx |= EPCTRL_TX_ENABLE;
+ else
+ epctrlx |= EPCTRL_RX_ENABLE;
+ writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+ ep->stopped = stopped;
+
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
+{
+ u32 epctrlx;
+
+ epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+
+ if (stall) {
+ if (direction == EP_DIR_IN)
+ epctrlx |= EPCTRL_TX_EP_STALL;
+ else
+ epctrlx |= EPCTRL_RX_EP_STALL;
+ } else {
+ if (direction == EP_DIR_IN) {
+ epctrlx &= ~EPCTRL_TX_EP_STALL;
+ epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
+ } else {
+ epctrlx &= ~EPCTRL_RX_EP_STALL;
+ epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
+ }
+ }
+ writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
+}
+
+static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
+{
+ u32 epctrlx;
+
+ epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+
+ if (direction == EP_DIR_OUT)
+ return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
+ else
+ return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
+}
+
+static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+ struct mv_ep *ep;
+ unsigned long flags;
+ int status = 0;
+ struct mv_udc *udc;
+
+ ep = container_of(_ep, struct mv_ep, ep);
+ udc = ep->udc;
+ if (!_ep || !ep->ep.desc) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ status = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /*
+	 * An attempt to halt an IN ep will fail if any transfer requests
+	 * are still queued
+ */
+ if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
+ status = -EAGAIN;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
+ if (halt && wedge)
+ ep->wedge = 1;
+ else if (!halt)
+ ep->wedge = 0;
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ if (ep->ep_num == 0) {
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = EP_DIR_OUT;
+ }
+out:
+ return status;
+}
+
+static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+ return mv_ep_set_halt_wedge(_ep, halt, 0);
+}
+
+static int mv_ep_set_wedge(struct usb_ep *_ep)
+{
+ return mv_ep_set_halt_wedge(_ep, 1, 1);
+}
+
+static const struct usb_ep_ops mv_ep_ops = {
+ .enable = mv_ep_enable,
+ .disable = mv_ep_disable,
+
+ .alloc_request = mv_alloc_request,
+ .free_request = mv_free_request,
+
+ .queue = mv_ep_queue,
+ .dequeue = mv_ep_dequeue,
+
+ .set_wedge = mv_ep_set_wedge,
+ .set_halt = mv_ep_set_halt,
+ .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
+};
+
+static int udc_clock_enable(struct mv_udc *udc)
+{
+ return clk_prepare_enable(udc->clk);
+}
+
+static void udc_clock_disable(struct mv_udc *udc)
+{
+ clk_disable_unprepare(udc->clk);
+}
+
+static void udc_stop(struct mv_udc *udc)
+{
+ u32 tmp;
+
+ /* Disable interrupts */
+ tmp = readl(&udc->op_regs->usbintr);
+ tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
+ USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
+ writel(tmp, &udc->op_regs->usbintr);
+
+ udc->stopped = 1;
+
+	/* Clear the Run/Stop bit in the command register to stop the UDC */
+ tmp = readl(&udc->op_regs->usbcmd);
+ tmp &= ~USBCMD_RUN_STOP;
+ writel(tmp, &udc->op_regs->usbcmd);
+}
+
+static void udc_start(struct mv_udc *udc)
+{
+ u32 usbintr;
+
+ usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
+ | USBINTR_PORT_CHANGE_DETECT_EN
+ | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
+ /* Enable interrupts */
+ writel(usbintr, &udc->op_regs->usbintr);
+
+ udc->stopped = 0;
+
+ /* Set the Run bit in the command register */
+ writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
+}
+
+static int udc_reset(struct mv_udc *udc)
+{
+ unsigned int loops;
+ u32 tmp, portsc;
+
+ /* Stop the controller */
+ tmp = readl(&udc->op_regs->usbcmd);
+ tmp &= ~USBCMD_RUN_STOP;
+ writel(tmp, &udc->op_regs->usbcmd);
+
+ /* Reset the controller to get default values */
+ writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
+
+ /* wait for reset to complete */
+ loops = LOOPS(RESET_TIMEOUT);
+ while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+				"Timeout waiting for RESET to complete\n");
+ return -ETIMEDOUT;
+ }
+ loops--;
+ udelay(LOOPS_USEC);
+ }
+
+ /* set controller to device mode */
+ tmp = readl(&udc->op_regs->usbmode);
+ tmp |= USBMODE_CTRL_MODE_DEVICE;
+
+ /* turn setup lockout off, require setup tripwire in usbcmd */
+ tmp |= USBMODE_SETUP_LOCK_OFF;
+
+ writel(tmp, &udc->op_regs->usbmode);
+
+ writel(0x0, &udc->op_regs->epsetupstat);
+
+ /* Configure the Endpoint List Address */
+ writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
+ &udc->op_regs->eplistaddr);
+
+ portsc = readl(&udc->op_regs->portsc[0]);
+ if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
+		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
+
+ if (udc->force_fs)
+ portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
+ else
+ portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
+
+ writel(portsc, &udc->op_regs->portsc[0]);
+
+ tmp = readl(&udc->op_regs->epctrlx[0]);
+ tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
+ writel(tmp, &udc->op_regs->epctrlx[0]);
+
+ return 0;
+}
+
+static int mv_udc_enable_internal(struct mv_udc *udc)
+{
+ int retval;
+
+ if (udc->active)
+ return 0;
+
+ dev_dbg(&udc->dev->dev, "enable udc\n");
+ retval = udc_clock_enable(udc);
+ if (retval)
+ return retval;
+
+ if (udc->pdata->phy_init) {
+ retval = udc->pdata->phy_init(udc->phy_regs);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "init phy error %d\n", retval);
+ udc_clock_disable(udc);
+ return retval;
+ }
+ }
+ udc->active = 1;
+
+ return 0;
+}
+
+static int mv_udc_enable(struct mv_udc *udc)
+{
+ if (udc->clock_gating)
+ return mv_udc_enable_internal(udc);
+
+ return 0;
+}
+
+static void mv_udc_disable_internal(struct mv_udc *udc)
+{
+ if (udc->active) {
+ dev_dbg(&udc->dev->dev, "disable udc\n");
+ if (udc->pdata->phy_deinit)
+ udc->pdata->phy_deinit(udc->phy_regs);
+ udc_clock_disable(udc);
+ udc->active = 0;
+ }
+}
+
+static void mv_udc_disable(struct mv_udc *udc)
+{
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+}
+
+static int mv_udc_get_frame(struct usb_gadget *gadget)
+{
+ struct mv_udc *udc;
+ u16 retval;
+
+ if (!gadget)
+ return -ENODEV;
+
+ udc = container_of(gadget, struct mv_udc, gadget);
+
+ retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+
+ return retval;
+}
+
+/* Tries to wake up the host connected to this gadget */
+static int mv_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
+ u32 portsc;
+
+ /* Remote wakeup feature not enabled by host */
+ if (!udc->remote_wakeup)
+ return -ENOTSUPP;
+
+	portsc = readl(&udc->op_regs->portsc[0]);
+ /* not suspended? */
+ if (!(portsc & PORTSCX_PORT_SUSPEND))
+ return 0;
+ /* trigger force resume */
+ portsc |= PORTSCX_PORT_FORCE_RESUME;
+ writel(portsc, &udc->op_regs->portsc[0]);
+ return 0;
+}
+
+static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct mv_udc *udc;
+ unsigned long flags;
+ int retval = 0;
+
+ udc = container_of(gadget, struct mv_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+
+ udc->vbus_active = (is_active != 0);
+
+ dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+ __func__, udc->softconnect, udc->vbus_active);
+
+ if (udc->driver && udc->softconnect && udc->vbus_active) {
+ retval = mv_udc_enable(udc);
+ if (retval == 0) {
+			/* The clock was gated off; re-initialize the registers */
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
+ }
+ } else if (udc->driver && udc->softconnect) {
+ if (!udc->active)
+ goto out;
+
+		/* stop all transfers in the queue */
+ stop_activity(udc, udc->driver);
+ udc_stop(udc);
+ mv_udc_disable(udc);
+ }
+
+out:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return retval;
+}
+
+static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct mv_udc *udc;
+ unsigned long flags;
+ int retval = 0;
+
+ udc = container_of(gadget, struct mv_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+
+ udc->softconnect = (is_on != 0);
+
+ dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+ __func__, udc->softconnect, udc->vbus_active);
+
+ if (udc->driver && udc->softconnect && udc->vbus_active) {
+ retval = mv_udc_enable(udc);
+ if (retval == 0) {
+			/* The clock was gated off; re-initialize the registers */
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
+ }
+ } else if (udc->driver && udc->vbus_active) {
+		/* stop all transfers in the queue */
+ stop_activity(udc, udc->driver);
+ udc_stop(udc);
+ mv_udc_disable(udc);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return retval;
+}
+
+static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
+static int mv_udc_stop(struct usb_gadget *);
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops mv_ops = {
+
+ /* returns the current frame number */
+ .get_frame = mv_udc_get_frame,
+
+ /* tries to wake up the host connected to this gadget */
+ .wakeup = mv_udc_wakeup,
+
+ /* notify controller that VBUS is powered or not */
+ .vbus_session = mv_udc_vbus_session,
+
+ /* D+ pullup, software-controlled connect/disconnect to USB host */
+ .pullup = mv_udc_pullup,
+ .udc_start = mv_udc_start,
+ .udc_stop = mv_udc_stop,
+};
+
+static int eps_init(struct mv_udc *udc)
+{
+ struct mv_ep *ep;
+ char name[14];
+ int i;
+
+ /* initialize ep0 */
+ ep = &udc->eps[0];
+ ep->udc = udc;
+ strncpy(ep->name, "ep0", sizeof(ep->name));
+ ep->ep.name = ep->name;
+ ep->ep.ops = &mv_ep_ops;
+ ep->wedge = 0;
+ ep->stopped = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ ep->ep_num = 0;
+ ep->ep.desc = &mv_ep0_desc;
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
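+	/*
+	 * eps[] is indexed as ep_num * 2 + direction; eps[0] handles both
+	 * directions of ep0, so eps[1] stays unused and the loop below
+	 * starts at 2.
+	 */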
+ /* initialize other endpoints */
+ for (i = 2; i < udc->max_eps * 2; i++) {
+ ep = &udc->eps[i];
+ if (i % 2) {
+ snprintf(name, sizeof(name), "ep%din", i / 2);
+ ep->direction = EP_DIR_IN;
+ ep->ep.caps.dir_in = true;
+ } else {
+ snprintf(name, sizeof(name), "ep%dout", i / 2);
+ ep->direction = EP_DIR_OUT;
+ ep->ep.caps.dir_out = true;
+ }
+ ep->udc = udc;
+ strncpy(ep->name, name, sizeof(ep->name));
+ ep->ep.name = ep->name;
+
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+
+ ep->ep.ops = &mv_ep_ops;
+ ep->stopped = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+ ep->ep_num = i / 2;
+
+ INIT_LIST_HEAD(&ep->queue);
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+ ep->dqh = &udc->ep_dqh[i];
+ }
+
+ return 0;
+}
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct mv_ep *ep, int status)
+{
+ /* called with spinlock held */
+ ep->stopped = 1;
+
+ /* endpoint fifo flush */
+ mv_ep_fifo_flush(&ep->ep);
+
+ while (!list_empty(&ep->queue)) {
+ struct mv_req *req = NULL;
+ req = list_entry(ep->queue.next, struct mv_req, queue);
+ done(ep, req, status);
+ }
+}
+
+static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
+{
+ struct mv_ep *ep;
+
+ nuke(&udc->eps[0], -ESHUTDOWN);
+
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+ nuke(ep, -ESHUTDOWN);
+ }
+
+ /* report reset; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, driver);
+ spin_lock(&udc->lock);
+ }
+}
+/* stop all USB activities */
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
+{
+ struct mv_ep *ep;
+
+ nuke(&udc->eps[0], -ESHUTDOWN);
+
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+ nuke(ep, -ESHUTDOWN);
+ }
+
+ /* report disconnect; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&udc->lock);
+ driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+}
+
+static int mv_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct mv_udc *udc;
+ int retval = 0;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct mv_udc, gadget);
+
+ if (udc->driver)
+ return -EBUSY;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* hook up the driver ... */
+ udc->driver = driver;
+
+ udc->usb_state = USB_STATE_ATTACHED;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->ep0_dir = EP_DIR_OUT;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->transceiver) {
+ retval = otg_set_peripheral(udc->transceiver->otg,
+ &udc->gadget);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "unable to register peripheral to otg\n");
+ udc->driver = NULL;
+ return retval;
+ }
+ }
+
+	/* When booting with the cable attached, no VBUS IRQ will occur */
+ if (udc->qwork)
+ queue_work(udc->qwork, &udc->vbus_work);
+
+ return 0;
+}
+
+static int mv_udc_stop(struct usb_gadget *gadget)
+{
+ struct mv_udc *udc;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct mv_udc, gadget);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ mv_udc_enable(udc);
+ udc_stop(udc);
+
+ /* stop all usb activities */
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ stop_activity(udc, NULL);
+ mv_udc_disable(udc);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ /* unbind gadget driver */
+ udc->driver = NULL;
+
+ return 0;
+}
+
+static void mv_set_ptc(struct mv_udc *udc, u32 mode)
+{
+ u32 portsc;
+
+ portsc = readl(&udc->op_regs->portsc[0]);
+ portsc |= mode << 16;
+ writel(portsc, &udc->op_regs->portsc[0]);
+}
+
+static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
+ struct mv_req *req = container_of(_req, struct mv_req, req);
+ struct mv_udc *udc;
+ unsigned long flags;
+
+ udc = mvep->udc;
+
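+	/* the test mode may only be entered once the status stage completes */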
+ dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (req->test_mode) {
+ mv_set_ptc(udc, req->test_mode);
+ req->test_mode = 0;
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+static int
+udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
+{
+ int retval = 0;
+ struct mv_req *req;
+ struct mv_ep *ep;
+
+ ep = &udc->eps[0];
+ udc->ep0_dir = direction;
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
+
+ req = udc->status_req;
+
+	/* fill in the request structure */
+ if (empty == false) {
+ *((u16 *) req->req.buf) = cpu_to_le16(status);
+ req->req.length = 2;
+ } else
+ req->req.length = 0;
+
+ req->ep = ep;
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+ if (udc->test_mode) {
+ req->req.complete = prime_status_complete;
+ req->test_mode = udc->test_mode;
+ udc->test_mode = 0;
+ } else
+ req->req.complete = NULL;
+ req->dtd_count = 0;
+
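+	/* map the status buffer for DMA if it has not been mapped yet */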
+ if (req->req.dma == DMA_ADDR_INVALID) {
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+ }
+
+ /* prime the data phase */
+ if (!req_to_dtd(req)) {
+ retval = queue_dtd(ep, req);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+				"Failed to queue dtd when priming status\n");
+ goto out;
+ }
+	} else {	/* no mem */
+ retval = -ENOMEM;
+ dev_err(&udc->dev->dev,
+			"Failed to dma_pool_alloc when priming status\n");
+ goto out;
+ }
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ return 0;
+out:
+ usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
+
+ return retval;
+}
+
+static void mv_udc_testmode(struct mv_udc *udc, u16 index)
+{
+ if (index <= USB_TEST_FORCE_ENABLE) {
+ udc->test_mode = index;
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+ ep0_stall(udc);
+	} else
+		dev_err(&udc->dev->dev,
+			"This test mode (%d) is not supported\n", index);
+}
+
+static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+ udc->dev_addr = (u8)setup->wValue;
+
+ /* update usb state */
+ udc->usb_state = USB_STATE_ADDRESS;
+
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+ ep0_stall(udc);
+}
+
+static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
+ struct usb_ctrlrequest *setup)
+{
+ u16 status = 0;
+ int retval;
+
+ if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+ != (USB_DIR_IN | USB_TYPE_STANDARD))
+ return;
+
+ if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+ } else if ((setup->bRequestType & USB_RECIP_MASK)
+ == USB_RECIP_INTERFACE) {
+ /* get interface status */
+ status = 0;
+ } else if ((setup->bRequestType & USB_RECIP_MASK)
+ == USB_RECIP_ENDPOINT) {
+ u8 ep_num, direction;
+
+ ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+ direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+ ? EP_DIR_IN : EP_DIR_OUT;
+ status = ep_is_stall(udc, ep_num, direction)
+ << USB_ENDPOINT_HALT;
+ }
+
+ retval = udc_prime_status(udc, EP_DIR_IN, status, false);
+ if (retval)
+ ep0_stall(udc);
+ else
+ udc->ep0_state = DATA_STATE_XMIT;
+}
+
+static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+ u8 ep_num;
+ u8 direction;
+ struct mv_ep *ep;
+
+ if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+ == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+ switch (setup->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ udc->remote_wakeup = 0;
+ break;
+ default:
+ goto out;
+ }
+ } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+ == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+ switch (setup->wValue) {
+ case USB_ENDPOINT_HALT:
+ ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+ direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+ ? EP_DIR_IN : EP_DIR_OUT;
+ if (setup->wValue != 0 || setup->wLength != 0
+ || ep_num > udc->max_eps)
+ goto out;
+ ep = &udc->eps[ep_num * 2 + direction];
+ if (ep->wedge == 1)
+ break;
+ spin_unlock(&udc->lock);
+ ep_set_stall(udc, ep_num, direction, 0);
+ spin_lock(&udc->lock);
+ break;
+ default:
+ goto out;
+ }
+ } else
+ goto out;
+
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+ ep0_stall(udc);
+out:
+ return;
+}
+
+static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+ u8 ep_num;
+ u8 direction;
+
+ if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+ == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+ switch (setup->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ udc->remote_wakeup = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (setup->wIndex & 0xFF
+ || udc->gadget.speed != USB_SPEED_HIGH)
+ ep0_stall(udc);
+
+ if (udc->usb_state != USB_STATE_CONFIGURED
+ && udc->usb_state != USB_STATE_ADDRESS
+ && udc->usb_state != USB_STATE_DEFAULT)
+ ep0_stall(udc);
+
+ mv_udc_testmode(udc, (setup->wIndex >> 8));
+ goto out;
+ default:
+ goto out;
+ }
+ } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+ == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+ switch (setup->wValue) {
+ case USB_ENDPOINT_HALT:
+ ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+ direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+ ? EP_DIR_IN : EP_DIR_OUT;
+ if (setup->wValue != 0 || setup->wLength != 0
+ || ep_num > udc->max_eps)
+ goto out;
+ spin_unlock(&udc->lock);
+ ep_set_stall(udc, ep_num, direction, 1);
+ spin_lock(&udc->lock);
+ break;
+ default:
+ goto out;
+ }
+ } else
+ goto out;
+
+ if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+ ep0_stall(udc);
+out:
+ return;
+}
+
+static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
+ struct usb_ctrlrequest *setup)
+ __releases(&ep->udc->lock)
+ __acquires(&ep->udc->lock)
+{
+ bool delegate = false;
+
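+	/* a new SETUP packet supersedes anything still queued on ep0 OUT */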
+ nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
+
+ dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ setup->bRequestType, setup->bRequest,
+ setup->wValue, setup->wIndex, setup->wLength);
+ /* We process some standard setup requests here */
+ if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup->bRequest) {
+ case USB_REQ_GET_STATUS:
+ ch9getstatus(udc, ep_num, setup);
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ ch9setaddress(udc, setup);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ ch9clearfeature(udc, setup);
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ ch9setfeature(udc, setup);
+ break;
+
+ default:
+ delegate = true;
+ }
+ } else
+ delegate = true;
+
+	/* delegate the remaining requests to the gadget driver */
+ if (delegate == true) {
+ /* USB requests handled by gadget */
+ if (setup->wLength) {
+ /* DATA phase from gadget, STATUS phase from udc */
+ udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+ ? EP_DIR_IN : EP_DIR_OUT;
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget,
+ &udc->local_setup_buff) < 0)
+ ep0_stall(udc);
+ spin_lock(&udc->lock);
+ udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+ ? DATA_STATE_XMIT : DATA_STATE_RECV;
+ } else {
+ /* no DATA phase, IN STATUS phase from gadget */
+ udc->ep0_dir = EP_DIR_IN;
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget,
+ &udc->local_setup_buff) < 0)
+ ep0_stall(udc);
+ spin_lock(&udc->lock);
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
+ }
+ }
+}
+
+/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
+static void ep0_req_complete(struct mv_udc *udc,
+ struct mv_ep *ep0, struct mv_req *req)
+{
+ u32 new_addr;
+
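+	/*
+	 * A SET_ADDRESS request must not take effect until its status
+	 * stage completes, so the new address is programmed here rather
+	 * than in ch9setaddress().
+	 */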
+ if (udc->usb_state == USB_STATE_ADDRESS) {
+ /* set the new address */
+ new_addr = (u32)udc->dev_addr;
+ writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
+ &udc->op_regs->deviceaddr);
+ }
+
+ done(ep0, req, 0);
+
+ switch (udc->ep0_state) {
+ case DATA_STATE_XMIT:
+ /* receive status phase */
+ if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
+ ep0_stall(udc);
+ break;
+ case DATA_STATE_RECV:
+ /* send status phase */
+		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+ ep0_stall(udc);
+ break;
+ case WAIT_FOR_OUT_STATUS:
+ udc->ep0_state = WAIT_FOR_SETUP;
+ break;
+ case WAIT_FOR_SETUP:
+		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
+ break;
+ default:
+ ep0_stall(udc);
+ break;
+ }
+}
+
+static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
+{
+ u32 temp;
+ struct mv_dqh *dqh;
+
+ dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
+
+ /* Clear bit in ENDPTSETUPSTAT */
+ writel((1 << ep_num), &udc->op_regs->epsetupstat);
+
+	/* a hazard exists if a new setup packet arrives during the copy */
+ do {
+ /* Set Setup Tripwire */
+ temp = readl(&udc->op_regs->usbcmd);
+ writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+
+ /* Copy the setup packet to local buffer */
+ memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
+ } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
+
+ /* Clear Setup Tripwire */
+ temp = readl(&udc->op_regs->usbcmd);
+ writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+}
+
+static void irq_process_tr_complete(struct mv_udc *udc)
+{
+ u32 tmp, bit_pos;
+ int i, ep_num = 0, direction = 0;
+ struct mv_ep *curr_ep;
+ struct mv_req *curr_req, *temp_req;
+ int status;
+
+ /*
+ * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
+ * because the setup packets are to be read ASAP
+ */
+
+ /* Process all Setup packet received interrupts */
+ tmp = readl(&udc->op_regs->epsetupstat);
+
+ if (tmp) {
+ for (i = 0; i < udc->max_eps; i++) {
+ if (tmp & (1 << i)) {
+ get_setup_data(udc, i,
+ (u8 *)(&udc->local_setup_buff));
+ handle_setup_packet(udc, i,
+ &udc->local_setup_buff);
+ }
+ }
+ }
+
+ /* Don't clear the endpoint setup status register here.
+ * It is cleared as a setup packet is read out of the buffer
+ */
+
+ /* Process non-setup transaction complete interrupts */
+ tmp = readl(&udc->op_regs->epcomplete);
+
+ if (!tmp)
+ return;
+
+ writel(tmp, &udc->op_regs->epcomplete);
+
+ for (i = 0; i < udc->max_eps * 2; i++) {
+ ep_num = i >> 1;
+ direction = i % 2;
+
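+		/*
+		 * ENDPTCOMPLETE keeps RX (OUT) flags in bits [15:0] and
+		 * TX (IN) flags in bits [31:16]
+		 */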
+ bit_pos = 1 << (ep_num + 16 * direction);
+
+ if (!(bit_pos & tmp))
+ continue;
+
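+		/* index 1 is ep0 IN, which shares eps[0] with ep0 OUT */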
+ if (i == 1)
+ curr_ep = &udc->eps[0];
+ else
+ curr_ep = &udc->eps[i];
+		/* process the request queue until the first incomplete request */
+ list_for_each_entry_safe(curr_req, temp_req,
+ &curr_ep->queue, queue) {
+ status = process_ep_req(udc, i, curr_req);
+ if (status)
+ break;
+
+ /* write back status to req */
+ curr_req->req.status = status;
+
+ /* ep0 request completion */
+ if (ep_num == 0) {
+ ep0_req_complete(udc, curr_ep, curr_req);
+ break;
+ } else {
+ done(curr_ep, curr_req, status);
+ }
+ }
+ }
+}
+
+static void irq_process_reset(struct mv_udc *udc)
+{
+ u32 tmp;
+ unsigned int loops;
+
+ udc->ep0_dir = EP_DIR_OUT;
+ udc->ep0_state = WAIT_FOR_SETUP;
+ udc->remote_wakeup = 0; /* default to 0 on reset */
+
+	/* The device address occupies bits 25-31; clear it */
+ tmp = readl(&udc->op_regs->deviceaddr);
+ tmp &= ~(USB_DEVICE_ADDRESS_MASK);
+ writel(tmp, &udc->op_regs->deviceaddr);
+
+ /* Clear all the setup token semaphores */
+ tmp = readl(&udc->op_regs->epsetupstat);
+ writel(tmp, &udc->op_regs->epsetupstat);
+
+ /* Clear all the endpoint complete status bits */
+ tmp = readl(&udc->op_regs->epcomplete);
+ writel(tmp, &udc->op_regs->epcomplete);
+
+ /* wait until all endptprime bits cleared */
+ loops = LOOPS(PRIME_TIMEOUT);
+	while (readl(&udc->op_regs->epprime)) {
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+ "Timeout for ENDPTPRIME = 0x%x\n",
+ readl(&udc->op_regs->epprime));
+ break;
+ }
+ loops--;
+ udelay(LOOPS_USEC);
+ }
+
+ /* Write 1s to the Flush register */
+ writel((u32)~0, &udc->op_regs->epflush);
+
+ if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
+ dev_info(&udc->dev->dev, "usb bus reset\n");
+ udc->usb_state = USB_STATE_DEFAULT;
+ /* reset all the queues, stop all USB activities */
+ gadget_reset(udc, udc->driver);
+ } else {
+ dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
+			readl(&udc->op_regs->portsc[0]));
+
+		/* re-initialize: reset the controller */
+ udc_reset(udc);
+
+ /* reset all the queues, stop all USB activities */
+ stop_activity(udc, udc->driver);
+
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(udc);
+
+ /* enable interrupt and set controller to run state */
+ udc_start(udc);
+
+ udc->usb_state = USB_STATE_ATTACHED;
+ }
+}
+
+static void handle_bus_resume(struct mv_udc *udc)
+{
+ udc->usb_state = udc->resume_state;
+ udc->resume_state = 0;
+
+ /* report resume to the driver */
+ if (udc->driver) {
+ if (udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+}
+
+static void irq_process_suspend(struct mv_udc *udc)
+{
+ udc->resume_state = udc->usb_state;
+ udc->usb_state = USB_STATE_SUSPENDED;
+
+ if (udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+}
+
+static void irq_process_port_change(struct mv_udc *udc)
+{
+ u32 portsc;
+
+ portsc = readl(&udc->op_regs->portsc[0]);
+ if (!(portsc & PORTSCX_PORT_RESET)) {
+ /* Get the speed */
+ u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
+ switch (speed) {
+ case PORTSCX_PORT_SPEED_HIGH:
+ udc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case PORTSCX_PORT_SPEED_FULL:
+ udc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSCX_PORT_SPEED_LOW:
+ udc->gadget.speed = USB_SPEED_LOW;
+ break;
+ default:
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+ }
+
+ if (portsc & PORTSCX_PORT_SUSPEND) {
+ udc->resume_state = udc->usb_state;
+ udc->usb_state = USB_STATE_SUSPENDED;
+ if (udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ if (!(portsc & PORTSCX_PORT_SUSPEND)
+ && udc->usb_state == USB_STATE_SUSPENDED) {
+ handle_bus_resume(udc);
+ }
+
+ if (!udc->resume_state)
+ udc->usb_state = USB_STATE_DEFAULT;
+}
+
+static void irq_process_error(struct mv_udc *udc)
+{
+ /* Increment the error count */
+ udc->errors++;
+}
+
+static irqreturn_t mv_udc_irq(int irq, void *dev)
+{
+ struct mv_udc *udc = (struct mv_udc *)dev;
+ u32 status, intr;
+
+ /* Disable ISR when stopped bit is set */
+ if (udc->stopped)
+ return IRQ_NONE;
+
+ spin_lock(&udc->lock);
+
+ status = readl(&udc->op_regs->usbsts);
+ intr = readl(&udc->op_regs->usbintr);
+ status &= intr;
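+	/* service only interrupts that are both pending and enabled */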
+
+ if (status == 0) {
+ spin_unlock(&udc->lock);
+ return IRQ_NONE;
+ }
+
+	/* Clear all the interrupts that occurred */
+ writel(status, &udc->op_regs->usbsts);
+
+ if (status & USBSTS_ERR)
+ irq_process_error(udc);
+
+ if (status & USBSTS_RESET)
+ irq_process_reset(udc);
+
+ if (status & USBSTS_PORT_CHANGE)
+ irq_process_port_change(udc);
+
+ if (status & USBSTS_INT)
+ irq_process_tr_complete(udc);
+
+ if (status & USBSTS_SUSPEND)
+ irq_process_suspend(udc);
+
+ spin_unlock(&udc->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
+{
+ struct mv_udc *udc = (struct mv_udc *)dev;
+
+	/* polling VBUS and initializing the phy may take too long */
+ if (udc->qwork)
+ queue_work(udc->qwork, &udc->vbus_work);
+
+ return IRQ_HANDLED;
+}
+
+static void mv_udc_vbus_work(struct work_struct *work)
+{
+ struct mv_udc *udc;
+ unsigned int vbus;
+
+ udc = container_of(work, struct mv_udc, vbus_work);
+ if (!udc->pdata->vbus)
+ return;
+
+ vbus = udc->pdata->vbus->poll();
+ dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
+
+ if (vbus == VBUS_HIGH)
+ mv_udc_vbus_session(&udc->gadget, 1);
+ else if (vbus == VBUS_LOW)
+ mv_udc_vbus_session(&udc->gadget, 0);
+}
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+ struct mv_udc *udc;
+
+ udc = dev_get_drvdata(_dev);
+
+ complete(udc->done);
+}
+
+static int mv_udc_remove(struct platform_device *pdev)
+{
+ struct mv_udc *udc;
+
+ udc = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ if (udc->qwork)
+ destroy_workqueue(udc->qwork);
+
+ /* free memory allocated in probe */
+ dma_pool_destroy(udc->dtd_pool);
+
+ if (udc->ep_dqh)
+ dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
+ udc->ep_dqh, udc->ep_dqh_dma);
+
+ mv_udc_disable(udc);
+
+ /* free dev, wait for the release() finished */
+ wait_for_completion(udc->done);
+
+ return 0;
+}
+
+static int mv_udc_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mv_udc *udc;
+ int retval = 0;
+ struct resource *r;
+ size_t size;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
+ if (udc == NULL)
+ return -ENOMEM;
+
+ udc->done = &release_done;
+ udc->pdata = dev_get_platdata(&pdev->dev);
+ spin_lock_init(&udc->lock);
+
+ udc->dev = pdev;
+
+ if (pdata->mode == MV_USB_MODE_OTG) {
+ udc->transceiver = devm_usb_get_phy(&pdev->dev,
+ USB_PHY_TYPE_USB2);
+ if (IS_ERR(udc->transceiver)) {
+ retval = PTR_ERR(udc->transceiver);
+
+ if (retval == -ENXIO)
+ return retval;
+
+ udc->transceiver = NULL;
+ return -EPROBE_DEFER;
+ }
+ }
+
+	/* the udc has only one sysclk */
+ udc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc->clk))
+ return PTR_ERR(udc->clk);
+
+ r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ return -ENODEV;
+ }
+
+ udc->cap_regs = (struct mv_cap_regs __iomem *)
+ devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (udc->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ return -EBUSY;
+ }
+
+ r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ return -ENODEV;
+ }
+
+ udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (udc->phy_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ return -EBUSY;
+ }
+
+	/* we will access the controller registers, so enable the clock */
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
+ return retval;
+
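+	/*
+	 * The operational registers start CAPLENGTH bytes past the
+	 * capability register base.
+	 */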
+ udc->op_regs =
+ (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
+ + (readl(&udc->cap_regs->caplength_hciversion)
+ & CAPLENGTH_MASK));
+ udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
+
+ /*
+	 * Some platforms use USB to download the boot image and may not
+	 * disconnect the USB gadget before loading the kernel, so stop
+	 * the udc here first.
+ */
+ udc_stop(udc);
+ writel(0xFFFFFFFF, &udc->op_regs->usbsts);
+
+	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
+ size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
+ udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+ &udc->ep_dqh_dma, GFP_KERNEL);
+
+ if (udc->ep_dqh == NULL) {
+ dev_err(&pdev->dev, "allocate dQH memory failed\n");
+ retval = -ENOMEM;
+ goto err_disable_clock;
+ }
+ udc->ep_dqh_size = size;
+
+ /* create dTD dma_pool resource */
+ udc->dtd_pool = dma_pool_create("mv_dtd",
+ &pdev->dev,
+ sizeof(struct mv_dtd),
+ DTD_ALIGNMENT,
+ DMA_BOUNDARY);
+
+ if (!udc->dtd_pool) {
+ retval = -ENOMEM;
+ goto err_free_dma;
+ }
+
+	size = udc->max_eps * sizeof(struct mv_ep) * 2;
+ udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (udc->eps == NULL) {
+ retval = -ENOMEM;
+ goto err_destroy_dma;
+ }
+
+ /* initialize ep0 status request structure */
+ udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
+ GFP_KERNEL);
+ if (!udc->status_req) {
+ retval = -ENOMEM;
+ goto err_destroy_dma;
+ }
+ INIT_LIST_HEAD(&udc->status_req->queue);
+
+	/* allocate a small buffer so the status request has a valid address */
+	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
+	if (!udc->status_req->req.buf) {
+		retval = -ENOMEM;
+		goto err_destroy_dma;
+	}
+	udc->status_req->req.dma = DMA_ADDR_INVALID;
+
+ udc->resume_state = USB_STATE_NOTATTACHED;
+ udc->usb_state = USB_STATE_POWERED;
+ udc->ep0_dir = EP_DIR_OUT;
+ udc->remote_wakeup = 0;
+
+ r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ retval = -ENODEV;
+ goto err_destroy_dma;
+ }
+ udc->irq = r->start;
+ if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
+ IRQF_SHARED, driver_name, udc)) {
+ dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
+ udc->irq);
+ retval = -ENODEV;
+ goto err_destroy_dma;
+ }
+
+ /* initialize gadget structure */
+ udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
+ udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
+ INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
+ udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
+ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ udc->gadget.name = driver_name; /* gadget name */
+
+ eps_init(udc);
+
+	/* VBUS detect: we can disable/enable the clock on demand. */
+ if (udc->transceiver)
+ udc->clock_gating = 1;
+ else if (pdata->vbus) {
+ udc->clock_gating = 1;
+ retval = devm_request_threaded_irq(&pdev->dev,
+ pdata->vbus->irq, NULL,
+ mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
+ if (retval) {
+			dev_info(&pdev->dev,
+				"Cannot request irq for VBUS, "
+				"disabling clock gating\n");
+ udc->clock_gating = 0;
+ }
+
+ udc->qwork = create_singlethread_workqueue("mv_udc_queue");
+ if (!udc->qwork) {
+ dev_err(&pdev->dev, "cannot create workqueue\n");
+ retval = -ENOMEM;
+ goto err_destroy_dma;
+ }
+
+ INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
+ }
+
+ /*
+ * When clock gating is supported, we can disable clk and phy.
+ * If not, it means that VBUS detection is not supported, we
+ * have to enable vbus active all the time to let controller work.
+ */
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+ else
+ udc->vbus_active = 1;
+
+ retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+ gadget_release);
+ if (retval)
+ goto err_create_workqueue;
+
+ platform_set_drvdata(pdev, udc);
+	dev_info(&pdev->dev, "successfully probed UDC device, %s clock gating\n",
+		udc->clock_gating ? "with" : "without");
+
+ return 0;
+
+err_create_workqueue:
+ if (udc->qwork)
+ destroy_workqueue(udc->qwork);
+err_destroy_dma:
+ dma_pool_destroy(udc->dtd_pool);
+err_free_dma:
+ dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
+ udc->ep_dqh, udc->ep_dqh_dma);
+err_disable_clock:
+ mv_udc_disable_internal(udc);
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_udc_suspend(struct device *dev)
+{
+ struct mv_udc *udc;
+
+ udc = dev_get_drvdata(dev);
+
+	/* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (udc->pdata->vbus && udc->pdata->vbus->poll)
+ if (udc->pdata->vbus->poll() == VBUS_HIGH) {
+ dev_info(&udc->dev->dev, "USB cable is connected!\n");
+ return -EAGAIN;
+ }
+
+ /*
+	 * The udc can suspend only once the cable is unplugged, so the
+	 * clock_gating == 1 case needs no special handling here.
+ */
+ if (!udc->clock_gating) {
+ udc_stop(udc);
+
+ spin_lock_irq(&udc->lock);
+ /* stop all usb activities */
+ stop_activity(udc, udc->driver);
+ spin_unlock_irq(&udc->lock);
+
+ mv_udc_disable_internal(udc);
+ }
+
+ return 0;
+}
+
+static int mv_udc_resume(struct device *dev)
+{
+ struct mv_udc *udc;
+ int retval;
+
+ udc = dev_get_drvdata(dev);
+
+	/* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (!udc->clock_gating) {
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
+ return retval;
+
+ if (udc->driver && udc->softconnect) {
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops mv_udc_pm_ops = {
+ .suspend = mv_udc_suspend,
+ .resume = mv_udc_resume,
+};
+#endif
+
+static void mv_udc_shutdown(struct platform_device *pdev)
+{
+ struct mv_udc *udc;
+ u32 mode;
+
+ udc = platform_get_drvdata(pdev);
+ /* reset controller mode to IDLE */
+ mv_udc_enable(udc);
+ mode = readl(&udc->op_regs->usbmode);
+ mode &= ~3;
+ writel(mode, &udc->op_regs->usbmode);
+ mv_udc_disable(udc);
+}
+
+static struct platform_driver udc_driver = {
+ .probe = mv_udc_probe,
+ .remove = mv_udc_remove,
+ .shutdown = mv_udc_shutdown,
+ .driver = {
+ .name = "mv-udc",
+#ifdef CONFIG_PM
+ .pm = &mv_udc_pm_ops,
+#endif
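+	/* the Port Test Control field occupies PORTSC bits [19:16] */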
+ },
+};
+
+module_platform_driver(udc_driver);
+MODULE_ALIAS("platform:mv-udc");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
new file mode 100644
index 000000000..538c1b9a2
--- /dev/null
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -0,0 +1,2725 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for PLX NET2272 USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include "net2272.h"
+
+#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
+
+static const char driver_name[] = "net2272";
+static const char driver_vers[] = "2006 October 17/mainline";
+static const char driver_desc[] = DRIVER_DESC;
+
+static const char ep0name[] = "ep0";
+static const char * const ep_name[] = {
+ ep0name,
+ "ep-a", "ep-b", "ep-c",
+};
+
+#ifdef CONFIG_USB_NET2272_DMA
+/*
+ * use_dma: the NET2272 can use an external DMA controller.
+ * Note that since there is no generic DMA api, some functions,
+ * notably request_dma, start_dma, and cancel_dma will need to be
+ * modified for your platform's particular dma controller.
+ *
+ * If use_dma is disabled, pio will be used instead.
+ */
+static bool use_dma = false;
+module_param(use_dma, bool, 0644);
+
+/*
+ * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
+ * The NET2272 can only use dma for a single endpoint at a time.
+ * At some point this could be modified to allow either endpoint
+ * to take control of dma as it becomes available.
+ *
+ * Note that DMA should not be used on OUT endpoints unless it can
+ * be guaranteed that no short packets will arrive on an IN endpoint
+ * while the DMA operation is pending. Otherwise the OUT DMA will
+ * terminate prematurely (See NET2272 Errata 630-0213-0101)
+ */
+static ushort dma_ep = 1;
+module_param(dma_ep, ushort, 0644);
+
+/*
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
+ * mode 0 == Slow DREQ mode
+ * mode 1 == Fast DREQ mode
+ * mode 2 == Burst mode
+ */
+static ushort dma_mode = 2;
+module_param(dma_mode, ushort, 0644);
+#else
+#define use_dma 0
+#define dma_ep 1
+#define dma_mode 2
+#endif
+
+/*
+ * fifo_mode: net2272 buffer configuration:
+ * mode 0 == ep-{a,b,c} 512db each
+ * mode 1 == ep-a 1k, ep-{b,c} 512db
+ * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
+ * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
+ */
+static ushort fifo_mode;
+module_param(fifo_mode, ushort, 0644);
+
+/*
+ * enable_suspend: When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2272. Otherwise,
+ * USB suspend requests will be ignored. This is acceptable for
+ * self-powered devices. For bus powered devices set this to 1.
+ */
+static ushort enable_suspend;
+module_param(enable_suspend, ushort, 0644);
+
+static void assert_out_naking(struct net2272_ep *ep, const char *where)
+{
+ u8 tmp;
+
+#ifndef DEBUG
+ return;
+#endif
+
+ tmp = net2272_ep_read(ep, EP_STAT0);
+ if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+ dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
+ ep->ep.name, where, tmp);
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+ }
+}
+#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
+
+static void stop_out_naking(struct net2272_ep *ep)
+{
+ u8 tmp = net2272_ep_read(ep, EP_STAT0);
+
+ if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+}
+
+#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
+
+static char *type_string(u8 bmAttributes)
+{
+ switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK: return "bulk";
+ case USB_ENDPOINT_XFER_ISOC: return "iso";
+ case USB_ENDPOINT_XFER_INT: return "intr";
+ default: return "control";
+ }
+}
+
+static char *buf_state_string(unsigned state)
+{
+ switch (state) {
+ case BUFF_FREE: return "free";
+ case BUFF_VALID: return "valid";
+ case BUFF_LCL: return "local";
+ case BUFF_USB: return "usb";
+ default: return "unknown";
+ }
+}
+
+static char *dma_mode_string(void)
+{
+ if (!use_dma)
+ return "PIO";
+ switch (dma_mode) {
+ case 0: return "SLOW DREQ";
+ case 1: return "FAST DREQ";
+ case 2: return "BURST";
+ default: return "invalid";
+ }
+}
+
+static void net2272_dequeue_all(struct net2272_ep *);
+static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
+static int net2272_fifo_status(struct usb_ep *);
+
+static const struct usb_ep_ops net2272_ep_ops;
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct net2272 *dev;
+ struct net2272_ep *ep;
+ u32 max;
+ u8 tmp;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !desc || ep->desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ max = usb_endpoint_maxp(desc);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ _ep->maxpacket = max;
+ ep->desc = desc;
+
+ /* net2272_ep_reset() has already been called */
+ ep->stopped = 0;
+ ep->wedged = 0;
+
+ /* set speed-dependent max packet */
+ net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
+ net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
+
+ /* set type, direction, address; reset fifo counters */
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+ tmp = usb_endpoint_type(desc);
+ if (usb_endpoint_xfer_bulk(desc)) {
+ /* catch some particularly blatant driver bugs */
+ if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
+ (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ERANGE;
+ }
+ }
+ ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
+ tmp <<= ENDPOINT_TYPE;
+ tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
+ tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
+ tmp |= (1 << ENDPOINT_ENABLE);
+
+ /* for OUT transfers, block the rx fifo until a read is posted */
+ ep->is_in = usb_endpoint_dir_in(desc);
+ if (!ep->is_in)
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+ net2272_ep_write(ep, EP_CFG, tmp);
+
+ /* enable irqs */
+ tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
+ net2272_write(dev, IRQENB0, tmp);
+
+ tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | net2272_ep_read(ep, EP_IRQENB);
+ net2272_ep_write(ep, EP_IRQENB, tmp);
+
+ tmp = desc->bEndpointAddress;
+ dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
+ _ep->name, tmp & 0x0f, PIPEDIR(tmp),
+ type_string(desc->bmAttributes), max,
+ net2272_ep_read(ep, EP_CFG));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static void net2272_ep_reset(struct net2272_ep *ep)
+{
+ u8 tmp;
+
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.ops = &net2272_ep_ops;
+
+ /* disable irqs, endpoint */
+ net2272_ep_write(ep, EP_IRQENB, 0);
+
+ /* init to our chosen defaults, notably so that we NAK OUT
+ * packets until the driver queues a read.
+ */
+ tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
+ net2272_ep_write(ep, EP_RSPSET, tmp);
+
+ tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
+ if (ep->num != 0)
+ tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
+
+ net2272_ep_write(ep, EP_RSPCLR, tmp);
+
+ /* scrub most status bits, and flush any fifo state */
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << DATA_IN_TOKEN_INTERRUPT)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+
+ net2272_ep_write(ep, EP_STAT1,
+ (1 << TIMEOUT)
+ | (1 << USB_OUT_ACK_SENT)
+ | (1 << USB_OUT_NAK_SENT)
+ | (1 << USB_IN_ACK_RCVD)
+ | (1 << USB_IN_NAK_SENT)
+ | (1 << USB_STALL_SENT)
+ | (1 << LOCAL_OUT_ZLP)
+ | (1 << BUFFER_FLUSH));
+
+ /* fifo size is handled separately */
+}
+
+static int net2272_disable(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !ep->desc || _ep->name == ep0name)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ net2272_dequeue_all(ep);
+ net2272_ep_reset(ep);
+
+ dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static struct usb_request *
+net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct net2272_request *req;
+
+ if (!_ep)
+ return NULL;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void
+net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2272_request *req;
+
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct net2272_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+static void
+net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
+{
+ struct net2272 *dev;
+ unsigned stopped = ep->stopped;
+
+ if (ep->num == 0) {
+ if (ep->dev->protocol_stall) {
+ ep->stopped = 1;
+ set_halt(ep);
+ }
+ allow_status(ep);
+ }
+
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ if (use_dma && ep->dma)
+ usb_gadget_unmap_request(&dev->gadget, &req->req,
+ ep->is_in);
+
+ if (status && status != -ESHUTDOWN)
+ dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length, req->req.buf);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&dev->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+}
+
+static int
+net2272_write_packet(struct net2272_ep *ep, u8 *buf,
+ struct net2272_request *req, unsigned max)
+{
+ u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+ u16 *bufp;
+ unsigned length, count;
+ u8 tmp;
+
+ length = min(req->req.length - req->req.actual, max);
+ req->req.actual += length;
+
+ dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
+ ep->ep.name, req, max, length,
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+ count = length;
+ bufp = (u16 *)buf;
+
+ while (likely(count >= 2)) {
+ /* no byte-swap required; chip endian set during init */
+ writew(*bufp++, ep_data);
+ count -= 2;
+ }
+ buf = (u8 *)bufp;
+
+ /* write final byte by placing the NET2272 into 8-bit mode */
+ if (unlikely(count)) {
+ tmp = net2272_read(ep->dev, LOCCTL);
+ net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
+ writeb(*buf, ep_data);
+ net2272_write(ep->dev, LOCCTL, tmp);
+ }
+ return length;
+}
+
+/* returns: 0: still running, 1: completed, negative: errno */
+static int
+net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+ u8 *buf;
+ unsigned count, max;
+ int status;
+
+ dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
+ ep->ep.name, req->req.actual, req->req.length);
+
+ /*
+ * Keep loading the endpoint until the final packet is loaded,
+ * or the endpoint buffer is full.
+ */
+ top:
+ /*
+ * Clear interrupt status
+ * - Packet Transmitted interrupt will become set again when the
+ * host successfully takes another packet
+ */
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+ while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ /* force pagesel */
+ net2272_ep_read(ep, EP_STAT0);
+
+ max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
+ (net2272_ep_read(ep, EP_AVAIL0));
+
+ if (max < ep->ep.maxpacket)
+ max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+ | (net2272_ep_read(ep, EP_AVAIL0));
+
+ count = net2272_write_packet(ep, buf, req, max);
+ /* see if we are done */
+ if (req->req.length == req->req.actual) {
+ /* validate short or zlp packet */
+ if (count < ep->ep.maxpacket)
+ set_fifo_bytecount(ep, 0);
+ net2272_done(ep, req, 0);
+
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request,
+ queue);
+ status = net2272_kick_dma(ep, req);
+
+ if (status < 0)
+ if ((net2272_ep_read(ep, EP_STAT0)
+ & (1 << BUFFER_EMPTY)))
+ goto top;
+ }
+ return 1;
+ }
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+ }
+ return 0;
+}
+
+static void
+net2272_out_flush(struct net2272_ep *ep)
+{
+ ASSERT_OUT_NAKING(ep);
+
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static int
+net2272_read_packet(struct net2272_ep *ep, u8 *buf,
+ struct net2272_request *req, unsigned avail)
+{
+ u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+ unsigned is_short;
+ u16 *bufp;
+
+ req->req.actual += avail;
+
+ dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
+ ep->ep.name, req, avail,
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+ is_short = (avail < ep->ep.maxpacket);
+
+ if (unlikely(avail == 0)) {
+ /* remove any zlp from the buffer */
+ (void)readw(ep_data);
+ return is_short;
+ }
+
+	/* the fifo is read 16 bits at a time; round up to capture the final byte */
+ if (unlikely(avail % 2))
+ avail++;
+ bufp = (u16 *)buf;
+
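+	/* drain the fifo one 16-bit word at a time */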
+ do {
+ *bufp++ = readw(ep_data);
+ avail -= 2;
+ } while (avail);
+
+ /*
+	 * To avoid a false endpoint-available race condition, EP_STAT0
+	 * must be read twice in the case of a short transfer
+ */
+ if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
+ net2272_ep_read(ep, EP_STAT0);
+
+ return is_short;
+}
+
+static int
+net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+ u8 *buf;
+ unsigned is_short;
+ int count;
+ int tmp;
+ int cleanup = 0;
+
+ dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
+ ep->ep.name, req->req.actual, req->req.length);
+
+ top:
+ do {
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+
+ count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+ | net2272_ep_read(ep, EP_AVAIL0);
+
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+ (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+
+ tmp = req->req.length - req->req.actual;
+
+ if (count > tmp) {
+ if ((tmp % ep->ep.maxpacket) != 0) {
+ dev_err(ep->dev->dev,
+ "%s out fifo %d bytes, expected %d\n",
+ ep->ep.name, count, tmp);
+ cleanup = 1;
+ }
+ count = (tmp > 0) ? tmp : 0;
+ }
+
+ is_short = net2272_read_packet(ep, buf, req, count);
+
+ /* completion */
+ if (unlikely(cleanup || is_short ||
+ req->req.actual == req->req.length)) {
+
+ if (cleanup) {
+ net2272_out_flush(ep);
+ net2272_done(ep, req, -EOVERFLOW);
+ } else
+ net2272_done(ep, req, 0);
+
+ /* re-initialize endpoint transfer registers
+ * otherwise they may result in erroneous pre-validation
+ * for subsequent control reads
+ */
+ if (unlikely(ep->num == 0)) {
+ net2272_ep_write(ep, EP_TRANSFER2, 0);
+ net2272_ep_write(ep, EP_TRANSFER1, 0);
+ net2272_ep_write(ep, EP_TRANSFER0, 0);
+ }
+
+ if (!list_empty(&ep->queue)) {
+ int status;
+
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ status = net2272_kick_dma(ep, req);
+ if ((status < 0) &&
+ !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
+ goto top;
+ }
+ return 1;
+ }
+ } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
+
+ return 0;
+}
+
+static void
+net2272_pio_advance(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+
+ if (unlikely(list_empty(&ep->queue)))
+ return;
+
+ req = list_entry(ep->queue.next, struct net2272_request, queue);
+ (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
+ unsigned len, unsigned dir)
+{
+ dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
+ ep, buf, len, dir);
+
+ /* The NET2272 only supports a single dma channel */
+ if (dev->dma_busy)
+ return -EBUSY;
+ /*
+ * EP_TRANSFER (used to determine the number of bytes received
+ * in an OUT transfer) is 24 bits wide; don't ask for more than that.
+ */
+ if ((dir == 1) && (len > 0x1000000))
+ return -EINVAL;
+
+ dev->dma_busy = 1;
+
+ /* initialize platform's dma */
+#ifdef CONFIG_USB_PCI
+ /* NET2272 addr, buffer addr, length, etc. */
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ /* Setup PLX 9054 DMA mode */
+ writel((1 << LOCAL_BUS_WIDTH) |
+ (1 << TA_READY_INPUT_ENABLE) |
+ (0 << LOCAL_BURST_ENABLE) |
+ (1 << DONE_INTERRUPT_ENABLE) |
+ (1 << LOCAL_ADDRESSING_MODE) |
+ (1 << DEMAND_MODE) |
+ (1 << DMA_EOT_ENABLE) |
+ (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
+ (1 << DMA_CHANNEL_INTERRUPT_SELECT),
+ dev->rdk1.plx9054_base_addr + DMAMODE0);
+
+ writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
+ writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
+ writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
+ writel((dir << DIRECTION_OF_TRANSFER) |
+ (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
+ dev->rdk1.plx9054_base_addr + DMADPR0);
+ writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
+ readl(dev->rdk1.plx9054_base_addr + INTCSR),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ break;
+ }
+#endif
+
+ net2272_write(dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (1 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (dev->dma_eot_polarity << EOT_POLARITY) |
+ (dev->dma_dack_polarity << DACK_POLARITY) |
+ (dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((ep >> 1) << DMA_ENDPOINT_SELECT));
+
+ (void) net2272_read(dev, SCRATCH);
+
+ return 0;
+}
+
+static void
+net2272_start_dma(struct net2272 *dev)
+{
+ /* start platform's dma controller */
+#ifdef CONFIG_USB_PCI
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+ break;
+ }
+#endif
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
+{
+ unsigned size;
+ u8 tmp;
+
+ if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
+ return -EINVAL;
+
+ /* don't use dma for odd-length transfers
+ * otherwise, we'd need to deal with the last byte with pio
+ */
+ if (req->req.length & 1)
+ return -EINVAL;
+
+ dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
+ ep->ep.name, req, (unsigned long long) req->req.dma);
+
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+ /* The NET2272 can only use DMA on one endpoint at a time */
+ if (ep->dev->dma_busy)
+ return -EBUSY;
+
+ /* Make sure we only DMA an even number of bytes (we'll use
+ * pio to complete the transfer)
+ */
+ size = req->req.length;
+ size &= ~1;
+
+ /* device-to-host transfer */
+ if (ep->is_in) {
+ /* initialize platform's dma controller */
+ if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
+ /* unable to obtain DMA channel; return error and use pio mode */
+ return -EBUSY;
+ req->req.actual += size;
+
+ /* host-to-device transfer */
+ } else {
+ tmp = net2272_ep_read(ep, EP_STAT0);
+
+ /* initialize platform's dma controller */
+ if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
+ /* unable to obtain DMA channel; return error and use pio mode */
+ return -EBUSY;
+
+ if (!(tmp & (1 << BUFFER_EMPTY)))
+ ep->not_empty = 1;
+ else
+ ep->not_empty = 0;
+
+
+ /* allow the endpoint's buffer to fill */
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+
+ /* this transfer completed and data's already in the fifo
+ * return error so pio gets used.
+ */
+ if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+
+ /* deassert dreq */
+ net2272_write(ep->dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (0 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (ep->dev->dma_eot_polarity << EOT_POLARITY) |
+ (ep->dev->dma_dack_polarity << DACK_POLARITY) |
+ (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
+
+ return -EBUSY;
+ }
+ }
+
+ /* Don't use per-packet interrupts: use dma interrupts only */
+ net2272_ep_write(ep, EP_IRQENB, 0);
+
+ net2272_start_dma(ep->dev);
+
+ return 0;
+}
+
+static void net2272_cancel_dma(struct net2272 *dev)
+{
+#ifdef CONFIG_USB_PCI
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
+ writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
+ while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
+ (1 << CHANNEL_DONE)))
+			continue;	/* wait for dma to stabilize */
+
+ /* dma abort generates an interrupt */
+ writeb(1 << CHANNEL_CLEAR_INTERRUPT,
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+ break;
+ }
+#endif
+
+ dev->dma_busy = 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct net2272_request *req;
+ struct net2272_ep *ep;
+ struct net2272 *dev;
+ unsigned long flags;
+ int status = -1;
+ u8 s;
+
+ req = container_of(_req, struct net2272_request, req);
+ if (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue))
+ return -EINVAL;
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* set up dma mapping in case the caller didn't */
+ if (use_dma && ep->dma) {
+ status = usb_gadget_map_request(&dev->gadget, _req,
+ ep->is_in);
+ if (status)
+ return status;
+ }
+
+ dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
+ _ep->name, _req, _req->length, _req->buf,
+ (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ /* maybe there's no control data, just status ack */
+ if (ep->num == 0 && _req->length == 0) {
+ net2272_done(ep, req, 0);
+ dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
+ goto done;
+ }
+
+		/* Return the zlp now; don't let it block subsequent packets */
+ s = net2272_ep_read(ep, EP_STAT0);
+ if (s & (1 << BUFFER_EMPTY)) {
+			/* Buffer is empty; check for a blocking zlp and handle it */
+ if ((s & (1 << NAK_OUT_PACKETS)) &&
+ net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
+ dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
+ /*
+ * Request is going to terminate with a short packet ...
+ * hope the client is ready for it!
+ */
+ status = net2272_read_fifo(ep, req);
+ /* clear short packet naking */
+ net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
+ goto done;
+ }
+ }
+
+ /* try dma first */
+ status = net2272_kick_dma(ep, req);
+
+ if (status < 0) {
+ /* dma failed (most likely in use by another endpoint)
+ * fallback to pio
+ */
+ status = 0;
+
+ if (ep->is_in)
+ status = net2272_write_fifo(ep, req);
+ else {
+ s = net2272_ep_read(ep, EP_STAT0);
+ if ((s & (1 << BUFFER_EMPTY)) == 0)
+ status = net2272_read_fifo(ep, req);
+ }
+
+ if (unlikely(status != 0)) {
+ if (status > 0)
+ status = 0;
+ req = NULL;
+ }
+ }
+ }
+ if (likely(req))
+ list_add_tail(&req->queue, &ep->queue);
+
+ if (likely(!list_empty(&ep->queue)))
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+ done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
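+
+/*
+ * A minimal sketch (not part of this driver) of how a gadget driver
+ * reaches net2272_queue() through the usb_ep API; my_ep, buf, len and
+ * my_complete are the caller's own, hypothetical names:
+ *
+ *	struct usb_request *req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
+ *	if (req) {
+ *		req->buf = buf;
+ *		req->length = len;
+ *		req->complete = my_complete;
+ *		if (usb_ep_queue(my_ep, req, GFP_ATOMIC))
+ *			usb_ep_free_request(my_ep, req);
+ *	}
+ */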
+
+/* dequeue ALL requests */
+static void
+net2272_dequeue_all(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+
+ /* called with spinlock held */
+ ep->stopped = 1;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request,
+ queue);
+ net2272_done(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* dequeue JUST ONE request */
+static int
+net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req = NULL, *iter;
+ unsigned long flags;
+ int stopped;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0) || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ stopped = ep->stopped;
+ ep->stopped = 1;
+
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ ep->stopped = stopped;
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return -EINVAL;
+ }
+
+ /* queue head may be partially complete */
+ if (ep->queue.next == &req->queue) {
+ dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
+ net2272_done(ep, req, -ECONNRESET);
+ }
+ ep->stopped = stopped;
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
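+
+/*
+ * Callers reach this through usb_ep_dequeue(); note that, as coded
+ * above, only a request at the head of the endpoint queue is actually
+ * completed (with -ECONNRESET).
+ */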
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+ struct net2272_ep *ep;
+ unsigned long flags;
+ int ret = 0;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -EINVAL;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+ if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ if (!list_empty(&ep->queue))
+ ret = -EAGAIN;
+ else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
+ ret = -EAGAIN;
+ else {
+ dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
+ value ? "set" : "clear",
+ wedged ? "wedge" : "halt");
+ /* set/clear */
+ if (value) {
+ if (ep->num == 0)
+ ep->dev->protocol_stall = 1;
+ else
+ set_halt(ep);
+ if (wedged)
+ ep->wedged = 1;
+ } else {
+ clear_halt(ep);
+ ep->wedged = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+ return ret;
+}
+
+static int
+net2272_set_halt(struct usb_ep *_ep, int value)
+{
+ return net2272_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int
+net2272_set_wedge(struct usb_ep *_ep)
+{
+ if (!_ep || _ep->name == ep0name)
+ return -EINVAL;
+ return net2272_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int
+net2272_fifo_status(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+ u16 avail;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -ENODEV;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
+ avail |= net2272_ep_read(ep, EP_AVAIL0);
+ if (avail > ep->fifo_size)
+ return -EOVERFLOW;
+ if (ep->is_in)
+ avail = ep->fifo_size - avail;
+ return avail;
+}
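+
+/*
+ * Worked example of the arithmetic above, assuming EP_AVAIL reports
+ * free fifo space (as the subtraction implies): on a 512-byte IN fifo
+ * where EP_AVAIL reads back 412, this returns 512 - 412 = 100, i.e.
+ * 100 bytes are still queued in the fifo for transmission.
+ */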
+
+static void
+net2272_fifo_flush(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return;
+
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static const struct usb_ep_ops net2272_ep_ops = {
+ .enable = net2272_enable,
+ .disable = net2272_disable,
+
+ .alloc_request = net2272_alloc_request,
+ .free_request = net2272_free_request,
+
+ .queue = net2272_queue,
+ .dequeue = net2272_dequeue,
+
+ .set_halt = net2272_set_halt,
+ .set_wedge = net2272_set_wedge,
+ .fifo_status = net2272_fifo_status,
+ .fifo_flush = net2272_fifo_flush,
+};
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_get_frame(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ unsigned long flags;
+ u16 ret;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = net2272_read(dev, FRAME1) << 8;
+ ret |= net2272_read(dev, FRAME0);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int
+net2272_wakeup(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ u8 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = net2272_read(dev, USBCTL0);
+ if (tmp & (1 << IO_WAKEUP_ENABLE))
+ net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static int
+net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+ if (!_gadget)
+ return -ENODEV;
+
+ _gadget->is_selfpowered = (value != 0);
+
+ return 0;
+}
+
+static int
+net2272_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct net2272 *dev;
+ u8 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = net2272_read(dev, USBCTL0);
+ dev->softconnect = (is_on != 0);
+ if (is_on)
+ tmp |= (1 << USB_DETECT_ENABLE);
+ else
+ tmp &= ~(1 << USB_DETECT_ENABLE);
+ net2272_write(dev, USBCTL0, tmp);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static int net2272_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver);
+static int net2272_stop(struct usb_gadget *_gadget);
+static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
+
+static const struct usb_gadget_ops net2272_ops = {
+ .get_frame = net2272_get_frame,
+ .wakeup = net2272_wakeup,
+ .set_selfpowered = net2272_set_selfpowered,
+ .pullup = net2272_pullup,
+ .udc_start = net2272_start,
+ .udc_stop = net2272_stop,
+ .udc_async_callbacks = net2272_async_callbacks,
+};
+
+/*---------------------------------------------------------------------------*/
+
+static ssize_t
+registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct net2272 *dev;
+ char *next;
+ unsigned size, t;
+ unsigned long flags;
+ u8 t1, t2;
+ int i;
+ const char *s;
+
+ dev = dev_get_drvdata(_dev);
+ next = buf;
+ size = PAGE_SIZE;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* Main Control Registers */
+	t = scnprintf(next, size, "%s version %s, "
+ "chiprev %02x, locctl %02x\n"
+ "irqenb0 %02x irqenb1 %02x "
+ "irqstat0 %02x irqstat1 %02x\n",
+ driver_name, driver_vers, dev->chiprev,
+ net2272_read(dev, LOCCTL),
+ net2272_read(dev, IRQENB0),
+ net2272_read(dev, IRQENB1),
+ net2272_read(dev, IRQSTAT0),
+ net2272_read(dev, IRQSTAT1));
+ size -= t;
+ next += t;
+
+ /* DMA */
+ t1 = net2272_read(dev, DMAREQ);
+ t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
+ t1, ep_name[(t1 & 0x01) + 1],
+ t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
+ t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
+ t1 & (1 << DMA_REQUEST) ? "req " : "",
+ t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
+ size -= t;
+ next += t;
+
+ /* USB Control Registers */
+ t1 = net2272_read(dev, USBCTL1);
+ if (t1 & (1 << VBUS_PIN)) {
+ if (t1 & (1 << USB_HIGH_SPEED))
+ s = "high speed";
+ else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ s = "powered";
+ else
+ s = "full speed";
+ } else
+ s = "not attached";
+ t = scnprintf(next, size,
+ "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
+ net2272_read(dev, USBCTL0), t1,
+ net2272_read(dev, OURADDR), s);
+ size -= t;
+ next += t;
+
+ /* Endpoint Registers */
+ for (i = 0; i < 4; ++i) {
+ struct net2272_ep *ep;
+
+ ep = &dev->ep[i];
+ if (i && !ep->desc)
+ continue;
+
+ t1 = net2272_ep_read(ep, EP_CFG);
+ t2 = net2272_ep_read(ep, EP_RSPSET);
+ t = scnprintf(next, size,
+ "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
+ "irqenb %02x\n",
+ ep->ep.name, t1, t2,
+ (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
+ (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
+ (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
+ (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
+ (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
+ (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
+ (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
+ (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
+ net2272_ep_read(ep, EP_IRQENB));
+ size -= t;
+ next += t;
+
+ t = scnprintf(next, size,
+ "\tstat0 %02x stat1 %02x avail %04x "
+ "(ep%d%s-%s)%s\n",
+ net2272_ep_read(ep, EP_STAT0),
+ net2272_ep_read(ep, EP_STAT1),
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
+ t1 & 0x0f,
+ ep->is_in ? "in" : "out",
+ type_string(t1 >> 5),
+ ep->stopped ? "*" : "");
+ size -= t;
+ next += t;
+
+ t = scnprintf(next, size,
+ "\tep_transfer %06x\n",
+ ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
+ ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
+ ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
+ size -= t;
+ next += t;
+
+ t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
+ t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
+ t = scnprintf(next, size,
+ "\tbuf-a %s buf-b %s\n",
+ buf_state_string(t1),
+ buf_state_string(t2));
+ size -= t;
+ next += t;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR_RO(registers);
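+
+/*
+ * The attribute above gives a register dump for debugging; read it from
+ * user space with something like (the exact path depends on the bus the
+ * device is bound to):
+ *
+ *	cat /sys/devices/.../registers
+ */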
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_set_fifo_mode(struct net2272 *dev, int mode)
+{
+ u8 tmp;
+
+ tmp = net2272_read(dev, LOCCTL) & 0x3f;
+ tmp |= (mode << 6);
+ net2272_write(dev, LOCCTL, tmp);
+
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+ /* always ep-a, ep-c ... maybe not ep-b */
+ list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
+
+ switch (mode) {
+ case 0:
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
+ break;
+ case 1:
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = 1024;
+ dev->ep[2].fifo_size = 512;
+ break;
+ case 2:
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
+ break;
+ case 3:
+ dev->ep[1].fifo_size = 1024;
+ break;
+ }
+
+	/* ep-c always has two 512-byte buffers */
+ list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[3].fifo_size = 512;
+}
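+
+/*
+ * Summary of the fifo_mode settings above; the ep-a/ep-b sizes follow
+ * the BUFFER_CONFIGURATION encoding in net2272.h (EPA1024DB presumably
+ * meaning ep-a 1024, double-buffered), and ep-c is fixed:
+ *
+ *	mode 0: ep-a 512,  ep-b 512
+ *	mode 1: ep-a 1024, ep-b 512
+ *	mode 2: ep-a 1024, ep-b 1024
+ *	mode 3: ep-a 1024, ep-b unavailable
+ */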
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_usb_reset(struct net2272 *dev)
+{
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ net2272_cancel_dma(dev);
+
+ net2272_write(dev, IRQENB0, 0);
+ net2272_write(dev, IRQENB1, 0);
+
+ /* clear irq state */
+ net2272_write(dev, IRQSTAT0, 0xff);
+ net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
+
+ net2272_write(dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (0 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (dev->dma_eot_polarity << EOT_POLARITY) |
+ (dev->dma_dack_polarity << DACK_POLARITY) |
+ (dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
+
+ net2272_cancel_dma(dev);
+ net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
+
+	/* Set the NET2272 ep fifo data width to 16-bit mode. For correct
+	 * byte order, note that higher-level gadget drivers are expected to
+	 * convert data to little endian; enable byte swapping for your local
+	 * bus/cpu, if needed, by setting BYTE_SWAP in LOCCTL here.
+	 */
+ net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
+ net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
+}
+
+static void
+net2272_usb_reinit(struct net2272 *dev)
+{
+ int i;
+
+ /* basic endpoint init */
+ for (i = 0; i < 4; ++i) {
+ struct net2272_ep *ep = &dev->ep[i];
+
+ ep->ep.name = ep_name[i];
+ ep->dev = dev;
+ ep->num = i;
+ ep->not_empty = 0;
+
+ if (use_dma && ep->num == dma_ep)
+ ep->dma = 1;
+
+ if (i > 0 && i <= 3)
+ ep->fifo_size = 512;
+ else
+ ep->fifo_size = 64;
+ net2272_ep_reset(ep);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ }
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
+
+ dev->gadget.ep0 = &dev->ep[0].ep;
+ dev->ep[0].stopped = 0;
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+}
+
+static void
+net2272_ep0_start(struct net2272 *dev)
+{
+ struct net2272_ep *ep0 = &dev->ep[0];
+
+ net2272_ep_write(ep0, EP_RSPSET,
+ (1 << NAK_OUT_PACKETS_MODE) |
+ (1 << ALT_NAK_OUT_PACKETS));
+ net2272_ep_write(ep0, EP_RSPCLR,
+ (1 << HIDE_STATUS_PHASE) |
+ (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
+ net2272_write(dev, USBCTL0,
+ (dev->softconnect << USB_DETECT_ENABLE) |
+ (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
+ (1 << IO_WAKEUP_ENABLE));
+ net2272_write(dev, IRQENB0,
+ (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
+ (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
+ (1 << DMA_DONE_INTERRUPT_ENABLE));
+ net2272_write(dev, IRQENB1,
+ (1 << VBUS_INTERRUPT_ENABLE) |
+ (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
+ (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
+}
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests. then usb traffic follows until a
+ * disconnect is reported. then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int net2272_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct net2272 *dev;
+ unsigned i;
+
+ if (!driver || !driver->setup ||
+ driver->max_speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ for (i = 0; i < 4; ++i)
+ dev->ep[i].irqs = 0;
+ /* hook up the driver ... */
+ dev->softconnect = 1;
+ dev->driver = driver;
+
+ /* ... then enable host detection and ep0; and we're ready
+ * for set_configuration as well as eventual disconnect.
+ */
+ net2272_ep0_start(dev);
+
+ return 0;
+}
+
+static void
+stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
+{
+ int i;
+
+ /* don't disconnect if it's not connected */
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+
+ /* stop hardware; prevent new request submissions;
+ * and kill any outstanding requests.
+ */
+ net2272_usb_reset(dev);
+ for (i = 0; i < 4; ++i)
+ net2272_dequeue_all(&dev->ep[i]);
+
+ /* report disconnect; the driver is already quiesced */
+ if (dev->async_callbacks && driver) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
+ net2272_usb_reinit(dev);
+}
+
+static int net2272_stop(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ unsigned long flags;
+
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ stop_activity(dev, NULL);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev->driver = NULL;
+
+ return 0;
+}
+
+static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
+{
+ struct net2272 *dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irq(&dev->lock);
+ dev->async_callbacks = enable;
+ spin_unlock_irq(&dev->lock);
+}
+
+/*---------------------------------------------------------------------------*/
+/* handle ep-a/ep-b dma completions */
+static void
+net2272_handle_dma(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+ unsigned len;
+ int status;
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ else
+ req = NULL;
+
+ dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
+
+ /* Ensure DREQ is de-asserted */
+ net2272_write(ep->dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID)
+ | (0 << DMA_REQUEST_ENABLE)
+ | (1 << DMA_CONTROL_DACK)
+ | (ep->dev->dma_eot_polarity << EOT_POLARITY)
+ | (ep->dev->dma_dack_polarity << DACK_POLARITY)
+ | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
+ | (ep->dma << DMA_ENDPOINT_SELECT));
+
+ ep->dev->dma_busy = 0;
+
+ net2272_ep_write(ep, EP_IRQENB,
+ (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | net2272_ep_read(ep, EP_IRQENB));
+
+ /* device-to-host transfer completed */
+ if (ep->is_in) {
+ /* validate a short packet or zlp if necessary */
+ if ((req->req.length % ep->ep.maxpacket != 0) ||
+ req->req.zero)
+ set_fifo_bytecount(ep, 0);
+
+ net2272_done(ep, req, 0);
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ status = net2272_kick_dma(ep, req);
+ if (status < 0)
+ net2272_pio_advance(ep);
+ }
+
+ /* host-to-device transfer completed */
+ } else {
+ /* terminated with a short packet? */
+ if (net2272_read(ep->dev, IRQSTAT0) &
+ (1 << DMA_DONE_INTERRUPT)) {
+ /* abort system dma */
+ net2272_cancel_dma(ep->dev);
+ }
+
+ /* EP_TRANSFER will contain the number of bytes
+ * actually received.
+ * NOTE: There is no overflow detection on EP_TRANSFER:
+ * We can't deal with transfers larger than 2^24 bytes!
+ */
+ len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
+ | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
+ | (net2272_ep_read(ep, EP_TRANSFER0));
+
+ if (ep->not_empty)
+ len += 4;
+
+ req->req.actual += len;
+
+ /* get any remaining data */
+ net2272_pio_advance(ep);
+ }
+}
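+
+/*
+ * Worked example of the EP_TRANSFER read-back above: register values
+ * EP_TRANSFER2 = 0x01, EP_TRANSFER1 = 0x00, EP_TRANSFER0 = 0x04
+ * assemble to 0x010004, i.e. 65540 bytes received.
+ */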
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_handle_ep(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+ u8 stat0, stat1;
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ else
+ req = NULL;
+
+ /* ack all, and handle what we care about */
+ stat0 = net2272_ep_read(ep, EP_STAT0);
+ stat1 = net2272_ep_read(ep, EP_STAT1);
+ ep->irqs++;
+
+ dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
+ ep->ep.name, stat0, stat1, req ? &req->req : NULL);
+
+ net2272_ep_write(ep, EP_STAT0, stat0 &
+ ~((1 << NAK_OUT_PACKETS)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
+ net2272_ep_write(ep, EP_STAT1, stat1);
+
+ /* data packet(s) received (in the fifo, OUT)
+ * direction must be validated, otherwise control read status phase
+ * could be interpreted as a valid packet
+ */
+ if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
+ net2272_pio_advance(ep);
+ /* data packet(s) transmitted (IN) */
+ else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
+ net2272_pio_advance(ep);
+}
+
+static struct net2272_ep *
+net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
+{
+ struct net2272_ep *ep;
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &dev->ep[0];
+
+ list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+ u8 bEndpointAddress;
+
+ if (!ep->desc)
+ continue;
+ bEndpointAddress = ep->desc->bEndpointAddress;
+ if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+ continue;
+ if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+ return ep;
+ }
+ return NULL;
+}
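+
+/*
+ * Example of the matching above: wIndex 0x81 (IN, endpoint 1) matches
+ * an endpoint whose descriptor has bEndpointAddress 0x81; the direction
+ * bit and the low nibble of the endpoint number must both agree.
+ */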
+
+/*
+ * USB Test Packet:
+ * JKJKJKJK * 9
+ * JJKKJJKK * 8
+ * JJJJKKKK * 8
+ * JJJJJJJKKKKKKK * 8
+ * JJJJJJJK * 8
+ * {JKKKKKKK * 10}, JK
+ */
+static const u8 net2272_test_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
+ 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
+};
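+
+/*
+ * This is the standard test packet payload defined in the USB 2.0
+ * specification (section 7.1.20); the J/K sequences in the comment
+ * above describe the NRZI line states these bytes produce on the wire.
+ */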
+
+static void
+net2272_set_test_mode(struct net2272 *dev, int mode)
+{
+ int i;
+
+ /* Disable all net2272 interrupts:
+ * Nothing but a power cycle should stop the test.
+ */
+ net2272_write(dev, IRQENB0, 0x00);
+ net2272_write(dev, IRQENB1, 0x00);
+
+	/* Force transceiver to high-speed */
+ net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
+
+ net2272_write(dev, PAGESEL, 0);
+ net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
+ net2272_write(dev, EP_RSPCLR,
+ (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
+ | (1 << HIDE_STATUS_PHASE));
+ net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
+ net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
+
+ /* wait for status phase to complete */
+ while (!(net2272_read(dev, EP_STAT0) &
+ (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
+ ;
+
+ /* Enable test mode */
+ net2272_write(dev, USBTEST, mode);
+
+ /* load test packet */
+ if (mode == USB_TEST_PACKET) {
+ /* switch to 8 bit mode */
+ net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
+ ~(1 << DATA_WIDTH));
+
+ for (i = 0; i < sizeof(net2272_test_packet); ++i)
+ net2272_write(dev, EP_DATA, net2272_test_packet[i]);
+
+ /* Validate test packet */
+ net2272_write(dev, EP_TRANSFER0, 0);
+ }
+}
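+
+/*
+ * The mode argument comes from the high byte of wIndex in a
+ * SET_FEATURE(TEST_MODE) request (see the SET_FEATURE handler below):
+ * e.g. wIndex 0x0400 selects USB_TEST_PACKET (4), which loads the
+ * test packet above.
+ */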
+
+static void
+net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
+{
+ struct net2272_ep *ep;
+ u8 num, scratch;
+
+ /* starting a control request? */
+ if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
+ union {
+ u8 raw[8];
+ struct usb_ctrlrequest r;
+ } u;
+ int tmp = 0;
+ struct net2272_request *req;
+
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
+ dev->gadget.speed = USB_SPEED_HIGH;
+ else
+ dev->gadget.speed = USB_SPEED_FULL;
+ dev_dbg(dev->dev, "%s\n",
+ usb_speed_string(dev->gadget.speed));
+ }
+
+ ep = &dev->ep[0];
+ ep->irqs++;
+
+ /* make sure any leftover interrupt state is cleared */
+ stat &= ~(1 << ENDPOINT_0_INTERRUPT);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ net2272_done(ep, req,
+ (req->req.actual == req->req.length) ? 0 : -EPROTO);
+ }
+ ep->stopped = 0;
+ dev->protocol_stall = 0;
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << DATA_IN_TOKEN_INTERRUPT)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+ net2272_ep_write(ep, EP_STAT1,
+ (1 << TIMEOUT)
+ | (1 << USB_OUT_ACK_SENT)
+ | (1 << USB_OUT_NAK_SENT)
+ | (1 << USB_IN_ACK_RCVD)
+ | (1 << USB_IN_NAK_SENT)
+ | (1 << USB_STALL_SENT)
+ | (1 << LOCAL_OUT_ZLP));
+
+ /*
+ * Ensure Control Read pre-validation setting is beyond maximum size
+ * - Control Writes can leave non-zero values in EP_TRANSFER. If
+ * an EP0 transfer following the Control Write is a Control Read,
+ * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
+ * pre-validation count.
+ * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
+		 *   the pre-validation count cannot cause an unexpected validation
+ */
+ net2272_write(dev, PAGESEL, 0);
+ net2272_write(dev, EP_TRANSFER2, 0xff);
+ net2272_write(dev, EP_TRANSFER1, 0xff);
+ net2272_write(dev, EP_TRANSFER0, 0xff);
+
+ u.raw[0] = net2272_read(dev, SETUP0);
+ u.raw[1] = net2272_read(dev, SETUP1);
+ u.raw[2] = net2272_read(dev, SETUP2);
+ u.raw[3] = net2272_read(dev, SETUP3);
+ u.raw[4] = net2272_read(dev, SETUP4);
+ u.raw[5] = net2272_read(dev, SETUP5);
+ u.raw[6] = net2272_read(dev, SETUP6);
+ u.raw[7] = net2272_read(dev, SETUP7);
+		/*
+		 * On a big-endian cpu, le16_to_cpus performs the byte
+		 * swapping needed here; on little-endian it is a no-op.
+		 */
+ le16_to_cpus(&u.r.wValue);
+ le16_to_cpus(&u.r.wIndex);
+ le16_to_cpus(&u.r.wLength);
+
+ /* ack the irq */
+ net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
+ stat ^= (1 << SETUP_PACKET_INTERRUPT);
+
+ /* watch control traffic at the token level, and force
+ * synchronization before letting the status phase happen.
+ */
+ ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+ if (ep->is_in) {
+ scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
+ | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
+ stop_out_naking(ep);
+ } else
+ scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
+ | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
+ net2272_ep_write(ep, EP_IRQENB, scratch);
+
+ if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+ goto delegate;
+ switch (u.r.bRequest) {
+ case USB_REQ_GET_STATUS: {
+ struct net2272_ep *e;
+ u16 status = 0;
+
+ switch (u.r.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_ENDPOINT:
+ e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+ if (!e || u.r.wLength > 2)
+ goto do_stall;
+ if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
+ status = cpu_to_le16(1);
+ else
+ status = cpu_to_le16(0);
+
+ /* don't bother with a request object! */
+ net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+ writew(status, net2272_reg_addr(dev, EP_DATA));
+ set_fifo_bytecount(&dev->ep[0], 0);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "%s stat %02x\n",
+ ep->ep.name, status);
+ goto next_endpoints;
+ case USB_RECIP_DEVICE:
+ if (u.r.wLength > 2)
+ goto do_stall;
+ if (dev->gadget.is_selfpowered)
+ status = (1 << USB_DEVICE_SELF_POWERED);
+
+ /* don't bother with a request object! */
+ net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+ writew(status, net2272_reg_addr(dev, EP_DATA));
+ set_fifo_bytecount(&dev->ep[0], 0);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "device stat %02x\n", status);
+ goto next_endpoints;
+ case USB_RECIP_INTERFACE:
+ if (u.r.wLength > 2)
+ goto do_stall;
+
+ /* don't bother with a request object! */
+ net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+ writew(status, net2272_reg_addr(dev, EP_DATA));
+ set_fifo_bytecount(&dev->ep[0], 0);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "interface status %02x\n", status);
+ goto next_endpoints;
+ }
+
+ break;
+ }
+ case USB_REQ_CLEAR_FEATURE: {
+ struct net2272_ep *e;
+
+ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (u.r.wValue != USB_ENDPOINT_HALT ||
+ u.r.wLength != 0)
+ goto do_stall;
+ e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+ if (!e)
+ goto do_stall;
+ if (e->wedged) {
+ dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
+ ep->ep.name);
+ } else {
+ dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
+ clear_halt(e);
+ }
+ allow_status(ep);
+ goto next_endpoints;
+ }
+ case USB_REQ_SET_FEATURE: {
+ struct net2272_ep *e;
+
+ if (u.r.bRequestType == USB_RECIP_DEVICE) {
+ if (u.r.wIndex != NORMAL_OPERATION)
+ net2272_set_test_mode(dev, (u.r.wIndex >> 8));
+ allow_status(ep);
+ dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
+ goto next_endpoints;
+ } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (u.r.wValue != USB_ENDPOINT_HALT ||
+ u.r.wLength != 0)
+ goto do_stall;
+ e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+ if (!e)
+ goto do_stall;
+ set_halt(e);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
+ goto next_endpoints;
+ }
+ case USB_REQ_SET_ADDRESS: {
+ net2272_write(dev, OURADDR, u.r.wValue & 0xff);
+ allow_status(ep);
+ break;
+ }
+ default:
+ delegate:
+ dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
+ "ep_cfg %08x\n",
+ u.r.bRequestType, u.r.bRequest,
+ u.r.wValue, u.r.wIndex,
+ net2272_ep_read(ep, EP_CFG));
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &u.r);
+ spin_lock(&dev->lock);
+ }
+ }
+
+ /* stall ep0 on error */
+ if (tmp < 0) {
+ do_stall:
+ dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
+ u.r.bRequestType, u.r.bRequest, tmp);
+ dev->protocol_stall = 1;
+ }
+ /* endpoint dma irq? */
+ } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
+ net2272_cancel_dma(dev);
+ net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
+ stat &= ~(1 << DMA_DONE_INTERRUPT);
+ num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
+ ? 2 : 1;
+
+ ep = &dev->ep[num];
+ net2272_handle_dma(ep);
+ }
+
+ next_endpoints:
+ /* endpoint data irq? */
+ scratch = stat & 0x0f;
+ stat &= ~0x0f;
+ for (num = 0; scratch; num++) {
+ u8 t;
+
+ /* does this endpoint's FIFO and queue need tending? */
+ t = 1 << num;
+ if ((scratch & t) == 0)
+ continue;
+ scratch ^= t;
+
+ ep = &dev->ep[num];
+ net2272_handle_ep(ep);
+ }
+
+ /* some interrupts we can just ignore */
+ stat &= ~(1 << SOF_INTERRUPT);
+
+ if (stat)
+ dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
+}
+
+static void
+net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
+{
+ u8 tmp, mask;
+
+ /* after disconnect there's nothing else to do! */
+ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
+ mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
+
+ if (stat & tmp) {
+ bool reset = false;
+ bool disconnect = false;
+
+ /*
+ * Ignore disconnects and resets if the speed hasn't been set.
+ * VBUS can bounce and there's always an initial reset.
+ */
+ net2272_write(dev, IRQSTAT1, tmp);
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
+ if ((stat & (1 << VBUS_INTERRUPT)) &&
+ (net2272_read(dev, USBCTL1) &
+ (1 << VBUS_PIN)) == 0) {
+ disconnect = true;
+ dev_dbg(dev->dev, "disconnect %s\n",
+ dev->driver->driver.name);
+ } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
+ (net2272_read(dev, USBCTL1) & mask)
+ == 0) {
+ reset = true;
+ dev_dbg(dev->dev, "reset %s\n",
+ dev->driver->driver.name);
+ }
+
+ if (disconnect || reset) {
+ stop_activity(dev, dev->driver);
+ net2272_ep0_start(dev);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ if (reset)
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ else
+ (dev->driver->disconnect)(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ return;
+ }
+ }
+ stat &= ~tmp;
+
+ if (!stat)
+ return;
+ }
+
+ tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
+ if (stat & tmp) {
+ net2272_write(dev, IRQSTAT1, tmp);
+ if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
+ if (dev->async_callbacks && dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+ if (!enable_suspend) {
+ stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
+ dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
+ }
+ } else {
+ if (dev->async_callbacks && dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ }
+ stat &= ~tmp;
+ }
+
+ /* clear any other status/irqs */
+ if (stat)
+ net2272_write(dev, IRQSTAT1, stat);
+
+ /* some status we can just ignore */
+ stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+ | (1 << SUSPEND_REQUEST_INTERRUPT)
+ | (1 << RESUME_INTERRUPT));
+ if (!stat)
+ return;
+ else
+ dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
+}
+
+static irqreturn_t net2272_irq(int irq, void *_dev)
+{
+ struct net2272 *dev = _dev;
+#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
+ u32 intcsr;
+#endif
+#if defined(PLX_PCI_RDK)
+ u8 dmareq;
+#endif
+ spin_lock(&dev->lock);
+#if defined(PLX_PCI_RDK)
+ intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+
+ if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
+ writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+ net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+ intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+ writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ }
+ if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
+ writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+
+ dmareq = net2272_read(dev, DMAREQ);
+ if (dmareq & 0x01)
+ net2272_handle_dma(&dev->ep[2]);
+ else
+ net2272_handle_dma(&dev->ep[1]);
+ }
+#endif
+#if defined(PLX_PCI_RDK2)
+	/* see if the PCI interrupt is ours by checking irqstat */
+ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
+ spin_unlock(&dev->lock);
+ return IRQ_NONE;
+ }
+ /* check dma interrupts */
+#endif
+	/* Platform/device interrupt handler */
+#if !defined(PLX_PCI_RDK)
+ net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+ net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+#endif
+ spin_unlock(&dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int net2272_present(struct net2272 *dev)
+{
+ /*
+ * Quick test to see if CPU can communicate properly with the NET2272.
+	 * Verifies the connection using writes and reads of read/write and
+	 * read-only registers.
+	 *
+	 * This routine is strongly recommended, especially during early bring-up
+	 * of new hardware; however, for designs that do not apply Power On System
+	 * Tests (POST) it may be discarded (or perhaps minimized).
+ */
+ unsigned int ii;
+ u8 val, refval;
+
+	/* Verify the NET2272 SCRATCH register can be written and read back */
+ refval = net2272_read(dev, SCRATCH);
+ for (ii = 0; ii < 0x100; ii += 7) {
+ net2272_write(dev, SCRATCH, ii);
+ val = net2272_read(dev, SCRATCH);
+ if (val != ii) {
+ dev_dbg(dev->dev,
+ "%s: write/read SCRATCH register test failed: "
+ "wrote:0x%2.2x, read:0x%2.2x\n",
+ __func__, ii, val);
+ return -EINVAL;
+ }
+ }
+ /* To be nice, we write the original SCRATCH value back: */
+ net2272_write(dev, SCRATCH, refval);
+
+ /* Verify NET2272 CHIPREV register is read-only: */
+ refval = net2272_read(dev, CHIPREV_2272);
+ for (ii = 0; ii < 0x100; ii += 7) {
+ net2272_write(dev, CHIPREV_2272, ii);
+ val = net2272_read(dev, CHIPREV_2272);
+ if (val != refval) {
+ dev_dbg(dev->dev,
+ "%s: write/read CHIPREV register test failed: "
+ "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
+ __func__, ii, val, refval);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Verify NET2272's "NET2270 legacy revision" register
+ * - NET2272 has two revision registers. The NET2270 legacy revision
+ * register should read the same value, regardless of the NET2272
+	 *   silicon revision. The legacy register exists so that firmware
+	 *   written for the NET2270 can also run on the NET2272.
+ */
+ val = net2272_read(dev, CHIPREV_LEGACY);
+ if (val != NET2270_LEGACY_REV) {
+ /*
+ * Unexpected legacy revision value
+ * - Perhaps the chip is a NET2270?
+ */
+ dev_dbg(dev->dev,
+ "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
+ " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
+ __func__, NET2270_LEGACY_REV, val);
+ return -EINVAL;
+ }
+
+ /*
+ * Verify NET2272 silicon revision
+ * - This revision register is appropriate for the silicon version
+ * of the NET2272
+ */
+ val = net2272_read(dev, CHIPREV_2272);
+ switch (val) {
+ case CHIPREV_NET2272_R1:
+ /*
+ * NET2272 Rev 1 has DMA related errata:
+ * - Newer silicon (Rev 1A or better) required
+ */
+ dev_dbg(dev->dev,
+ "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
+ __func__);
+ break;
+ case CHIPREV_NET2272_R1A:
+ break;
+ default:
+ /* NET2272 silicon version *may* not work with this firmware */
+ dev_dbg(dev->dev,
+ "%s: unexpected silicon revision register value: "
+ " CHIPREV_2272: 0x%2.2x\n",
+ __func__, val);
+ /*
+ * Return Success, even though the chip rev is not an expected value
+ * - Older, pre-built firmware can attempt to operate on newer silicon
+ * - Often, new silicon is perfectly compatible
+ */
+ }
+
+ /* Success: NET2272 checks out OK */
+ return 0;
+}
+
+static void
+net2272_gadget_release(struct device *_dev)
+{
+ struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
+
+ kfree(dev);
+}
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_remove(struct net2272 *dev)
+{
+ if (dev->added)
+ usb_del_gadget(&dev->gadget);
+ free_irq(dev->irq, dev);
+ iounmap(dev->base_addr);
+ device_remove_file(dev->dev, &dev_attr_registers);
+
+ dev_info(dev->dev, "unbind\n");
+}
+
+static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
+{
+ struct net2272 *ret;
+
+ if (!irq) {
+ dev_dbg(dev, "No IRQ!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* alloc, and start init */
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&ret->lock);
+ ret->irq = irq;
+ ret->dev = dev;
+ ret->gadget.ops = &net2272_ops;
+ ret->gadget.max_speed = USB_SPEED_HIGH;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ ret->gadget.name = driver_name;
+ usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
+
+ return ret;
+}
+
+static int
+net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
+{
+ int ret;
+
+	/* See if the NET2272 is there... */
+ if (net2272_present(dev)) {
+ dev_warn(dev->dev, "2272 not found!\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ net2272_usb_reset(dev);
+ net2272_usb_reinit(dev);
+
+ ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
+ if (ret) {
+ dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
+ goto err;
+ }
+
+ dev->chiprev = net2272_read(dev, CHIPREV_2272);
+
+ /* done */
+ dev_info(dev->dev, "%s\n", driver_desc);
+ dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
+ dev->irq, dev->base_addr, dev->chiprev,
+ dma_mode_string());
+ dev_info(dev->dev, "version: %s\n", driver_vers);
+
+ ret = device_create_file(dev->dev, &dev_attr_registers);
+ if (ret)
+ goto err_irq;
+
+ ret = usb_add_gadget(&dev->gadget);
+ if (ret)
+ goto err_add_udc;
+ dev->added = 1;
+
+ return 0;
+
+err_add_udc:
+ device_remove_file(dev->dev, &dev_attr_registers);
+ err_irq:
+ free_irq(dev->irq, dev);
+ err:
+ return ret;
+}
+
+#ifdef CONFIG_USB_PCI
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us
+ */
+
+static int
+net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+ unsigned long resource, len, tmp;
+ void __iomem *mem_mapped_addr[4];
+ int ret, i;
+
+ /*
+ * BAR 0 holds PLX 9054 config registers
+ * BAR 1 is i/o memory; unused here
+ * BAR 2 holds EPLD config registers
+ * BAR 3 holds NET2272 registers
+ */
+
+ /* Find and map all address spaces */
+ for (i = 0; i < 4; ++i) {
+ if (i == 1)
+ continue; /* BAR1 unused */
+
+ resource = pci_resource_start(pdev, i);
+ len = pci_resource_len(pdev, i);
+
+ if (!request_mem_region(resource, len, driver_name)) {
+ dev_dbg(dev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mem_mapped_addr[i] = ioremap(resource, len);
+ if (mem_mapped_addr[i] == NULL) {
+ release_mem_region(resource, len);
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
+ dev->rdk1.epld_base_addr = mem_mapped_addr[2];
+ dev->base_addr = mem_mapped_addr[3];
+
+ /* Set PLX 9054 bus width (16 bits) */
+ tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
+ writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
+ dev->rdk1.plx9054_base_addr + LBRD1);
+
+ /* Enable PLX 9054 Interrupts */
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
+ (1 << PCI_INTERRUPT_ENABLE) |
+ (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+
+ /* reset */
+ writeb((1 << EPLD_DMA_ENABLE) |
+ (1 << DMA_CTL_DACK) |
+ (1 << DMA_TIMEOUT_ENABLE) |
+ (1 << USER) |
+ (0 << MPX_MODE) |
+ (1 << BUSWIDTH) |
+ (1 << NET2272_RESET),
+ dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+
+ mb();
+ writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
+ ~(1 << NET2272_RESET),
+ dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+ udelay(200);
+
+ return 0;
+
+ err:
+ while (--i >= 0) {
+ if (i == 1)
+ continue; /* BAR1 unused */
+ iounmap(mem_mapped_addr[i]);
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+
+ return ret;
+}
+
+static int
+net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+ unsigned long resource, len;
+ void __iomem *mem_mapped_addr[2];
+ int ret, i;
+
+ /*
+	 * BAR 0 holds FPGA config registers
+ * BAR 1 holds NET2272 registers
+ */
+
+ /* Find and map all address spaces, bar2-3 unused in rdk 2 */
+ for (i = 0; i < 2; ++i) {
+ resource = pci_resource_start(pdev, i);
+ len = pci_resource_len(pdev, i);
+
+ if (!request_mem_region(resource, len, driver_name)) {
+ dev_dbg(dev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mem_mapped_addr[i] = ioremap(resource, len);
+ if (mem_mapped_addr[i] == NULL) {
+ release_mem_region(resource, len);
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
+ dev->base_addr = mem_mapped_addr[1];
+
+ mb();
+ /* Set 2272 bus width (16 bits) and reset */
+ writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+ udelay(200);
+ writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+ /* Print fpga version number */
+ dev_info(dev->dev, "RDK2 FPGA version %08x\n",
+ readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
+ /* Enable FPGA Interrupts */
+ writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
+
+ return 0;
+
+ err:
+ while (--i >= 0) {
+ iounmap(mem_mapped_addr[i]);
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+
+ return ret;
+}
+
+static int
+net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net2272 *dev;
+ int ret;
+
+ dev = net2272_probe_init(&pdev->dev, pdev->irq);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ dev->dev_id = pdev->device;
+
+ if (pci_enable_device(pdev) < 0) {
+ ret = -ENODEV;
+ goto err_put;
+ }
+
+ pci_set_master(pdev);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
+ case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
+ default: BUG();
+ }
+ if (ret)
+ goto err_pci;
+
+ ret = net2272_probe_fin(dev, 0);
+ if (ret)
+ goto err_pci;
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+ err_pci:
+ pci_disable_device(pdev);
+ err_put:
+ usb_put_gadget(&dev->gadget);
+
+ return ret;
+}
+
+static void
+net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+ int i;
+
+ /* disable PLX 9054 interrupts */
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+ ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ /* clean up resources allocated during probe() */
+ iounmap(dev->rdk1.plx9054_base_addr);
+ iounmap(dev->rdk1.epld_base_addr);
+
+ for (i = 0; i < 4; ++i) {
+ if (i == 1)
+ continue; /* BAR1 unused */
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+}
+
+static void
+net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+ int i;
+
+ /* disable fpga interrupts
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+ ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ */
+
+ /* clean up resources allocated during probe() */
+ iounmap(dev->rdk2.fpga_base_addr);
+
+ for (i = 0; i < 2; ++i)
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+}
+
+static void
+net2272_pci_remove(struct pci_dev *pdev)
+{
+ struct net2272 *dev = pci_get_drvdata(pdev);
+
+ net2272_remove(dev);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
+ case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
+ default: BUG();
+ }
+
+ pci_disable_device(pdev);
+
+ usb_put_gadget(&dev->gadget);
+}
+
+/* Table of matching PCI IDs */
+static struct pci_device_id pci_ids[] = {
+ { /* RDK 1 card */
+ .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
+ .class_mask = 0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_RDK1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* RDK 2 card */
+ .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
+ .class_mask = 0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_RDK2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver net2272_pci_driver = {
+ .name = driver_name,
+ .id_table = pci_ids,
+
+ .probe = net2272_pci_probe,
+ .remove = net2272_pci_remove,
+};
+
+static int net2272_pci_register(void)
+{
+ return pci_register_driver(&net2272_pci_driver);
+}
+
+static void net2272_pci_unregister(void)
+{
+ pci_unregister_driver(&net2272_pci_driver);
+}
+
+#else
+static inline int net2272_pci_register(void) { return 0; }
+static inline void net2272_pci_unregister(void) { }
+#endif
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_plat_probe(struct platform_device *pdev)
+{
+ struct net2272 *dev;
+ int ret;
+ unsigned int irqflags;
+ resource_size_t base, len;
+ struct resource *iomem, *iomem_bus, *irq_res;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
+ if (!irq_res || !iomem) {
+ dev_err(&pdev->dev, "must provide irq/base addr");
+ return -EINVAL;
+ }
+
+ dev = net2272_probe_init(&pdev->dev, irq_res->start);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ irqflags = 0;
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
+ irqflags |= IRQF_TRIGGER_RISING;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
+ irqflags |= IRQF_TRIGGER_FALLING;
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+ irqflags |= IRQF_TRIGGER_HIGH;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
+ irqflags |= IRQF_TRIGGER_LOW;
+
+ base = iomem->start;
+ len = resource_size(iomem);
+ if (iomem_bus)
+ dev->base_shift = iomem_bus->start;
+
+ if (!request_mem_region(base, len, driver_name)) {
+		dev_dbg(dev->dev, "can't request memory region\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ dev->base_addr = ioremap(base, len);
+ if (!dev->base_addr) {
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err_req;
+ }
+
+ ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ if (ret)
+ goto err_io;
+
+ platform_set_drvdata(pdev, dev);
+ dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
+ (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
+
+ return 0;
+
+ err_io:
+ iounmap(dev->base_addr);
+ err_req:
+ release_mem_region(base, len);
+ err:
+ usb_put_gadget(&dev->gadget);
+
+ return ret;
+}
+
+static int
+net2272_plat_remove(struct platform_device *pdev)
+{
+ struct net2272 *dev = platform_get_drvdata(pdev);
+
+ net2272_remove(dev);
+
+ release_mem_region(pdev->resource[0].start,
+ resource_size(&pdev->resource[0]));
+
+ usb_put_gadget(&dev->gadget);
+
+ return 0;
+}
+
+static struct platform_driver net2272_plat_driver = {
+ .probe = net2272_plat_probe,
+ .remove = net2272_plat_remove,
+ .driver = {
+ .name = driver_name,
+ },
+ /* FIXME .suspend, .resume */
+};
+MODULE_ALIAS("platform:net2272");
+
+static int __init net2272_init(void)
+{
+ int ret;
+
+ ret = net2272_pci_register();
+ if (ret)
+ return ret;
+ ret = platform_driver_register(&net2272_plat_driver);
+ if (ret)
+ goto err_pci;
+ return ret;
+
+err_pci:
+ net2272_pci_unregister();
+ return ret;
+}
+module_init(net2272_init);
+
+static void __exit net2272_cleanup(void)
+{
+ net2272_pci_unregister();
+ platform_driver_unregister(&net2272_plat_driver);
+}
+module_exit(net2272_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("PLX Technology, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
new file mode 100644
index 000000000..a9994f737
--- /dev/null
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PLX NET2272 high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ */
+
+#ifndef __NET2272_H__
+#define __NET2272_H__
+
+/* Main Registers */
+#define REGADDRPTR 0x00
+#define REGDATA 0x01
+#define IRQSTAT0 0x02
+#define ENDPOINT_0_INTERRUPT 0
+#define ENDPOINT_A_INTERRUPT 1
+#define ENDPOINT_B_INTERRUPT 2
+#define ENDPOINT_C_INTERRUPT 3
+#define VIRTUALIZED_ENDPOINT_INTERRUPT 4
+#define SETUP_PACKET_INTERRUPT 5
+#define DMA_DONE_INTERRUPT 6
+#define SOF_INTERRUPT 7
+#define IRQSTAT1 0x03
+#define CONTROL_STATUS_INTERRUPT 1
+#define VBUS_INTERRUPT 2
+#define SUSPEND_REQUEST_INTERRUPT 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT 4
+#define RESUME_INTERRUPT 5
+#define ROOT_PORT_RESET_INTERRUPT 6
+#define RESET_STATUS 7
+#define PAGESEL 0x04
+#define DMAREQ 0x1c
+#define DMA_ENDPOINT_SELECT 0
+#define DREQ_POLARITY 1
+#define DACK_POLARITY 2
+#define EOT_POLARITY 3
+#define DMA_CONTROL_DACK 4
+#define DMA_REQUEST_ENABLE 5
+#define DMA_REQUEST 6
+#define DMA_BUFFER_VALID 7
+#define SCRATCH 0x1d
+#define IRQENB0 0x20
+#define ENDPOINT_0_INTERRUPT_ENABLE 0
+#define ENDPOINT_A_INTERRUPT_ENABLE 1
+#define ENDPOINT_B_INTERRUPT_ENABLE 2
+#define ENDPOINT_C_INTERRUPT_ENABLE 3
+#define VIRTUALIZED_ENDPOINT_INTERRUPT_ENABLE 4
+#define SETUP_PACKET_INTERRUPT_ENABLE 5
+#define DMA_DONE_INTERRUPT_ENABLE 6
+#define SOF_INTERRUPT_ENABLE 7
+#define IRQENB1 0x21
+#define VBUS_INTERRUPT_ENABLE 2
+#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 4
+#define RESUME_INTERRUPT_ENABLE 5
+#define ROOT_PORT_RESET_INTERRUPT_ENABLE 6
+#define LOCCTL 0x22
+#define DATA_WIDTH 0
+#define LOCAL_CLOCK_OUTPUT 1
+#define LOCAL_CLOCK_OUTPUT_OFF 0
+#define LOCAL_CLOCK_OUTPUT_3_75MHZ 1
+#define LOCAL_CLOCK_OUTPUT_7_5MHZ 2
+#define LOCAL_CLOCK_OUTPUT_15MHZ 3
+#define LOCAL_CLOCK_OUTPUT_30MHZ 4
+#define LOCAL_CLOCK_OUTPUT_60MHZ 5
+#define DMA_SPLIT_BUS_MODE 4
+#define BYTE_SWAP 5
+#define BUFFER_CONFIGURATION 6
+#define BUFFER_CONFIGURATION_EPA512_EPB512 0
+#define BUFFER_CONFIGURATION_EPA1024_EPB512 1
+#define BUFFER_CONFIGURATION_EPA1024_EPB1024 2
+#define BUFFER_CONFIGURATION_EPA1024DB 3
+#define CHIPREV_LEGACY 0x23
+#define NET2270_LEGACY_REV 0x40
+#define LOCCTL1 0x24
+#define DMA_MODE 0
+#define SLOW_DREQ 0
+#define FAST_DREQ 1
+#define BURST_MODE 2
+#define DMA_DACK_ENABLE 2
+#define CHIPREV_2272 0x25
+#define CHIPREV_NET2272_R1 0x10
+#define CHIPREV_NET2272_R1A 0x11
+/* USB Registers */
+#define USBCTL0 0x18
+#define IO_WAKEUP_ENABLE 1
+#define USB_DETECT_ENABLE 3
+#define USB_ROOT_PORT_WAKEUP_ENABLE 5
+#define USBCTL1 0x19
+#define VBUS_PIN 0
+#define USB_FULL_SPEED 1
+#define USB_HIGH_SPEED 2
+#define GENERATE_RESUME 3
+#define VIRTUAL_ENDPOINT_ENABLE 4
+#define FRAME0 0x1a
+#define FRAME1 0x1b
+#define OURADDR 0x30
+#define FORCE_IMMEDIATE 7
+#define USBDIAG 0x31
+#define FORCE_TRANSMIT_CRC_ERROR 0
+#define PREVENT_TRANSMIT_BIT_STUFF 1
+#define FORCE_RECEIVE_ERROR 2
+#define FAST_TIMES 4
+#define USBTEST 0x32
+#define TEST_MODE_SELECT 0
+#define NORMAL_OPERATION 0
+#define XCVRDIAG 0x33
+#define FORCE_FULL_SPEED 2
+#define FORCE_HIGH_SPEED 3
+#define OPMODE 4
+#define NORMAL_OPERATION 0
+#define NON_DRIVING 1
+#define DISABLE_BITSTUFF_AND_NRZI_ENCODE 2
+#define LINESTATE 6
+#define SE0_STATE 0
+#define J_STATE 1
+#define K_STATE 2
+#define SE1_STATE 3
+#define VIRTOUT0 0x34
+#define VIRTOUT1 0x35
+#define VIRTIN0 0x36
+#define VIRTIN1 0x37
+#define SETUP0 0x40
+#define SETUP1 0x41
+#define SETUP2 0x42
+#define SETUP3 0x43
+#define SETUP4 0x44
+#define SETUP5 0x45
+#define SETUP6 0x46
+#define SETUP7 0x47
+/* Endpoint Registers (Paged via PAGESEL) */
+#define EP_DATA 0x05
+#define EP_STAT0 0x06
+#define DATA_IN_TOKEN_INTERRUPT 0
+#define DATA_OUT_TOKEN_INTERRUPT 1
+#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
+#define DATA_PACKET_RECEIVED_INTERRUPT 3
+#define SHORT_PACKET_TRANSFERRED_INTERRUPT 4
+#define NAK_OUT_PACKETS 5
+#define BUFFER_EMPTY 6
+#define BUFFER_FULL 7
+#define EP_STAT1 0x07
+#define TIMEOUT 0
+#define USB_OUT_ACK_SENT 1
+#define USB_OUT_NAK_SENT 2
+#define USB_IN_ACK_RCVD 3
+#define USB_IN_NAK_SENT 4
+#define USB_STALL_SENT 5
+#define LOCAL_OUT_ZLP 6
+#define BUFFER_FLUSH 7
+#define EP_TRANSFER0 0x08
+#define EP_TRANSFER1 0x09
+#define EP_TRANSFER2 0x0a
+#define EP_IRQENB 0x0b
+#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
+#define DATA_OUT_TOKEN_INTERRUPT_ENABLE 1
+#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
+#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
+#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 4
+#define EP_AVAIL0 0x0c
+#define EP_AVAIL1 0x0d
+#define EP_RSPCLR 0x0e
+#define EP_RSPSET 0x0f
+#define ENDPOINT_HALT 0
+#define ENDPOINT_TOGGLE 1
+#define NAK_OUT_PACKETS_MODE 2
+#define CONTROL_STATUS_PHASE_HANDSHAKE 3
+#define INTERRUPT_MODE 4
+#define AUTOVALIDATE 5
+#define HIDE_STATUS_PHASE 6
+#define ALT_NAK_OUT_PACKETS 7
+#define EP_MAXPKT0 0x28
+#define EP_MAXPKT1 0x29
+#define ADDITIONAL_TRANSACTION_OPPORTUNITIES 3
+#define NONE_ADDITIONAL_TRANSACTION 0
+#define ONE_ADDITIONAL_TRANSACTION 1
+#define TWO_ADDITIONAL_TRANSACTION 2
+#define EP_CFG 0x2a
+#define ENDPOINT_NUMBER 0
+#define ENDPOINT_DIRECTION 4
+#define ENDPOINT_TYPE 5
+#define ENDPOINT_ENABLE 7
+#define EP_HBW 0x2b
+#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 0
+#define DATA0_PID 0
+#define DATA1_PID 1
+#define DATA2_PID 2
+#define MDATA_PID 3
+#define EP_BUFF_STATES 0x2c
+#define BUFFER_A_STATE 0
+#define BUFFER_B_STATE 2
+#define BUFF_FREE 0
+#define BUFF_VALID 1
+#define BUFF_LCL 2
+#define BUFF_USB 3
+
+/*---------------------------------------------------------------------------*/
+
+#define PCI_DEVICE_ID_RDK1 0x9054
+
+/* PCI-RDK EPLD Registers */
+#define RDK_EPLD_IO_REGISTER1 0x00000000
+#define RDK_EPLD_USB_RESET 0
+#define RDK_EPLD_USB_POWERDOWN 1
+#define RDK_EPLD_USB_WAKEUP 2
+#define RDK_EPLD_USB_EOT 3
+#define RDK_EPLD_DPPULL 4
+#define RDK_EPLD_IO_REGISTER2 0x00000004
+#define RDK_EPLD_BUSWIDTH 0
+#define RDK_EPLD_USER 2
+#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3
+#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4
+#define RDK_EPLD_STATUS_REGISTER 0x00000008
+#define RDK_EPLD_USB_LRESET 0
+#define RDK_EPLD_REVISION_REGISTER 0x0000000c
+
+/* PCI-RDK PLX 9054 Registers */
+#define INTCSR 0x68
+#define PCI_INTERRUPT_ENABLE 8
+#define LOCAL_INTERRUPT_INPUT_ENABLE 11
+#define LOCAL_INPUT_INTERRUPT_ACTIVE 15
+#define LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE 18
+#define LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE 19
+#define DMA_CHANNEL_0_INTERRUPT_ACTIVE 21
+#define DMA_CHANNEL_1_INTERRUPT_ACTIVE 22
+#define CNTRL 0x6C
+#define RELOAD_CONFIGURATION_REGISTERS 29
+#define PCI_ADAPTER_SOFTWARE_RESET 30
+#define DMAMODE0 0x80
+#define LOCAL_BUS_WIDTH 0
+#define INTERNAL_WAIT_STATES 2
+#define TA_READY_INPUT_ENABLE 6
+#define LOCAL_BURST_ENABLE 8
+#define SCATTER_GATHER_MODE 9
+#define DONE_INTERRUPT_ENABLE 10
+#define LOCAL_ADDRESSING_MODE 11
+#define DEMAND_MODE 12
+#define DMA_EOT_ENABLE 14
+#define FAST_SLOW_TERMINATE_MODE_SELECT 15
+#define DMA_CHANNEL_INTERRUPT_SELECT 17
+#define DMAPADR0 0x84
+#define DMALADR0 0x88
+#define DMASIZ0 0x8c
+#define DMADPR0 0x90
+#define DESCRIPTOR_LOCATION 0
+#define END_OF_CHAIN 1
+#define INTERRUPT_AFTER_TERMINAL_COUNT 2
+#define DIRECTION_OF_TRANSFER 3
+#define DMACSR0 0xa8
+#define CHANNEL_ENABLE 0
+#define CHANNEL_START 1
+#define CHANNEL_ABORT 2
+#define CHANNEL_CLEAR_INTERRUPT 3
+#define CHANNEL_DONE 4
+#define DMATHR 0xb0
+#define LBRD1 0xf8
+#define MEMORY_SPACE_LOCAL_BUS_WIDTH 0
+#define W8_BIT 0
+#define W16_BIT 1
+
+/* Special OR'ing of INTCSR bits */
+#define LOCAL_INTERRUPT_TEST \
+ ((1 << LOCAL_INPUT_INTERRUPT_ACTIVE) | \
+ (1 << LOCAL_INTERRUPT_INPUT_ENABLE))
+
+#define DMA_CHANNEL_0_TEST \
+ ((1 << DMA_CHANNEL_0_INTERRUPT_ACTIVE) | \
+ (1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE))
+
+#define DMA_CHANNEL_1_TEST \
+ ((1 << DMA_CHANNEL_1_INTERRUPT_ACTIVE) | \
+ (1 << LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE))
+
+/* EPLD Registers */
+#define RDK_EPLD_IO_REGISTER1 0x00000000
+#define RDK_EPLD_USB_RESET 0
+#define RDK_EPLD_USB_POWERDOWN 1
+#define RDK_EPLD_USB_WAKEUP 2
+#define RDK_EPLD_USB_EOT 3
+#define RDK_EPLD_DPPULL 4
+#define RDK_EPLD_IO_REGISTER2 0x00000004
+#define RDK_EPLD_BUSWIDTH 0
+#define RDK_EPLD_USER 2
+#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3
+#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4
+#define RDK_EPLD_STATUS_REGISTER 0x00000008
+#define RDK_EPLD_USB_LRESET 0
+#define RDK_EPLD_REVISION_REGISTER 0x0000000c
+
+#define EPLD_IO_CONTROL_REGISTER 0x400
+#define NET2272_RESET 0
+#define BUSWIDTH 1
+#define MPX_MODE 3
+#define USER 4
+#define DMA_TIMEOUT_ENABLE 5
+#define DMA_CTL_DACK 6
+#define EPLD_DMA_ENABLE 7
+#define EPLD_DMA_CONTROL_REGISTER 0x800
+#define SPLIT_DMA_MODE 0
+#define SPLIT_DMA_DIRECTION 1
+#define SPLIT_DMA_ENABLE 2
+#define SPLIT_DMA_INTERRUPT_ENABLE 3
+#define SPLIT_DMA_INTERRUPT 4
+#define EPLD_DMA_MODE 5
+#define EPLD_DMA_CONTROLLER_ENABLE 7
+#define SPLIT_DMA_ADDRESS_LOW 0xc00
+#define SPLIT_DMA_ADDRESS_HIGH 0x1000
+#define SPLIT_DMA_BYTE_COUNT_LOW 0x1400
+#define SPLIT_DMA_BYTE_COUNT_HIGH 0x1800
+#define EPLD_REVISION_REGISTER 0x1c00
+#define SPLIT_DMA_RAM 0x4000
+#define DMA_RAM_SIZE 0x1000
+
+/*---------------------------------------------------------------------------*/
+
+#define PCI_DEVICE_ID_RDK2 0x3272
+
+/* PCI-RDK version 2 registers */
+
+/* Main Control Registers */
+
+#define RDK2_IRQENB 0x00
+#define RDK2_IRQSTAT 0x04
+#define PB7 23
+#define PB6 22
+#define PB5 21
+#define PB4 20
+#define PB3 19
+#define PB2 18
+#define PB1 17
+#define PB0 16
+#define GP3 23
+#define GP2 23
+#define GP1 23
+#define GP0 23
+#define DMA_RETRY_ABORT 6
+#define DMA_PAUSE_DONE 5
+#define DMA_ABORT_DONE 4
+#define DMA_OUT_FIFO_TRANSFER_DONE 3
+#define DMA_LOCAL_DONE 2
+#define DMA_PCI_DONE 1
+#define NET2272_PCI_IRQ 0
+
+#define RDK2_LOCCTLRDK 0x08
+#define CHIP_RESET 3
+#define SPLIT_DMA 2
+#define MULTIPLEX_MODE 1
+#define BUS_WIDTH 0
+
+#define RDK2_GPIOCTL 0x10
+#define GP3_OUT_ENABLE 7
+#define GP2_OUT_ENABLE 6
+#define GP1_OUT_ENABLE 5
+#define GP0_OUT_ENABLE 4
+#define GP3_DATA 3
+#define GP2_DATA 2
+#define GP1_DATA 1
+#define GP0_DATA 0
+
+#define RDK2_LEDSW 0x14
+#define LED3 27
+#define LED2 26
+#define LED1 25
+#define LED0 24
+#define PBUTTON 16
+#define DIPSW 0
+
+#define RDK2_DIAG 0x18
+#define RDK2_FAST_TIMES 2
+#define FORCE_PCI_SERR 1
+#define FORCE_PCI_INT 0
+#define RDK2_FPGAREV 0x1C
+
+/* Dma Control registers */
+#define RDK2_DMACTL 0x80
+#define ADDR_HOLD 24
+#define RETRY_COUNT 16 /* 23:16 */
+#define FIFO_THRESHOLD 11 /* 15:11 */
+#define MEM_WRITE_INVALIDATE 10
+#define READ_MULTIPLE 9
+#define READ_LINE 8
+#define RDK2_DMA_MODE 6 /* 7:6 */
+#define CONTROL_DACK 5
+#define EOT_ENABLE 4
+#define EOT_POLARITY 3
+#define DACK_POLARITY 2
+#define DREQ_POLARITY 1
+#define DMA_ENABLE 0
+
+#define RDK2_DMASTAT 0x84
+#define GATHER_COUNT 12 /* 14:12 */
+#define FIFO_COUNT 6 /* 11:6 */
+#define FIFO_FLUSH 5
+#define FIFO_TRANSFER 4
+#define PAUSE_DONE 3
+#define ABORT_DONE 2
+#define DMA_ABORT 1
+#define DMA_START 0
+
+#define RDK2_DMAPCICOUNT 0x88
+#define DMA_DIRECTION 31
+#define DMA_PCI_BYTE_COUNT 0 /* 0:23 */
+
+#define RDK2_DMALOCCOUNT 0x8C /* 0:23 dma local byte count */
+
+#define RDK2_DMAADDR 0x90 /* 2:31 PCI bus starting address */
+
+/*---------------------------------------------------------------------------*/
+
+#define REG_INDEXED_THRESHOLD (1 << 5)
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+struct net2272_ep {
+ struct usb_ep ep;
+ struct net2272 *dev;
+ unsigned long irqs;
+
+ /* analogous to a host-side qh */
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned num:8,
+ fifo_size:12,
+ stopped:1,
+ wedged:1,
+ is_in:1,
+ is_iso:1,
+ dma:1,
+ not_empty:1;
+};
+
+struct net2272 {
+ /* each device provides one gadget, several endpoints */
+ struct usb_gadget gadget;
+ struct device *dev;
+ unsigned short dev_id;
+
+ spinlock_t lock;
+ struct net2272_ep ep[4];
+ struct usb_gadget_driver *driver;
+ unsigned protocol_stall:1,
+ softconnect:1,
+ wakeup:1,
+ added:1,
+ async_callbacks:1,
+ dma_eot_polarity:1,
+ dma_dack_polarity:1,
+ dma_dreq_polarity:1,
+ dma_busy:1;
+ u16 chiprev;
+ u8 pagesel;
+
+ unsigned int irq;
+ unsigned short fifo_mode;
+
+ unsigned int base_shift;
+ u16 __iomem *base_addr;
+ union {
+#ifdef CONFIG_USB_PCI
+ struct {
+ void __iomem *plx9054_base_addr;
+ void __iomem *epld_base_addr;
+ } rdk1;
+ struct {
+ /* BAR0; BAR1 is base_addr; both are mem-mapped */
+ void __iomem *fpga_base_addr;
+ } rdk2;
+#endif
+ };
+};
+
+static void __iomem *
+net2272_reg_addr(struct net2272 *dev, unsigned int reg)
+{
+ return dev->base_addr + (reg << dev->base_shift);
+}
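+
+/*
+ * Note: base_addr is a u16 __iomem *, so the arithmetic above already
+ * advances in 16-bit steps; base_shift supplies any extra stride the
+ * board's bus wiring imposes (an assumption worth checking against the
+ * probe code for a given platform).
+ */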
+
+static void
+net2272_write(struct net2272 *dev, unsigned int reg, u8 value)
+{
+ if (reg >= REG_INDEXED_THRESHOLD) {
+ /*
+ * Indexed register; use REGADDRPTR/REGDATA
+ * - Save and restore REGADDRPTR. This prevents other code from
+ * changing REGADDRPTR between accesses, but it is time consuming.
+ * - Performance tip: skip the save/restore when it is known to be
+ * safe, or do it only in critical sections.
+ u8 tmp = readb(dev->base_addr + REGADDRPTR);
+ */
+ writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
+ writeb(value, net2272_reg_addr(dev, REGDATA));
+ /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
+ } else
+ writeb(value, net2272_reg_addr(dev, reg));
+}
+
+static u8
+net2272_read(struct net2272 *dev, unsigned int reg)
+{
+ u8 ret;
+
+ if (reg >= REG_INDEXED_THRESHOLD) {
+ /*
+ * Indexed register; use REGADDRPTR/REGDATA
+ * - Save and restore REGADDRPTR. This prevents other code from
+ * changing REGADDRPTR between accesses, but it is time consuming.
+ * - Performance tip: skip the save/restore when it is known to be
+ * safe, or do it only in critical sections.
+ u8 tmp = readb(dev->base_addr + REGADDRPTR);
+ */
+ writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
+ ret = readb(net2272_reg_addr(dev, REGDATA));
+ /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
+ } else
+ ret = readb(net2272_reg_addr(dev, reg));
+
+ return ret;
+}
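+
+/*
+ * Illustrative use of the indexed path (a sketch, not code from this
+ * driver): SETUP0..SETUP7 (0x40..0x47) all sit above
+ * REG_INDEXED_THRESHOLD, so fetching the eight setup bytes goes through
+ * REGADDRPTR/REGDATA on every iteration:
+ *
+ *     u8 raw[8];
+ *     int i;
+ *
+ *     for (i = 0; i < 8; i++)
+ *             raw[i] = net2272_read(dev, SETUP0 + i);
+ */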
+
+static void
+net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value)
+{
+ struct net2272 *dev = ep->dev;
+
+ if (dev->pagesel != ep->num) {
+ net2272_write(dev, PAGESEL, ep->num);
+ dev->pagesel = ep->num;
+ }
+ net2272_write(dev, reg, value);
+}
+
+static u8
+net2272_ep_read(struct net2272_ep *ep, unsigned int reg)
+{
+ struct net2272 *dev = ep->dev;
+
+ if (dev->pagesel != ep->num) {
+ net2272_write(dev, PAGESEL, ep->num);
+ dev->pagesel = ep->num;
+ }
+ return net2272_read(dev, reg);
+}
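+
+/*
+ * Because dev->pagesel caches the current page, back-to-back accesses
+ * to the same endpoint pay for the PAGESEL write only once.  A sketch:
+ *
+ *     net2272_ep_write(ep, EP_IRQENB, 0);   - selects page ep->num
+ *     stat = net2272_ep_read(ep, EP_STAT0); - page already selected
+ */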
+
+static void allow_status(struct net2272_ep *ep)
+{
+ /* ep0 only */
+ net2272_ep_write(ep, EP_RSPCLR,
+ (1 << CONTROL_STATUS_PHASE_HANDSHAKE) |
+ (1 << ALT_NAK_OUT_PACKETS) |
+ (1 << NAK_OUT_PACKETS_MODE));
+ ep->stopped = 1;
+}
+
+static void set_halt(struct net2272_ep *ep)
+{
+ /* ep0 and bulk/intr endpoints */
+ net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE);
+ net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT);
+}
+
+static void clear_halt(struct net2272_ep *ep)
+{
+ /* ep0 and bulk/intr endpoints */
+ net2272_ep_write(ep, EP_RSPCLR,
+ (1 << ENDPOINT_HALT) | (1 << ENDPOINT_TOGGLE));
+}
+
+/* count (<= 4) bytes in the next fifo write will be valid */
+static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count)
+{
+ /* net2272_ep_write will truncate to u8 for us */
+ net2272_ep_write(ep, EP_TRANSFER2, count >> 16);
+ net2272_ep_write(ep, EP_TRANSFER1, count >> 8);
+ net2272_ep_write(ep, EP_TRANSFER0, count);
+}
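+
+/*
+ * Example, per the contract above: for a 3-byte short packet, call
+ * set_fifo_bytecount(ep, 3) and then write one more word to EP_DATA;
+ * only its low three bytes are treated as valid.
+ */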
+
+struct net2272_request {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned mapped:1,
+ valid:1;
+};
+
+#endif
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
new file mode 100644
index 000000000..1b929c519
--- /dev/null
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -0,0 +1,3884 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the PLX NET2280 USB device controller.
+ * Specs and errata are available from <http://www.plxtech.com>.
+ *
+ * PLX Technology Inc. (formerly NetChip Technology) supported the
+ * development of this driver.
+ *
+ *
+ * CODE STATUS HIGHLIGHTS
+ *
+ * This driver should work well with most "gadget" drivers, including
+ * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
+ * as well as Gadget Zero and Gadgetfs.
+ *
+ * DMA is enabled by default.
+ *
+ * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
+ * be enabled.
+ *
+ * Note that almost all the errata workarounds here are only needed for
+ * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
+ */
+
+/*
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 PLX Technology, Inc.
+ * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
+ *
+ * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
+ * with 2282 chip
+ *
+ * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
+ * with usb 338x chip. Based on PLX driver
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
+#define DRIVER_VERSION "2005 Sept 27/v3.0"
+
+#define EP_DONTUSE 13 /* nonzero */
+
+#define USE_RDK_LEDS /* GPIO pins control three LEDs */
+
+
+static const char driver_name[] = "net2280";
+static const char driver_desc[] = DRIVER_DESC;
+
+static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
+static const char ep0name[] = "ep0";
+
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info_dft[] = { /* Default endpoint configuration */
+ EP_INFO(ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-a",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-b",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-c",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-d",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-e",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-f",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-g",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep-h",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+}, ep_info_adv[] = { /* Endpoints for usb3380 advanced mode */
+ EP_INFO(ep0name,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+ EP_INFO("ep1in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep1out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep2in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep3out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep4in",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+};
+
+#undef EP_INFO
+
+/* mode 0 == ep-{a,b,c,d} 1K fifo each
+ * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
+ * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
+ */
+static ushort fifo_mode;
+
+/* "modprobe net2280 fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0644);
+
+/* enable_suspend -- When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2280. Otherwise,
+ * USB suspend requests will be ignored. This is acceptable for
+ * self-powered devices.
+ */
+static bool enable_suspend;
+
+/* "modprobe net2280 enable_suspend=1" etc */
+module_param(enable_suspend, bool, 0444);
+
+#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
+
+static char *type_string(u8 bmAttributes)
+{
+ switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK: return "bulk";
+ case USB_ENDPOINT_XFER_ISOC: return "iso";
+ case USB_ENDPOINT_XFER_INT: return "intr";
+ }
+ return "control";
+}
+
+#include "net2280.h"
+
+#define valid_bit cpu_to_le32(BIT(VALID_BIT))
+#define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
+
+static void ep_clear_seqnum(struct net2280_ep *ep);
+static void stop_activity(struct net2280 *dev,
+ struct usb_gadget_driver *driver);
+static void ep0_start(struct net2280 *dev);
+
+/*-------------------------------------------------------------------------*/
+static inline void enable_pciirqenb(struct net2280_ep *ep)
+{
+ u32 tmp = readl(&ep->dev->regs->pciirqenb0);
+
+ if (ep->dev->quirks & PLX_LEGACY)
+ tmp |= BIT(ep->num);
+ else
+ tmp |= BIT(ep_bit[ep->num]);
+ writel(tmp, &ep->dev->regs->pciirqenb0);
+}
+
+static int
+net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct net2280 *dev;
+ struct net2280_ep *ep;
+ u32 max;
+ u32 tmp = 0;
+ u32 type;
+ unsigned long flags;
+ static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
+ int ret = 0;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
+ desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_err("%s: failed at line=%d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ ret = -ESHUTDOWN;
+ goto print_err;
+ }
+
+ /* erratum 0119 workaround ties up an endpoint number */
+ if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
+ ret = -EDOM;
+ goto print_err;
+ }
+
+ if (dev->quirks & PLX_PCIE) {
+ if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
+ ret = -EDOM;
+ goto print_err;
+ }
+ ep->is_in = !!usb_endpoint_dir_in(desc);
+ if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
+ ret = -EINVAL;
+ goto print_err;
+ }
+ }
+
+ /* sanity check ep-e/ep-f since their fifos are small */
+ max = usb_endpoint_maxp(desc);
+ if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
+ ret = -ERANGE;
+ goto print_err;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ _ep->maxpacket = max;
+ ep->desc = desc;
+
+ /* ep_reset() has already been called */
+ ep->stopped = 0;
+ ep->wedged = 0;
+ ep->out_overflow = 0;
+
+ /* set speed-dependent max packet; may kick in high bandwidth */
+ set_max_speed(ep, max);
+
+ /* set type, direction, address; reset fifo counters */
+ writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
+
+ if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
+ tmp = readl(&ep->cfg->ep_cfg);
+ /* If USB ep number doesn't match hardware ep number */
+ if ((tmp & 0xf) != usb_endpoint_num(desc)) {
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ goto print_err;
+ }
+ if (ep->is_in)
+ tmp &= ~USB3380_EP_CFG_MASK_IN;
+ else
+ tmp &= ~USB3380_EP_CFG_MASK_OUT;
+ }
+ type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
+ if (type == USB_ENDPOINT_XFER_INT) {
+ /* erratum 0105 workaround prevents hs NYET */
+ if (dev->chiprev == 0x0100 &&
+ dev->gadget.speed == USB_SPEED_HIGH &&
+ !(desc->bEndpointAddress & USB_DIR_IN))
+ writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
+ &ep->regs->ep_rsp);
+ } else if (type == USB_ENDPOINT_XFER_BULK) {
+ /* catch some particularly blatant driver bugs */
+ if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
+ (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
+ (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ ret = -ERANGE;
+ goto print_err;
+ }
+ }
+ ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC);
+ /* Enable this endpoint */
+ if (dev->quirks & PLX_LEGACY) {
+ tmp |= type << ENDPOINT_TYPE;
+ tmp |= desc->bEndpointAddress;
+ /* default full fifo lines */
+ tmp |= (4 << ENDPOINT_BYTE_COUNT);
+ tmp |= BIT(ENDPOINT_ENABLE);
+ ep->is_in = (tmp & USB_DIR_IN) != 0;
+ } else {
+ /* In legacy mode, only the OUT register fields are used */
+ if (dev->enhanced_mode && ep->is_in) {
+ tmp |= type << IN_ENDPOINT_TYPE;
+ tmp |= BIT(IN_ENDPOINT_ENABLE);
+ } else {
+ tmp |= type << OUT_ENDPOINT_TYPE;
+ tmp |= BIT(OUT_ENDPOINT_ENABLE);
+ tmp |= (ep->is_in << ENDPOINT_DIRECTION);
+ }
+
+ tmp |= (4 << ENDPOINT_BYTE_COUNT);
+ if (!dev->enhanced_mode)
+ tmp |= usb_endpoint_num(desc);
+ tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
+ }
+
+ /* Make sure all the registers are written before ep_rsp */
+ wmb();
+
+ /* for OUT transfers, block the rx fifo until a read is posted */
+ if (!ep->is_in)
+ writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+ else if (!(dev->quirks & PLX_2280)) {
+ /* Added for the 2282: don't use NAK packets on an IN
+ * endpoint; this was ignored on the 2280.
+ */
+ writel(BIT(CLEAR_NAK_OUT_PACKETS) |
+ BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
+ }
+
+ if (dev->quirks & PLX_PCIE)
+ ep_clear_seqnum(ep);
+ writel(tmp, &ep->cfg->ep_cfg);
+
+ /* enable irqs */
+ if (!ep->dma) { /* pio, per-packet */
+ enable_pciirqenb(ep);
+
+ tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
+ BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
+ if (dev->quirks & PLX_2280)
+ tmp |= readl(&ep->regs->ep_irqenb);
+ writel(tmp, &ep->regs->ep_irqenb);
+ } else { /* dma, per-request */
+ tmp = BIT((8 + ep->num)); /* completion */
+ tmp |= readl(&dev->regs->pciirqenb1);
+ writel(tmp, &dev->regs->pciirqenb1);
+
+ /* for short OUT transfers, dma completions can't
+ * advance the queue; do it pio-style, by hand.
+ * NOTE erratum 0112 workaround #2
+ */
+ if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
+ tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
+ writel(tmp, &ep->regs->ep_irqenb);
+
+ enable_pciirqenb(ep);
+ }
+ }
+
+ tmp = desc->bEndpointAddress;
+ ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
+ _ep->name, tmp & 0x0f, DIR_STRING(tmp),
+ type_string(desc->bmAttributes),
+ ep->dma ? "dma" : "pio", max);
+
+ /* pci writes may still be posted */
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+
+print_err:
+ dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
+ return ret;
+}
+
+static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
+{
+ u32 result;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(ptr, result,
+ ((result & mask) == done ||
+ result == U32_MAX),
+ 1, usec);
+ if (result == U32_MAX) /* device unplugged */
+ return -ENODEV;
+
+ return ret;
+}
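+
+/*
+ * Typical use (see spin_stop_dma() below): wait up to 50 usec for the
+ * DMA_ENABLE bit to clear, with an all-ones readback treated as a
+ * device that has been unplugged:
+ *
+ *     handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
+ */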
+
+static const struct usb_ep_ops net2280_ep_ops;
+
+static void ep_reset_228x(struct net2280_regs __iomem *regs,
+ struct net2280_ep *ep)
+{
+ u32 tmp;
+
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.ops = &net2280_ep_ops;
+
+ /* disable the dma, irqs, endpoint... */
+ if (ep->dma) {
+ writel(0, &ep->dma->dmactl);
+ writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
+ BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
+ BIT(DMA_ABORT),
+ &ep->dma->dmastat);
+
+ tmp = readl(&regs->pciirqenb0);
+ tmp &= ~BIT(ep->num);
+ writel(tmp, &regs->pciirqenb0);
+ } else {
+ tmp = readl(&regs->pciirqenb1);
+ tmp &= ~BIT((8 + ep->num)); /* completion */
+ writel(tmp, &regs->pciirqenb1);
+ }
+ writel(0, &ep->regs->ep_irqenb);
+
+ /* init to our chosen defaults, notably so that we NAK OUT
+ * packets until the driver queues a read (+note erratum 0112)
+ */
+ if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
+ tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
+ BIT(SET_NAK_OUT_PACKETS) |
+ BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
+ BIT(CLEAR_INTERRUPT_MODE);
+ } else {
+ /* added for 2282 */
+ tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
+ BIT(CLEAR_NAK_OUT_PACKETS) |
+ BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
+ BIT(CLEAR_INTERRUPT_MODE);
+ }
+
+ if (ep->num != 0) {
+ tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
+ BIT(CLEAR_ENDPOINT_HALT);
+ }
+ writel(tmp, &ep->regs->ep_rsp);
+
+ /* scrub most status bits, and flush any fifo state */
+ if (ep->dev->quirks & PLX_2280)
+ tmp = BIT(FIFO_OVERFLOW) |
+ BIT(FIFO_UNDERFLOW);
+ else
+ tmp = 0;
+
+ writel(tmp | BIT(TIMEOUT) |
+ BIT(USB_STALL_SENT) |
+ BIT(USB_IN_NAK_SENT) |
+ BIT(USB_IN_ACK_RCVD) |
+ BIT(USB_OUT_PING_NAK_SENT) |
+ BIT(USB_OUT_ACK_SENT) |
+ BIT(FIFO_FLUSH) |
+ BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
+ BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+ BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
+ BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
+ BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
+ BIT(DATA_IN_TOKEN_INTERRUPT),
+ &ep->regs->ep_stat);
+
+ /* fifo size is handled separately */
+}
+
+static void ep_reset_338x(struct net2280_regs __iomem *regs,
+ struct net2280_ep *ep)
+{
+ u32 tmp, dmastat;
+
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.ops = &net2280_ep_ops;
+
+ /* disable the dma, irqs, endpoint... */
+ if (ep->dma) {
+ writel(0, &ep->dma->dmactl);
+ writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
+ BIT(DMA_PAUSE_DONE_INTERRUPT) |
+ BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
+ BIT(DMA_TRANSACTION_DONE_INTERRUPT),
+ /* | BIT(DMA_ABORT), */
+ &ep->dma->dmastat);
+
+ dmastat = readl(&ep->dma->dmastat);
+ if (dmastat == 0x5002) {
+ ep_warn(ep->dev, "The dmastat return = %x!!\n",
+ dmastat);
+ writel(0x5a, &ep->dma->dmastat);
+ }
+
+ tmp = readl(&regs->pciirqenb0);
+ tmp &= ~BIT(ep_bit[ep->num]);
+ writel(tmp, &regs->pciirqenb0);
+ } else {
+ if (ep->num < 5) {
+ tmp = readl(&regs->pciirqenb1);
+ tmp &= ~BIT((8 + ep->num)); /* completion */
+ writel(tmp, &regs->pciirqenb1);
+ }
+ }
+ writel(0, &ep->regs->ep_irqenb);
+
+ writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
+ BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+ BIT(FIFO_OVERFLOW) |
+ BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
+ BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
+ BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
+ BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
+
+ tmp = readl(&ep->cfg->ep_cfg);
+ if (ep->is_in)
+ tmp &= ~USB3380_EP_CFG_MASK_IN;
+ else
+ tmp &= ~USB3380_EP_CFG_MASK_OUT;
+ writel(tmp, &ep->cfg->ep_cfg);
+}
+
+static void nuke(struct net2280_ep *);
+
+static int net2280_disable(struct usb_ep *_ep)
+{
+ struct net2280_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || _ep->name == ep0name) {
+ pr_err("%s: Invalid ep=%p\n", __func__, _ep);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ nuke(ep);
+
+ if (ep->dev->quirks & PLX_PCIE)
+ ep_reset_338x(ep->dev->regs, ep);
+ else
+ ep_reset_228x(ep->dev->regs, ep);
+
+ ep_vdbg(ep->dev, "disabled %s %s\n",
+ ep->dma ? "dma" : "pio", _ep->name);
+
+ /* synch memory views with the device */
+ (void)readl(&ep->cfg->ep_cfg);
+
+ if (!ep->dma && ep->num >= 1 && ep->num <= 4)
+ ep->dma = &ep->dev->dma[ep->num - 1];
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request
+*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct net2280_ep *ep;
+ struct net2280_request *req;
+
+ if (!_ep) {
+ pr_err("%s: Invalid ep\n", __func__);
+ return NULL;
+ }
+ ep = container_of(_ep, struct net2280_ep, ep);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ /* this dma descriptor may be swapped with the previous dummy */
+ if (ep->dma) {
+ struct net2280_dma *td;
+
+ td = dma_pool_alloc(ep->dev->requests, gfp_flags,
+ &req->td_dma);
+ if (!td) {
+ kfree(req);
+ return NULL;
+ }
+ td->dmacount = 0; /* not VALID */
+ td->dmadesc = td->dmaaddr;
+ req->td = td;
+ }
+ return &req->req;
+}
+
+static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2280_ep *ep;
+ struct net2280_request *req;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || !_req) {
+ dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n",
+ __func__, _ep, _req);
+ return;
+ }
+
+ req = container_of(_req, struct net2280_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ if (req->td)
+ dma_pool_free(ep->dev->requests, req->td, req->td_dma);
+ kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* load a packet into the fifo we use for usb IN transfers.
+ * works for all endpoints.
+ *
+ * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
+ * at a time, but this code is simpler because it knows it only writes
+ * one packet. ep-a..ep-d should use dma instead.
+ */
+static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
+{
+ struct net2280_ep_regs __iomem *regs = ep->regs;
+ u8 *buf;
+ u32 tmp;
+ unsigned count, total;
+
+ /* INVARIANT: fifo is currently empty. (testable) */
+
+ if (req) {
+ buf = req->buf + req->actual;
+ prefetch(buf);
+ total = req->length - req->actual;
+ } else {
+ total = 0;
+ buf = NULL;
+ }
+
+ /* write just one packet at a time */
+ count = ep->ep.maxpacket;
+ if (count > total) /* min() cannot be used on a bitfield */
+ count = total;
+
+ ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
+ ep->ep.name, count,
+ (count != ep->ep.maxpacket) ? " (short)" : "",
+ req);
+ while (count >= 4) {
+ /* NOTE be careful if you try to align these. fifo lines
+ * should normally be full (4 bytes) and successive partial
+ * lines are ok only in certain cases.
+ */
+ tmp = get_unaligned((u32 *)buf);
+ cpu_to_le32s(&tmp);
+ writel(tmp, &regs->ep_data);
+ buf += 4;
+ count -= 4;
+ }
+
+ /* last fifo entry is "short" unless we wrote a full packet.
+ * also explicitly validate last word in (periodic) transfers
+ * when maxpacket is not a multiple of 4 bytes.
+ */
+ if (count || total < ep->ep.maxpacket) {
+ tmp = count ? get_unaligned((u32 *)buf) : count;
+ cpu_to_le32s(&tmp);
+ set_fifo_bytecount(ep, count & 0x03);
+ writel(tmp, &regs->ep_data);
+ }
+
+ /* pci writes may still be posted */
+}
+
+/* work around erratum 0106: PCI and USB race over the OUT fifo.
+ * caller guarantees chiprev 0100, out endpoint is NAKing, and
+ * there's no real data in the fifo.
+ *
+ * NOTE: also used in cases where that erratum doesn't apply:
+ * where the host wrote "too much" data to us.
+ */
+static void out_flush(struct net2280_ep *ep)
+{
+ u32 __iomem *statp;
+ u32 tmp;
+
+ statp = &ep->regs->ep_stat;
+
+ tmp = readl(statp);
+ if (tmp & BIT(NAK_OUT_PACKETS)) {
+ ep_dbg(ep->dev, "%s %s %08x !NAK\n",
+ ep->ep.name, __func__, tmp);
+ writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+ }
+
+ writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
+ BIT(DATA_PACKET_RECEIVED_INTERRUPT),
+ statp);
+ writel(BIT(FIFO_FLUSH), statp);
+ /* Make sure that statp is written */
+ mb();
+ tmp = readl(statp);
+ if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
+ /* high speed did bulk NYET; fifo isn't filling */
+ ep->dev->gadget.speed == USB_SPEED_FULL) {
+ unsigned usec;
+
+ usec = 50; /* 64 byte bulk/interrupt */
+ handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
+ BIT(USB_OUT_PING_NAK_SENT), usec);
+ /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
+ }
+}
+
+/* unload packet(s) from the fifo we use for usb OUT transfers.
+ * returns true iff the request completed, because of short packet
+ * or the request buffer having filled with full packets.
+ *
+ * for ep-a..ep-d this will read multiple packets out when they
+ * have been accepted.
+ */
+static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
+{
+ struct net2280_ep_regs __iomem *regs = ep->regs;
+ u8 *buf = req->req.buf + req->req.actual;
+ unsigned count, tmp, is_short;
+ unsigned cleanup = 0, prevent = 0;
+
+ /* erratum 0106 ... packets coming in during fifo reads might
+ * be incompletely rejected. not all cases have workarounds.
+ */
+ if (ep->dev->chiprev == 0x0100 &&
+ ep->dev->gadget.speed == USB_SPEED_FULL) {
+ udelay(1);
+ tmp = readl(&ep->regs->ep_stat);
+ if ((tmp & BIT(NAK_OUT_PACKETS)))
+ cleanup = 1;
+ else if ((tmp & BIT(FIFO_FULL))) {
+ start_out_naking(ep);
+ prevent = 1;
+ }
+ /* else: hope we don't see the problem */
+ }
+
+ /* never overflow the rx buffer. the fifo reads packets until
+ * it sees a short one; we might not be ready for them all.
+ */
+ prefetchw(buf);
+ count = readl(&regs->ep_avail);
+ if (unlikely(count == 0)) {
+ udelay(1);
+ tmp = readl(&ep->regs->ep_stat);
+ count = readl(&regs->ep_avail);
+ /* handled that data already? */
+ if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
+ return 0;
+ }
+
+ tmp = req->req.length - req->req.actual;
+ if (count > tmp) {
+ /* as with DMA, data overflow gets flushed */
+ if ((tmp % ep->ep.maxpacket) != 0) {
+ ep_err(ep->dev,
+ "%s out fifo %d bytes, expected %d\n",
+ ep->ep.name, count, tmp);
+ req->req.status = -EOVERFLOW;
+ cleanup = 1;
+ /* NAK_OUT_PACKETS will be set, so flushing is safe;
+ * the next read will start with the next packet
+ */
+ } /* else it's a ZLP, no worries */
+ count = tmp;
+ }
+ req->req.actual += count;
+
+ is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
+
+ ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
+ ep->ep.name, count, is_short ? " (short)" : "",
+ cleanup ? " flush" : "", prevent ? " nak" : "",
+ req, req->req.actual, req->req.length);
+
+ while (count >= 4) {
+ tmp = readl(&regs->ep_data);
+ cpu_to_le32s(&tmp);
+ put_unaligned(tmp, (u32 *)buf);
+ buf += 4;
+ count -= 4;
+ }
+ if (count) {
+ tmp = readl(&regs->ep_data);
+ /* LE conversion is implicit here: */
+ do {
+ *buf++ = (u8) tmp;
+ tmp >>= 8;
+ } while (--count);
+ }
+ if (cleanup)
+ out_flush(ep);
+ if (prevent) {
+ writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+ (void) readl(&ep->regs->ep_rsp);
+ }
+
+ return is_short || req->req.actual == req->req.length;
+}
+
+/* fill out dma descriptor to match a given request */
+static void fill_dma_desc(struct net2280_ep *ep,
+ struct net2280_request *req, int valid)
+{
+ struct net2280_dma *td = req->td;
+ u32 dmacount = req->req.length;
+
+ /* don't let DMA continue after a short OUT packet,
+ * so overruns can't affect the next transfer.
+ * in case of overruns on max-size packets, we can't
+ * stop the fifo from filling but we can flush it.
+ */
+ if (ep->is_in)
+ dmacount |= BIT(DMA_DIRECTION);
+ if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
+ !(ep->dev->quirks & PLX_2280))
+ dmacount |= BIT(END_OF_CHAIN);
+
+ req->valid = valid;
+ if (valid)
+ dmacount |= BIT(VALID_BIT);
+ dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
+
+ /* td->dmadesc = previously set by caller */
+ td->dmaaddr = cpu_to_le32 (req->req.dma);
+
+ /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
+ wmb();
+ td->dmacount = cpu_to_le32(dmacount);
+}
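+
+/*
+ * The dmacount word assembled above thus packs the transfer length
+ * (extracted elsewhere with DMA_BYTE_COUNT_MASK) together with the
+ * direction, end-of-chain, valid, and done-interrupt flag bits.
+ */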
+
+static const u32 dmactl_default =
+ BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
+ BIT(DMA_CLEAR_COUNT_ENABLE) |
+ /* erratum 0116 workaround part 1 (use POLLING) */
+ (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
+ BIT(DMA_VALID_BIT_POLLING_ENABLE) |
+ BIT(DMA_VALID_BIT_ENABLE) |
+ BIT(DMA_SCATTER_GATHER_ENABLE) |
+ /* erratum 0116 workaround part 2 (no AUTOSTART) */
+ BIT(DMA_ENABLE);
+
+static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
+{
+ handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
+}
+
+static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
+{
+ writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
+ spin_stop_dma(dma);
+}
+
+static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
+{
+ struct net2280_dma_regs __iomem *dma = ep->dma;
+ unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
+
+ if (!(ep->dev->quirks & PLX_2280))
+ tmp |= BIT(END_OF_CHAIN);
+
+ writel(tmp, &dma->dmacount);
+ writel(readl(&dma->dmastat), &dma->dmastat);
+
+ writel(td_dma, &dma->dmadesc);
+ if (ep->dev->quirks & PLX_PCIE)
+ dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
+ writel(dmactl, &dma->dmactl);
+
+ /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
+ (void) readl(&ep->dev->pci->pcimstctl);
+
+ writel(BIT(DMA_START), &dma->dmastat);
+}
+
+static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
+{
+ u32 tmp;
+ struct net2280_dma_regs __iomem *dma = ep->dma;
+
+ /* FIXME can't use DMA for ZLPs */
+
+ /* on this path we "know" there's no dma active (yet) */
+ WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
+ writel(0, &ep->dma->dmactl);
+
+ /* previous OUT packet might have been short */
+ if (!ep->is_in && (readl(&ep->regs->ep_stat) &
+ BIT(NAK_OUT_PACKETS))) {
+ writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
+ &ep->regs->ep_stat);
+
+ tmp = readl(&ep->regs->ep_avail);
+ if (tmp) {
+ writel(readl(&dma->dmastat), &dma->dmastat);
+
+ /* transfer all/some fifo data */
+ writel(req->req.dma, &dma->dmaaddr);
+ tmp = min(tmp, req->req.length);
+
+ /* dma irq, faking scatterlist status */
+ req->td->dmacount = cpu_to_le32(req->req.length - tmp);
+ writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
+ &dma->dmacount);
+ req->td->dmadesc = 0;
+ req->valid = 1;
+
+ writel(BIT(DMA_ENABLE), &dma->dmactl);
+ writel(BIT(DMA_START), &dma->dmastat);
+ return;
+ }
+ stop_out_naking(ep);
+ }
+
+ tmp = dmactl_default;
+
+ /* force packet boundaries between dma requests, but prevent the
+ * controller from automagically writing a last "short" packet
+ * (zero length) unless the driver explicitly said to do that.
+ */
+ if (ep->is_in) {
+ if (likely((req->req.length % ep->ep.maxpacket) ||
+ req->req.zero)){
+ tmp |= BIT(DMA_FIFO_VALIDATE);
+ ep->in_fifo_validate = 1;
+ } else
+ ep->in_fifo_validate = 0;
+ }
+
+ /* init req->td, pointing to the current dummy */
+ req->td->dmadesc = cpu_to_le32 (ep->td_dma);
+ fill_dma_desc(ep, req, 1);
+
+ req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
+
+ start_queue(ep, tmp, req->td_dma);
+}
+
+static inline void
+queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
+{
+ /* swap new dummy for old, link; fill and maybe activate */
+ swap(ep->dummy, req->td);
+ swap(ep->td_dma, req->td_dma);
+
+ req->td->dmadesc = cpu_to_le32 (ep->td_dma);
+
+ fill_dma_desc(ep, req, valid);
+}
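+
+/*
+ * A sketch of the scheme above, as we read it: each endpoint keeps one
+ * spare "dummy" descriptor that terminates the hardware chain.
+ * queue_dma() donates the request's descriptor as the new dummy and
+ * fills the old dummy on the request's behalf, so the chain grows
+ * without the controller ever observing a half-initialized descriptor.
+ */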
+
+static void
+done(struct net2280_ep *ep, struct net2280_request *req, int status)
+{
+ struct net2280 *dev;
+ unsigned stopped = ep->stopped;
+
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ if (ep->dma)
+ usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
+
+ if (status && status != -ESHUTDOWN)
+ ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&dev->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct net2280_request *req;
+ struct net2280_ep *ep;
+ struct net2280 *dev;
+ unsigned long flags;
+ int ret = 0;
+
+ /* we always require a cpu-view buffer, so that we can
+ * always use pio (as fallback or whatever).
+ */
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0)) {
+ pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
+ return -EINVAL;
+ }
+ req = container_of(_req, struct net2280_request, req);
+ if (!_req || !_req->complete || !_req->buf ||
+ !list_empty(&req->queue)) {
+ ret = -EINVAL;
+ goto print_err;
+ }
+ if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
+ ret = -EDOM;
+ goto print_err;
+ }
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ ret = -ESHUTDOWN;
+ goto print_err;
+ }
+
+ /* FIXME implement PIO fallback for ZLPs with DMA */
+ if (ep->dma && _req->length == 0) {
+ ret = -EOPNOTSUPP;
+ goto print_err;
+ }
+
+ /* set up dma mapping in case the caller didn't */
+ if (ep->dma) {
+ ret = usb_gadget_map_request(&dev->gadget, _req,
+ ep->is_in);
+ if (ret)
+ goto print_err;
+ }
+
+ ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
+ _ep->name, _req, _req->length, _req->buf);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->stopped &&
+ !((dev->quirks & PLX_PCIE) && ep->dma &&
+ (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
+
+ /* use DMA if the endpoint supports it, else pio */
+ if (ep->dma)
+ start_dma(ep, req);
+ else {
+ /* maybe there's no control data, just status ack */
+ if (ep->num == 0 && _req->length == 0) {
+ allow_status(ep);
+ done(ep, req, 0);
+ ep_vdbg(dev, "%s status ack\n", ep->ep.name);
+ goto done;
+ }
+
+ /* PIO ... stuff the fifo, or unblock it. */
+ if (ep->is_in)
+ write_fifo(ep, _req);
+ else {
+ u32 s;
+
+ /* OUT FIFO might have packet(s) buffered */
+ s = readl(&ep->regs->ep_stat);
+ if ((s & BIT(FIFO_EMPTY)) == 0) {
+ /* note: _req->short_not_ok is
+ * ignored here since PIO _always_
+ * stops queue advance here, and
+ * _req->status doesn't change for
+ * short reads (only _req->actual)
+ */
+ /* read_fifo() consumes fifo data, so
+ * call it only once per packet
+ */
+ if (read_fifo(ep, req)) {
+ done(ep, req, 0);
+ if (ep->num == 0)
+ allow_status(ep);
+ /* don't queue it */
+ req = NULL;
+ } else
+ s = readl(&ep->regs->ep_stat);
+ }
+
+ /* don't NAK, let the fifo fill */
+ if (req && (s & BIT(NAK_OUT_PACKETS)))
+ writel(BIT(CLEAR_NAK_OUT_PACKETS),
+ &ep->regs->ep_rsp);
+ }
+ }
+
+ } else if (ep->dma) {
+ int valid = 1;
+
+ if (ep->is_in) {
+ int expect;
+
+ /* preventing magic zlps is per-engine state, not
+ * per-transfer; irq logic must recover hiccups.
+ */
+ expect = likely(req->req.zero ||
+ (req->req.length % ep->ep.maxpacket));
+ if (expect != ep->in_fifo_validate)
+ valid = 0;
+ }
+ queue_dma(ep, req, valid);
+
+ } /* else the irq handler advances the queue. */
+
+ ep->responded = 1;
+ if (req)
+ list_add_tail(&req->queue, &ep->queue);
+done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* pci writes may still be posted */
+ return ret;
+
+print_err:
+ dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
+ return ret;
+}
+
+static inline void
+dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
+ int status)
+{
+ req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
+ done(ep, req, status);
+}
+
+static int scan_dma_completions(struct net2280_ep *ep)
+{
+ int num_completed = 0;
+
+ /* only look at descriptors that were "naturally" retired,
+ * so fifo and list head state won't matter
+ */
+ while (!list_empty(&ep->queue)) {
+ struct net2280_request *req;
+ u32 req_dma_count;
+
+ req = list_entry(ep->queue.next,
+ struct net2280_request, queue);
+ if (!req->valid)
+ break;
+ rmb();
+ req_dma_count = le32_to_cpup(&req->td->dmacount);
+ if ((req_dma_count & BIT(VALID_BIT)) != 0)
+ break;
+
+ /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
+ * cases where DMA must be aborted; this code handles
+ * all non-abort DMA completions.
+ */
+ if (unlikely(req->td->dmadesc == 0)) {
+ /* paranoia */
+ u32 const ep_dmacount = readl(&ep->dma->dmacount);
+
+ if (ep_dmacount & DMA_BYTE_COUNT_MASK)
+ break;
+ /* single transfer mode */
+ dma_done(ep, req, req_dma_count, 0);
+ num_completed++;
+ break;
+ } else if (!ep->is_in &&
+ (req->req.length % ep->ep.maxpacket) &&
+ !(ep->dev->quirks & PLX_PCIE)) {
+
+ u32 const ep_stat = readl(&ep->regs->ep_stat);
+ /* AVOID TROUBLE HERE by not issuing short reads from
+ * your gadget driver. That helps avoid errata 0121,
+ * 0122, and 0124; not all cases trigger the warning.
+ */
+ if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
+ ep_warn(ep->dev, "%s lost packet sync!\n",
+ ep->ep.name);
+ req->req.status = -EOVERFLOW;
+ } else {
+ u32 const ep_avail = readl(&ep->regs->ep_avail);
+ if (ep_avail) {
+ /* fifo gets flushed later */
+ ep->out_overflow = 1;
+ ep_dbg(ep->dev,
+ "%s dma, discard %d len %d\n",
+ ep->ep.name, ep_avail,
+ req->req.length);
+ req->req.status = -EOVERFLOW;
+ }
+ }
+ }
+ dma_done(ep, req, req_dma_count, 0);
+ num_completed++;
+ }
+
+ return num_completed;
+}
+
+static void restart_dma(struct net2280_ep *ep)
+{
+ struct net2280_request *req;
+
+ if (ep->stopped)
+ return;
+ req = list_entry(ep->queue.next, struct net2280_request, queue);
+
+ start_dma(ep, req);
+}
+
+static void abort_dma(struct net2280_ep *ep)
+{
+ /* abort the current transfer */
+ if (likely(!list_empty(&ep->queue))) {
+ /* FIXME work around errata 0121, 0122, 0124 */
+ writel(BIT(DMA_ABORT), &ep->dma->dmastat);
+ spin_stop_dma(ep->dma);
+ } else
+ stop_dma(ep->dma);
+ scan_dma_completions(ep);
+}
+
+/* dequeue ALL requests */
+static void nuke(struct net2280_ep *ep)
+{
+ struct net2280_request *req;
+
+ /* called with spinlock held */
+ ep->stopped = 1;
+ if (ep->dma)
+ abort_dma(ep);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2280_request,
+ queue);
+ done(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* dequeue JUST ONE request */
+static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2280_ep *ep;
+ struct net2280_request *req = NULL;
+ struct net2280_request *iter;
+ unsigned long flags;
+ u32 dmactl;
+ int stopped;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
+ pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
+ __func__, _ep, _req);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ stopped = ep->stopped;
+
+ /* quiesce dma while we patch the queue */
+ dmactl = 0;
+ ep->stopped = 1;
+ if (ep->dma) {
+ dmactl = readl(&ep->dma->dmactl);
+ /* WARNING erratum 0127 may kick in ... */
+ stop_dma(ep->dma);
+ scan_dma_completions(ep);
+ }
+
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ ep->stopped = stopped;
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
+ return -EINVAL;
+ }
+
+ /* queue head may be partially complete. */
+ if (ep->queue.next == &req->queue) {
+ if (ep->dma) {
+ ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
+ _req->status = -ECONNRESET;
+ abort_dma(ep);
+ if (likely(ep->queue.next == &req->queue)) {
+ /* NOTE: misreports single-transfer mode */
+ req->td->dmacount = 0; /* invalidate */
+ dma_done(ep, req,
+ readl(&ep->dma->dmacount),
+ -ECONNRESET);
+ }
+ } else {
+ ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
+ done(ep, req, -ECONNRESET);
+ }
+ req = NULL;
+ }
+
+ if (req)
+ done(ep, req, -ECONNRESET);
+ ep->stopped = stopped;
+
+ if (ep->dma) {
+ /* turn off dma on inactive queues */
+ if (list_empty(&ep->queue))
+ stop_dma(ep->dma);
+ else if (!ep->stopped) {
+ /* resume current request, or start new one */
+ if (req)
+ writel(dmactl, &ep->dma->dmactl);
+ else
+ start_dma(ep, list_entry(ep->queue.next,
+ struct net2280_request, queue));
+ }
+ }
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int net2280_fifo_status(struct usb_ep *_ep);
+
+static int
+net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+ struct net2280_ep *ep;
+ unsigned long flags;
+ int retval = 0;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0)) {
+ pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
+ return -EINVAL;
+ }
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ retval = -ESHUTDOWN;
+ goto print_err;
+ }
+ if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
+ == USB_ENDPOINT_XFER_ISOC) {
+ retval = -EINVAL;
+ goto print_err;
+ }
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ if (!list_empty(&ep->queue)) {
+ retval = -EAGAIN;
+ goto print_unlock;
+ } else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
+ retval = -EAGAIN;
+ goto print_unlock;
+ } else {
+ ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
+ value ? "set" : "clear",
+ wedged ? "wedge" : "halt");
+ /* set/clear, then synch memory views with the device */
+ if (value) {
+ if (ep->num == 0)
+ ep->dev->protocol_stall = 1;
+ else
+ set_halt(ep);
+ if (wedged)
+ ep->wedged = 1;
+ } else {
+ clear_halt(ep);
+ if (ep->dev->quirks & PLX_PCIE &&
+ !list_empty(&ep->queue) && ep->td_dma)
+ restart_dma(ep);
+ ep->wedged = 0;
+ }
+ (void) readl(&ep->regs->ep_rsp);
+ }
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+ return retval;
+
+print_unlock:
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+print_err:
+ dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
+ return retval;
+}
+
+static int net2280_set_halt(struct usb_ep *_ep, int value)
+{
+ return net2280_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int net2280_set_wedge(struct usb_ep *_ep)
+{
+ if (!_ep || _ep->name == ep0name) {
+ pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
+ return -EINVAL;
+ }
+ return net2280_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int net2280_fifo_status(struct usb_ep *_ep)
+{
+ struct net2280_ep *ep;
+ u32 avail;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0)) {
+ pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
+ return -ENODEV;
+ }
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_err(&ep->dev->pdev->dev,
+ "%s: Invalid driver=%p or speed=%d\n",
+ __func__, ep->dev->driver, ep->dev->gadget.speed);
+ return -ESHUTDOWN;
+ }
+
+ avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
+ if (avail > ep->fifo_size) {
+ dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
+ return -EOVERFLOW;
+ }
+ if (ep->is_in)
+ avail = ep->fifo_size - avail;
+ return avail;
+}
+
+static void net2280_fifo_flush(struct usb_ep *_ep)
+{
+ struct net2280_ep *ep;
+
+ ep = container_of(_ep, struct net2280_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0)) {
+ pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
+ return;
+ }
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_err(&ep->dev->pdev->dev,
+ "%s: Invalid driver=%p or speed=%d\n",
+ __func__, ep->dev->driver, ep->dev->gadget.speed);
+ return;
+ }
+
+ writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
+ (void) readl(&ep->regs->ep_rsp);
+}
+
+static const struct usb_ep_ops net2280_ep_ops = {
+ .enable = net2280_enable,
+ .disable = net2280_disable,
+
+ .alloc_request = net2280_alloc_request,
+ .free_request = net2280_free_request,
+
+ .queue = net2280_queue,
+ .dequeue = net2280_dequeue,
+
+ .set_halt = net2280_set_halt,
+ .set_wedge = net2280_set_wedge,
+ .fifo_status = net2280_fifo_status,
+ .fifo_flush = net2280_fifo_flush,
+};
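+
+/*
+ * Gadget drivers reach these ops through the usb_ep_*() wrappers; an
+ * illustrative (hypothetical names) bulk submission that lands in
+ * net2280_queue() above:
+ *
+ *     req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *     req->buf = buf;
+ *     req->length = len;
+ *     req->complete = my_complete;  - hypothetical completion callback
+ *     status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ */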
+
+/*-------------------------------------------------------------------------*/
+
+static int net2280_get_frame(struct usb_gadget *_gadget)
+{
+ struct net2280 *dev;
+ unsigned long flags;
+ u16 retval;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2280, gadget);
+ spin_lock_irqsave(&dev->lock, flags);
+ retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return retval;
+}
+
+static int net2280_wakeup(struct usb_gadget *_gadget)
+{
+ struct net2280 *dev;
+ u32 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+ dev = container_of(_gadget, struct net2280, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = readl(&dev->usb->usbctl);
+ if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
+ writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* pci writes may still be posted */
+ return 0;
+}
+
+static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+ struct net2280 *dev;
+ u32 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+ dev = container_of(_gadget, struct net2280, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = readl(&dev->usb->usbctl);
+ if (value) {
+ tmp |= BIT(SELF_POWERED_STATUS);
+ _gadget->is_selfpowered = 1;
+ } else {
+ tmp &= ~BIT(SELF_POWERED_STATUS);
+ _gadget->is_selfpowered = 0;
+ }
+ writel(tmp, &dev->usb->usbctl);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct net2280 *dev;
+ u32 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2280, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = readl(&dev->usb->usbctl);
+ dev->softconnect = (is_on != 0);
+ if (is_on) {
+ ep0_start(dev);
+ writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
+ } else {
+ writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
+ stop_activity(dev, NULL);
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ char name[8];
+ struct usb_ep *ep;
+
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
+ /* ep-e, ep-f are PIO with only 64 byte fifos */
+ ep = gadget_find_ep_by_name(_gadget, "ep-e");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep-f");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ }
+
+ /* USB3380: only the first four endpoints have DMA channels. Allocate
+ * the slower interrupt endpoints from the PIO hw endpoints, so that
+ * bulk/isoc endpoints can use the DMA hw endpoints.
+ */
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep2in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep4in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ } else if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ !usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep1out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep3out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ } else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
+ usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep1in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep3in");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ } else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
+ !usb_endpoint_dir_in(desc)) {
+ ep = gadget_find_ep_by_name(_gadget, "ep2out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ ep = gadget_find_ep_by_name(_gadget, "ep4out");
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+ }
+
+ /* USB3380: use same address for usb and hardware endpoints */
+ snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
+ usb_endpoint_dir_in(desc) ? "in" : "out");
+ ep = gadget_find_ep_by_name(_gadget, name);
+ if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+ return ep;
+
+ return NULL;
+}
+
+static int net2280_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver);
+static int net2280_stop(struct usb_gadget *_gadget);
+static void net2280_async_callbacks(struct usb_gadget *_gadget, bool enable);
+
+static const struct usb_gadget_ops net2280_ops = {
+ .get_frame = net2280_get_frame,
+ .wakeup = net2280_wakeup,
+ .set_selfpowered = net2280_set_selfpowered,
+ .pullup = net2280_pullup,
+ .udc_start = net2280_start,
+ .udc_stop = net2280_stop,
+ .udc_async_callbacks = net2280_async_callbacks,
+ .match_ep = net2280_match_ep,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+/* FIXME move these into procfs, and use seq_file.
+ * Sysfs _still_ doesn't behave for arbitrarily sized files,
+ * and also doesn't help products using this with 2.4 kernels.
+ */
+
+/* "function" sysfs attribute */
+static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net2280 *dev = dev_get_drvdata(_dev);
+
+ if (!dev->driver || !dev->driver->function ||
+ strlen(dev->driver->function) > PAGE_SIZE)
+ return 0;
+ return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR_RO(function);
+
+static ssize_t registers_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net2280 *dev;
+ char *next;
+ unsigned size, t;
+ unsigned long flags;
+ int i;
+ u32 t1, t2;
+ const char *s;
+
+ dev = dev_get_drvdata(_dev);
+ next = buf;
+ size = PAGE_SIZE;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->driver)
+ s = dev->driver->driver.name;
+ else
+ s = "(none)";
+
+ /* Main Control Registers */
+ t = scnprintf(next, size, "%s version " DRIVER_VERSION
+ ", chiprev %04x\n\n"
+ "devinit %03x fifoctl %08x gadget '%s'\n"
+ "pci irqenb0 %02x irqenb1 %08x "
+ "irqstat0 %04x irqstat1 %08x\n",
+ driver_name, dev->chiprev,
+ readl(&dev->regs->devinit),
+ readl(&dev->regs->fifoctl),
+ s,
+ readl(&dev->regs->pciirqenb0),
+ readl(&dev->regs->pciirqenb1),
+ readl(&dev->regs->irqstat0),
+ readl(&dev->regs->irqstat1));
+ size -= t;
+ next += t;
+
+ /* USB Control Registers */
+ t1 = readl(&dev->usb->usbctl);
+ t2 = readl(&dev->usb->usbstat);
+ if (t1 & BIT(VBUS_PIN)) {
+ if (t2 & BIT(HIGH_SPEED))
+ s = "high speed";
+ else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ s = "powered";
+ else
+ s = "full speed";
+ /* full speed bit (6) not working?? */
+ } else
+ s = "not attached";
+ t = scnprintf(next, size,
+ "stdrsp %08x usbctl %08x usbstat %08x "
+ "addr 0x%02x (%s)\n",
+ readl(&dev->usb->stdrsp), t1, t2,
+ readl(&dev->usb->ouraddr), s);
+ size -= t;
+ next += t;
+
+ /* PCI Master Control Registers */
+
+ /* DMA Control Registers */
+
+ /* Configurable EP Control Registers */
+ for (i = 0; i < dev->n_ep; i++) {
+ struct net2280_ep *ep;
+
+ ep = &dev->ep[i];
+ if (i && !ep->desc)
+ continue;
+
+ t1 = readl(&ep->cfg->ep_cfg);
+ t2 = readl(&ep->regs->ep_rsp) & 0xff;
+ t = scnprintf(next, size,
+ "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
+ "irqenb %02x\n",
+ ep->ep.name, t1, t2,
+ (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
+ ? "NAK " : "",
+ (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
+ ? "hide " : "",
+ (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
+ ? "CRC " : "",
+ (t2 & BIT(CLEAR_INTERRUPT_MODE))
+ ? "interrupt " : "",
+ (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
+ ? "status " : "",
+ (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
+ ? "NAKmode " : "",
+ (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
+ ? "DATA1 " : "DATA0 ",
+ (t2 & BIT(CLEAR_ENDPOINT_HALT))
+ ? "HALT " : "",
+ readl(&ep->regs->ep_irqenb));
+ size -= t;
+ next += t;
+
+ t = scnprintf(next, size,
+ "\tstat %08x avail %04x "
+ "(ep%d%s-%s)%s\n",
+ readl(&ep->regs->ep_stat),
+ readl(&ep->regs->ep_avail),
+ t1 & 0x0f, DIR_STRING(t1),
+ type_string(t1 >> 8),
+ ep->stopped ? "*" : "");
+ size -= t;
+ next += t;
+
+ if (!ep->dma)
+ continue;
+
+ t = scnprintf(next, size,
+ " dma\tctl %08x stat %08x count %08x\n"
+ "\taddr %08x desc %08x\n",
+ readl(&ep->dma->dmactl),
+ readl(&ep->dma->dmastat),
+ readl(&ep->dma->dmacount),
+ readl(&ep->dma->dmaaddr),
+ readl(&ep->dma->dmadesc));
+ size -= t;
+ next += t;
+
+ }
+
+ /* Indexed Registers (none yet) */
+
+ /* Statistics */
+ t = scnprintf(next, size, "\nirqs: ");
+ size -= t;
+ next += t;
+ for (i = 0; i < dev->n_ep; i++) {
+ struct net2280_ep *ep;
+
+ ep = &dev->ep[i];
+ if (i && !ep->irqs)
+ continue;
+ t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
+ size -= t;
+ next += t;
+ }
+ t = scnprintf(next, size, "\n");
+ size -= t;
+ next += t;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR_RO(registers);
+
+static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net2280 *dev;
+ char *next;
+ unsigned size;
+ unsigned long flags;
+ int i;
+
+ dev = dev_get_drvdata(_dev);
+ next = buf;
+ size = PAGE_SIZE;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ for (i = 0; i < dev->n_ep; i++) {
+ struct net2280_ep *ep = &dev->ep[i];
+ struct net2280_request *req;
+ int t;
+
+ if (i != 0) {
+ const struct usb_endpoint_descriptor *d;
+
+ d = ep->desc;
+ if (!d)
+ continue;
+ t = d->bEndpointAddress;
+ t = scnprintf(next, size,
+ "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
+ ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
+ (t & USB_DIR_IN) ? "in" : "out",
+ type_string(d->bmAttributes),
+ usb_endpoint_maxp(d),
+ ep->dma ? "dma" : "pio", ep->fifo_size
+ );
+ } else /* ep0 should only have one transfer queued */
+ t = scnprintf(next, size, "ep0 max 64 pio %s\n",
+ ep->is_in ? "in" : "out");
+ if (t <= 0 || t > size)
+ goto done;
+ size -= t;
+ next += t;
+
+ if (list_empty(&ep->queue)) {
+ t = scnprintf(next, size, "\t(nothing queued)\n");
+ if (t <= 0 || t > size)
+ goto done;
+ size -= t;
+ next += t;
+ continue;
+ }
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
+ t = scnprintf(next, size,
+ "\treq %p len %d/%d "
+ "buf %p (dmacount %08x)\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf,
+ readl(&ep->dma->dmacount));
+ else
+ t = scnprintf(next, size,
+ "\treq %p len %d/%d buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ if (t <= 0 || t > size)
+ goto done;
+ size -= t;
+ next += t;
+
+ if (ep->dma) {
+ struct net2280_dma *td;
+
+ td = req->td;
+ t = scnprintf(next, size, "\t td %08x "
+ " count %08x buf %08x desc %08x\n",
+ (u32) req->td_dma,
+ le32_to_cpu(td->dmacount),
+ le32_to_cpu(td->dmaaddr),
+ le32_to_cpu(td->dmadesc));
+ if (t <= 0 || t > size)
+ goto done;
+ size -= t;
+ next += t;
+ }
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR_RO(queues);
+
+#else
+
+#define device_create_file(a, b) (0)
+#define device_remove_file(a, b) do { } while (0)
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* another driver-specific mode might be a request type doing dma
+ * to/from another device fifo instead of to/from memory.
+ */
+
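+/* fifo_mode summary, derived from the switch below (the data book is
+ * authoritative):
+ * mode 0 (default): ep-a..ep-d usable; ep-a/ep-b fifos 1024 bytes
+ * mode 1: only ep-a and ep-b, each with a 2048-byte fifo
+ * mode 2: ep-a 2048 bytes, ep-b 1024 bytes, ep-c also usable
+ * ep-e and ep-f (64-byte fifos) are always available.
+ */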
+static void set_fifo_mode(struct net2280 *dev, int mode)
+{
+ /* keeping high bits preserves BAR2 */
+ writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
+
+ /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ switch (mode) {
+ case 0:
+ list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
+ break;
+ case 1:
+ dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
+ break;
+ case 2:
+ list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = 2048;
+ dev->ep[2].fifo_size = 1024;
+ break;
+ }
+ /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
+ list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
+}
+
+static void defect7374_disable_data_eps(struct net2280 *dev)
+{
+ /*
+ * For Defect 7374, disable data EPs (and more):
+ * - This phase undoes the earlier phase of the Defect 7374 workaround,
+ * returning ep regs back to normal.
+ */
+ struct net2280_ep *ep;
+ int i;
+ unsigned char ep_sel;
+ u32 tmp_reg;
+
+ for (i = 1; i < 5; i++) {
+ ep = &dev->ep[i];
+ writel(i, &ep->cfg->ep_cfg);
+ }
+
+ /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
+ for (i = 0; i < 6; i++)
+ writel(0, &dev->dep[i].dep_cfg);
+
+ for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
+ /* Select an endpoint for subsequent operations: */
+ tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
+ writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
+
+ if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
+ ep_sel == 18 || ep_sel == 20)
+ continue;
+
+ /* Change settings on some selected endpoints */
+ tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
+ tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
+ writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
+ tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
+ tmp_reg |= BIT(EP_INITIALIZED);
+ writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
+ }
+}
+
+static void defect7374_enable_data_eps_zero(struct net2280 *dev)
+{
+ u32 tmp = 0, tmp_reg;
+ u32 scratch;
+ int i;
+ unsigned char ep_sel;
+
+ scratch = get_idx_reg(dev->regs, SCRATCH);
+
+ WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
+ == DEFECT7374_FSM_SS_CONTROL_READ);
+
+ scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
+
+ ep_warn(dev, "Operate Defect 7374 workaround soft this time");
+ ep_warn(dev, "It will operate on cold-reboot and SS connect");
+
+ /* GPEPs: */
+ tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
+ (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
+ ((dev->enhanced_mode) ?
+ BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
+ BIT(ENDPOINT_ENABLE)));
+
+ for (i = 1; i < 5; i++)
+ writel(tmp, &dev->ep[i].cfg->ep_cfg);
+
+ /* CSRIN, PCIIN, STATIN, RCIN */
+ tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
+ writel(tmp, &dev->dep[1].dep_cfg);
+ writel(tmp, &dev->dep[3].dep_cfg);
+ writel(tmp, &dev->dep[4].dep_cfg);
+ writel(tmp, &dev->dep[5].dep_cfg);
+
+ /*
+ * Implemented for development and debug.
+ * Can be refined/tuned later.
+ */
+ for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
+ /* Select an endpoint for subsequent operations: */
+ tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
+ writel(((tmp_reg & ~0x1f) | ep_sel),
+ &dev->plregs->pl_ep_ctrl);
+
+ if (ep_sel == 1) {
+ tmp = readl(&dev->plregs->pl_ep_ctrl) |
+ BIT(CLEAR_ACK_ERROR_CODE);
+ writel(tmp, &dev->plregs->pl_ep_ctrl);
+ continue;
+ }
+
+ if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
+ ep_sel == 18 || ep_sel == 20)
+ continue;
+
+ tmp = readl(&dev->plregs->pl_ep_cfg_4) |
+ BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
+ writel(tmp, &dev->plregs->pl_ep_cfg_4);
+
+ tmp = readl(&dev->plregs->pl_ep_ctrl) &
+ ~BIT(EP_INITIALIZED);
+ writel(tmp, &dev->plregs->pl_ep_ctrl);
+ }
+
+ /* Set FSM to focus on the first Control Read:
+ * - Tip: connection speed is known upon the first
+ * setup request.
+ */
+ scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
+ set_idx_reg(dev->regs, SCRATCH, scratch);
+}
+
+/* keeping it simple:
+ * - one bus driver, initted first;
+ * - one function driver, initted second
+ *
+ * most of the work to support multiple net2280 controllers would
+ * be to associate this gadget driver (yes?) with all of them, or
+ * perhaps to bind specific drivers to specific devices.
+ */
+
+static void usb_reset_228x(struct net2280 *dev)
+{
+ u32 tmp;
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ (void) readl(&dev->usb->usbctl);
+
+ net2280_led_init(dev);
+
+ /* disable automatic responses, and irqs */
+ writel(0, &dev->usb->stdrsp);
+ writel(0, &dev->regs->pciirqenb0);
+ writel(0, &dev->regs->pciirqenb1);
+
+ /* clear old dma and irq state */
+ for (tmp = 0; tmp < 4; tmp++) {
+ struct net2280_ep *ep = &dev->ep[tmp + 1];
+ if (ep->dma)
+ abort_dma(ep);
+ }
+
+ writel(~0, &dev->regs->irqstat0);
+ writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
+
+ /* reset, and enable pci */
+ tmp = readl(&dev->regs->devinit) |
+ BIT(PCI_ENABLE) |
+ BIT(FIFO_SOFT_RESET) |
+ BIT(USB_SOFT_RESET) |
+ BIT(M8051_RESET);
+ writel(tmp, &dev->regs->devinit);
+
+ /* standard fifo and endpoint allocations */
+ set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
+}
+
+static void usb_reset_338x(struct net2280 *dev)
+{
+ u32 tmp;
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ (void)readl(&dev->usb->usbctl);
+
+ net2280_led_init(dev);
+
+ if (dev->bug7734_patched) {
+ /* disable automatic responses, and irqs */
+ writel(0, &dev->usb->stdrsp);
+ writel(0, &dev->regs->pciirqenb0);
+ writel(0, &dev->regs->pciirqenb1);
+ }
+
+ /* clear old dma and irq state */
+ for (tmp = 0; tmp < 4; tmp++) {
+ struct net2280_ep *ep = &dev->ep[tmp + 1];
+ struct net2280_dma_regs __iomem *dma;
+
+ if (ep->dma) {
+ abort_dma(ep);
+ } else {
+ dma = &dev->dma[tmp];
+ writel(BIT(DMA_ABORT), &dma->dmastat);
+ writel(0, &dma->dmactl);
+ }
+ }
+
+ writel(~0, &dev->regs->irqstat0);
+ writel(~0, &dev->regs->irqstat1);
+
+ if (dev->bug7734_patched) {
+ /* reset, and enable pci */
+ tmp = readl(&dev->regs->devinit) |
+ BIT(PCI_ENABLE) |
+ BIT(FIFO_SOFT_RESET) |
+ BIT(USB_SOFT_RESET) |
+ BIT(M8051_RESET);
+
+ writel(tmp, &dev->regs->devinit);
+ }
+
+ /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+ for (tmp = 1; tmp < dev->n_ep; tmp++)
+ list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
+}
+
+static void usb_reset(struct net2280 *dev)
+{
+ if (dev->quirks & PLX_LEGACY)
+ return usb_reset_228x(dev);
+ return usb_reset_338x(dev);
+}
+
+static void usb_reinit_228x(struct net2280 *dev)
+{
+ u32 tmp;
+
+ /* basic endpoint init */
+ for (tmp = 0; tmp < 7; tmp++) {
+ struct net2280_ep *ep = &dev->ep[tmp];
+
+ ep->ep.name = ep_info_dft[tmp].name;
+ ep->ep.caps = ep_info_dft[tmp].caps;
+ ep->dev = dev;
+ ep->num = tmp;
+
+ if (tmp > 0 && tmp <= 4) {
+ ep->fifo_size = 1024;
+ ep->dma = &dev->dma[tmp - 1];
+ } else
+ ep->fifo_size = 64;
+ ep->regs = &dev->epregs[tmp];
+ ep->cfg = &dev->epregs[tmp];
+ ep_reset_228x(dev->regs, ep);
+ }
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
+ usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
+ usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
+
+ dev->gadget.ep0 = &dev->ep[0].ep;
+ dev->ep[0].stopped = 0;
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+
+ /* we want to prevent lowlevel/insecure access from the USB host,
+ * but erratum 0119 means this enable bit is ignored
+ */
+ for (tmp = 0; tmp < 5; tmp++)
+ writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
+}
+
+static void usb_reinit_338x(struct net2280 *dev)
+{
+ int i;
+ u32 tmp, val;
+ static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
+ static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
+ 0x00, 0xC0, 0x00, 0xC0 };
+
+ /* basic endpoint init */
+ for (i = 0; i < dev->n_ep; i++) {
+ struct net2280_ep *ep = &dev->ep[i];
+
+ ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
+ ep_info_dft[i].name;
+ ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
+ ep_info_dft[i].caps;
+ ep->dev = dev;
+ ep->num = i;
+
+ if (i > 0 && i <= 4)
+ ep->dma = &dev->dma[i - 1];
+
+ if (dev->enhanced_mode) {
+ ep->cfg = &dev->epregs[ne[i]];
+ /*
+ * Set USB endpoint number, hardware allows same number
+ * in both directions.
+ */
+ if (i > 0 && i < 5)
+ writel(ne[i], &ep->cfg->ep_cfg);
+ ep->regs = (struct net2280_ep_regs __iomem *)
+ (((void __iomem *)&dev->epregs[ne[i]]) +
+ ep_reg_addr[i]);
+ } else {
+ ep->cfg = &dev->epregs[i];
+ ep->regs = &dev->epregs[i];
+ }
+
+ ep->fifo_size = (i != 0) ? 2048 : 512;
+
+ ep_reset_338x(dev->regs, ep);
+ }
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
+
+ dev->gadget.ep0 = &dev->ep[0].ep;
+ dev->ep[0].stopped = 0;
+
+ /* Link layer set up */
+ if (dev->bug7734_patched) {
+ tmp = readl(&dev->usb_ext->usbctl2) &
+ ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
+ writel(tmp, &dev->usb_ext->usbctl2);
+ }
+
+ /* Hardware Defect and Workaround */
+ val = readl(&dev->llregs->ll_lfps_5);
+ val &= ~(0xf << TIMER_LFPS_6US);
+ val |= 0x5 << TIMER_LFPS_6US;
+ writel(val, &dev->llregs->ll_lfps_5);
+
+ val = readl(&dev->llregs->ll_lfps_6);
+ val &= ~(0xffff << TIMER_LFPS_80US);
+ val |= 0x0100 << TIMER_LFPS_80US;
+ writel(val, &dev->llregs->ll_lfps_6);
+
+ /*
+ * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
+ * Hot Reset Exit Handshake may Fail in Specific Case using
+ * Default Register Settings. Workaround for Enumeration test.
+ */
+ val = readl(&dev->llregs->ll_tsn_counters_2);
+ val &= ~(0x1f << HOT_TX_NORESET_TS2);
+ val |= 0x10 << HOT_TX_NORESET_TS2;
+ writel(val, &dev->llregs->ll_tsn_counters_2);
+
+ val = readl(&dev->llregs->ll_tsn_counters_3);
+ val &= ~(0x1f << HOT_RX_RESET_TS2);
+ val |= 0x3 << HOT_RX_RESET_TS2;
+ writel(val, &dev->llregs->ll_tsn_counters_3);
+
+ /*
+ * AB errata. Errata 11. Workaround for Default Duration of LFPS
+ * Handshake Signaling for Device-Initiated U1 Exit is too short.
+ * Without this, various enumeration failures are observed with
+ * modern SuperSpeed hosts.
+ */
+ val = readl(&dev->llregs->ll_lfps_timers_2);
+ writel((val & 0xffff0000) | LFPS_TIMERS_2_WORKAROUND_VALUE,
+ &dev->llregs->ll_lfps_timers_2);
+
+ /*
+ * Set Recovery Idle to Recover bit:
+ * - On SS connections, setting Recovery Idle to Recover Fmw improves
+ * link robustness with various hosts and hubs.
+ * - It is safe to set for all connection speeds; all chip revisions.
+ * - R-M-W to leave other bits undisturbed.
+ * - Reference PLX TT-7372
+ */
+ val = readl(&dev->llregs->ll_tsn_chicken_bit);
+ val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
+ writel(val, &dev->llregs->ll_tsn_chicken_bit);
+
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+
+ /* disable dedicated endpoints */
+ writel(0x0D, &dev->dep[0].dep_cfg);
+ writel(0x0D, &dev->dep[1].dep_cfg);
+ writel(0x0E, &dev->dep[2].dep_cfg);
+ writel(0x0E, &dev->dep[3].dep_cfg);
+ writel(0x0F, &dev->dep[4].dep_cfg);
+ writel(0x0C, &dev->dep[5].dep_cfg);
+}
+
+static void usb_reinit(struct net2280 *dev)
+{
+ if (dev->quirks & PLX_LEGACY)
+ return usb_reinit_228x(dev);
+ return usb_reinit_338x(dev);
+}
+
+static void ep0_start_228x(struct net2280 *dev)
+{
+ writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
+ BIT(CLEAR_NAK_OUT_PACKETS) |
+ BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
+ &dev->epregs[0].ep_rsp);
+
+ /*
+ * hardware optionally handles a bunch of standard requests
+ * that the API hides from drivers anyway. have it do so.
+ * endpoint status/features are handled in software, to
+ * help pass tests for some dubious behavior.
+ */
+ writel(BIT(SET_TEST_MODE) |
+ BIT(SET_ADDRESS) |
+ BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
+ BIT(GET_DEVICE_STATUS) |
+ BIT(GET_INTERFACE_STATUS),
+ &dev->usb->stdrsp);
+ writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
+ BIT(SELF_POWERED_USB_DEVICE) |
+ BIT(REMOTE_WAKEUP_SUPPORT) |
+ (dev->softconnect << USB_DETECT_ENABLE) |
+ BIT(SELF_POWERED_STATUS),
+ &dev->usb->usbctl);
+
+ /* enable irqs so we can see ep0 and general operation */
+ writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
+ BIT(ENDPOINT_0_INTERRUPT_ENABLE),
+ &dev->regs->pciirqenb0);
+ writel(BIT(PCI_INTERRUPT_ENABLE) |
+ BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
+ BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
+ BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
+ BIT(VBUS_INTERRUPT_ENABLE) |
+ BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
+ BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
+ &dev->regs->pciirqenb1);
+
+ /* don't leave any writes posted */
+ (void) readl(&dev->usb->usbctl);
+}
+
+static void ep0_start_338x(struct net2280 *dev)
+{
+ if (dev->bug7734_patched)
+ writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
+ BIT(SET_EP_HIDE_STATUS_PHASE),
+ &dev->epregs[0].ep_rsp);
+
+ /*
+ * hardware optionally handles a bunch of standard requests
+ * that the API hides from drivers anyway. have it do so.
+ * endpoint status/features are handled in software, to
+ * help pass tests for some dubious behavior.
+ */
+ writel(BIT(SET_ISOCHRONOUS_DELAY) |
+ BIT(SET_SEL) |
+ BIT(SET_TEST_MODE) |
+ BIT(SET_ADDRESS) |
+ BIT(GET_INTERFACE_STATUS) |
+ BIT(GET_DEVICE_STATUS),
+ &dev->usb->stdrsp);
+ dev->wakeup_enable = 1;
+ writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
+ (dev->softconnect << USB_DETECT_ENABLE) |
+ BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
+ &dev->usb->usbctl);
+
+ /* enable irqs so we can see ep0 and general operation */
+ writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
+ BIT(ENDPOINT_0_INTERRUPT_ENABLE),
+ &dev->regs->pciirqenb0);
+ writel(BIT(PCI_INTERRUPT_ENABLE) |
+ BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
+ BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
+ BIT(VBUS_INTERRUPT_ENABLE),
+ &dev->regs->pciirqenb1);
+
+ /* don't leave any writes posted */
+ (void)readl(&dev->usb->usbctl);
+}
+
+static void ep0_start(struct net2280 *dev)
+{
+ if (dev->quirks & PLX_LEGACY)
+ return ep0_start_228x(dev);
+ return ep0_start_338x(dev);
+}
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests. then usb traffic follows until a
+ * disconnect is reported. then a host may connect again, or
+ * the driver might get unbound.
+ */
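+/* Note (illustrative): with the modern UDC core this is wired up as
+ * the gadget's ->udc_start hook (see net2280_ops), invoked once a
+ * function driver (a legacy g_* module or a configfs-composed gadget)
+ * binds; net2280_stop() is the inverse.
+ */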
+static int net2280_start(struct usb_gadget *_gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct net2280 *dev;
+ int retval;
+ unsigned i;
+
+ /* insist on high speed support from the driver, since
+ * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
+ * "must not be used in normal operation"
+ */
+ if (!driver || driver->max_speed < USB_SPEED_HIGH ||
+ !driver->setup)
+ return -EINVAL;
+
+ dev = container_of(_gadget, struct net2280, gadget);
+
+ for (i = 0; i < dev->n_ep; i++)
+ dev->ep[i].irqs = 0;
+
+ /* hook up the driver ... */
+ dev->driver = driver;
+
+ retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+ if (retval)
+ goto err_unbind;
+ retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
+ if (retval)
+ goto err_func;
+
+ /* enable host detection and ep0; and we're ready
+ * for set_configuration as well as eventual disconnect.
+ */
+ net2280_led_active(dev, 1);
+
+ if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
+ defect7374_enable_data_eps_zero(dev);
+
+ ep0_start(dev);
+
+ /* pci writes may still be posted */
+ return 0;
+
+err_func:
+ device_remove_file(&dev->pdev->dev, &dev_attr_function);
+err_unbind:
+ dev->driver = NULL;
+ return retval;
+}
+
+static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
+{
+ int i;
+
+ /* don't disconnect if it's not connected */
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+
+ /* stop hardware; prevent new request submissions;
+ * and kill any outstanding requests.
+ */
+ usb_reset(dev);
+ for (i = 0; i < dev->n_ep; i++)
+ nuke(&dev->ep[i]);
+
+ /* report disconnect; the driver is already quiesced */
+ if (dev->async_callbacks && driver) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
+ usb_reinit(dev);
+}
+
+static int net2280_stop(struct usb_gadget *_gadget)
+{
+ struct net2280 *dev;
+ unsigned long flags;
+
+ dev = container_of(_gadget, struct net2280, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ stop_activity(dev, NULL);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ net2280_led_active(dev, 0);
+
+ device_remove_file(&dev->pdev->dev, &dev_attr_function);
+ device_remove_file(&dev->pdev->dev, &dev_attr_queues);
+
+ dev->driver = NULL;
+
+ return 0;
+}
+
+static void net2280_async_callbacks(struct usb_gadget *_gadget, bool enable)
+{
+ struct net2280 *dev = container_of(_gadget, struct net2280, gadget);
+
+ spin_lock_irq(&dev->lock);
+ dev->async_callbacks = enable;
+ spin_unlock_irq(&dev->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
+ * also works for dma-capable endpoints, in pio mode or just
+ * to manually advance the queue after short OUT transfers.
+ */
+static void handle_ep_small(struct net2280_ep *ep)
+{
+ struct net2280_request *req;
+ u32 t;
+ /* 0 error, 1 mid-data, 2 done */
+ int mode = 1;
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct net2280_request, queue);
+ else
+ req = NULL;
+
+ /* ack all, and handle what we care about */
+ t = readl(&ep->regs->ep_stat);
+ ep->irqs++;
+
+ ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
+ ep->ep.name, t, req ? &req->req : NULL);
+
+ if (!ep->is_in || (ep->dev->quirks & PLX_2280))
+ writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
+ else
+ /* Added for 2282 */
+ writel(t, &ep->regs->ep_stat);
+
+ /* for ep0, monitor token irqs to catch data stage length errors
+ * and to synchronize on status.
+ *
+ * also, to defer reporting of protocol stalls ... here's where
+ * data or status first appears, handling stalls here should never
+ * cause trouble on the host side.
+ *
+ * control requests could be slightly faster without token synch for
+ * status, but status can jam up that way.
+ */
+ if (unlikely(ep->num == 0)) {
+ if (ep->is_in) {
+ /* status; stop NAKing */
+ if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
+ if (ep->dev->protocol_stall) {
+ ep->stopped = 1;
+ set_halt(ep);
+ }
+ if (!req)
+ allow_status(ep);
+ mode = 2;
+ /* reply to extra IN data tokens with a zlp */
+ } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
+ if (ep->dev->protocol_stall) {
+ ep->stopped = 1;
+ set_halt(ep);
+ mode = 2;
+ } else if (ep->responded &&
+ !req && !ep->stopped)
+ write_fifo(ep, NULL);
+ }
+ } else {
+ /* status; stop NAKing */
+ if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
+ if (ep->dev->protocol_stall) {
+ ep->stopped = 1;
+ set_halt(ep);
+ }
+ mode = 2;
+ /* an extra OUT token is an error */
+ } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
+ req &&
+ req->req.actual == req->req.length) ||
+ (ep->responded && !req)) {
+ ep->dev->protocol_stall = 1;
+ set_halt(ep);
+ ep->stopped = 1;
+ if (req)
+ done(ep, req, -EOVERFLOW);
+ req = NULL;
+ }
+ }
+ }
+
+ if (unlikely(!req))
+ return;
+
+ /* manual DMA queue advance after short OUT */
+ if (likely(ep->dma)) {
+ if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+ struct net2280_request *stuck_req = NULL;
+ int stopped = ep->stopped;
+ int num_completed;
+ int stuck = 0;
+ u32 count;
+
+ /* TRANSFERRED works around OUT_DONE erratum 0112.
+ * we expect (N <= maxpacket) bytes; host wrote M.
+ * iff (M < N) we won't ever see a DMA interrupt.
+ */
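+ /* Worked example (illustrative): a 512-byte bulk OUT
+ * where the host sends only 100 bytes. M < N, so no
+ * DMA completion irq will ever arrive; the loop below
+ * polls until the fifo drains and reads dmacount
+ * instead.
+ */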
+ ep->stopped = 1;
+ for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
+
+ /* any preceding dma transfers must finish.
+ * dma handles (M >= N), may empty the queue
+ */
+ num_completed = scan_dma_completions(ep);
+ if (unlikely(list_empty(&ep->queue) ||
+ ep->out_overflow)) {
+ req = NULL;
+ break;
+ }
+ req = list_entry(ep->queue.next,
+ struct net2280_request, queue);
+
+ /* here either (M < N), a "real" short rx;
+ * or (M == N) and the queue didn't empty
+ */
+ if (likely(t & BIT(FIFO_EMPTY))) {
+ count = readl(&ep->dma->dmacount);
+ count &= DMA_BYTE_COUNT_MASK;
+ if (readl(&ep->dma->dmadesc)
+ != req->td_dma)
+ req = NULL;
+ break;
+ }
+
+ /* Escape the loop if no dma transfers
+ * completed after a few retries.
+ */
+ if (num_completed == 0) {
+ if (stuck_req == req &&
+ readl(&ep->dma->dmadesc) !=
+ req->td_dma && stuck++ > 5) {
+ count = readl(
+ &ep->dma->dmacount);
+ count &= DMA_BYTE_COUNT_MASK;
+ req = NULL;
+ ep_dbg(ep->dev, "%s escape stuck %d, count %u\n",
+ ep->ep.name, stuck,
+ count);
+ break;
+ } else if (stuck_req != req) {
+ stuck_req = req;
+ stuck = 0;
+ }
+ } else {
+ stuck_req = NULL;
+ stuck = 0;
+ }
+
+ udelay(1);
+ }
+
+ /* stop DMA, leave ep NAKing */
+ writel(BIT(DMA_ABORT), &ep->dma->dmastat);
+ spin_stop_dma(ep->dma);
+
+ if (likely(req)) {
+ req->td->dmacount = 0;
+ t = readl(&ep->regs->ep_avail);
+ dma_done(ep, req, count,
+ (ep->out_overflow || t)
+ ? -EOVERFLOW : 0);
+ }
+
+ /* also flush to prevent erratum 0106 trouble */
+ if (unlikely(ep->out_overflow ||
+ (ep->dev->chiprev == 0x0100 &&
+ ep->dev->gadget.speed
+ == USB_SPEED_FULL))) {
+ out_flush(ep);
+ ep->out_overflow = 0;
+ }
+
+ /* (re)start dma if needed, stop NAKing */
+ ep->stopped = stopped;
+ if (!list_empty(&ep->queue))
+ restart_dma(ep);
+ } else
+ ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
+ ep->ep.name, t);
+ return;
+
+ /* data packet(s) received (in the fifo, OUT) */
+ } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
+ if (read_fifo(ep, req) && ep->num != 0)
+ mode = 2;
+
+ /* data packet(s) transmitted (IN) */
+ } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
+ unsigned len;
+
+ len = req->req.length - req->req.actual;
+ if (len > ep->ep.maxpacket)
+ len = ep->ep.maxpacket;
+ req->req.actual += len;
+
+ /* if we wrote it all, we're usually done */
+ /* send zlps until the status stage */
+ if ((req->req.actual == req->req.length) &&
+ (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
+ mode = 2;
+
+ /* there was nothing to do ... */
+ } else if (mode == 1)
+ return;
+
+ /* done */
+ if (mode == 2) {
+ /* stream endpoints often resubmit/unlink in completion */
+ done(ep, req, 0);
+
+ /* maybe advance queue to next request */
+ if (ep->num == 0) {
+ /* NOTE: net2280 could let gadget driver start the
+ * status stage later. since not all controllers let
+ * them control that, the api doesn't (yet) allow it.
+ */
+ if (!ep->stopped)
+ allow_status(ep);
+ req = NULL;
+ } else {
+ if (!list_empty(&ep->queue) && !ep->stopped)
+ req = list_entry(ep->queue.next,
+ struct net2280_request, queue);
+ else
+ req = NULL;
+ if (req && !ep->is_in)
+ stop_out_naking(ep);
+ }
+ }
+
+ /* is there a buffer for the next packet?
+ * for best streaming performance, make sure there is one.
+ */
+ if (req && !ep->stopped) {
+
+ /* load IN fifo with next packet (may be zlp) */
+ if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
+ write_fifo(ep, &req->req);
+ }
+}
+
+static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
+{
+ struct net2280_ep *ep;
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &dev->ep[0];
+ list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+ u8 bEndpointAddress;
+
+ if (!ep->desc)
+ continue;
+ bEndpointAddress = ep->desc->bEndpointAddress;
+ if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+ continue;
+ if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+ return ep;
+ }
+ return NULL;
+}
+
+static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
+{
+ u32 scratch, fsmvalue;
+ u32 ack_wait_timeout, state;
+
+ /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
+ scratch = get_idx_reg(dev->regs, SCRATCH);
+ fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
+ scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
+
+ if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
+ (r.bRequestType & USB_DIR_IN)))
+ return;
+
+ /* This is the first Control Read for this connection: */
+ if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
+ /*
+ * Connection is NOT SS:
+ * - Connection must be FS or HS.
+ * - This FSM state should allow workaround software to
+ * run after the next USB connection.
+ */
+ scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
+ dev->bug7734_patched = 1;
+ goto restore_data_eps;
+ }
+
+ /* Connection is SS: */
+ for (ack_wait_timeout = 0;
+ ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
+ ack_wait_timeout++) {
+
+ state = readl(&dev->plregs->pl_ep_status_1)
+ & (0xff << STATE);
+ if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
+ (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
+ scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
+ dev->bug7734_patched = 1;
+ break;
+ }
+
+ /*
+ * We have not yet received host's Data Phase ACK
+ * - Wait and try again.
+ */
+ udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
+ }
+
+ if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
+ ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
+ "to detect SS host's data phase ACK.");
+ ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
+ "got 0x%2.2x.\n", state >> STATE);
+ } else {
+ ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
+ "%duSec for Control Read Data Phase ACK\n",
+ DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
+ }
+
+restore_data_eps:
+ /*
+ * Restore data EPs to their pre-workaround settings (disabled,
+ * initialized, and other details).
+ */
+ defect7374_disable_data_eps(dev);
+
+ set_idx_reg(dev->regs, SCRATCH, scratch);
+}
+
+static void ep_clear_seqnum(struct net2280_ep *ep)
+{
+ struct net2280 *dev = ep->dev;
+ u32 val;
+ static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
+
+ val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
+ val |= ep_pl[ep->num];
+ writel(val, &dev->plregs->pl_ep_ctrl);
+ val |= BIT(SEQUENCE_NUMBER_RESET);
+ writel(val, &dev->plregs->pl_ep_ctrl);
+}
+
+static void handle_stat0_irqs_superspeed(struct net2280 *dev,
+ struct net2280_ep *ep, struct usb_ctrlrequest r)
+{
+ struct net2280_ep *e;
+ u16 status;
+ int tmp = 0;
+
+#define w_value le16_to_cpu(r.wValue)
+#define w_index le16_to_cpu(r.wIndex)
+#define w_length le16_to_cpu(r.wLength)
+
+ switch (r.bRequest) {
+ case USB_REQ_SET_CONFIGURATION:
+ dev->addressed_state = !w_value;
+ goto usb3_delegate;
+
+ case USB_REQ_GET_STATUS:
+ switch (r.bRequestType) {
+ case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
+ status = dev->wakeup_enable ? 0x02 : 0x00;
+ if (dev->gadget.is_selfpowered)
+ status |= BIT(0);
+ status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
+ dev->ltm_enable << 4);
+ writel(0, &dev->epregs[0].ep_irqenb);
+ set_fifo_bytecount(ep, sizeof(status));
+ writel((__force u32) status, &dev->epregs[0].ep_data);
+ allow_status_338x(ep);
+ break;
+
+ case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
+ e = get_ep_by_addr(dev, w_index);
+ if (!e)
+ goto do_stall3;
+ status = readl(&e->regs->ep_rsp) &
+ BIT(CLEAR_ENDPOINT_HALT);
+ writel(0, &dev->epregs[0].ep_irqenb);
+ set_fifo_bytecount(ep, sizeof(status));
+ writel((__force u32) status, &dev->epregs[0].ep_data);
+ allow_status_338x(ep);
+ break;
+
+ default:
+ goto usb3_delegate;
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (r.bRequestType) {
+ case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
+ if (!dev->addressed_state) {
+ switch (w_value) {
+ case USB_DEVICE_U1_ENABLE:
+ dev->u1_enable = 0;
+ writel(readl(&dev->usb_ext->usbctl2) &
+ ~BIT(U1_ENABLE),
+ &dev->usb_ext->usbctl2);
+ allow_status_338x(ep);
+ goto next_endpoints3;
+
+ case USB_DEVICE_U2_ENABLE:
+ dev->u2_enable = 0;
+ writel(readl(&dev->usb_ext->usbctl2) &
+ ~BIT(U2_ENABLE),
+ &dev->usb_ext->usbctl2);
+ allow_status_338x(ep);
+ goto next_endpoints3;
+
+ case USB_DEVICE_LTM_ENABLE:
+ dev->ltm_enable = 0;
+ writel(readl(&dev->usb_ext->usbctl2) &
+ ~BIT(LTM_ENABLE),
+ &dev->usb_ext->usbctl2);
+ allow_status_338x(ep);
+ goto next_endpoints3;
+
+ default:
+ break;
+ }
+ }
+ if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
+ dev->wakeup_enable = 0;
+ writel(readl(&dev->usb->usbctl) &
+ ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
+ &dev->usb->usbctl);
+ allow_status_338x(ep);
+ break;
+ }
+ goto usb3_delegate;
+
+ case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
+ e = get_ep_by_addr(dev, w_index);
+ if (!e)
+ goto do_stall3;
+ if (w_value != USB_ENDPOINT_HALT)
+ goto do_stall3;
+ ep_vdbg(dev, "%s clear halt\n", e->ep.name);
+ /*
+ * Workaround: the SS SeqNum is not cleared via the
+ * Endpoint Halt (Clear) bit, so select the endpoint
+ * and reset its sequence number explicitly.
+ */
+ ep_clear_seqnum(e);
+ clear_halt(e);
+ if (!list_empty(&e->queue) && e->td_dma)
+ restart_dma(e);
+ allow_status(ep);
+ ep->stopped = 1;
+ break;
+
+ default:
+ goto usb3_delegate;
+ }
+ break;
+ case USB_REQ_SET_FEATURE:
+ switch (r.bRequestType) {
+ case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
+ if (!dev->addressed_state) {
+ switch (w_value) {
+ case USB_DEVICE_U1_ENABLE:
+ dev->u1_enable = 1;
+ writel(readl(&dev->usb_ext->usbctl2) |
+ BIT(U1_ENABLE),
+ &dev->usb_ext->usbctl2);
+ allow_status_338x(ep);
+ goto next_endpoints3;
+
+ case USB_DEVICE_U2_ENABLE:
+ dev->u2_enable = 1;
+ writel(readl(&dev->usb_ext->usbctl2) |
+ BIT(U2_ENABLE),
+ &dev->usb_ext->usbctl2);
+ allow_status_338x(ep);
+ goto next_endpoints3;
+
+ case USB_DEVICE_LTM_ENABLE:
+ dev->ltm_enable = 1;
+ writel(readl(&dev->usb_ext->usbctl2) |
+ BIT(LTM_ENABLE),
+ &dev->usb_ext->usbctl2);
+ allow_status_338x(ep);
+ goto next_endpoints3;
+ default:
+ break;
+ }
+ }
+
+ if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
+ dev->wakeup_enable = 1;
+ writel(readl(&dev->usb->usbctl) |
+ BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
+ &dev->usb->usbctl);
+ allow_status_338x(ep);
+ break;
+ }
+ goto usb3_delegate;
+
+ case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
+ e = get_ep_by_addr(dev, w_index);
+ if (!e || (w_value != USB_ENDPOINT_HALT))
+ goto do_stall3;
+ ep->stopped = 1;
+ if (ep->num == 0)
+ ep->dev->protocol_stall = 1;
+ else {
+ if (ep->dma)
+ abort_dma(ep);
+ set_halt(ep);
+ }
+ allow_status_338x(ep);
+ break;
+
+ default:
+ goto usb3_delegate;
+ }
+ break;
+ default:
+
+usb3_delegate:
+ ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
+ r.bRequestType, r.bRequest,
+ w_value, w_index, w_length,
+ readl(&ep->cfg->ep_cfg));
+
+ ep->responded = 0;
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &r);
+ spin_lock(&dev->lock);
+ }
+ }
+do_stall3:
+ if (tmp < 0) {
+ ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
+ r.bRequestType, r.bRequest, tmp);
+ dev->protocol_stall = 1;
+ /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
+ set_halt(ep);
+ }
+
+next_endpoints3:
+
+#undef w_value
+#undef w_index
+#undef w_length
+
+ return;
+}
+
+static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0)
+{
+ u32 index;
+ u32 bit;
+
+ for (index = 0; index < ARRAY_SIZE(ep_bit); index++) {
+ bit = BIT(ep_bit[index]);
+
+ if (!stat0)
+ break;
+
+ if (!(stat0 & bit))
+ continue;
+
+ stat0 &= ~bit;
+
+ handle_ep_small(&dev->ep[index]);
+ }
+}
+
+static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
+{
+ struct net2280_ep *ep;
+ u32 num, scratch;
+
+ /* most of these don't need individual acks */
+ stat &= ~BIT(INTA_ASSERTED);
+ if (!stat)
+ return;
+ /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
+
+ /* starting a control request? */
+ if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
+ union {
+ u32 raw[2];
+ struct usb_ctrlrequest r;
+ } u;
+ int tmp;
+ struct net2280_request *req;
+
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ u32 val = readl(&dev->usb->usbstat);
+ if (val & BIT(SUPER_SPEED)) {
+ dev->gadget.speed = USB_SPEED_SUPER;
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
+ EP0_SS_MAX_PACKET_SIZE);
+ } else if (val & BIT(HIGH_SPEED)) {
+ dev->gadget.speed = USB_SPEED_HIGH;
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
+ EP0_HS_MAX_PACKET_SIZE);
+ } else {
+ dev->gadget.speed = USB_SPEED_FULL;
+ usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
+ EP0_HS_MAX_PACKET_SIZE);
+ }
+ net2280_led_speed(dev, dev->gadget.speed);
+ ep_dbg(dev, "%s\n",
+ usb_speed_string(dev->gadget.speed));
+ }
+
+ ep = &dev->ep[0];
+ ep->irqs++;
+
+ /* make sure any leftover request state is cleared */
+ stat &= ~BIT(ENDPOINT_0_INTERRUPT);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2280_request, queue);
+ done(ep, req, (req->req.actual == req->req.length)
+ ? 0 : -EPROTO);
+ }
+ ep->stopped = 0;
+ dev->protocol_stall = 0;
+ if (!(dev->quirks & PLX_PCIE)) {
+ if (ep->dev->quirks & PLX_2280)
+ tmp = BIT(FIFO_OVERFLOW) |
+ BIT(FIFO_UNDERFLOW);
+ else
+ tmp = 0;
+
+ writel(tmp | BIT(TIMEOUT) |
+ BIT(USB_STALL_SENT) |
+ BIT(USB_IN_NAK_SENT) |
+ BIT(USB_IN_ACK_RCVD) |
+ BIT(USB_OUT_PING_NAK_SENT) |
+ BIT(USB_OUT_ACK_SENT) |
+ BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
+ BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+ BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
+ BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
+ BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
+ BIT(DATA_IN_TOKEN_INTERRUPT),
+ &ep->regs->ep_stat);
+ }
+ u.raw[0] = readl(&dev->usb->setup0123);
+ u.raw[1] = readl(&dev->usb->setup4567);
+
+ cpu_to_le32s(&u.raw[0]);
+ cpu_to_le32s(&u.raw[1]);
+
+ if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
+ defect7374_workaround(dev, u.r);
+
+ tmp = 0;
+
+#define w_value le16_to_cpu(u.r.wValue)
+#define w_index le16_to_cpu(u.r.wIndex)
+#define w_length le16_to_cpu(u.r.wLength)
+
+ /* ack the irq */
+ writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
+ stat ^= BIT(SETUP_PACKET_INTERRUPT);
+
+ /* watch control traffic at the token level, and force
+ * synchronization before letting the status stage happen.
+ * FIXME ignore tokens we'll NAK, until driver responds.
+ * that'll mean a lot less irqs for some drivers.
+ */
+ ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+ if (ep->is_in) {
+ scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
+ BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
+ BIT(DATA_IN_TOKEN_INTERRUPT);
+ stop_out_naking(ep);
+ } else
+ scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
+ BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
+ BIT(DATA_IN_TOKEN_INTERRUPT);
+ writel(scratch, &dev->epregs[0].ep_irqenb);
+
+ /* we made the hardware handle most lowlevel requests;
+ * everything else goes uplevel to the gadget code.
+ */
+ ep->responded = 1;
+
+ if (dev->gadget.speed == USB_SPEED_SUPER) {
+ handle_stat0_irqs_superspeed(dev, ep, u.r);
+ goto next_endpoints;
+ }
+
+ switch (u.r.bRequest) {
+ case USB_REQ_GET_STATUS: {
+ struct net2280_ep *e;
+ __le32 status;
+
+ /* hw handles device and interface status */
+ if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
+ goto delegate;
+ e = get_ep_by_addr(dev, w_index);
+ if (!e || w_length > 2)
+ goto do_stall;
+
+ if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
+ status = cpu_to_le32(1);
+ else
+ status = cpu_to_le32(0);
+
+ /* don't bother with a request object! */
+ writel(0, &dev->epregs[0].ep_irqenb);
+ set_fifo_bytecount(ep, w_length);
+ writel((__force u32)status, &dev->epregs[0].ep_data);
+ allow_status(ep);
+ ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
+ goto next_endpoints;
+ }
+ break;
+ case USB_REQ_CLEAR_FEATURE: {
+ struct net2280_ep *e;
+
+ /* hw handles device features */
+ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (w_value != USB_ENDPOINT_HALT || w_length != 0)
+ goto do_stall;
+ e = get_ep_by_addr(dev, w_index);
+ if (!e)
+ goto do_stall;
+ if (e->wedged) {
+ ep_vdbg(dev, "%s wedged, halt not cleared\n",
+ ep->ep.name);
+ } else {
+ ep_vdbg(dev, "%s clear halt\n", e->ep.name);
+ clear_halt(e);
+ if ((ep->dev->quirks & PLX_PCIE) &&
+ !list_empty(&e->queue) && e->td_dma)
+ restart_dma(e);
+ }
+ allow_status(ep);
+ goto next_endpoints;
+ }
+ break;
+ case USB_REQ_SET_FEATURE: {
+ struct net2280_ep *e;
+
+ /* hw handles device features */
+ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (w_value != USB_ENDPOINT_HALT || w_length != 0)
+ goto do_stall;
+ e = get_ep_by_addr(dev, w_index);
+ if (!e)
+ goto do_stall;
+ if (e->ep.name == ep0name)
+ goto do_stall;
+ set_halt(e);
+ if ((dev->quirks & PLX_PCIE) && e->dma)
+ abort_dma(e);
+ allow_status(ep);
+ ep_vdbg(dev, "%s set halt\n", ep->ep.name);
+ goto next_endpoints;
+ }
+ break;
+ default:
+delegate:
+ ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
+ "ep_cfg %08x\n",
+ u.r.bRequestType, u.r.bRequest,
+ w_value, w_index, w_length,
+ readl(&ep->cfg->ep_cfg));
+ ep->responded = 0;
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &u.r);
+ spin_lock(&dev->lock);
+ }
+ }
+
+ /* stall ep0 on error */
+ if (tmp < 0) {
+do_stall:
+ ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
+ u.r.bRequestType, u.r.bRequest, tmp);
+ dev->protocol_stall = 1;
+ }
+
+ /* some in/out token irq should follow; maybe stall then.
+ * driver must queue a request (even zlp) or halt ep0
+ * before the host times out.
+ */
+ }
+
+#undef w_value
+#undef w_index
+#undef w_length
+
+next_endpoints:
+ if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
+ u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
+ USB3380_IRQSTAT0_EP_INTR_MASK_IN |
+ USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
+
+ if (stat & mask) {
+ usb338x_handle_ep_intr(dev, stat & mask);
+ stat &= ~mask;
+ }
+ } else {
+ /* endpoint data irq ? */
+ scratch = stat & 0x7f;
+ stat &= ~0x7f;
+ for (num = 0; scratch; num++) {
+ u32 t;
+
+ /* do this endpoint's FIFO and queue need tending? */
+ t = BIT(num);
+ if ((scratch & t) == 0)
+ continue;
+ scratch ^= t;
+
+ ep = &dev->ep[num];
+ handle_ep_small(ep);
+ }
+ }
+
+ if (stat)
+ ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
+}
+
+#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
+ BIT(DMA_C_INTERRUPT) | \
+ BIT(DMA_B_INTERRUPT) | \
+ BIT(DMA_A_INTERRUPT))
+#define PCI_ERROR_INTERRUPTS ( \
+ BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
+ BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
+ BIT(PCI_RETRY_ABORT_INTERRUPT))
+
+static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ struct net2280_ep *ep;
+ u32 tmp, num, mask, scratch;
+
+ /* after disconnect there's nothing else to do! */
+ tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
+ mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
+
+ /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
+ * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
+ * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
+ * only indicates a change in the reset state).
+ */
+ if (stat & tmp) {
+ bool reset = false;
+ bool disconnect = false;
+
+ /*
+ * Ignore disconnects and resets if the speed hasn't been set.
+ * VBUS can bounce and there's always an initial reset.
+ */
+ writel(tmp, &dev->regs->irqstat1);
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
+ if ((stat & BIT(VBUS_INTERRUPT)) &&
+ (readl(&dev->usb->usbctl) &
+ BIT(VBUS_PIN)) == 0) {
+ disconnect = true;
+ ep_dbg(dev, "disconnect %s\n",
+ dev->driver->driver.name);
+ } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
+ (readl(&dev->usb->usbstat) & mask)
+ == 0) {
+ reset = true;
+ ep_dbg(dev, "reset %s\n",
+ dev->driver->driver.name);
+ }
+
+ if (disconnect || reset) {
+ stop_activity(dev, dev->driver);
+ ep0_start(dev);
+ if (dev->async_callbacks) {
+ spin_unlock(&dev->lock);
+ if (reset)
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ else
+ (dev->driver->disconnect)(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ return;
+ }
+ }
+ stat &= ~tmp;
+
+ /* vBUS can bounce ... one of many reasons to ignore the
+ * notion of hotplug events on bus connect/disconnect!
+ */
+ if (!stat)
+ return;
+ }
+
+ /* NOTE: chip stays in PCI D0 state for now, but it could
+ * enter D1 to save more power
+ */
+ tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
+ if (stat & tmp) {
+ writel(tmp, &dev->regs->irqstat1);
+ spin_unlock(&dev->lock);
+ if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
+ if (dev->async_callbacks && dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+ if (!enable_suspend)
+ stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
+ } else {
+ if (dev->async_callbacks && dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ /* at high speed, note erratum 0133 */
+ }
+ spin_lock(&dev->lock);
+ stat &= ~tmp;
+ }
+
+ /* clear any other status/irqs */
+ if (stat)
+ writel(stat, &dev->regs->irqstat1);
+
+ /* some status we can just ignore */
+ if (dev->quirks & PLX_2280)
+ stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
+ BIT(SUSPEND_REQUEST_INTERRUPT) |
+ BIT(RESUME_INTERRUPT) |
+ BIT(SOF_INTERRUPT));
+ else
+ stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
+ BIT(RESUME_INTERRUPT) |
+ BIT(SOF_DOWN_INTERRUPT) |
+ BIT(SOF_INTERRUPT));
+
+ if (!stat)
+ return;
+ /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
+
+ /* DMA status, for ep-{a,b,c,d} */
+ scratch = stat & DMA_INTERRUPTS;
+ stat &= ~DMA_INTERRUPTS;
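+ /* irqstat1 DMA bits start at bit 9; after the shift, bit 0 is ep-a */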
+ scratch >>= 9;
+ for (num = 0; scratch; num++) {
+ struct net2280_dma_regs __iomem *dma;
+
+ tmp = BIT(num);
+ if ((tmp & scratch) == 0)
+ continue;
+ scratch ^= tmp;
+
+ ep = &dev->ep[num + 1];
+ dma = ep->dma;
+
+ if (!dma)
+ continue;
+
+ /* clear ep's dma status */
+ tmp = readl(&dma->dmastat);
+ writel(tmp, &dma->dmastat);
+
+ /* dma sync */
+ if (dev->quirks & PLX_PCIE) {
+ u32 r_dmacount = readl(&dma->dmacount);
+ if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
+ (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
+ continue;
+ }
+
+ if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
+ ep_dbg(ep->dev, "%s no xact done? %08x\n",
+ ep->ep.name, tmp);
+ continue;
+ }
+ stop_dma(ep->dma);
+
+ /* OUT transfers terminate when the data from the
+ * host is in our memory. Process whatever's done.
+ * On this path, we know transfer's last packet wasn't
+ * less than req->length. NAK_OUT_PACKETS may be set,
+ * or the FIFO may already be holding new packets.
+ *
+ * IN transfers can linger in the FIFO for a very
+ * long time ... we ignore that for now, accounting
+ * precisely (like PIO does) needs per-packet irqs
+ */
+ scan_dma_completions(ep);
+
+ /* disable dma on inactive queues; else maybe restart */
+ if (!list_empty(&ep->queue)) {
+ tmp = readl(&dma->dmactl);
+ restart_dma(ep);
+ }
+ ep->irqs++;
+ }
+
+ /* NOTE: there are other PCI errors we might usefully notice.
+ * if they appear very often, here's where to try recovering.
+ */
+ if (stat & PCI_ERROR_INTERRUPTS) {
+ ep_err(dev, "pci dma error; stat %08x\n", stat);
+ stat &= ~PCI_ERROR_INTERRUPTS;
+ /* these are fatal errors, but "maybe" they won't
+ * happen again ...
+ */
+ stop_activity(dev, dev->driver);
+ ep0_start(dev);
+ stat = 0;
+ }
+
+ if (stat)
+ ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
+}
+
+static irqreturn_t net2280_irq(int irq, void *_dev)
+{
+ struct net2280 *dev = _dev;
+
+ /* shared interrupt, not ours */
+ if ((dev->quirks & PLX_LEGACY) &&
+ (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
+ return IRQ_NONE;
+
+ spin_lock(&dev->lock);
+
+ /* handle disconnect, dma, and more */
+ handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
+
+ /* control requests and PIO */
+ handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
+
+ if (dev->quirks & PLX_PCIE) {
+ /*
+ * Toggle the top-level enable so any interrupt that became
+ * pending while we were in the handler is signalled again
+ * (the PCIe parts use edge-triggered MSI).
+ */
+ u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
+ writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
+ writel(pciirqenb1, &dev->regs->pciirqenb1);
+ }
+
+ spin_unlock(&dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void gadget_release(struct device *_dev)
+{
+ struct net2280 *dev = container_of(_dev, struct net2280, gadget.dev);
+
+ kfree(dev);
+}
+
+/* tear down the binding between this driver and the pci device */
+
+static void net2280_remove(struct pci_dev *pdev)
+{
+ struct net2280 *dev = pci_get_drvdata(pdev);
+
+ if (dev->added)
+ usb_del_gadget(&dev->gadget);
+
+ BUG_ON(dev->driver);
+
+ /* then clean up the resources we allocated during probe() */
+ if (dev->requests) {
+ int i;
+ for (i = 1; i < 5; i++) {
+ if (!dev->ep[i].dummy)
+ continue;
+ dma_pool_free(dev->requests, dev->ep[i].dummy,
+ dev->ep[i].td_dma);
+ }
+ dma_pool_destroy(dev->requests);
+ }
+ if (dev->got_irq)
+ free_irq(pdev->irq, dev);
+ if (dev->quirks & PLX_PCIE)
+ pci_disable_msi(pdev);
+ if (dev->regs) {
+ net2280_led_shutdown(dev);
+ iounmap(dev->regs);
+ }
+ if (dev->region)
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (dev->enabled)
+ pci_disable_device(pdev);
+ device_remove_file(&pdev->dev, &dev_attr_registers);
+
+ ep_info(dev, "unbind\n");
+ usb_put_gadget(&dev->gadget);
+}
+
+/* wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+
+static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net2280 *dev;
+ unsigned long resource, len;
+ void __iomem *base = NULL;
+ int retval, i;
+
+ /* alloc, and start init */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ retval = -ENOMEM;
+ goto done;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ usb_initialize_gadget(&pdev->dev, &dev->gadget, gadget_release);
+ spin_lock_init(&dev->lock);
+ dev->quirks = id->driver_data;
+ dev->pdev = pdev;
+ dev->gadget.ops = &net2280_ops;
+ dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
+ USB_SPEED_SUPER : USB_SPEED_HIGH;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev->gadget.name = driver_name;
+
+ /* now all the pci goodies ... */
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto done;
+ }
+ dev->enabled = 1;
+
+ /* BAR 0 holds all the registers
+ * BAR 1 is 8051 memory; unused here (note erratum 0103)
+ * BAR 2 is fifo memory; unused here
+ */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (!request_mem_region(resource, len, driver_name)) {
+ ep_dbg(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto done;
+ }
+ dev->region = 1;
+
+ /* FIXME provide firmware download interface to put
+ * 8051 code into the chip, e.g. to turn on PCI PM.
+ */
+
+ base = ioremap(resource, len);
+ if (base == NULL) {
+ ep_dbg(dev, "can't map memory\n");
+ retval = -EFAULT;
+ goto done;
+ }
+ dev->regs = (struct net2280_regs __iomem *) base;
+ dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
+ dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
+ dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
+ dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
+ dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
+
+ if (dev->quirks & PLX_PCIE) {
+ u32 fsmvalue;
+ u32 usbstat;
+ dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
+ (base + 0x00b4);
+ dev->llregs = (struct usb338x_ll_regs __iomem *)
+ (base + 0x0700);
+ dev->plregs = (struct usb338x_pl_regs __iomem *)
+ (base + 0x0800);
+ usbstat = readl(&dev->usb->usbstat);
+ dev->enhanced_mode = !!(usbstat & BIT(11));
+ dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
+ /* put into initial config, link up all endpoints */
+ fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
+ (0xf << DEFECT7374_FSM_FIELD);
+ /* See if firmware needs to set up for workaround: */
+ if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
+ dev->bug7734_patched = 1;
+ writel(0, &dev->usb->usbctl);
+ } else
+ dev->bug7734_patched = 0;
+ } else {
+ dev->enhanced_mode = 0;
+ dev->n_ep = 7;
+ /* put into initial config, link up all endpoints */
+ writel(0, &dev->usb->usbctl);
+ }
+
+ usb_reset(dev);
+ usb_reinit(dev);
+
+ /* irq setup after old hardware is cleaned up */
+ if (!pdev->irq) {
+ ep_err(dev, "No IRQ. Check PCI setup!\n");
+ retval = -ENODEV;
+ goto done;
+ }
+
+ if (dev->quirks & PLX_PCIE)
+ if (pci_enable_msi(pdev))
+ ep_err(dev, "Failed to enable MSI mode\n");
+
+ if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
+ driver_name, dev)) {
+ ep_err(dev, "request interrupt %d failed\n", pdev->irq);
+ retval = -EBUSY;
+ goto done;
+ }
+ dev->got_irq = 1;
+
+ /* DMA setup */
+ /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
+ dev->requests = dma_pool_create("requests", &pdev->dev,
+ sizeof(struct net2280_dma),
+ 0 /* no alignment requirements */,
+ 0 /* or page-crossing issues */);
+ if (!dev->requests) {
+ ep_dbg(dev, "can't get request pool\n");
+ retval = -ENOMEM;
+ goto done;
+ }
+ for (i = 1; i < 5; i++) {
+ struct net2280_dma *td;
+
+ td = dma_pool_alloc(dev->requests, GFP_KERNEL,
+ &dev->ep[i].td_dma);
+ if (!td) {
+ ep_dbg(dev, "can't get dummy %d\n", i);
+ retval = -ENOMEM;
+ goto done;
+ }
+ td->dmacount = 0; /* not VALID */
+ td->dmadesc = td->dmaaddr;
+ dev->ep[i].dummy = td;
+ }
+
+ /* enable lower-overhead pci memory bursts during DMA */
+ if (dev->quirks & PLX_LEGACY)
+ writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
+ /*
+ * 256 write retries may not be enough...
+ BIT(PCI_RETRY_ABORT_ENABLE) |
+ */
+ BIT(DMA_READ_MULTIPLE_ENABLE) |
+ BIT(DMA_READ_LINE_ENABLE),
+ &dev->pci->pcimstctl);
+ /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ /* ... also flushes any posted pci writes */
+ dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
+
+ /* done */
+ ep_info(dev, "%s\n", driver_desc);
+ ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
+ pdev->irq, base, dev->chiprev);
+ ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
+ dev->enhanced_mode ? "enhanced mode" : "legacy mode");
+ retval = device_create_file(&pdev->dev, &dev_attr_registers);
+ if (retval)
+ goto done;
+
+ retval = usb_add_gadget(&dev->gadget);
+ if (retval)
+ goto done;
+ dev->added = 1;
+ return 0;
+
+done:
+ if (dev) {
+ net2280_remove(pdev);
+ kfree(dev);
+ }
+ return retval;
+}
+
+/* make sure the board is quiescent; otherwise it will continue
+ * generating IRQs across the upcoming reboot.
+ */
+
+static void net2280_shutdown(struct pci_dev *pdev)
+{
+ struct net2280 *dev = pci_get_drvdata(pdev);
+
+ /* disable IRQs */
+ writel(0, &dev->regs->pciirqenb0);
+ writel(0, &dev->regs->pciirqenb1);
+
+ /* disable the pullup so the host will think we're gone */
+ writel(0, &dev->usb->usbctl);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct pci_device_id pci_ids[] = { {
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX_LEGACY,
+ .device = 0x2280,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_LEGACY | PLX_2280,
+ }, {
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX_LEGACY,
+ .device = 0x2282,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_LEGACY,
+ },
+ {
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x2380,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_PCIE,
+ },
+ {
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x3380,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_PCIE | PLX_SUPERSPEED,
+ },
+ {
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x3382,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_PCIE | PLX_SUPERSPEED,
+ },
+{ /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver net2280_pci_driver = {
+ .name = driver_name,
+ .id_table = pci_ids,
+
+ .probe = net2280_probe,
+ .remove = net2280_remove,
+ .shutdown = net2280_shutdown,
+
+ /* FIXME add power management support */
+};
+
+module_pci_driver(net2280_pci_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
new file mode 100644
index 000000000..34716a9f4
--- /dev/null
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NetChip 2280 high/full speed USB device controller.
+ * Unlike many such controllers, this one talks PCI.
+ */
+
+/*
+ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
+ */
+
+#include <linux/usb/net2280.h>
+#include <linux/usb/usb338x.h>
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef __KERNEL__
+
+/* indexed registers [11.10] are accessed indirectly
+ * caller must own the device lock.
+ */
+
+static inline u32 get_idx_reg(struct net2280_regs __iomem *regs, u32 index)
+{
+ writel(index, &regs->idxaddr);
+ /* NOTE: synchs device/cpu memory views */
+ return readl(&regs->idxdata);
+}
+
+static inline void
+set_idx_reg(struct net2280_regs __iomem *regs, u32 index, u32 value)
+{
+ writel(index, &regs->idxaddr);
+ writel(value, &regs->idxdata);
+ /* posted, may not be visible yet */
+}
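+
+/* Typical use, as in the probe path:
+ * dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
+ */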
+
+#endif /* __KERNEL__ */
+
+#define PCI_VENDOR_ID_PLX_LEGACY 0x17cc
+
+#define PLX_LEGACY BIT(0)
+#define PLX_2280 BIT(1)
+#define PLX_SUPERSPEED BIT(2)
+#define PLX_PCIE BIT(3)
+
+#define REG_DIAG 0x0
+#define RETRY_COUNTER 16
+#define FORCE_PCI_SERR 11
+#define FORCE_PCI_INTERRUPT 10
+#define FORCE_USB_INTERRUPT 9
+#define FORCE_CPU_INTERRUPT 8
+#define ILLEGAL_BYTE_ENABLES 5
+#define FAST_TIMES 4
+#define FORCE_RECEIVE_ERROR 2
+#define FORCE_TRANSMIT_CRC_ERROR 0
+#define REG_FRAME 0x02 /* from last sof */
+#define REG_CHIPREV 0x03 /* in bcd */
+#define REG_HS_NAK_RATE 0x0a /* NAK per N uframes */
+
+#define CHIPREV_1 0x0100
+#define CHIPREV_1A 0x0110
+
+/* DEFECT 7374 */
+#define DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS 200
+#define DEFECT_7374_PROCESSOR_WAIT_TIME 10
+
+/* ep0 max packet size */
+#define EP0_SS_MAX_PACKET_SIZE 0x200
+#define EP0_HS_MAX_PACKET_SIZE 0x40
+#ifdef __KERNEL__
+
+/*-------------------------------------------------------------------------*/
+
+/* [8.3] for scatter/gather i/o
+ * use struct net2280_dma_regs bitfields
+ */
+struct net2280_dma {
+ __le32 dmacount;
+ __le32 dmaaddr; /* the buffer */
+ __le32 dmadesc; /* next dma descriptor */
+ __le32 _reserved;
+} __aligned(16);
+
+/*-------------------------------------------------------------------------*/
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+
+struct net2280_ep {
+ struct usb_ep ep;
+ struct net2280_ep_regs __iomem *cfg;
+ struct net2280_ep_regs __iomem *regs;
+ struct net2280_dma_regs __iomem *dma;
+ struct net2280_dma *dummy;
+ dma_addr_t td_dma; /* of dummy */
+ struct net2280 *dev;
+ unsigned long irqs;
+
+ /* analogous to a host-side qh */
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned num : 8,
+ fifo_size : 12,
+ in_fifo_validate : 1,
+ out_overflow : 1,
+ stopped : 1,
+ wedged : 1,
+ is_in : 1,
+ is_iso : 1,
+ responded : 1;
+};
+
+static inline void allow_status(struct net2280_ep *ep)
+{
+ /* ep0 only */
+ writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) |
+ BIT(CLEAR_NAK_OUT_PACKETS) |
+ BIT(CLEAR_NAK_OUT_PACKETS_MODE),
+ &ep->regs->ep_rsp);
+ ep->stopped = 1;
+}
+
+static inline void allow_status_338x(struct net2280_ep *ep)
+{
+ /*
+ * Control Status Phase Handshake was set by the chip when the setup
+ * packet arrived. While set, the chip automatically NAKs the host's
+ * Status Phase tokens.
+ */
+ writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), &ep->regs->ep_rsp);
+
+ ep->stopped = 1;
+
+ /* TD 9.9 Halt Endpoint test. TD 9.22 set feature test. */
+ ep->responded = 0;
+}
+
+struct net2280_request {
+ struct usb_request req;
+ struct net2280_dma *td;
+ dma_addr_t td_dma;
+ struct list_head queue;
+ unsigned mapped : 1,
+ valid : 1;
+};
+
+struct net2280 {
+ /* each pci device provides one gadget, several endpoints */
+ struct usb_gadget gadget;
+ spinlock_t lock;
+ struct net2280_ep ep[9];
+ struct usb_gadget_driver *driver;
+ unsigned enabled : 1,
+ protocol_stall : 1,
+ softconnect : 1,
+ got_irq : 1,
+ region:1,
+ added:1,
+ u1_enable:1,
+ u2_enable:1,
+ ltm_enable:1,
+ wakeup_enable:1,
+ addressed_state:1,
+ async_callbacks:1,
+ bug7734_patched:1;
+ u16 chiprev;
+ int enhanced_mode;
+ int n_ep;
+ kernel_ulong_t quirks;
+
+ /* pci state used to access those endpoints */
+ struct pci_dev *pdev;
+ struct net2280_regs __iomem *regs;
+ struct net2280_usb_regs __iomem *usb;
+ struct usb338x_usb_ext_regs __iomem *usb_ext;
+ struct net2280_pci_regs __iomem *pci;
+ struct net2280_dma_regs __iomem *dma;
+ struct net2280_dep_regs __iomem *dep;
+ struct net2280_ep_regs __iomem *epregs;
+ struct usb338x_ll_regs __iomem *llregs;
+ struct usb338x_pl_regs __iomem *plregs;
+
+ struct dma_pool *requests;
+ /* statistics...*/
+};
+
+static inline void set_halt(struct net2280_ep *ep)
+{
+ /* ep0 and bulk/intr endpoints */
+ writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE) |
+ /* set NAK_OUT for erratum 0114 */
+ ((ep->dev->chiprev == CHIPREV_1) << SET_NAK_OUT_PACKETS) |
+ BIT(SET_ENDPOINT_HALT),
+ &ep->regs->ep_rsp);
+}
+
+static inline void clear_halt(struct net2280_ep *ep)
+{
+ /* ep0 and bulk/intr endpoints */
+ writel(BIT(CLEAR_ENDPOINT_HALT) |
+ BIT(CLEAR_ENDPOINT_TOGGLE) |
+ /*
+ * unless the gadget driver left a short packet in the
+ * fifo, this reverses the erratum 0114 workaround.
+ */
+ ((ep->dev->chiprev == CHIPREV_1) << CLEAR_NAK_OUT_PACKETS),
+ &ep->regs->ep_rsp);
+}
+
+/*
+ * FSM value for Defect 7374 (U1U2 Test) is managed in
+ * chip's SCRATCH register:
+ */
+#define DEFECT7374_FSM_FIELD 28
+
+/* Waiting for Control Read:
+ * - A transition to this state indicates a fresh USB connection,
+ * before the first Setup Packet. The connection speed is not
+ * known. Firmware is waiting for the first Control Read.
+ * - Starting state: This state can be thought of as the FSM's typical
+ * starting state.
+ * - Tip: Upon the first SS Control Read the FSM never
+ * returns to this state.
+ */
+#define DEFECT7374_FSM_WAITING_FOR_CONTROL_READ BIT(DEFECT7374_FSM_FIELD)
+
+/* Non-SS Control Read:
+ * - A transition to this state indicates detection of the first HS
+ * or FS Control Read.
+ * - Tip: Upon the first SS Control Read the FSM never
+ * returns to this state.
+ */
+#define DEFECT7374_FSM_NON_SS_CONTROL_READ (2 << DEFECT7374_FSM_FIELD)
+
+/* SS Control Read:
+ * - A transition to this state indicates detection of the
+ * first SS Control Read.
+ * - This state indicates workaround completion. Workarounds no longer
+ * need to be applied (as long as the chip remains powered up).
+ * - Tip: Once in this state the FSM state does not change (until
+ * the chip's power is lost and restored).
+ * - This can be thought of as the final state of the FSM;
+ * the FSM 'locks-up' in this state until the chip loses power.
+ */
+#define DEFECT7374_FSM_SS_CONTROL_READ (3 << DEFECT7374_FSM_FIELD)
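+
+/* A sketch of reading the FSM state back (assumes the SCRATCH index
+ * constant from usb338x.h and a 4-bit field, per the masks above):
+ *
+ *	u32 fsm = get_idx_reg(dev->regs, SCRATCH) &
+ *			(0xf << DEFECT7374_FSM_FIELD);
+ */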
+
+#ifdef USE_RDK_LEDS
+
+static inline void net2280_led_init(struct net2280 *dev)
+{
+ /* LED3 (green) is on during USB activity. note erratum 0113. */
+ writel(BIT(GPIO3_LED_SELECT) |
+ BIT(GPIO3_OUTPUT_ENABLE) |
+ BIT(GPIO2_OUTPUT_ENABLE) |
+ BIT(GPIO1_OUTPUT_ENABLE) |
+ BIT(GPIO0_OUTPUT_ENABLE),
+ &dev->regs->gpioctl);
+}
+
+/* indicate speed with bi-color LED 0/1 */
+static inline
+void net2280_led_speed(struct net2280 *dev, enum usb_device_speed speed)
+{
+	u32 val = readl(&dev->regs->gpioctl);
+
+ switch (speed) {
+ case USB_SPEED_SUPER: /* green + red */
+ val |= BIT(GPIO0_DATA) | BIT(GPIO1_DATA);
+ break;
+ case USB_SPEED_HIGH: /* green */
+ val &= ~BIT(GPIO0_DATA);
+ val |= BIT(GPIO1_DATA);
+ break;
+ case USB_SPEED_FULL: /* red */
+ val &= ~BIT(GPIO1_DATA);
+ val |= BIT(GPIO0_DATA);
+ break;
+ default: /* (off/black) */
+ val &= ~(BIT(GPIO1_DATA) | BIT(GPIO0_DATA));
+ break;
+ }
+ writel(val, &dev->regs->gpioctl);
+}
+
+/* indicate power with LED 2 */
+static inline void net2280_led_active(struct net2280 *dev, int is_active)
+{
+ u32 val = readl(&dev->regs->gpioctl);
+
+	/* GPIO2_DATA is a bit number like GPIO0/1_DATA above, so it must
+	 * be used via BIT(); the raw value is why this LED never lit.
+	 */
+	if (is_active)
+		val |= BIT(GPIO2_DATA);
+	else
+		val &= ~BIT(GPIO2_DATA);
+ writel(val, &dev->regs->gpioctl);
+}
+
+static inline void net2280_led_shutdown(struct net2280 *dev)
+{
+ /* turn off all four GPIO*_DATA bits */
+ writel(readl(&dev->regs->gpioctl) & ~0x0f,
+ &dev->regs->gpioctl);
+}
+
+#else
+
+/* keep stubs for all four LED helpers so non-RDK builds still compile */
+#define net2280_led_init(dev)		do { } while (0)
+#define net2280_led_speed(dev, speed)	do { } while (0)
+#define net2280_led_active(dev, is_active)	do { } while (0)
+#define net2280_led_shutdown(dev)	do { } while (0)
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define ep_dbg(ndev, fmt, args...) \
+ dev_dbg((&((ndev)->pdev->dev)), fmt, ##args)
+
+#define ep_vdbg(ndev, fmt, args...) \
+ dev_vdbg((&((ndev)->pdev->dev)), fmt, ##args)
+
+#define ep_info(ndev, fmt, args...) \
+ dev_info((&((ndev)->pdev->dev)), fmt, ##args)
+
+#define ep_warn(ndev, fmt, args...) \
+ dev_warn((&((ndev)->pdev->dev)), fmt, ##args)
+
+#define ep_err(ndev, fmt, args...) \
+ dev_err((&((ndev)->pdev->dev)), fmt, ##args)
+
+/*-------------------------------------------------------------------------*/
+
+static inline void set_fifo_bytecount(struct net2280_ep *ep, unsigned count)
+{
+	if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) {
+		/* legacy parts: the count lives in byte 2 of ep_cfg */
+		writeb(count, 2 + (u8 __iomem *) &ep->regs->ep_cfg);
+	} else {
+		/* 338x parts: 3-bit EP_FIFO_BYTE_COUNT field in ep_cfg */
+		u32 tmp = readl(&ep->cfg->ep_cfg) &
+				~(0x07 << EP_FIFO_BYTE_COUNT);
+
+		writel(tmp | (count << EP_FIFO_BYTE_COUNT), &ep->cfg->ep_cfg);
+	}
+}
+
+static inline void start_out_naking(struct net2280_ep *ep)
+{
+ /* NOTE: hardware races lurk here, and PING protocol issues */
+ writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+ /* synch with device */
+ readl(&ep->regs->ep_rsp);
+}
+
+static inline void stop_out_naking(struct net2280_ep *ep)
+{
+ u32 tmp;
+
+ tmp = readl(&ep->regs->ep_stat);
+ if ((tmp & BIT(NAK_OUT_PACKETS)) != 0)
+ writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
+}
+
+static inline void set_max_speed(struct net2280_ep *ep, u32 max)
+{
+ u32 reg;
+ static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
+ 0x50, 0x20, 0x70, 0x40, 0x90 };
+
+ if (ep->dev->enhanced_mode) {
+ reg = ep_enhanced[ep->num];
+ switch (ep->dev->gadget.speed) {
+ case USB_SPEED_SUPER:
+ reg += 2;
+ break;
+ case USB_SPEED_FULL:
+ reg += 1;
+ break;
+ case USB_SPEED_HIGH:
+ default:
+ break;
+ }
+ } else {
+ reg = (ep->num + 1) * 0x10;
+ if (ep->dev->gadget.speed != USB_SPEED_HIGH)
+ reg += 1;
+ }
+
+ set_idx_reg(ep->dev->regs, reg, max);
+}
+
+#endif /* __KERNEL__ */
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
new file mode 100644
index 000000000..f660ebfa1
--- /dev/null
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -0,0 +1,3018 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * omap_udc.c -- for OMAP full speed udc; most chips support OTG.
+ *
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com>
+ */
+
+#undef DEBUG
+#undef VERBOSE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/prefetch.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+#include <asm/mach-types.h>
+
+#include <linux/omap-dma.h>
+#include <linux/platform_data/usb-omap1.h>
+
+#include <linux/soc/ti/omap1-usb.h>
+#include <linux/soc/ti/omap1-soc.h>
+#include <linux/soc/ti/omap1-io.h>
+
+#include "omap_udc.h"
+
+#undef USB_TRACE
+
+/* bulk DMA seems to be behaving for both IN and OUT */
+#define USE_DMA
+
+/* ISO too */
+#define USE_ISO
+
+#define DRIVER_DESC "OMAP UDC driver"
+#define DRIVER_VERSION "4 October 2004"
+
+#define OMAP_DMA_USB_W2FC_TX0 29
+#define OMAP_DMA_USB_W2FC_RX0 26
+
+/*
+ * The OMAP UDC needs _very_ early endpoint setup: before enabling the
+ * D+ pullup to allow enumeration. That's too early for the gadget
+ * framework to use from usb_ep_enable(), which happens after
+ * enumeration as part of activating an interface. (But if we add an
+ * optional new "UDC not yet running" state to the gadget driver model,
+ * even just during driver binding, the endpoint autoconfig logic is the
+ * natural spot to manufacture new endpoints.)
+ *
+ * So instead of using endpoint enable calls to control the hardware setup,
+ * this driver defines a "fifo mode" parameter. It's used during driver
+ * initialization to choose among a set of pre-defined endpoint configs.
+ * See omap_udc_setup() for available modes, or to add others. That code
+ * lives in an init section, so use this driver as a module if you need
+ * to change the fifo mode after the kernel boots.
+ *
+ * Gadget drivers normally ignore endpoints they don't care about, and
+ * won't include them in configuration descriptors. That means only
+ * misbehaving hosts would even notice they exist.
+ */
+#ifdef USE_ISO
+static unsigned fifo_mode = 3;
+#else
+static unsigned fifo_mode;
+#endif
+
+/* "modprobe omap_udc fifo_mode=42", or else as a kernel
+ * boot parameter "omap_udc.fifo_mode=42"
+ */
+module_param(fifo_mode, uint, 0);
+MODULE_PARM_DESC(fifo_mode, "endpoint configuration");
+
+#ifdef USE_DMA
+static bool use_dma = true;
+
+/* "modprobe omap_udc use_dma=y", or else as a kernel
+ * boot parameter "omap_udc.use_dma=y"
+ */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable DMA");
+#else /* !USE_DMA */
+
+/* save a bit of code */
+#define use_dma 0
+#endif /* !USE_DMA */
+
+
+static const char driver_name[] = "omap_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+/*-------------------------------------------------------------------------*/
+
+/* there's a notion of "current endpoint" for modifying endpoint
+ * state, and PIO access to its FIFO.
+ */
+
+static void use_ep(struct omap_ep *ep, u16 select)
+{
+ u16 num = ep->bEndpointAddress & 0x0f;
+
+ if (ep->bEndpointAddress & USB_DIR_IN)
+ num |= UDC_EP_DIR;
+ omap_writew(num | select, UDC_EP_NUM);
+	/* after selecting, the caller MUST deselect later !! */
+}
+
+static inline void deselect_ep(void)
+{
+ u16 w;
+
+ w = omap_readw(UDC_EP_NUM);
+ w &= ~UDC_EP_SEL;
+ omap_writew(w, UDC_EP_NUM);
+ /* 6 wait states before TX will happen */
+}
+
+static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
+ struct omap_udc *udc;
+ unsigned long flags;
+ u16 maxp;
+
+ /* catch various bogus parameters */
+ if (!_ep || !desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT
+ || ep->bEndpointAddress != desc->bEndpointAddress
+ || ep->maxpacket < usb_endpoint_maxp(desc)) {
+ DBG("%s, bad ep or descriptor\n", __func__);
+ return -EINVAL;
+ }
+ maxp = usb_endpoint_maxp(desc);
+	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+				&& maxp != ep->maxpacket)
+			|| maxp > ep->maxpacket
+			|| !desc->wMaxPacketSize) {
+ DBG("%s, bad %s maxpacket\n", __func__, _ep->name);
+ return -ERANGE;
+ }
+
+#ifdef USE_ISO
+	if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
+			&& desc->bInterval != 1) {
+ /* hardware wants period = 1; USB allows 2^(Interval-1) */
+ DBG("%s, unsupported ISO period %dms\n", _ep->name,
+ 1 << (desc->bInterval - 1));
+ return -EDOM;
+ }
+#else
+ if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ DBG("%s, ISO nyet\n", _ep->name);
+ return -EDOM;
+ }
+#endif
+
+ /* xfer types must match, except that interrupt ~= bulk */
+ if (ep->bmAttributes != desc->bmAttributes
+ && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+ && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+ DBG("%s, %s type mismatch\n", __func__, _ep->name);
+ return -EINVAL;
+ }
+
+ udc = ep->udc;
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ DBG("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ep->ep.desc = desc;
+ ep->irqs = 0;
+ ep->stopped = 0;
+ ep->ep.maxpacket = maxp;
+
+ /* set endpoint to initial state */
+ ep->dma_channel = 0;
+ ep->has_dma = 0;
+ ep->lch = -1;
+ use_ep(ep, UDC_EP_SEL);
+ omap_writew(udc->clr_halt, UDC_CTRL);
+ ep->ackwait = 0;
+ deselect_ep();
+
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+ list_add(&ep->iso, &udc->iso);
+
+ /* maybe assign a DMA channel to this endpoint */
+ if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
+ /* FIXME ISO can dma, but prefers first channel */
+ dma_channel_claim(ep, 0);
+
+ /* PIO OUT may RX packets */
+ if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
+ && !ep->has_dma
+ && !(ep->bEndpointAddress & USB_DIR_IN)) {
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ VDBG("%s enabled\n", _ep->name);
+ return 0;
+}
+
+static void nuke(struct omap_ep *, int status);
+
+static int omap_ep_disable(struct usb_ep *_ep)
+{
+ struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
+ unsigned long flags;
+
+ if (!_ep || !ep->ep.desc) {
+		DBG("%s, %s not enabled\n", __func__,
+			_ep ? ep->ep.name : "NULL");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ ep->ep.desc = NULL;
+ nuke(ep, -ESHUTDOWN);
+ ep->ep.maxpacket = ep->maxpacket;
+ ep->has_dma = 0;
+ omap_writew(UDC_SET_HALT, UDC_CTRL);
+ list_del_init(&ep->iso);
+ del_timer(&ep->timer);
+
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ VDBG("%s disabled\n", _ep->name);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *
+omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct omap_req *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void
+omap_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct omap_req *req = container_of(_req, struct omap_req, req);
+
+ kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+done(struct omap_ep *ep, struct omap_req *req, int status)
+{
+ struct omap_udc *udc = ep->udc;
+ unsigned stopped = ep->stopped;
+
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (use_dma && ep->has_dma)
+ usb_gadget_unmap_request(&udc->gadget, &req->req,
+ (ep->bEndpointAddress & USB_DIR_IN));
+
+#ifndef USB_TRACE
+ if (status && status != -ESHUTDOWN)
+#endif
+ VDBG("complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&ep->udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->udc->lock);
+ ep->stopped = stopped;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define UDC_FIFO_FULL (UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
+#define UDC_FIFO_UNWRITABLE (UDC_EP_HALTED | UDC_FIFO_FULL)
+
+#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
+#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
+
+static inline int
+write_packet(u8 *buf, struct omap_req *req, unsigned max)
+{
+ unsigned len;
+ u16 *wp;
+
+ len = min(req->req.length - req->req.actual, max);
+ req->req.actual += len;
+
+ max = len;
+	if (likely(((unsigned long)buf & 1) == 0)) {
+ wp = (u16 *)buf;
+ while (max >= 2) {
+ omap_writew(*wp++, UDC_DATA);
+ max -= 2;
+ }
+ buf = (u8 *)wp;
+ }
+ while (max--)
+ omap_writeb(*buf++, UDC_DATA);
+ return len;
+}
+
+/* FIXME change r/w fifo calling convention */
+
+
+/* return: 0 = still running, 1 = completed, negative = errno */
+static int write_fifo(struct omap_ep *ep, struct omap_req *req)
+{
+ u8 *buf;
+ unsigned count;
+ int is_last;
+ u16 ep_stat;
+
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ /* PIO-IN isn't double buffered except for iso */
+ ep_stat = omap_readw(UDC_STAT_FLG);
+ if (ep_stat & UDC_FIFO_UNWRITABLE)
+ return 0;
+
+ count = ep->ep.maxpacket;
+ count = write_packet(buf, req, count);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1;
+
+ /* last packet is often short (sometimes a zlp) */
+ if (count != ep->ep.maxpacket)
+ is_last = 1;
+ else if (req->req.length == req->req.actual
+ && !req->req.zero)
+ is_last = 1;
+ else
+ is_last = 0;
+
+ /* NOTE: requests complete when all IN data is in a
+ * FIFO (or sometimes later, if a zlp was needed).
+ * Use usb_ep_fifo_status() where needed.
+ */
+ if (is_last)
+ done(ep, req, 0);
+ return is_last;
+}
+
+static inline int
+read_packet(u8 *buf, struct omap_req *req, unsigned avail)
+{
+ unsigned len;
+ u16 *wp;
+
+ len = min(req->req.length - req->req.actual, avail);
+ req->req.actual += len;
+ avail = len;
+
+	if (likely(((unsigned long)buf & 1) == 0)) {
+ wp = (u16 *)buf;
+ while (avail >= 2) {
+ *wp++ = omap_readw(UDC_DATA);
+ avail -= 2;
+ }
+ buf = (u8 *)wp;
+ }
+ while (avail--)
+ *buf++ = omap_readb(UDC_DATA);
+ return len;
+}
+
+/* return: 0 = still running, 1 = queue empty, negative = errno */
+static int read_fifo(struct omap_ep *ep, struct omap_req *req)
+{
+ u8 *buf;
+ unsigned count, avail;
+ int is_last;
+
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+
+ for (;;) {
+ u16 ep_stat = omap_readw(UDC_STAT_FLG);
+
+ is_last = 0;
+ if (ep_stat & FIFO_EMPTY) {
+ if (!ep->double_buf)
+ break;
+ ep->fnf = 1;
+ }
+ if (ep_stat & UDC_EP_HALTED)
+ break;
+
+ if (ep_stat & UDC_FIFO_FULL)
+ avail = ep->ep.maxpacket;
+ else {
+ avail = omap_readw(UDC_RXFSTAT);
+ ep->fnf = ep->double_buf;
+ }
+ count = read_packet(buf, req, avail);
+
+ /* partial packet reads may not be errors */
+ if (count < ep->ep.maxpacket) {
+ is_last = 1;
+ /* overflowed this request? flush extra data */
+ if (count != avail) {
+ req->req.status = -EOVERFLOW;
+ avail -= count;
+ while (avail--)
+ omap_readw(UDC_DATA);
+ }
+ } else if (req->req.length == req->req.actual)
+ is_last = 1;
+ else
+ is_last = 0;
+
+ if (!ep->bEndpointAddress)
+ break;
+ if (is_last)
+ done(ep, req, 0);
+ break;
+ }
+ return is_last;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
+{
+ dma_addr_t end;
+
+	/* IN-DMA needs this on fault/cancel paths, but the 15xx misreports
+	 * the last transfer's bytecount by more than a FIFO's worth, so
+	 * just report zero there.
+	 */
+ if (cpu_is_omap15xx())
+ return 0;
+
+ end = omap_get_dma_src_pos(ep->lch);
+ if (end == ep->dma_counter)
+ return 0;
+
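+	/* the hardware reports only the low 16 bits of the position, so
+	 * rebuild the full address from the transfer's start and allow
+	 * for one wrap of the low half-word
+	 */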
+ end |= start & (0xffff << 16);
+ if (end < start)
+ end += 0x10000;
+ return end - start;
+}
+
+static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
+{
+ dma_addr_t end;
+
+ end = omap_get_dma_dst_pos(ep->lch);
+ if (end == ep->dma_counter)
+ return 0;
+
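+	/* as in dma_src_len(): rebuild the full address, allowing one wrap */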
+ end |= start & (0xffff << 16);
+ if (cpu_is_omap15xx())
+ end++;
+ if (end < start)
+ end += 0x10000;
+ return end - start;
+}
+
+
+/* Each USB transfer request using DMA maps to one or more DMA transfers.
+ * When DMA completion isn't request completion, the UDC continues with
+ * the next DMA transfer for that USB transfer.
+ */
+
+static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
+{
+ u16 txdma_ctrl, w;
+ unsigned length = req->req.length - req->req.actual;
+ const int sync_mode = cpu_is_omap15xx()
+ ? OMAP_DMA_SYNC_FRAME
+ : OMAP_DMA_SYNC_ELEMENT;
+ int dma_trigger = 0;
+
+ /* measure length in either bytes or packets */
+ if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC)
+ || (cpu_is_omap15xx() && length < ep->maxpacket)) {
+ txdma_ctrl = UDC_TXN_EOT | length;
+ omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
+ length, 1, sync_mode, dma_trigger, 0);
+ } else {
+ length = min(length / ep->maxpacket,
+ (unsigned) UDC_TXN_TSC + 1);
+ txdma_ctrl = length;
+ omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
+ ep->ep.maxpacket >> 1, length, sync_mode,
+ dma_trigger, 0);
+ length *= ep->maxpacket;
+ }
+ omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
+ 0, 0);
+
+ omap_start_dma(ep->lch);
+ ep->dma_counter = omap_get_dma_src_pos(ep->lch);
+ w = omap_readw(UDC_DMA_IRQ_EN);
+ w |= UDC_TX_DONE_IE(ep->dma_channel);
+ omap_writew(w, UDC_DMA_IRQ_EN);
+ omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel));
+ req->dma_bytes = length;
+}
+
+static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
+{
+ u16 w;
+
+ if (status == 0) {
+ req->req.actual += req->dma_bytes;
+
+ /* return if this request needs to send data or zlp */
+ if (req->req.actual < req->req.length)
+ return;
+ if (req->req.zero
+ && req->dma_bytes != 0
+ && (req->req.actual % ep->maxpacket) == 0)
+ return;
+ } else
+ req->req.actual += dma_src_len(ep, req->req.dma
+ + req->req.actual);
+
+ /* tx completion */
+ omap_stop_dma(ep->lch);
+ w = omap_readw(UDC_DMA_IRQ_EN);
+ w &= ~UDC_TX_DONE_IE(ep->dma_channel);
+ omap_writew(w, UDC_DMA_IRQ_EN);
+ done(ep, req, status);
+}
+
+static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
+{
+ unsigned packets = req->req.length - req->req.actual;
+ int dma_trigger = 0;
+ u16 w;
+
+ /* set up this DMA transfer, enable the fifo, start */
+ packets /= ep->ep.maxpacket;
+ packets = min(packets, (unsigned)UDC_RXN_TC + 1);
+ req->dma_bytes = packets * ep->ep.maxpacket;
+ omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
+ ep->ep.maxpacket >> 1, packets,
+ OMAP_DMA_SYNC_ELEMENT,
+ dma_trigger, 0);
+ omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
+ 0, 0);
+ ep->dma_counter = omap_get_dma_dst_pos(ep->lch);
+
+ omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel));
+ w = omap_readw(UDC_DMA_IRQ_EN);
+ w |= UDC_RX_EOT_IE(ep->dma_channel);
+ omap_writew(w, UDC_DMA_IRQ_EN);
+ omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+
+ omap_start_dma(ep->lch);
+}
+
+static void
+finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
+{
+ u16 count, w;
+
+ if (status == 0)
+ ep->dma_counter = (u16) (req->req.dma + req->req.actual);
+ count = dma_dest_len(ep, req->req.dma + req->req.actual);
+ count += req->req.actual;
+ if (one)
+ count--;
+ if (count <= req->req.length)
+ req->req.actual = count;
+
+ if (count != req->dma_bytes || status)
+ omap_stop_dma(ep->lch);
+
+ /* if this wasn't short, request may need another transfer */
+ else if (req->req.actual < req->req.length)
+ return;
+
+ /* rx completion */
+ w = omap_readw(UDC_DMA_IRQ_EN);
+ w &= ~UDC_RX_EOT_IE(ep->dma_channel);
+ omap_writew(w, UDC_DMA_IRQ_EN);
+ done(ep, req, status);
+}
+
+static void dma_irq(struct omap_udc *udc, u16 irq_src)
+{
+ u16 dman_stat = omap_readw(UDC_DMAN_STAT);
+ struct omap_ep *ep;
+ struct omap_req *req;
+
+ /* IN dma: tx to host */
+ if (irq_src & UDC_TXN_DONE) {
+ ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
+ ep->irqs++;
+ /* can see TXN_DONE after dma abort */
+ if (!list_empty(&ep->queue)) {
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ finish_in_dma(ep, req, 0);
+ }
+ omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);
+
+ if (!list_empty(&ep->queue)) {
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ next_in_dma(ep, req);
+ }
+ }
+
+ /* OUT dma: rx from host */
+ if (irq_src & UDC_RXN_EOT) {
+ ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
+ ep->irqs++;
+ /* can see RXN_EOT after dma abort */
+ if (!list_empty(&ep->queue)) {
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
+ }
+ omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);
+
+ if (!list_empty(&ep->queue)) {
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ next_out_dma(ep, req);
+ }
+ }
+
+ if (irq_src & UDC_RXN_CNT) {
+ ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
+ ep->irqs++;
+ /* omap15xx does this unasked... */
+ VDBG("%s, RX_CNT irq?\n", ep->ep.name);
+ omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
+ }
+}
+
+static void dma_error(int lch, u16 ch_status, void *data)
+{
+ struct omap_ep *ep = data;
+
+ /* if ch_status & OMAP_DMA_DROP_IRQ ... */
+ /* if ch_status & OMAP1_DMA_TOUT_IRQ ... */
+ ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status);
+
+ /* complete current transfer ... */
+}
+
+static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
+{
+ u16 reg;
+ int status, restart, is_in;
+ int dma_channel;
+
+ is_in = ep->bEndpointAddress & USB_DIR_IN;
+ if (is_in)
+ reg = omap_readw(UDC_TXDMA_CFG);
+ else
+ reg = omap_readw(UDC_RXDMA_CFG);
+ reg |= UDC_DMA_REQ; /* "pulse" activated */
+
+ ep->dma_channel = 0;
+ ep->lch = -1;
+ if (channel == 0 || channel > 3) {
+ if ((reg & 0x0f00) == 0)
+ channel = 3;
+ else if ((reg & 0x00f0) == 0)
+ channel = 2;
+ else if ((reg & 0x000f) == 0) /* preferred for ISO */
+ channel = 1;
+ else {
+ status = -EMLINK;
+ goto just_restart;
+ }
+ }
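+	/* each of the three DMA channels owns a 4-bit nibble of the
+	 * {TX,RX}DMA_CFG register, holding the endpoint number it services
+	 */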
+ reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
+ ep->dma_channel = channel;
+
+ if (is_in) {
+ dma_channel = OMAP_DMA_USB_W2FC_TX0 - 1 + channel;
+ status = omap_request_dma(dma_channel,
+ ep->ep.name, dma_error, ep, &ep->lch);
+ if (status == 0) {
+ omap_writew(reg, UDC_TXDMA_CFG);
+ /* EMIFF or SDRC */
+ omap_set_dma_src_burst_mode(ep->lch,
+ OMAP_DMA_DATA_BURST_4);
+ omap_set_dma_src_data_pack(ep->lch, 1);
+ /* TIPB */
+ omap_set_dma_dest_params(ep->lch,
+ OMAP_DMA_PORT_TIPB,
+ OMAP_DMA_AMODE_CONSTANT,
+ UDC_DATA_DMA,
+ 0, 0);
+ }
+ } else {
+ dma_channel = OMAP_DMA_USB_W2FC_RX0 - 1 + channel;
+ status = omap_request_dma(dma_channel,
+ ep->ep.name, dma_error, ep, &ep->lch);
+ if (status == 0) {
+ omap_writew(reg, UDC_RXDMA_CFG);
+ /* TIPB */
+ omap_set_dma_src_params(ep->lch,
+ OMAP_DMA_PORT_TIPB,
+ OMAP_DMA_AMODE_CONSTANT,
+ UDC_DATA_DMA,
+ 0, 0);
+ /* EMIFF or SDRC */
+ omap_set_dma_dest_burst_mode(ep->lch,
+ OMAP_DMA_DATA_BURST_4);
+ omap_set_dma_dest_data_pack(ep->lch, 1);
+ }
+ }
+ if (status)
+ ep->dma_channel = 0;
+ else {
+ ep->has_dma = 1;
+ omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);
+
+ /* channel type P: hw synch (fifo) */
+ if (!cpu_is_omap15xx())
+ omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
+ }
+
+just_restart:
+ /* restart any queue, even if the claim failed */
+ restart = !ep->stopped && !list_empty(&ep->queue);
+
+ if (status)
+ DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
+ restart ? " (restart)" : "");
+ else
+ DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
+ is_in ? 't' : 'r',
+ ep->dma_channel - 1, ep->lch,
+ restart ? " (restart)" : "");
+
+ if (restart) {
+ struct omap_req *req;
+ req = container_of(ep->queue.next, struct omap_req, queue);
+ if (ep->has_dma)
+ (is_in ? next_in_dma : next_out_dma)(ep, req);
+ else {
+ use_ep(ep, UDC_EP_SEL);
+ (is_in ? write_fifo : read_fifo)(ep, req);
+ deselect_ep();
+ if (!is_in) {
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ }
+ /* IN: 6 wait states before it'll tx */
+ }
+ }
+}
+
+static void dma_channel_release(struct omap_ep *ep)
+{
+ int shift = 4 * (ep->dma_channel - 1);
+ u16 mask = 0x0f << shift;
+ struct omap_req *req;
+ int active;
+
+ /* abort any active usb transfer request */
+ if (!list_empty(&ep->queue))
+ req = container_of(ep->queue.next, struct omap_req, queue);
+ else
+ req = NULL;
+
+ active = omap_get_dma_active_status(ep->lch);
+
+ DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
+ active ? "active" : "idle",
+ (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
+ ep->dma_channel - 1, req);
+
+ /* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
+ * OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
+ */
+
+ /* wait till current packet DMA finishes, and fifo empties */
+ if (ep->bEndpointAddress & USB_DIR_IN) {
+ omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
+ UDC_TXDMA_CFG);
+
+ if (req) {
+ finish_in_dma(ep, req, -ECONNRESET);
+
+ /* clear FIFO; hosts probably won't empty it */
+ use_ep(ep, UDC_EP_SEL);
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ deselect_ep();
+ }
+ while (omap_readw(UDC_TXDMA_CFG) & mask)
+ udelay(10);
+ } else {
+ omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
+ UDC_RXDMA_CFG);
+
+ /* dma empties the fifo */
+ while (omap_readw(UDC_RXDMA_CFG) & mask)
+ udelay(10);
+ if (req)
+ finish_out_dma(ep, req, -ECONNRESET, 0);
+ }
+ omap_free_dma(ep->lch);
+ ep->dma_channel = 0;
+ ep->lch = -1;
+ /* has_dma still set, till endpoint is fully quiesced */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int
+omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
+ struct omap_req *req = container_of(_req, struct omap_req, req);
+ struct omap_udc *udc;
+ unsigned long flags;
+ int is_iso = 0;
+
+ /* catch various bogus parameters */
+ if (!_req || !req->req.complete || !req->req.buf
+ || !list_empty(&req->queue)) {
+ DBG("%s, bad params\n", __func__);
+ return -EINVAL;
+ }
+ if (!_ep || (!ep->ep.desc && ep->bEndpointAddress)) {
+ DBG("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (req->req.length > ep->ep.maxpacket)
+ return -EMSGSIZE;
+ is_iso = 1;
+ }
+
+ /* this isn't bogus, but OMAP DMA isn't the only hardware to
+ * have a hard time with partial packet reads... reject it.
+ */
+ if (use_dma
+ && ep->has_dma
+ && ep->bEndpointAddress != 0
+ && (ep->bEndpointAddress & USB_DIR_IN) == 0
+ && (req->req.length % ep->ep.maxpacket) != 0) {
+ DBG("%s, no partial packet OUT reads\n", __func__);
+ return -EMSGSIZE;
+ }
+
+ udc = ep->udc;
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ if (use_dma && ep->has_dma)
+ usb_gadget_map_request(&udc->gadget, &req->req,
+ (ep->bEndpointAddress & USB_DIR_IN));
+
+ VDBG("%s queue req %p, len %d buf %p\n",
+ ep->ep.name, _req, _req->length, _req->buf);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ req->req.status = -EINPROGRESS;
+ req->req.actual = 0;
+
+ /* maybe kickstart non-iso i/o queues */
+ if (is_iso) {
+ u16 w;
+
+ w = omap_readw(UDC_IRQ_EN);
+ w |= UDC_SOF_IE;
+ omap_writew(w, UDC_IRQ_EN);
+ } else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
+ int is_in;
+
+ if (ep->bEndpointAddress == 0) {
+ if (!udc->ep0_pending || !list_empty(&ep->queue)) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EL2HLT;
+ }
+
+ /* empty DATA stage? */
+ is_in = udc->ep0_in;
+ if (!req->req.length) {
+
+ /* chip became CONFIGURED or ADDRESSED
+ * earlier; drivers may already have queued
+ * requests to non-control endpoints
+ */
+ if (udc->ep0_set_config) {
+ u16 irq_en = omap_readw(UDC_IRQ_EN);
+
+ irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
+ if (!udc->ep0_reset_config)
+ irq_en |= UDC_EPN_RX_IE
+ | UDC_EPN_TX_IE;
+ omap_writew(irq_en, UDC_IRQ_EN);
+ }
+
+ /* STATUS for zero length DATA stages is
+ * always an IN ... even for IN transfers,
+				 * a weird case which seems to stall OMAP.
+ */
+ omap_writew(UDC_EP_SEL | UDC_EP_DIR,
+ UDC_EP_NUM);
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+
+ /* cleanup */
+ udc->ep0_pending = 0;
+ done(ep, req, 0);
+ req = NULL;
+
+ /* non-empty DATA stage */
+ } else if (is_in) {
+ omap_writew(UDC_EP_SEL | UDC_EP_DIR,
+ UDC_EP_NUM);
+ } else {
+ if (udc->ep0_setup)
+ goto irq_wait;
+ omap_writew(UDC_EP_SEL, UDC_EP_NUM);
+ }
+ } else {
+ is_in = ep->bEndpointAddress & USB_DIR_IN;
+ if (!ep->has_dma)
+ use_ep(ep, UDC_EP_SEL);
+ /* if ISO: SOF IRQs must be enabled/disabled! */
+ }
+
+ if (ep->has_dma)
+ (is_in ? next_in_dma : next_out_dma)(ep, req);
+ else if (req) {
+ if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
+ req = NULL;
+ deselect_ep();
+ if (!is_in) {
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ }
+ /* IN: 6 wait states before it'll tx */
+ }
+ }
+
+irq_wait:
+ /* irq handler advances the queue */
+ if (req != NULL)
+ list_add_tail(&req->queue, &ep->queue);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
+ struct omap_req *req = NULL, *iter;
+ unsigned long flags;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return -EINVAL;
+ }
+
+ if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
+ int channel = ep->dma_channel;
+
+ /* releasing the channel cancels the request,
+ * reclaiming the channel restarts the queue
+ */
+ dma_channel_release(ep);
+ dma_channel_claim(ep, channel);
+ } else
+ done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
+ unsigned long flags;
+ int status = -EOPNOTSUPP;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ /* just use protocol stalls for ep0; real halts are annoying */
+ if (ep->bEndpointAddress == 0) {
+ if (!ep->udc->ep0_pending)
+ status = -EINVAL;
+ else if (value) {
+ if (ep->udc->ep0_set_config) {
+ WARNING("error changing config?\n");
+ omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
+ }
+ omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
+ ep->udc->ep0_pending = 0;
+ status = 0;
+ } else /* NOP */
+ status = 0;
+
+ /* otherwise, all active non-ISO endpoints can halt */
+ } else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->ep.desc) {
+
+ /* IN endpoints must already be idle */
+ if ((ep->bEndpointAddress & USB_DIR_IN)
+ && !list_empty(&ep->queue)) {
+ status = -EAGAIN;
+ goto done;
+ }
+
+ if (value) {
+ int channel;
+
+ if (use_dma && ep->dma_channel
+ && !list_empty(&ep->queue)) {
+ channel = ep->dma_channel;
+ dma_channel_release(ep);
+ } else
+ channel = 0;
+
+ use_ep(ep, UDC_EP_SEL);
+ if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
+ omap_writew(UDC_SET_HALT, UDC_CTRL);
+ status = 0;
+ } else
+ status = -EAGAIN;
+ deselect_ep();
+
+ if (channel)
+ dma_channel_claim(ep, channel);
+ } else {
+ use_ep(ep, 0);
+ omap_writew(ep->udc->clr_halt, UDC_CTRL);
+ ep->ackwait = 0;
+ if (!(ep->bEndpointAddress & USB_DIR_IN)) {
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ }
+ }
+ }
+done:
+ VDBG("%s %s halt stat %d\n", ep->ep.name,
+ value ? "set" : "clear", status);
+
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return status;
+}
+
+static const struct usb_ep_ops omap_ep_ops = {
+ .enable = omap_ep_enable,
+ .disable = omap_ep_disable,
+
+ .alloc_request = omap_alloc_request,
+ .free_request = omap_free_request,
+
+ .queue = omap_ep_queue,
+ .dequeue = omap_ep_dequeue,
+
+ .set_halt = omap_ep_set_halt,
+ /* fifo_status ... report bytes in fifo */
+ /* fifo_flush ... flush fifo */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int omap_get_frame(struct usb_gadget *gadget)
+{
+ u16 sof = omap_readw(UDC_SOF);
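+
+	/* the frame timestamp is only valid while UDC_TS_OK is set */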
+ return (sof & UDC_TS_OK) ? (sof & UDC_TS) : -EL2NSYNC;
+}
+
+static int omap_wakeup(struct usb_gadget *gadget)
+{
+ struct omap_udc *udc;
+ unsigned long flags;
+ int retval = -EHOSTUNREACH;
+
+ udc = container_of(gadget, struct omap_udc, gadget);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (udc->devstat & UDC_SUS) {
+ /* NOTE: OTG spec erratum says that OTG devices may
+ * issue wakeups without host enable.
+ */
+ if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
+ DBG("remote wakeup...\n");
+ omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
+ retval = 0;
+ }
+
+ /* NOTE: non-OTG systems may use SRP TOO... */
+ } else if (!(udc->devstat & UDC_ATT)) {
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ retval = otg_start_srp(udc->transceiver->otg);
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return retval;
+}
+
+static int
+omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ struct omap_udc *udc;
+ unsigned long flags;
+ u16 syscon1;
+
+ gadget->is_selfpowered = (is_selfpowered != 0);
+ udc = container_of(gadget, struct omap_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+ syscon1 = omap_readw(UDC_SYSCON1);
+ if (is_selfpowered)
+ syscon1 |= UDC_SELF_PWR;
+ else
+ syscon1 &= ~UDC_SELF_PWR;
+ omap_writew(syscon1, UDC_SYSCON1);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int can_pullup(struct omap_udc *udc)
+{
+ return udc->driver && udc->softconnect && udc->vbus_active;
+}
+
+static void pullup_enable(struct omap_udc *udc)
+{
+ u16 w;
+
+ w = omap_readw(UDC_SYSCON1);
+ w |= UDC_PULLUP_EN;
+ omap_writew(w, UDC_SYSCON1);
+ if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
+ u32 l;
+
+ l = omap_readl(OTG_CTRL);
+ l |= OTG_BSESSVLD;
+ omap_writel(l, OTG_CTRL);
+ }
+ omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
+}
+
+static void pullup_disable(struct omap_udc *udc)
+{
+ u16 w;
+
+ if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
+ u32 l;
+
+ l = omap_readl(OTG_CTRL);
+ l &= ~OTG_BSESSVLD;
+ omap_writel(l, OTG_CTRL);
+ }
+ omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
+ w = omap_readw(UDC_SYSCON1);
+ w &= ~UDC_PULLUP_EN;
+ omap_writew(w, UDC_SYSCON1);
+}
+
+static struct omap_udc *udc;
+
+static void omap_udc_enable_clock(int enable)
+{
+ if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL)
+ return;
+
+ if (enable) {
+ clk_enable(udc->dc_clk);
+ clk_enable(udc->hhc_clk);
+ udelay(100);
+ } else {
+ clk_disable(udc->hhc_clk);
+ clk_disable(udc->dc_clk);
+ }
+}
+
+/*
+ * Called by whatever detects VBUS sessions: external transceiver
+ * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
+ */
+static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct omap_udc *udc;
+ unsigned long flags;
+ u32 l;
+
+ udc = container_of(gadget, struct omap_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+ VDBG("VBUS %s\n", is_active ? "on" : "off");
+ udc->vbus_active = (is_active != 0);
+ if (cpu_is_omap15xx()) {
+ /* "software" detect, ignored if !VBUS_MODE_1510 */
+ l = omap_readl(FUNC_MUX_CTRL_0);
+ if (is_active)
+ l |= VBUS_CTRL_1510;
+ else
+ l &= ~VBUS_CTRL_1510;
+ omap_writel(l, FUNC_MUX_CTRL_0);
+ }
+ if (udc->dc_clk != NULL && is_active) {
+ if (!udc->clk_requested) {
+ omap_udc_enable_clock(1);
+ udc->clk_requested = 1;
+ }
+ }
+ if (can_pullup(udc))
+ pullup_enable(udc);
+ else
+ pullup_disable(udc);
+ if (udc->dc_clk != NULL && !is_active) {
+ if (udc->clk_requested) {
+ omap_udc_enable_clock(0);
+ udc->clk_requested = 0;
+ }
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct omap_udc *udc;
+
+ udc = container_of(gadget, struct omap_udc, gadget);
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ return usb_phy_set_power(udc->transceiver, mA);
+ return -EOPNOTSUPP;
+}
+
+static int omap_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct omap_udc *udc;
+ unsigned long flags;
+
+ udc = container_of(gadget, struct omap_udc, gadget);
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->softconnect = (is_on != 0);
+ if (can_pullup(udc))
+ pullup_enable(udc);
+ else
+ pullup_disable(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int omap_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int omap_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops omap_gadget_ops = {
+ .get_frame = omap_get_frame,
+ .wakeup = omap_wakeup,
+ .set_selfpowered = omap_set_selfpowered,
+ .vbus_session = omap_vbus_session,
+ .vbus_draw = omap_vbus_draw,
+ .pullup = omap_pullup,
+ .udc_start = omap_udc_start,
+ .udc_stop = omap_udc_stop,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* dequeue ALL requests; caller holds udc->lock */
+static void nuke(struct omap_ep *ep, int status)
+{
+ struct omap_req *req;
+
+ ep->stopped = 1;
+
+ if (use_dma && ep->dma_channel)
+ dma_channel_release(ep);
+
+ use_ep(ep, 0);
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
+ omap_writew(UDC_SET_HALT, UDC_CTRL);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct omap_req, queue);
+ done(ep, req, status);
+ }
+}
+
+/* caller holds udc->lock */
+static void udc_quiesce(struct omap_udc *udc)
+{
+ struct omap_ep *ep;
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ nuke(&udc->ep[0], -ESHUTDOWN);
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
+ nuke(ep, -ESHUTDOWN);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void update_otg(struct omap_udc *udc)
+{
+ u16 devstat;
+
+ if (!gadget_is_otg(&udc->gadget))
+ return;
+
+ if (omap_readl(OTG_CTRL) & OTG_ID)
+ devstat = omap_readw(UDC_DEVSTAT);
+ else
+ devstat = 0;
+
+ udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
+ udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
+ udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);
+
+ /* Enable HNP early, avoiding races on suspend irq path.
+ * ASSUMES OTG state machine B_BUS_REQ input is true.
+ */
+ if (udc->gadget.b_hnp_enable) {
+ u32 l;
+
+ l = omap_readl(OTG_CTRL);
+ l |= OTG_B_HNPEN | OTG_B_BUSREQ;
+ l &= ~OTG_PULLUP;
+ omap_writel(l, OTG_CTRL);
+ }
+}
+
+static void ep0_irq(struct omap_udc *udc, u16 irq_src)
+{
+ struct omap_ep *ep0 = &udc->ep[0];
+ struct omap_req *req = NULL;
+
+ ep0->irqs++;
+
+ /* Clear any pending requests and then scrub any rx/tx state
+ * before starting to handle the SETUP request.
+ */
+ if (irq_src & UDC_SETUP) {
+ u16 ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);
+
+ nuke(ep0, 0);
+ if (ack) {
+ omap_writew(ack, UDC_IRQ_SRC);
+ irq_src = UDC_SETUP;
+ }
+ }
+
+ /* IN/OUT packets mean we're in the DATA or STATUS stage.
+	 * This driver only uses protocol stalls (ep0 never halts),
+ * and if we got this far the gadget driver already had a
+ * chance to stall. Tries to be forgiving of host oddities.
+ *
+ * NOTE: the last chance gadget drivers have to stall control
+ * requests is during their request completion callback.
+ */
+ if (!list_empty(&ep0->queue))
+ req = container_of(ep0->queue.next, struct omap_req, queue);
+
+ /* IN == TX to host */
+ if (irq_src & UDC_EP0_TX) {
+ int stat;
+
+ omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
+ omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
+ stat = omap_readw(UDC_STAT_FLG);
+ if (stat & UDC_ACK) {
+ if (udc->ep0_in) {
+ /* write next IN packet from response,
+ * or set up the status stage.
+ */
+ if (req)
+ stat = write_fifo(ep0, req);
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ if (!req && udc->ep0_pending) {
+ omap_writew(UDC_EP_SEL, UDC_EP_NUM);
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ omap_writew(0, UDC_EP_NUM);
+ udc->ep0_pending = 0;
+ } /* else: 6 wait states before it'll tx */
+ } else {
+ /* ack status stage of OUT transfer */
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ if (req)
+ done(ep0, req, 0);
+ }
+ req = NULL;
+ } else if (stat & UDC_STALL) {
+ omap_writew(UDC_CLR_HALT, UDC_CTRL);
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ } else {
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ }
+ }
+
+ /* OUT == RX from host */
+ if (irq_src & UDC_EP0_RX) {
+ int stat;
+
+ omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
+ omap_writew(UDC_EP_SEL, UDC_EP_NUM);
+ stat = omap_readw(UDC_STAT_FLG);
+ if (stat & UDC_ACK) {
+ if (!udc->ep0_in) {
+				/* read next OUT packet of request, maybe
+				 * reactivating the fifo; stall on errors.
+				 */
+				stat = req ? read_fifo(ep0, req) : 0;
+ if (!req || stat < 0) {
+ omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
+ udc->ep0_pending = 0;
+ stat = 0;
+ } else if (stat == 0)
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ omap_writew(0, UDC_EP_NUM);
+
+ /* activate status stage */
+ if (stat == 1) {
+ done(ep0, req, 0);
+ /* that may have STALLed ep0... */
+ omap_writew(UDC_EP_SEL | UDC_EP_DIR,
+ UDC_EP_NUM);
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ udc->ep0_pending = 0;
+ }
+ } else {
+ /* ack status stage of IN transfer */
+ omap_writew(0, UDC_EP_NUM);
+ if (req)
+ done(ep0, req, 0);
+ }
+ } else if (stat & UDC_STALL) {
+ omap_writew(UDC_CLR_HALT, UDC_CTRL);
+ omap_writew(0, UDC_EP_NUM);
+ } else {
+ omap_writew(0, UDC_EP_NUM);
+ }
+ }
+
+ /* SETUP starts all control transfers */
+ if (irq_src & UDC_SETUP) {
+ union u {
+ u16 word[4];
+ struct usb_ctrlrequest r;
+ } u;
+ int status = -EINVAL;
+ struct omap_ep *ep;
+
+ /* read the (latest) SETUP message */
+ do {
+ omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
+ /* two bytes at a time */
+ u.word[0] = omap_readw(UDC_DATA);
+ u.word[1] = omap_readw(UDC_DATA);
+ u.word[2] = omap_readw(UDC_DATA);
+ u.word[3] = omap_readw(UDC_DATA);
+ omap_writew(0, UDC_EP_NUM);
+ } while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);
+
+#define w_value le16_to_cpu(u.r.wValue)
+#define w_index le16_to_cpu(u.r.wIndex)
+#define w_length le16_to_cpu(u.r.wLength)
+
+ /* Delegate almost all control requests to the gadget driver,
+ * except for a handful of ch9 status/feature requests that
+ * hardware doesn't autodecode _and_ the gadget API hides.
+ */
+ udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+ udc->ep0_set_config = 0;
+ udc->ep0_pending = 1;
+ ep0->stopped = 0;
+ ep0->ackwait = 0;
+ switch (u.r.bRequest) {
+ case USB_REQ_SET_CONFIGURATION:
+ /* udc needs to know when ep != 0 is valid */
+ if (u.r.bRequestType != USB_RECIP_DEVICE)
+ goto delegate;
+ if (w_length != 0)
+ goto do_stall;
+ udc->ep0_set_config = 1;
+ udc->ep0_reset_config = (w_value == 0);
+ VDBG("set config %d\n", w_value);
+
+ /* update udc NOW since gadget driver may start
+ * queueing requests immediately; clear config
+ * later if it fails the request.
+ */
+ if (udc->ep0_reset_config)
+ omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
+ else
+ omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
+ update_otg(udc);
+ goto delegate;
+ case USB_REQ_CLEAR_FEATURE:
+ /* clear endpoint halt */
+ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (w_value != USB_ENDPOINT_HALT
+ || w_length != 0)
+ goto do_stall;
+ ep = &udc->ep[w_index & 0xf];
+ if (ep != ep0) {
+ if (w_index & USB_DIR_IN)
+ ep += 16;
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+ || !ep->ep.desc)
+ goto do_stall;
+ use_ep(ep, 0);
+ omap_writew(udc->clr_halt, UDC_CTRL);
+ ep->ackwait = 0;
+ if (!(ep->bEndpointAddress & USB_DIR_IN)) {
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ }
+ /* NOTE: assumes the host behaves sanely,
+ * only clearing real halts. Else we may
+ * need to kill pending transfers and then
+ * restart the queue... very messy for DMA!
+ */
+ }
+ VDBG("%s halt cleared by host\n", ep->name);
+ goto ep0out_status_stage;
+ case USB_REQ_SET_FEATURE:
+ /* set endpoint halt */
+ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (w_value != USB_ENDPOINT_HALT
+ || w_length != 0)
+ goto do_stall;
+ ep = &udc->ep[w_index & 0xf];
+ if (w_index & USB_DIR_IN)
+ ep += 16;
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+ || ep == ep0 || !ep->ep.desc)
+ goto do_stall;
+ if (use_dma && ep->has_dma) {
+ /* this has rude side-effects (aborts) and
+ * can't really work if DMA-IN is active
+ */
+ DBG("%s host set_halt, NYET\n", ep->name);
+ goto do_stall;
+ }
+ use_ep(ep, 0);
+ /* can't halt if fifo isn't empty... */
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ omap_writew(UDC_SET_HALT, UDC_CTRL);
+ VDBG("%s halted by host\n", ep->name);
+ep0out_status_stage:
+ status = 0;
+ omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
+ omap_writew(UDC_CLR_EP, UDC_CTRL);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ udc->ep0_pending = 0;
+ break;
+ case USB_REQ_GET_STATUS:
+ /* USB_ENDPOINT_HALT status? */
+ if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
+ goto intf_status;
+
+ /* ep0 never stalls */
+ if (!(w_index & 0xf))
+ goto zero_status;
+
+ /* only active endpoints count */
+ ep = &udc->ep[w_index & 0xf];
+ if (w_index & USB_DIR_IN)
+ ep += 16;
+ if (!ep->ep.desc)
+ goto do_stall;
+
+ /* iso never stalls */
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+ goto zero_status;
+
+ /* FIXME don't assume non-halted endpoints!! */
+ ERR("%s status, can't report\n", ep->ep.name);
+ goto do_stall;
+
+intf_status:
+ /* return interface status. if we were pedantic,
+ * we'd detect non-existent interfaces, and stall.
+ */
+ if (u.r.bRequestType
+ != (USB_DIR_IN|USB_RECIP_INTERFACE))
+ goto delegate;
+
+zero_status:
+ /* return two zero bytes */
+ omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
+ omap_writew(0, UDC_DATA);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ omap_writew(UDC_EP_DIR, UDC_EP_NUM);
+ status = 0;
+ VDBG("GET_STATUS, interface %d\n", w_index);
+ /* next, status stage */
+ break;
+ default:
+delegate:
+ /* activate the ep0out fifo right away */
+ if (!udc->ep0_in && w_length) {
+ omap_writew(0, UDC_EP_NUM);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ }
+
+ /* gadget drivers see class/vendor specific requests,
+ * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
+ * and more
+ */
+ VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
+ u.r.bRequestType, u.r.bRequest,
+ w_value, w_index, w_length);
+
+#undef w_value
+#undef w_index
+#undef w_length
+
+ /* The gadget driver may return an error here,
+ * causing an immediate protocol stall.
+ *
+ * Else it must issue a response, either queueing a
+ * response buffer for the DATA stage, or halting ep0
+ * (causing a protocol stall, not a real halt). A
+ * zero length buffer means no DATA stage.
+ *
+ * It's fine to issue that response after the setup()
+ * call returns, and this IRQ was handled.
+ */
+ udc->ep0_setup = 1;
+ spin_unlock(&udc->lock);
+ status = udc->driver->setup(&udc->gadget, &u.r);
+ spin_lock(&udc->lock);
+ udc->ep0_setup = 0;
+ }
+
+ if (status < 0) {
+do_stall:
+ VDBG("req %02x.%02x protocol STALL; stat %d\n",
+ u.r.bRequestType, u.r.bRequest, status);
+ if (udc->ep0_set_config) {
+ if (udc->ep0_reset_config)
+ WARNING("error resetting config?\n");
+ else
+ omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
+ }
+ omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
+ udc->ep0_pending = 0;
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)
+
+static void devstate_irq(struct omap_udc *udc, u16 irq_src)
+{
+ u16 devstat, change;
+
+ devstat = omap_readw(UDC_DEVSTAT);
+ change = devstat ^ udc->devstat;
+ udc->devstat = devstat;
+
+ if (change & (UDC_USB_RESET|UDC_ATT)) {
+ udc_quiesce(udc);
+
+ if (change & UDC_ATT) {
+ /* driver for any external transceiver will
+ * have called omap_vbus_session() already
+ */
+ if (devstat & UDC_ATT) {
+ udc->gadget.speed = USB_SPEED_FULL;
+ VDBG("connect\n");
+ if (IS_ERR_OR_NULL(udc->transceiver))
+ pullup_enable(udc);
+ /* if (driver->connect) call it */
+ } else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ if (IS_ERR_OR_NULL(udc->transceiver))
+ pullup_disable(udc);
+ DBG("disconnect, gadget %s\n",
+ udc->driver->driver.name);
+ if (udc->driver->disconnect) {
+ spin_unlock(&udc->lock);
+ udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+ change &= ~UDC_ATT;
+ }
+
+ if (change & UDC_USB_RESET) {
+ if (devstat & UDC_USB_RESET) {
+ VDBG("RESET=1\n");
+ } else {
+ udc->gadget.speed = USB_SPEED_FULL;
+ INFO("USB reset done, gadget %s\n",
+ udc->driver->driver.name);
+ /* ep0 traffic is legal from now on */
+ omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
+ UDC_IRQ_EN);
+ }
+ change &= ~UDC_USB_RESET;
+ }
+ }
+ if (change & UDC_SUS) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ /* FIXME tell isp1301 to suspend/resume (?) */
+ if (devstat & UDC_SUS) {
+ VDBG("suspend\n");
+ update_otg(udc);
+ /* HNP could be under way already */
+ if (udc->gadget.speed == USB_SPEED_FULL
+ && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_phy_set_suspend(
+ udc->transceiver, 1);
+ } else {
+ VDBG("resume\n");
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_phy_set_suspend(
+ udc->transceiver, 0);
+ if (udc->gadget.speed == USB_SPEED_FULL
+ && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+ }
+ change &= ~UDC_SUS;
+ }
+ if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
+ update_otg(udc);
+ change &= ~OTG_FLAGS;
+ }
+
+ change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
+ if (change)
+ VDBG("devstat %03x, ignore change %03x\n",
+ devstat, change);
+
+ omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
+}
+
+static irqreturn_t omap_udc_irq(int irq, void *_udc)
+{
+ struct omap_udc *udc = _udc;
+ u16 irq_src;
+ irqreturn_t status = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ irq_src = omap_readw(UDC_IRQ_SRC);
+
+ /* Device state change (usb ch9 stuff) */
+ if (irq_src & UDC_DS_CHG) {
+ devstate_irq(_udc, irq_src);
+ status = IRQ_HANDLED;
+ irq_src &= ~UDC_DS_CHG;
+ }
+
+ /* EP0 control transfers */
+ if (irq_src & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
+ ep0_irq(_udc, irq_src);
+ status = IRQ_HANDLED;
+ irq_src &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
+ }
+
+ /* DMA transfer completion */
+ if (use_dma && (irq_src & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
+ dma_irq(_udc, irq_src);
+ status = IRQ_HANDLED;
+ irq_src &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
+ }
+
+ irq_src &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
+ if (irq_src)
+ DBG("udc_irq, unhandled %03x\n", irq_src);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return status;
+}
+
+/* workaround for seemingly-lost IRQs for RX ACKs... */
+#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
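+/* neither FULL nor EMPTY: with double buffering, exactly one buffer has data */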
+#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
+
+static void pio_out_timer(struct timer_list *t)
+{
+ struct omap_ep *ep = from_timer(ep, t, timer);
+ unsigned long flags;
+ u16 stat_flg;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ if (!list_empty(&ep->queue) && ep->ackwait) {
+ use_ep(ep, UDC_EP_SEL);
+ stat_flg = omap_readw(UDC_STAT_FLG);
+
+ if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
+ || (ep->double_buf && HALF_FULL(stat_flg)))) {
+ struct omap_req *req;
+
+ VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ (void) read_fifo(ep, req);
+ omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ } else
+ deselect_ep();
+ }
+ mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+}
+
+static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
+{
+ u16 epn_stat, irq_src;
+ irqreturn_t status = IRQ_NONE;
+ struct omap_ep *ep;
+ int epnum;
+ struct omap_udc *udc = _dev;
+ struct omap_req *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ epn_stat = omap_readw(UDC_EPN_STAT);
+ irq_src = omap_readw(UDC_IRQ_SRC);
+
+ /* handle OUT first, to avoid some wasteful NAKs */
+ if (irq_src & UDC_EPN_RX) {
+ epnum = (epn_stat >> 8) & 0x0f;
+ omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
+ status = IRQ_HANDLED;
+ ep = &udc->ep[epnum];
+ ep->irqs++;
+
+ omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
+ ep->fnf = 0;
+ if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
+ ep->ackwait--;
+ if (!list_empty(&ep->queue)) {
+ int stat;
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ stat = read_fifo(ep, req);
+ if (!ep->double_buf)
+ ep->fnf = 1;
+ }
+ }
+ /* min 6 clock delay before clearing EP_SEL ... */
+ epn_stat = omap_readw(UDC_EPN_STAT);
+ epn_stat = omap_readw(UDC_EPN_STAT);
+ omap_writew(epnum, UDC_EP_NUM);
+
+ /* enabling fifo _after_ clearing ACK, contrary to docs,
+ * reduces lossage; timer still needed though (sigh).
+ */
+ if (ep->fnf) {
+ omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
+ ep->ackwait = 1 + ep->double_buf;
+ }
+ mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
+ }
+
+ /* then IN transfers */
+ else if (irq_src & UDC_EPN_TX) {
+ epnum = epn_stat & 0x0f;
+ omap_writew(UDC_EPN_TX, UDC_IRQ_SRC);
+ status = IRQ_HANDLED;
+ ep = &udc->ep[16 + epnum];
+ ep->irqs++;
+
+ omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM);
+ if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
+ ep->ackwait = 0;
+ if (!list_empty(&ep->queue)) {
+ req = container_of(ep->queue.next,
+ struct omap_req, queue);
+ (void) write_fifo(ep, req);
+ }
+ }
+ /* min 6 clock delay before clearing EP_SEL ... */
+ epn_stat = omap_readw(UDC_EPN_STAT);
+ epn_stat = omap_readw(UDC_EPN_STAT);
+ omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM);
+ /* then 6 clocks before it'd tx */
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return status;
+}
+
+#ifdef USE_ISO
+static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
+{
+ struct omap_udc *udc = _dev;
+ struct omap_ep *ep;
+ int pending = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* handle all non-DMA ISO transfers */
+ list_for_each_entry(ep, &udc->iso, iso) {
+ u16 stat;
+ struct omap_req *req;
+
+ if (ep->has_dma || list_empty(&ep->queue))
+ continue;
+ req = list_entry(ep->queue.next, struct omap_req, queue);
+
+ use_ep(ep, UDC_EP_SEL);
+ stat = omap_readw(UDC_STAT_FLG);
+
+ /* NOTE: like the other controller drivers, this isn't
+ * currently reporting lost or damaged frames.
+ */
+ if (ep->bEndpointAddress & USB_DIR_IN) {
+ if (stat & UDC_MISS_IN)
+ /* done(ep, req, -EPROTO) */;
+ else
+ write_fifo(ep, req);
+ } else {
+ int status = 0;
+
+ if (stat & UDC_NO_RXPACKET)
+ status = -EREMOTEIO;
+ else if (stat & UDC_ISO_ERR)
+ status = -EILSEQ;
+ else if (stat & UDC_DATA_FLUSH)
+ status = -ENOSR;
+
+ if (status)
+ /* done(ep, req, status) */;
+ else
+ read_fifo(ep, req);
+ }
+ deselect_ep();
+ /* 6 wait states before next EP */
+
+ ep->irqs++;
+ if (!list_empty(&ep->queue))
+ pending = 1;
+ }
+ if (!pending) {
+ u16 w;
+
+ w = omap_readw(UDC_IRQ_EN);
+ w &= ~UDC_SOF_IE;
+ omap_writew(w, UDC_IRQ_EN);
+ }
+ omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static inline int machine_without_vbus_sense(void)
+{
+ return machine_is_omap_innovator()
+ || machine_is_omap_osk()
+ || machine_is_omap_palmte()
+ || machine_is_sx1()
+ /* No known omap7xx boards with vbus sense */
+ || cpu_is_omap7xx();
+}
+
+static int omap_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ int status;
+ struct omap_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* reset state */
+ list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+ ep->irqs = 0;
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+ continue;
+ use_ep(ep, 0);
+ omap_writew(UDC_SET_HALT, UDC_CTRL);
+ }
+ udc->ep0_pending = 0;
+ udc->ep[0].irqs = 0;
+ udc->softconnect = 1;
+
+ /* hook up the driver */
+ udc->driver = driver;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (udc->dc_clk != NULL)
+ omap_udc_enable_clock(1);
+
+ omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
+
+ /* connect to bus through transceiver */
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ status = otg_set_peripheral(udc->transceiver->otg,
+ &udc->gadget);
+ if (status < 0) {
+ ERR("can't bind to transceiver\n");
+ udc->driver = NULL;
+ goto done;
+ }
+ } else {
+ status = 0;
+ if (can_pullup(udc))
+ pullup_enable(udc);
+ else
+ pullup_disable(udc);
+ }
+
+ /* boards that don't have VBUS sensing can't autogate 48MHz;
+ * can't enter deep sleep while a gadget driver is active.
+ */
+ if (machine_without_vbus_sense())
+ omap_vbus_session(&udc->gadget, 1);
+
+done:
+ if (udc->dc_clk != NULL)
+ omap_udc_enable_clock(0);
+
+ return status;
+}
+
+static int omap_udc_stop(struct usb_gadget *g)
+{
+ unsigned long flags;
+
+ if (udc->dc_clk != NULL)
+ omap_udc_enable_clock(1);
+
+ if (machine_without_vbus_sense())
+ omap_vbus_session(&udc->gadget, 0);
+
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ (void) otg_set_peripheral(udc->transceiver->otg, NULL);
+ else
+ pullup_disable(udc);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc_quiesce(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ udc->driver = NULL;
+
+ if (udc->dc_clk != NULL)
+ omap_udc_enable_clock(0);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char proc_filename[] = "driver/udc";
+
+#define FOURBITS "%s%s%s%s"
+#define EIGHTBITS "%s%s%s%s%s%s%s%s"
+
+static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
+{
+ u16 stat_flg;
+ struct omap_req *req;
+ char buf[20];
+
+ use_ep(ep, 0);
+
+ if (use_dma && ep->has_dma)
+ snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
+ (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
+ ep->dma_channel - 1, ep->lch);
+ else
+ buf[0] = 0;
+
+ stat_flg = omap_readw(UDC_STAT_FLG);
+ seq_printf(s,
+ "\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
+ ep->name, buf,
+ ep->double_buf ? "dbuf " : "",
+ ({ char *s;
+ switch (ep->ackwait) {
+ case 0:
+ s = "";
+ break;
+ case 1:
+ s = "(ackw) ";
+ break;
+ case 2:
+ s = "(ackw2) ";
+ break;
+ default:
+ s = "(?) ";
+ break;
+ } s; }),
+ ep->irqs, stat_flg,
+ (stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
+ (stat_flg & UDC_MISS_IN) ? "miss_in " : "",
+ (stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
+ (stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
+ (stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
+ (stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
+ (stat_flg & UDC_EP_HALTED) ? "HALT " : "",
+ (stat_flg & UDC_STALL) ? "STALL " : "",
+ (stat_flg & UDC_NAK) ? "NAK " : "",
+ (stat_flg & UDC_ACK) ? "ACK " : "",
+ (stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
+ (stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
+ (stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");
+
+ if (list_empty(&ep->queue))
+ seq_printf(s, "\t(queue empty)\n");
+ else
+ list_for_each_entry(req, &ep->queue, queue) {
+ unsigned length = req->req.actual;
+
+ if (use_dma && buf[0]) {
+ length += ((ep->bEndpointAddress & USB_DIR_IN)
+ ? dma_src_len : dma_dest_len)
+ (ep, req->req.dma + length);
+ buf[0] = 0;
+ }
+ seq_printf(s, "\treq %p len %d/%d buf %p\n",
+ &req->req, length,
+ req->req.length, req->req.buf);
+ }
+}
+
+static char *trx_mode(unsigned m, int enabled)
+{
+ switch (m) {
+ case 0:
+ return enabled ? "*6wire" : "unused";
+ case 1:
+ return "4wire";
+ case 2:
+ return "3wire";
+ case 3:
+ return "6wire";
+ default:
+ return "unknown";
+ }
+}
+
+static int proc_otg_show(struct seq_file *s)
+{
+ u32 tmp;
+ u32 trans = 0;
+ char *ctrl_name = "(UNKNOWN)";
+
+ tmp = omap_readl(OTG_REV);
+ ctrl_name = "transceiver_ctrl";
+ trans = omap_readw(USB_TRANSCEIVER_CTRL);
+ seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
+ tmp >> 4, tmp & 0xf, ctrl_name, trans);
+ tmp = omap_readw(OTG_SYSCON_1);
+ seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
+ FOURBITS "\n", tmp,
+ trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
+ trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
+ (USB0_TRX_MODE(tmp) == 0 && !cpu_is_omap1710())
+ ? "internal"
+ : trx_mode(USB0_TRX_MODE(tmp), 1),
+ (tmp & OTG_IDLE_EN) ? " !otg" : "",
+ (tmp & HST_IDLE_EN) ? " !host" : "",
+ (tmp & DEV_IDLE_EN) ? " !dev" : "",
+ (tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
+ tmp = omap_readl(OTG_SYSCON_2);
+ seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
+ " b_ase_brst=%d hmc=%d\n", tmp,
+ (tmp & OTG_EN) ? " otg_en" : "",
+ (tmp & USBX_SYNCHRO) ? " synchro" : "",
+ /* much more SRP stuff */
+ (tmp & SRP_DATA) ? " srp_data" : "",
+ (tmp & SRP_VBUS) ? " srp_vbus" : "",
+ (tmp & OTG_PADEN) ? " otg_paden" : "",
+ (tmp & HMC_PADEN) ? " hmc_paden" : "",
+ (tmp & UHOST_EN) ? " uhost_en" : "",
+ (tmp & HMC_TLLSPEED) ? " tllspeed" : "",
+ (tmp & HMC_TLLATTACH) ? " tllattach" : "",
+ B_ASE_BRST(tmp),
+ OTG_HMC(tmp));
+ tmp = omap_readl(OTG_CTRL);
+ seq_printf(s, "otg_ctrl %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
+ (tmp & OTG_ASESSVLD) ? " asess" : "",
+ (tmp & OTG_BSESSEND) ? " bsess_end" : "",
+ (tmp & OTG_BSESSVLD) ? " bsess" : "",
+ (tmp & OTG_VBUSVLD) ? " vbus" : "",
+ (tmp & OTG_ID) ? " id" : "",
+ (tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
+ (tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
+ (tmp & OTG_A_BUSREQ) ? " a_bus" : "",
+ (tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
+ (tmp & OTG_B_BUSREQ) ? " b_bus" : "",
+ (tmp & OTG_BUSDROP) ? " busdrop" : "",
+ (tmp & OTG_PULLDOWN) ? " down" : "",
+ (tmp & OTG_PULLUP) ? " up" : "",
+ (tmp & OTG_DRV_VBUS) ? " drv" : "",
+ (tmp & OTG_PD_VBUS) ? " pd_vb" : "",
+ (tmp & OTG_PU_VBUS) ? " pu_vb" : "",
+ (tmp & OTG_PU_ID) ? " pu_id" : ""
+ );
+ tmp = omap_readw(OTG_IRQ_EN);
+ seq_printf(s, "otg_irq_en %04x" "\n", tmp);
+ tmp = omap_readw(OTG_IRQ_SRC);
+ seq_printf(s, "otg_irq_src %04x" "\n", tmp);
+ tmp = omap_readw(OTG_OUTCTRL);
+ seq_printf(s, "otg_outctrl %04x" "\n", tmp);
+ tmp = omap_readw(OTG_TEST);
+ seq_printf(s, "otg_test %04x" "\n", tmp);
+ return 0;
+}
+
+static int proc_udc_show(struct seq_file *s, void *_)
+{
+ u32 tmp;
+ struct omap_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ seq_printf(s, "%s, version: " DRIVER_VERSION
+#ifdef USE_ISO
+ " (iso)"
+#endif
+ "%s\n",
+ driver_desc,
+ use_dma ? " (dma)" : "");
+
+ tmp = omap_readw(UDC_REV) & 0xff;
+ seq_printf(s,
+ "UDC rev %d.%d, fifo mode %d, gadget %s\n"
+ "hmc %d, transceiver %s\n",
+ tmp >> 4, tmp & 0xf,
+ fifo_mode,
+ udc->driver ? udc->driver->driver.name : "(none)",
+ HMC,
+ udc->transceiver
+ ? udc->transceiver->label
+ : (cpu_is_omap1710()
+ ? "external" : "(none)"));
+ seq_printf(s, "ULPD control %04x req %04x status %04x\n",
+ omap_readw(ULPD_CLOCK_CTRL),
+ omap_readw(ULPD_SOFT_REQ),
+ omap_readw(ULPD_STATUS_REQ));
+
+ /* OTG controller registers */
+ if (!cpu_is_omap15xx())
+ proc_otg_show(s);
+
+ tmp = omap_readw(UDC_SYSCON1);
+ seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp,
+ (tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
+ (tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
+ (tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
+ (tmp & UDC_NAK_EN) ? " nak" : "",
+ (tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
+ (tmp & UDC_SELF_PWR) ? " self_pwr" : "",
+ (tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
+ (tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
+ /* syscon2 is write-only */
+
+ /* UDC controller registers */
+ if (!(tmp & UDC_PULLUP_EN)) {
+ seq_printf(s, "(suspended)\n");
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+ }
+
+ tmp = omap_readw(UDC_DEVSTAT);
+ seq_printf(s, "devstat %04x" EIGHTBITS "%s%s\n", tmp,
+ (tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
+ (tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
+ (tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
+ (tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
+ (tmp & UDC_USB_RESET) ? " usb_reset" : "",
+ (tmp & UDC_SUS) ? " SUS" : "",
+ (tmp & UDC_CFG) ? " CFG" : "",
+ (tmp & UDC_ADD) ? " ADD" : "",
+ (tmp & UDC_DEF) ? " DEF" : "",
+ (tmp & UDC_ATT) ? " ATT" : "");
+ seq_printf(s, "sof %04x\n", omap_readw(UDC_SOF));
+ tmp = omap_readw(UDC_IRQ_EN);
+ seq_printf(s, "irq_en %04x" FOURBITS "%s\n", tmp,
+ (tmp & UDC_SOF_IE) ? " sof" : "",
+ (tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
+ (tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
+ (tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
+ (tmp & UDC_EP0_IE) ? " ep0" : "");
+ tmp = omap_readw(UDC_IRQ_SRC);
+ seq_printf(s, "irq_src %04x" EIGHTBITS "%s%s\n", tmp,
+ (tmp & UDC_TXN_DONE) ? " txn_done" : "",
+ (tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
+ (tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
+ (tmp & UDC_IRQ_SOF) ? " sof" : "",
+ (tmp & UDC_EPN_RX) ? " epn_rx" : "",
+ (tmp & UDC_EPN_TX) ? " epn_tx" : "",
+ (tmp & UDC_DS_CHG) ? " ds_chg" : "",
+ (tmp & UDC_SETUP) ? " setup" : "",
+ (tmp & UDC_EP0_RX) ? " ep0out" : "",
+ (tmp & UDC_EP0_TX) ? " ep0in" : "");
+ if (use_dma) {
+ unsigned i;
+
+ tmp = omap_readw(UDC_DMA_IRQ_EN);
+ seq_printf(s, "dma_irq_en %04x%s" EIGHTBITS "\n", tmp,
+ (tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
+ (tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
+ (tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",
+
+ (tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
+ (tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
+ (tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",
+
+ (tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
+ (tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
+ (tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");
+
+ tmp = omap_readw(UDC_RXDMA_CFG);
+ seq_printf(s, "rxdma_cfg %04x\n", tmp);
+ if (tmp) {
+ for (i = 0; i < 3; i++) {
+ if ((tmp & (0x0f << (i * 4))) == 0)
+ continue;
+ seq_printf(s, "rxdma[%d] %04x\n", i,
+ omap_readw(UDC_RXDMA(i + 1)));
+ }
+ }
+ tmp = omap_readw(UDC_TXDMA_CFG);
+ seq_printf(s, "txdma_cfg %04x\n", tmp);
+ if (tmp) {
+ for (i = 0; i < 3; i++) {
+ if (!(tmp & (0x0f << (i * 4))))
+ continue;
+ seq_printf(s, "txdma[%d] %04x\n", i,
+ omap_readw(UDC_TXDMA(i + 1)));
+ }
+ }
+ }
+
+ tmp = omap_readw(UDC_DEVSTAT);
+ if (tmp & UDC_ATT) {
+ proc_ep_show(s, &udc->ep[0]);
+ if (tmp & UDC_ADD) {
+ list_for_each_entry(ep, &udc->gadget.ep_list,
+ ep.ep_list) {
+ if (ep->ep.desc)
+ proc_ep_show(s, ep);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static void create_proc_file(void)
+{
+ proc_create_single(proc_filename, 0, NULL, proc_udc_show);
+}
+
+static void remove_proc_file(void)
+{
+ remove_proc_entry(proc_filename, NULL);
+}
+
+#else
+
+static inline void create_proc_file(void) {}
+static inline void remove_proc_file(void) {}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* Before this controller can enumerate, we need to pick an endpoint
+ * configuration, or "fifo_mode". That involves allocating 2KB of packet
+ * buffer space among the endpoints we'll be operating.
+ *
+ * NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when
+ * UDC_SYSCON_1.CFG_LOCK is set can now work. We won't use that
+ * capability yet though.
+ */
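+/* A rough budget for fifo_mode 0, assuming double buffering is in effect
+ * (use_dma on non-15xx parts): ep0 occupies bytes 8..71 (64-byte maxpacket
+ * just after the 8-byte SETUP buffer), each 64-byte bulk endpoint then takes
+ * 128 bytes, the 16-byte interrupt endpoint takes 16 more, and the rest of
+ * the 2KB stays unused.
+ */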
+static unsigned
+omap_ep_setup(char *name, u8 addr, u8 type,
+ unsigned buf, unsigned maxp, int dbuf)
+{
+ struct omap_ep *ep;
+ u16 epn_rxtx = 0;
+
+ /* OUT endpoints first, then IN */
+ ep = &udc->ep[addr & 0xf];
+ if (addr & USB_DIR_IN)
+ ep += 16;
+
+ /* in case of ep init table bugs */
+ BUG_ON(ep->name[0]);
+
+ /* chip setup ... bit values are same for IN, OUT */
+ if (type == USB_ENDPOINT_XFER_ISOC) {
+ switch (maxp) {
+ case 8:
+ epn_rxtx = 0 << 12;
+ break;
+ case 16:
+ epn_rxtx = 1 << 12;
+ break;
+ case 32:
+ epn_rxtx = 2 << 12;
+ break;
+ case 64:
+ epn_rxtx = 3 << 12;
+ break;
+ case 128:
+ epn_rxtx = 4 << 12;
+ break;
+ case 256:
+ epn_rxtx = 5 << 12;
+ break;
+ case 512:
+ epn_rxtx = 6 << 12;
+ break;
+ default:
+ BUG();
+ }
+ epn_rxtx |= UDC_EPN_RX_ISO;
+ dbuf = 1;
+ } else {
+ /* double-buffering "not supported" on 15xx,
+ * and ignored for PIO-IN on newer chips
+ * (for more reliable behavior)
+ */
+ if (!use_dma || cpu_is_omap15xx())
+ dbuf = 0;
+
+ switch (maxp) {
+ case 8:
+ epn_rxtx = 0 << 12;
+ break;
+ case 16:
+ epn_rxtx = 1 << 12;
+ break;
+ case 32:
+ epn_rxtx = 2 << 12;
+ break;
+ case 64:
+ epn_rxtx = 3 << 12;
+ break;
+ default:
+ BUG();
+ }
+ if (dbuf && addr)
+ epn_rxtx |= UDC_EPN_RX_DB;
+ timer_setup(&ep->timer, pio_out_timer, 0);
+ }
+ if (addr)
+ epn_rxtx |= UDC_EPN_RX_VALID;
+ BUG_ON(buf & 0x07);
+ epn_rxtx |= buf >> 3;
+
+ DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
+ name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);
+
+ if (addr & USB_DIR_IN)
+ omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf));
+ else
+ omap_writew(epn_rxtx, UDC_EP_RX(addr));
+
+ /* next endpoint's buffer starts after this one's */
+ buf += maxp;
+ if (dbuf)
+ buf += maxp;
+ BUG_ON(buf > 2048);
+
+ /* set up driver data structures */
+ BUG_ON(strlen(name) >= sizeof ep->name);
+ strscpy(ep->name, name, sizeof(ep->name));
+ INIT_LIST_HEAD(&ep->queue);
+ INIT_LIST_HEAD(&ep->iso);
+ ep->bEndpointAddress = addr;
+ ep->bmAttributes = type;
+ ep->double_buf = dbuf;
+ ep->udc = udc;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ ep->ep.caps.type_iso = true;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep->ep.caps.type_bulk = true;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ ep->ep.caps.type_int = true;
+ break;
+ }
+
+ if (addr & USB_DIR_IN)
+ ep->ep.caps.dir_in = true;
+ else
+ ep->ep.caps.dir_out = true;
+
+ ep->ep.name = ep->name;
+ ep->ep.ops = &omap_ep_ops;
+ ep->maxpacket = maxp;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+ return buf;
+}
+
+static void omap_udc_release(struct device *dev)
+{
+ pullup_disable(udc);
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ usb_put_phy(udc->transceiver);
+ udc->transceiver = NULL;
+ }
+ omap_writew(0, UDC_SYSCON1);
+ remove_proc_file();
+ if (udc->dc_clk) {
+ if (udc->clk_requested)
+ omap_udc_enable_clock(0);
+ clk_unprepare(udc->hhc_clk);
+ clk_unprepare(udc->dc_clk);
+ clk_put(udc->hhc_clk);
+ clk_put(udc->dc_clk);
+ }
+ if (udc->done)
+ complete(udc->done);
+ kfree(udc);
+}
+
+static int
+omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
+{
+ unsigned tmp, buf;
+
+ /* abolish any previous hardware state */
+ omap_writew(0, UDC_SYSCON1);
+ omap_writew(0, UDC_IRQ_EN);
+ omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
+ omap_writew(0, UDC_DMA_IRQ_EN);
+ omap_writew(0, UDC_RXDMA_CFG);
+ omap_writew(0, UDC_TXDMA_CFG);
+
+ /* UDC_PULLUP_EN gates the chip clock */
+ /* OTG_SYSCON_1 |= DEV_IDLE_EN; */
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ spin_lock_init(&udc->lock);
+
+ udc->gadget.ops = &omap_gadget_ops;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ INIT_LIST_HEAD(&udc->iso);
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_FULL;
+ udc->gadget.name = driver_name;
+ udc->gadget.quirk_ep_out_aligned_size = 1;
+ udc->transceiver = xceiv;
+
+ /* ep0 is special; put it right after the SETUP buffer */
+ buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
+ 8 /* after SETUP */, 64 /* maxpacket */, 0);
+ list_del_init(&udc->ep[0].ep.ep_list);
+
+ /* initially disable all non-ep0 endpoints */
+ for (tmp = 1; tmp < 15; tmp++) {
+ omap_writew(0, UDC_EP_RX(tmp));
+ omap_writew(0, UDC_EP_TX(tmp));
+ }
+
+#define OMAP_BULK_EP(name, addr) \
+ buf = omap_ep_setup(name "-bulk", addr, \
+ USB_ENDPOINT_XFER_BULK, buf, 64, 1);
+#define OMAP_INT_EP(name, addr, maxp) \
+ buf = omap_ep_setup(name "-int", addr, \
+ USB_ENDPOINT_XFER_INT, buf, maxp, 0);
+#define OMAP_ISO_EP(name, addr, maxp) \
+ buf = omap_ep_setup(name "-iso", addr, \
+ USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);
+
+ switch (fifo_mode) {
+ case 0:
+ OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
+ OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+ OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
+ break;
+ case 1:
+ OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
+ OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+ OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
+
+ OMAP_BULK_EP("ep3in", USB_DIR_IN | 3);
+ OMAP_BULK_EP("ep4out", USB_DIR_OUT | 4);
+ OMAP_INT_EP("ep10in", USB_DIR_IN | 10, 16);
+
+ OMAP_BULK_EP("ep5in", USB_DIR_IN | 5);
+ OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
+ OMAP_INT_EP("ep11in", USB_DIR_IN | 11, 16);
+
+ OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
+ OMAP_BULK_EP("ep6out", USB_DIR_OUT | 6);
+ OMAP_INT_EP("ep12in", USB_DIR_IN | 12, 16);
+
+ OMAP_BULK_EP("ep7in", USB_DIR_IN | 7);
+ OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
+ OMAP_INT_EP("ep13in", USB_DIR_IN | 13, 16);
+ OMAP_INT_EP("ep13out", USB_DIR_OUT | 13, 16);
+
+ OMAP_BULK_EP("ep8in", USB_DIR_IN | 8);
+ OMAP_BULK_EP("ep8out", USB_DIR_OUT | 8);
+ OMAP_INT_EP("ep14in", USB_DIR_IN | 14, 16);
+ OMAP_INT_EP("ep14out", USB_DIR_OUT | 14, 16);
+
+ OMAP_BULK_EP("ep15in", USB_DIR_IN | 15);
+ OMAP_BULK_EP("ep15out", USB_DIR_OUT | 15);
+
+ break;
+
+#ifdef USE_ISO
+ case 2: /* mixed iso/bulk */
+ OMAP_ISO_EP("ep1in", USB_DIR_IN | 1, 256);
+ OMAP_ISO_EP("ep2out", USB_DIR_OUT | 2, 256);
+ OMAP_ISO_EP("ep3in", USB_DIR_IN | 3, 128);
+ OMAP_ISO_EP("ep4out", USB_DIR_OUT | 4, 128);
+
+ OMAP_INT_EP("ep5in", USB_DIR_IN | 5, 16);
+
+ OMAP_BULK_EP("ep6in", USB_DIR_IN | 6);
+ OMAP_BULK_EP("ep7out", USB_DIR_OUT | 7);
+ OMAP_INT_EP("ep8in", USB_DIR_IN | 8, 16);
+ break;
+ case 3: /* mixed bulk/iso */
+ OMAP_BULK_EP("ep1in", USB_DIR_IN | 1);
+ OMAP_BULK_EP("ep2out", USB_DIR_OUT | 2);
+ OMAP_INT_EP("ep3in", USB_DIR_IN | 3, 16);
+
+ OMAP_BULK_EP("ep4in", USB_DIR_IN | 4);
+ OMAP_BULK_EP("ep5out", USB_DIR_OUT | 5);
+ OMAP_INT_EP("ep6in", USB_DIR_IN | 6, 16);
+
+ OMAP_ISO_EP("ep7in", USB_DIR_IN | 7, 256);
+ OMAP_ISO_EP("ep8out", USB_DIR_OUT | 8, 256);
+ OMAP_INT_EP("ep9in", USB_DIR_IN | 9, 16);
+ break;
+#endif
+
+ /* add more modes as needed */
+
+ default:
+ ERR("unsupported fifo_mode #%d\n", fifo_mode);
+ return -ENODEV;
+ }
+ omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1);
+ INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
+ return 0;
+}
+
+static int omap_udc_probe(struct platform_device *pdev)
+{
+ int status = -ENODEV;
+ int hmc;
+ struct usb_phy *xceiv = NULL;
+ const char *type = NULL;
+ struct omap_usb_config *config = dev_get_platdata(&pdev->dev);
+ struct clk *dc_clk = NULL;
+ struct clk *hhc_clk = NULL;
+
+ if (cpu_is_omap7xx())
+ use_dma = 0;
+
+ /* NOTE: "knows" the order of the resources! */
+ if (!request_mem_region(pdev->resource[0].start,
+ resource_size(&pdev->resource[0]),
+ driver_name)) {
+ DBG("request_mem_region failed\n");
+ return -EBUSY;
+ }
+
+ if (cpu_is_omap16xx()) {
+ dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
+ hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
+ BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+ /* can't use omap_udc_enable_clock yet */
+ clk_prepare_enable(dc_clk);
+ clk_prepare_enable(hhc_clk);
+ udelay(100);
+ }
+
+ if (cpu_is_omap7xx()) {
+ dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
+ hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
+ BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+ /* can't use omap_udc_enable_clock yet */
+ clk_prepare_enable(dc_clk);
+ clk_prepare_enable(hhc_clk);
+ udelay(100);
+ }
+
+ INFO("OMAP UDC rev %d.%d%s\n",
+ omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
+ config->otg ? ", Mini-AB" : "");
+
+ /* use the mode given to us by board init code */
+ if (cpu_is_omap15xx()) {
+ hmc = HMC_1510;
+ type = "(unknown)";
+
+ if (machine_without_vbus_sense()) {
+ /* just set up software VBUS detect, and then
+ * later rig it so we always report VBUS.
+ * FIXME without really sensing VBUS, we can't
+ * know when to turn PULLUP_EN on/off; and that
+ * means we always "need" the 48MHz clock.
+ */
+ u32 tmp = omap_readl(FUNC_MUX_CTRL_0);
+ tmp &= ~VBUS_CTRL_1510;
+ omap_writel(tmp, FUNC_MUX_CTRL_0);
+ tmp |= VBUS_MODE_1510;
+ tmp &= ~VBUS_CTRL_1510;
+ omap_writel(tmp, FUNC_MUX_CTRL_0);
+ }
+ } else {
+ /* The transceiver may package some GPIO logic or handle
+ * loopback and/or transceiverless setup; if we find one,
+ * use it. Except for OTG, we don't _need_ to talk to one;
+ * but not having one probably means no VBUS detection.
+ */
+ xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (!IS_ERR_OR_NULL(xceiv))
+ type = xceiv->label;
+ else if (config->otg) {
+ DBG("OTG requires external transceiver!\n");
+ goto cleanup0;
+ }
+
+ hmc = HMC_1610;
+
+ switch (hmc) {
+ case 0: /* POWERUP DEFAULT == 0 */
+ case 4:
+ case 12:
+ case 20:
+ if (!cpu_is_omap1710()) {
+ type = "integrated";
+ break;
+ }
+ fallthrough;
+ case 3:
+ case 11:
+ case 16:
+ case 19:
+ case 25:
+ if (IS_ERR_OR_NULL(xceiv)) {
+ DBG("external transceiver not registered!\n");
+ type = "unknown";
+ }
+ break;
+ case 21: /* internal loopback */
+ type = "loopback";
+ break;
+ case 14: /* transceiverless */
+ if (cpu_is_omap1710())
+ goto bad_on_1710;
+ fallthrough;
+ case 13:
+ case 15:
+ type = "no";
+ break;
+
+ default:
+bad_on_1710:
+ ERR("unrecognized UDC HMC mode %d\n", hmc);
+ goto cleanup0;
+ }
+ }
+
+ INFO("hmc mode %d, %s transceiver\n", hmc, type);
+
+ /* a "gadget" abstracts/virtualizes the controller */
+ status = omap_udc_setup(pdev, xceiv);
+ if (status)
+ goto cleanup0;
+
+ xceiv = NULL;
+ /* "udc" is now valid */
+ pullup_disable(udc);
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
+ udc->gadget.is_otg = (config->otg != 0);
+#endif
+
+ /* starting with omap1710 es2.0, clear toggle is a separate bit */
+ if (omap_readw(UDC_REV) >= 0x61)
+ udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE;
+ else
+ udc->clr_halt = UDC_RESET_EP;
+
+ /* USB general purpose IRQ: ep0, state changes, dma, etc */
+ status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
+ omap_udc_irq, 0, driver_name, udc);
+ if (status != 0) {
+ ERR("can't get irq %d, err %d\n",
+ (int) pdev->resource[1].start, status);
+ goto cleanup1;
+ }
+
+ /* USB "non-iso" IRQ (PIO for all but ep0) */
+ status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
+ omap_udc_pio_irq, 0, "omap_udc pio", udc);
+ if (status != 0) {
+ ERR("can't get irq %d, err %d\n",
+ (int) pdev->resource[2].start, status);
+ goto cleanup1;
+ }
+#ifdef USE_ISO
+ status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
+ omap_udc_iso_irq, 0, "omap_udc iso", udc);
+ if (status != 0) {
+ ERR("can't get irq %d, err %d\n",
+ (int) pdev->resource[3].start, status);
+ goto cleanup1;
+ }
+#endif
+ if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+ udc->dc_clk = dc_clk;
+ udc->hhc_clk = hhc_clk;
+ clk_disable(hhc_clk);
+ clk_disable(dc_clk);
+ }
+
+ create_proc_file();
+ return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+ omap_udc_release);
+
+cleanup1:
+ kfree(udc);
+ udc = NULL;
+
+cleanup0:
+ if (!IS_ERR_OR_NULL(xceiv))
+ usb_put_phy(xceiv);
+
+ if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+ clk_disable_unprepare(hhc_clk);
+ clk_disable_unprepare(dc_clk);
+ clk_put(hhc_clk);
+ clk_put(dc_clk);
+ }
+
+ release_mem_region(pdev->resource[0].start,
+ resource_size(&pdev->resource[0]));
+
+ return status;
+}
+
+static int omap_udc_remove(struct platform_device *pdev)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ udc->done = &done;
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ wait_for_completion(&done);
+
+ release_mem_region(pdev->resource[0].start,
+ resource_size(&pdev->resource[0]));
+
+ return 0;
+}
+
+/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
+ * system is forced into deep sleep
+ *
+ * REVISIT we should probably reject suspend requests when there's a host
+ * session active, rather than disconnecting, at least on boards that can
+ * report VBUS irqs (UDC_DEVSTAT.UDC_ATT). And in any case, we need to
+ * make host resumes and VBUS detection trigger OMAP wakeup events; that
+ * may involve talking to an external transceiver (e.g. isp1301).
+ */
+
+static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
+{
+ u32 devstat;
+
+ devstat = omap_readw(UDC_DEVSTAT);
+
+ /* we're requesting 48 MHz clock if the pullup is enabled
+ * (== we're attached to the host) and we're not suspended,
+ * which would prevent entry to deep sleep...
+ */
+ if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) {
+ WARNING("session active; suspend requires disconnect\n");
+ omap_pullup(&udc->gadget, 0);
+ }
+
+ return 0;
+}
+
+static int omap_udc_resume(struct platform_device *dev)
+{
+ DBG("resume + wakeup/SRP\n");
+ omap_pullup(&udc->gadget, 1);
+
+ /* maybe the host would enumerate us if we nudged it */
+ msleep(100);
+ return omap_wakeup(&udc->gadget);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+ .probe = omap_udc_probe,
+ .remove = omap_udc_remove,
+ .suspend = omap_udc_suspend,
+ .resume = omap_udc_resume,
+ .driver = {
+ .name = driver_name,
+ },
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap_udc");
diff --git a/drivers/usb/gadget/udc/omap_udc.h b/drivers/usb/gadget/udc/omap_udc.h
new file mode 100644
index 000000000..00f9e608e
--- /dev/null
+++ b/drivers/usb/gadget/udc/omap_udc.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * omap_udc.h -- for omap 3.2 udc, with OTG support
+ *
+ * 2004 (C) Texas Instruments, Inc.
+ * 2004 (C) David Brownell
+ */
+
+/*
+ * USB device/endpoint management registers
+ */
+
+#define UDC_REV (UDC_BASE + 0x0) /* Revision */
+#define UDC_EP_NUM (UDC_BASE + 0x4) /* Which endpoint */
+# define UDC_SETUP_SEL (1 << 6)
+# define UDC_EP_SEL (1 << 5)
+# define UDC_EP_DIR (1 << 4)
+ /* low 4 bits for endpoint number */
+#define UDC_DATA (UDC_BASE + 0x08) /* Endpoint FIFO */
+#define UDC_CTRL (UDC_BASE + 0x0C) /* Endpoint control */
+# define UDC_CLR_HALT (1 << 7)
+# define UDC_SET_HALT (1 << 6)
+# define UDC_CLRDATA_TOGGLE (1 << 3)
+# define UDC_SET_FIFO_EN (1 << 2)
+# define UDC_CLR_EP (1 << 1)
+# define UDC_RESET_EP (1 << 0)
+#define UDC_STAT_FLG (UDC_BASE + 0x10) /* Endpoint status */
+# define UDC_NO_RXPACKET (1 << 15)
+# define UDC_MISS_IN (1 << 14)
+# define UDC_DATA_FLUSH (1 << 13)
+# define UDC_ISO_ERR (1 << 12)
+# define UDC_ISO_FIFO_EMPTY (1 << 9)
+# define UDC_ISO_FIFO_FULL (1 << 8)
+# define UDC_EP_HALTED (1 << 6)
+# define UDC_STALL (1 << 5)
+# define UDC_NAK (1 << 4)
+# define UDC_ACK (1 << 3)
+# define UDC_FIFO_EN (1 << 2)
+# define UDC_NON_ISO_FIFO_EMPTY (1 << 1)
+# define UDC_NON_ISO_FIFO_FULL (1 << 0)
+#define UDC_RXFSTAT (UDC_BASE + 0x14) /* OUT bytecount */
+#define UDC_SYSCON1 (UDC_BASE + 0x18) /* System config 1 */
+# define UDC_CFG_LOCK (1 << 8)
+# define UDC_DATA_ENDIAN (1 << 7)
+# define UDC_DMA_ENDIAN (1 << 6)
+# define UDC_NAK_EN (1 << 4)
+# define UDC_AUTODECODE_DIS (1 << 3)
+# define UDC_SELF_PWR (1 << 2)
+# define UDC_SOFF_DIS (1 << 1)
+# define UDC_PULLUP_EN (1 << 0)
+#define UDC_SYSCON2 (UDC_BASE + 0x1C) /* System config 2 */
+# define UDC_RMT_WKP (1 << 6)
+# define UDC_STALL_CMD (1 << 5)
+# define UDC_DEV_CFG (1 << 3)
+# define UDC_CLR_CFG (1 << 2)
+#define UDC_DEVSTAT (UDC_BASE + 0x20) /* Device status */
+# define UDC_B_HNP_ENABLE (1 << 9)
+# define UDC_A_HNP_SUPPORT (1 << 8)
+# define UDC_A_ALT_HNP_SUPPORT (1 << 7)
+# define UDC_R_WK_OK (1 << 6)
+# define UDC_USB_RESET (1 << 5)
+# define UDC_SUS (1 << 4)
+# define UDC_CFG (1 << 3)
+# define UDC_ADD (1 << 2)
+# define UDC_DEF (1 << 1)
+# define UDC_ATT (1 << 0)
+#define UDC_SOF (UDC_BASE + 0x24) /* Start of frame */
+# define UDC_FT_LOCK (1 << 12)
+# define UDC_TS_OK (1 << 11)
+# define UDC_TS 0x03ff
+#define UDC_IRQ_EN (UDC_BASE + 0x28) /* Interrupt enable */
+# define UDC_SOF_IE (1 << 7)
+# define UDC_EPN_RX_IE (1 << 5)
+# define UDC_EPN_TX_IE (1 << 4)
+# define UDC_DS_CHG_IE (1 << 3)
+# define UDC_EP0_IE (1 << 0)
+#define UDC_DMA_IRQ_EN (UDC_BASE + 0x2C) /* DMA irq enable */
+ /* rx/tx dma channels numbered 1-3 not 0-2 */
+# define UDC_TX_DONE_IE(n) (1 << (4 * (n) - 2))
+# define UDC_RX_CNT_IE(n) (1 << (4 * (n) - 3))
+# define UDC_RX_EOT_IE(n) (1 << (4 * (n) - 4))
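+	/* e.g. for channel 1: UDC_TX_DONE_IE(1) is bit 2, UDC_RX_CNT_IE(1)
+	 * is bit 1, and UDC_RX_EOT_IE(1) is bit 0 */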
+#define UDC_IRQ_SRC (UDC_BASE + 0x30) /* Interrupt source */
+# define UDC_TXN_DONE (1 << 10)
+# define UDC_RXN_CNT (1 << 9)
+# define UDC_RXN_EOT (1 << 8)
+# define UDC_IRQ_SOF (1 << 7)
+# define UDC_EPN_RX (1 << 5)
+# define UDC_EPN_TX (1 << 4)
+# define UDC_DS_CHG (1 << 3)
+# define UDC_SETUP (1 << 2)
+# define UDC_EP0_RX (1 << 1)
+# define UDC_EP0_TX (1 << 0)
+# define UDC_IRQ_SRC_MASK 0x7bf
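+	/* i.e. all of the IRQ_SRC bits defined above */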
+#define UDC_EPN_STAT (UDC_BASE + 0x34) /* EP irq status */
+#define UDC_DMAN_STAT (UDC_BASE + 0x38) /* DMA irq status */
+# define UDC_DMA_RX_SB (1 << 12)
+# define UDC_DMA_RX_SRC(x) (((x)>>8) & 0xf)
+# define UDC_DMA_TX_SRC(x) (((x)>>0) & 0xf)
+
+
+/* DMA configuration registers: up to three channels in each direction. */
+#define UDC_RXDMA_CFG (UDC_BASE + 0x40) /* 3 eps for RX DMA */
+# define UDC_DMA_REQ (1 << 12)
+#define UDC_TXDMA_CFG (UDC_BASE + 0x44) /* 3 eps for TX DMA */
+#define UDC_DATA_DMA (UDC_BASE + 0x48) /* rx/tx fifo addr */
+
+/* rx/tx dma control, numbering channels 1-3 not 0-2 */
+#define UDC_TXDMA(chan) (UDC_BASE + 0x50 - 4 + 4 * (chan))
+# define UDC_TXN_EOT (1 << 15) /* bytes vs packets */
+# define UDC_TXN_START (1 << 14) /* start transfer */
+# define UDC_TXN_TSC 0x03ff /* units in xfer */
+#define UDC_RXDMA(chan) (UDC_BASE + 0x60 - 4 + 4 * (chan))
+# define UDC_RXN_STOP (1 << 15) /* enable EOT irq */
+# define UDC_RXN_TC 0x00ff /* packets in xfer */
+
+
+/*
+ * Endpoint configuration registers (used before CFG_LOCK is set)
+ * UDC_EP_TX(0) is unused
+ */
+#define UDC_EP_RX(endpoint) (UDC_BASE + 0x80 + (endpoint)*4)
+# define UDC_EPN_RX_VALID (1 << 15)
+# define UDC_EPN_RX_DB (1 << 14)
+ /* buffer size in bits 13, 12 */
+# define UDC_EPN_RX_ISO (1 << 11)
+ /* buffer pointer in low 11 bits */
+#define UDC_EP_TX(endpoint) (UDC_BASE + 0xc0 + (endpoint)*4)
+ /* same bitfields as in RX */
+
+/*-------------------------------------------------------------------------*/
+
+struct omap_req {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned dma_bytes;
+ unsigned mapped:1;
+};
+
+struct omap_ep {
+ struct usb_ep ep;
+ struct list_head queue;
+ unsigned long irqs;
+ struct list_head iso;
+ char name[14];
+ u16 maxpacket;
+ u8 bEndpointAddress;
+ u8 bmAttributes;
+ unsigned double_buf:1;
+ unsigned stopped:1;
+ unsigned fnf:1;
+ unsigned has_dma:1;
+ u8 ackwait;
+ u8 dma_channel;
+ u16 dma_counter;
+ int lch;
+ struct omap_udc *udc;
+ struct timer_list timer;
+};
+
+struct omap_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ spinlock_t lock;
+ struct omap_ep ep[32];
+ u16 devstat;
+ u16 clr_halt;
+ struct usb_phy *transceiver;
+ struct list_head iso;
+ unsigned softconnect:1;
+ unsigned vbus_active:1;
+ unsigned ep0_pending:1;
+ unsigned ep0_in:1;
+ unsigned ep0_set_config:1;
+ unsigned ep0_reset_config:1;
+ unsigned ep0_setup:1;
+ struct completion *done;
+ struct clk *dc_clk;
+ struct clk *hhc_clk;
+ unsigned clk_requested:1;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef VERBOSE
+# define VDBG DBG
+#else
+# define VDBG(stuff...) do {} while (0)
+#endif
+
+#define ERR(stuff...) pr_err("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
+#define INFO(stuff...) pr_info("udc: " stuff)
+#define DBG(stuff...) pr_debug("udc: " stuff)
+
+/*-------------------------------------------------------------------------*/
+
+/* MOD_CONF_CTRL_0 */
+#define VBUS_W2FC_1510 (1 << 17) /* 0 gpio0, 1 dvdd2 pin */
+
+/* FUNC_MUX_CTRL_0 */
+#define VBUS_CTRL_1510 (1 << 19) /* 1 connected (software) */
+#define VBUS_MODE_1510 (1 << 18) /* 0 hardware, 1 software */
+
+#define HMC_1510 ((omap_readl(MOD_CONF_CTRL_0) >> 1) & 0x3f)
+#define HMC_1610 (omap_readl(OTG_SYSCON_2) & 0x3f)
+#define HMC (cpu_is_omap15xx() ? HMC_1510 : HMC_1610)
+
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
new file mode 100644
index 000000000..4f8617210
--- /dev/null
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -0,0 +1,3163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/irq.h>
+
+#define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
+#define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
+
+/* Address offset of Registers */
+#define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
+
+#define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
+#define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
+#define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
+#define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
+#define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
+#define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
+#define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
+
+#define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
+#define UDC_DEVCTL_ADDR 0x404 /* Device control */
+#define UDC_DEVSTS_ADDR 0x408 /* Device status */
+#define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
+#define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
+#define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
+#define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
+#define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
+#define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
+#define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
+#define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
+
+/* Endpoint control register */
+/* Bit position */
+#define UDC_EPCTL_MRXFLUSH (1 << 12)
+#define UDC_EPCTL_RRDY (1 << 9)
+#define UDC_EPCTL_CNAK (1 << 8)
+#define UDC_EPCTL_SNAK (1 << 7)
+#define UDC_EPCTL_NAK (1 << 6)
+#define UDC_EPCTL_P (1 << 3)
+#define UDC_EPCTL_F (1 << 1)
+#define UDC_EPCTL_S (1 << 0)
+#define UDC_EPCTL_ET_SHIFT 4
+/* Mask pattern */
+#define UDC_EPCTL_ET_MASK 0x00000030
+/* Value for ET field */
+#define UDC_EPCTL_ET_CONTROL 0
+#define UDC_EPCTL_ET_ISO 1
+#define UDC_EPCTL_ET_BULK 2
+#define UDC_EPCTL_ET_INTERRUPT 3
+
+/* Endpoint status register */
+/* Bit position */
+#define UDC_EPSTS_XFERDONE (1 << 27)
+#define UDC_EPSTS_RSS (1 << 26)
+#define UDC_EPSTS_RCS (1 << 25)
+#define UDC_EPSTS_TXEMPTY (1 << 24)
+#define UDC_EPSTS_TDC (1 << 10)
+#define UDC_EPSTS_HE (1 << 9)
+#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
+#define UDC_EPSTS_BNA (1 << 7)
+#define UDC_EPSTS_IN (1 << 6)
+#define UDC_EPSTS_OUT_SHIFT 4
+/* Mask pattern */
+#define UDC_EPSTS_OUT_MASK 0x00000030
+#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
+/* Value for OUT field */
+#define UDC_EPSTS_OUT_SETUP 2
+#define UDC_EPSTS_OUT_DATA 1
+
+/* Device configuration register */
+/* Bit position */
+#define UDC_DEVCFG_CSR_PRG (1 << 17)
+#define UDC_DEVCFG_SP (1 << 3)
+/* SPD Value */
+#define UDC_DEVCFG_SPD_HS 0x0
+#define UDC_DEVCFG_SPD_FS 0x1
+#define UDC_DEVCFG_SPD_LS 0x2
+
+/* Device control register */
+/* Bit position */
+#define UDC_DEVCTL_THLEN_SHIFT 24
+#define UDC_DEVCTL_BRLEN_SHIFT 16
+#define UDC_DEVCTL_CSR_DONE (1 << 13)
+#define UDC_DEVCTL_SD (1 << 10)
+#define UDC_DEVCTL_MODE (1 << 9)
+#define UDC_DEVCTL_BREN (1 << 8)
+#define UDC_DEVCTL_THE (1 << 7)
+#define UDC_DEVCTL_DU (1 << 4)
+#define UDC_DEVCTL_TDE (1 << 3)
+#define UDC_DEVCTL_RDE (1 << 2)
+#define UDC_DEVCTL_RES (1 << 0)
+
+/* Device status register */
+/* Bit position */
+#define UDC_DEVSTS_TS_SHIFT 18
+#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
+#define UDC_DEVSTS_ALT_SHIFT 8
+#define UDC_DEVSTS_INTF_SHIFT 4
+#define UDC_DEVSTS_CFG_SHIFT 0
+/* Mask pattern */
+#define UDC_DEVSTS_TS_MASK 0xfffc0000
+#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
+#define UDC_DEVSTS_ALT_MASK 0x00000f00
+#define UDC_DEVSTS_INTF_MASK 0x000000f0
+#define UDC_DEVSTS_CFG_MASK 0x0000000f
+/* Values for the ENUM_SPEED field */
+#define UDC_DEVSTS_ENUM_SPEED_FULL 1
+#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
+#define UDC_DEVSTS_ENUM_SPEED_LOW 2
+#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
+
+/* Device irq register */
+/* Bit position */
+#define UDC_DEVINT_RWKP (1 << 7)
+#define UDC_DEVINT_ENUM (1 << 6)
+#define UDC_DEVINT_SOF (1 << 5)
+#define UDC_DEVINT_US (1 << 4)
+#define UDC_DEVINT_UR (1 << 3)
+#define UDC_DEVINT_ES (1 << 2)
+#define UDC_DEVINT_SI (1 << 1)
+#define UDC_DEVINT_SC (1 << 0)
+/* Mask pattern */
+#define UDC_DEVINT_MSK 0x7f
+
+/* Endpoint irq register */
+/* Bit position */
+#define UDC_EPINT_IN_SHIFT 0
+#define UDC_EPINT_OUT_SHIFT 16
+#define UDC_EPINT_IN_EP0 (1 << 0)
+#define UDC_EPINT_OUT_EP0 (1 << 16)
+/* Mask pattern */
+#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
+
+/* UDC_CSR_BUSY Status register */
+/* Bit position */
+#define UDC_CSR_BUSY (1 << 0)
+
+/* SOFT RESET register */
+/* Bit position */
+#define UDC_PSRST (1 << 1)
+#define UDC_SRST (1 << 0)
+
+/* USB_DEVICE endpoint register */
+/* Bit position */
+#define UDC_CSR_NE_NUM_SHIFT 0
+#define UDC_CSR_NE_DIR_SHIFT 4
+#define UDC_CSR_NE_TYPE_SHIFT 5
+#define UDC_CSR_NE_CFG_SHIFT 7
+#define UDC_CSR_NE_INTF_SHIFT 11
+#define UDC_CSR_NE_ALT_SHIFT 15
+#define UDC_CSR_NE_MAX_PKT_SHIFT 19
+/* Mask pattern */
+#define UDC_CSR_NE_NUM_MASK 0x0000000f
+#define UDC_CSR_NE_DIR_MASK 0x00000010
+#define UDC_CSR_NE_TYPE_MASK 0x00000060
+#define UDC_CSR_NE_CFG_MASK 0x00000780
+#define UDC_CSR_NE_INTF_MASK 0x00007800
+#define UDC_CSR_NE_ALT_MASK 0x00078000
+#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
+
+#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
+#define PCH_UDC_EPINT(in, num)\
+ (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
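+/* e.g. PCH_UDC_EPINT(1, 0) == UDC_EPINT_IN_EP0 (bit 0), and
+ * PCH_UDC_EPINT(0, 0) == UDC_EPINT_OUT_EP0 (bit 16)
+ */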
+
+/* Index of endpoint */
+#define UDC_EP0IN_IDX 0
+#define UDC_EP0OUT_IDX 1
+#define UDC_EPIN_IDX(ep) (ep * 2)
+#define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
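+/* IN/OUT pairs interleave: UDC_EPIN_IDX(1) == 2 and UDC_EPOUT_IDX(1) == 3,
+ * with ep0's IN and OUT halves at indices 0 and 1 */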
+#define PCH_UDC_EP0 0
+#define PCH_UDC_EP1 1
+#define PCH_UDC_EP2 2
+#define PCH_UDC_EP3 3
+
+/* Number of endpoints */
+#define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN, 16 OUT) */
+#define PCH_UDC_USED_EP_NUM 4 /* Number of EPs actually used */
+/* Length Value */
+#define PCH_UDC_BRLEN 0x0F /* Burst length */
+#define PCH_UDC_THLEN 0x1F /* Threshold length */
+/* Value of EP Buffer Size */
+#define UDC_EP0IN_BUFF_SIZE 16
+#define UDC_EPIN_BUFF_SIZE 256
+#define UDC_EP0OUT_BUFF_SIZE 16
+#define UDC_EPOUT_BUFF_SIZE 256
+/* Value of EP maximum packet size */
+#define UDC_EP0IN_MAX_PKT_SIZE 64
+#define UDC_EP0OUT_MAX_PKT_SIZE 64
+#define UDC_BULK_MAX_PKT_SIZE 512
+
+/* DMA */
+#define DMA_DIR_RX 1 /* DMA for data receive */
+#define DMA_DIR_TX 2 /* DMA for data transmit */
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
+
+/**
+ * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
+ * for data
+ * @status: Status quadlet
+ * @reserved: Reserved
+ * @dataptr: Data buffer pointer
+ * @next: Next descriptor
+ */
+struct pch_udc_data_dma_desc {
+ u32 status;
+ u32 reserved;
+ u32 dataptr;
+ u32 next;
+};
+
+/**
+ * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
+ * for control data
+ * @status: Status
+ * @reserved: Reserved
+ * @request: Control Request
+ */
+struct pch_udc_stp_dma_desc {
+ u32 status;
+ u32 reserved;
+ struct usb_ctrlrequest request;
+} __attribute((packed));
+
+/* DMA status definitions */
+/* Buffer status */
+#define PCH_UDC_BUFF_STS 0xC0000000
+#define PCH_UDC_BS_HST_RDY 0x00000000
+#define PCH_UDC_BS_DMA_BSY 0x40000000
+#define PCH_UDC_BS_DMA_DONE 0x80000000
+#define PCH_UDC_BS_HST_BSY 0xC0000000
+/* Rx/Tx Status */
+#define PCH_UDC_RXTX_STS 0x30000000
+#define PCH_UDC_RTS_SUCC 0x00000000
+#define PCH_UDC_RTS_DESERR 0x10000000
+#define PCH_UDC_RTS_BUFERR 0x30000000
+/* Last Descriptor Indication */
+#define PCH_UDC_DMA_LAST 0x08000000
+/* Number of Rx/Tx Bytes Mask */
+#define PCH_UDC_RXTX_BYTES 0x0000ffff
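+/* Checking a completed descriptor typically looks like this (a sketch, not
+ * a quote from the code below):
+ *	if ((desc->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE &&
+ *	    (desc->status & PCH_UDC_RXTX_STS) == PCH_UDC_RTS_SUCC)
+ *		count = desc->status & PCH_UDC_RXTX_BYTES;
+ */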
+
+/**
+ * struct pch_udc_cfg_data - Structure to hold current configuration
+ * and interface information
+ * @cur_cfg: current configuration in use
+ * @cur_intf: current interface in use
+ * @cur_alt: current alt interface in use
+ */
+struct pch_udc_cfg_data {
+ u16 cur_cfg;
+ u16 cur_intf;
+ u16 cur_alt;
+};
+
+/**
+ * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
+ * @ep: embedded usb endpoint
+ * @td_stp_phys: for setup request
+ * @td_data_phys: for data request
+ * @td_stp: for setup request
+ * @td_data: for data request
+ * @dev: reference to device struct
+ * @offset_addr: offset address of ep register
+ * @queue: queue for requests
+ * @num: endpoint number
+ * @in: endpoint is IN
+ * @halted: endpoint halted?
+ * @epsts: Endpoint status
+ */
+struct pch_udc_ep {
+ struct usb_ep ep;
+ dma_addr_t td_stp_phys;
+ dma_addr_t td_data_phys;
+ struct pch_udc_stp_dma_desc *td_stp;
+ struct pch_udc_data_dma_desc *td_data;
+ struct pch_udc_dev *dev;
+ unsigned long offset_addr;
+ struct list_head queue;
+ unsigned num:5,
+ in:1,
+ halted:1;
+ unsigned long epsts;
+};
+
+/**
+ * struct pch_vbus_gpio_data - Structure holding GPIO information
+ * for detecting VBUS
+ * @port: gpio descriptor for the VBUS GPIO
+ * @intr: gpio interrupt number
+ * @irq_work_fall: Structure for WorkQueue
+ * @irq_work_rise: Structure for WorkQueue
+ */
+struct pch_vbus_gpio_data {
+ struct gpio_desc *port;
+ int intr;
+ struct work_struct irq_work_fall;
+ struct work_struct irq_work_rise;
+};
+
+/**
+ * struct pch_udc_dev - Structure holding complete information
+ * of the PCH USB device
+ * @gadget: gadget driver data
+ * @driver: reference to gadget driver bound
+ * @pdev: reference to the PCI device
+ * @ep: array of endpoints
+ * @lock: protects all state
+ * @stall: stall requested
+ * @prot_stall: protocol stall requested
+ * @suspended: driver in suspended state
+ * @connected: gadget driver associated
+ * @vbus_session: required vbus_session state
+ * @set_cfg_not_acked: pending acknowledgement for setup
+ * @waiting_zlp_ack: pending acknowledgement for ZLP
+ * @data_requests: DMA pool for data requests
+ * @stp_requests: DMA pool for setup requests
+ * @dma_addr: DMA address of the receive buffer
+ * @setup_data: Received setup data
+ * @base_addr: for mapped device memory
+ * @bar: PCI BAR used for mapped device memory
+ * @cfg_data: current cfg, intf, and alt in use
+ * @vbus_gpio: GPIO information for detecting VBUS
+ */
+struct pch_udc_dev {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct pci_dev *pdev;
+ struct pch_udc_ep ep[PCH_UDC_EP_NUM];
+ spinlock_t lock; /* protects all state */
+ unsigned
+ stall:1,
+ prot_stall:1,
+ suspended:1,
+ connected:1,
+ vbus_session:1,
+ set_cfg_not_acked:1,
+ waiting_zlp_ack:1;
+ struct dma_pool *data_requests;
+ struct dma_pool *stp_requests;
+ dma_addr_t dma_addr;
+ struct usb_ctrlrequest setup_data;
+ void __iomem *base_addr;
+ unsigned short bar;
+ struct pch_udc_cfg_data cfg_data;
+ struct pch_vbus_gpio_data vbus_gpio;
+};
+#define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
+
+#define PCH_UDC_PCI_BAR_QUARK_X1000 0
+#define PCH_UDC_PCI_BAR 1
+
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
+#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
+
+#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
+#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
+
+static const char ep0_string[] = "ep0in";
+static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
+static bool speed_fs;
+module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
+MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
+
+/**
+ * struct pch_udc_request - Structure holding a PCH USB device request packet
+ * @req: embedded ep request
+ * @td_data_phys: phys. address
+ * @td_data: first dma desc. of chain
+ * @td_data_last: last dma desc. of chain
+ * @queue: associated queue
+ * @dma_going: DMA in progress for request
+ * @dma_done: DMA completed for request
+ * @chain_len: chain length
+ */
+struct pch_udc_request {
+ struct usb_request req;
+ dma_addr_t td_data_phys;
+ struct pch_udc_data_dma_desc *td_data;
+ struct pch_udc_data_dma_desc *td_data_last;
+ struct list_head queue;
+ unsigned dma_going:1,
+ dma_done:1;
+ unsigned chain_len;
+};
+
+static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
+{
+ return ioread32(dev->base_addr + reg);
+}
+
+static inline void pch_udc_writel(struct pch_udc_dev *dev,
+ unsigned long val, unsigned long reg)
+{
+ iowrite32(val, dev->base_addr + reg);
+}
+
+static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
+ unsigned long reg,
+ unsigned long bitmask)
+{
+ pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
+}
+
+static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
+ unsigned long reg,
+ unsigned long bitmask)
+{
+ pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
+}
+
+static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
+{
+ return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
+}
+
+static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
+ unsigned long val, unsigned long reg)
+{
+ iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
+}
+
+static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
+ unsigned long reg,
+ unsigned long bitmask)
+{
+ pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
+}
+
+static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
+ unsigned long reg,
+ unsigned long bitmask)
+{
+ pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
+}
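+/* The _ep_ helpers above do read-modify-write accesses within an endpoint's
+ * private register window; ep->offset_addr selects the window, and windows
+ * are spaced UDC_EP_REG_SHIFT bytes apart (see the register map above).
+ */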
+
+/**
+ * pch_udc_csr_busy() - Wait till idle.
+ * @dev: Reference to pch_udc_dev structure
+ */
+static void pch_udc_csr_busy(struct pch_udc_dev *dev)
+{
+ unsigned int count = 200;
+
+ /* Wait till idle */
+ while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
+ && --count)
+ cpu_relax();
+ if (!count)
+ dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
+}
+
+/**
+ * pch_udc_write_csr() - Write the command and status registers.
+ * @dev: Reference to pch_udc_dev structure
+ * @val: value to be written to CSR register
+ * @ep: end-point number
+ */
+static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
+ unsigned int ep)
+{
+ unsigned long reg = PCH_UDC_CSR(ep);
+
+ pch_udc_csr_busy(dev); /* Wait till idle */
+ pch_udc_writel(dev, val, reg);
+ pch_udc_csr_busy(dev); /* Wait till idle */
+}
+
+/**
+ * pch_udc_read_csr() - Read the command and status registers.
+ * @dev: Reference to pch_udc_dev structure
+ * @ep: end-point number
+ *
+ * Return codes: content of CSR register
+ */
+static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
+{
+ unsigned long reg = PCH_UDC_CSR(ep);
+
+ pch_udc_csr_busy(dev); /* Wait till idle */
+ pch_udc_readl(dev, reg); /* Dummy read */
+ pch_udc_csr_busy(dev); /* Wait till idle */
+ return pch_udc_readl(dev, reg);
+}
+
+/**
+ * pch_udc_rmt_wakeup() - Initiate remote wakeup
+ * @dev: Reference to pch_udc_dev structure
+ */
+static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
+{
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+ mdelay(1);
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_get_frame() - Get the current frame from device status register
+ * @dev: Reference to pch_udc_dev structure
+ * Return current frame
+ */
+static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
+{
+ u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
+ return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
+}
+
+/**
+ * pch_udc_clear_selfpowered() - Clear the self power control
+ * @dev: Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
+{
+ pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
+}
+
+/**
+ * pch_udc_set_selfpowered() - Set the self power control
+ * @dev: Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
+{
+ pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
+}
+
+/**
+ * pch_udc_set_disconnect() - Set the disconnect status.
+ * @dev: Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
+{
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+}
+
+/**
+ * pch_udc_clear_disconnect() - Clear the disconnect status.
+ * @dev: Reference to pch_udc_regs structure
+ */
+static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
+{
+ /* Clear the disconnect */
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+ mdelay(1);
+ /* Resume USB signalling */
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+static void pch_udc_init(struct pch_udc_dev *dev);
+
+/**
+ * pch_udc_reconnect() - Initialize the usb device controller and
+ * clear the disconnect status.
+ * @dev: Reference to pch_udc_regs structure
+ */
+static void pch_udc_reconnect(struct pch_udc_dev *dev)
+{
+ pch_udc_init(dev);
+
+ /* enable device interrupts */
+ /* pch_udc_enable_interrupts() */
+ pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
+ UDC_DEVINT_UR | UDC_DEVINT_ENUM);
+
+ /* Clear the disconnect */
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+ mdelay(1);
+ /* Resume USB signalling */
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_vbus_session() - Set or clear the disconnect status.
+ * @dev: Reference to pch_udc_regs structure
+ * @is_active: Parameter specifying the action
+ * 0: indicating VBUS power is ending
+ * !0: indicating VBUS power is starting
+ */
+static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
+ int is_active)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&dev->lock, iflags);
+ if (is_active) {
+ pch_udc_reconnect(dev);
+ dev->vbus_session = 1;
+ } else {
+ if (dev->driver && dev->driver->disconnect) {
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ dev->driver->disconnect(&dev->gadget);
+ spin_lock_irqsave(&dev->lock, iflags);
+ }
+ pch_udc_set_disconnect(dev);
+ dev->vbus_session = 0;
+ }
+ spin_unlock_irqrestore(&dev->lock, iflags);
+}
+
+/**
+ * pch_udc_ep_set_stall() - Set the stall of endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
+{
+ if (ep->in) {
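+		/* F is set before S for IN endpoints, presumably flushing
+		 * the TX fifo so stale data is not sent when the stall is
+		 * later cleared */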
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+ } else {
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+ }
+}
+
+/**
+ * pch_udc_ep_clear_stall() - Clear the stall of endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
+{
+ /* Clear the stall */
+ pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+ /* Clear NAK by writing CNAK */
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
+}
+
+/**
+ * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * @type: Type of endpoint
+ */
+static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
+ u8 type)
+{
+ pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
+ UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * @buf_size: The buffer word size
+ * @ep_in: EP is IN
+ */
+static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
+ u32 buf_size, u32 ep_in)
+{
+ u32 data;
+ if (ep_in) {
+ data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
+ data = (data & 0xffff0000) | (buf_size & 0xffff);
+ pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
+ } else {
+ data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
+ data = (buf_size << 16) | (data & 0xffff);
+ pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
+ }
+}
+
+/**
+ * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * @pkt_size: The packet byte size
+ */
+static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
+{
+ u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
+ data = (data & 0xffff0000) | (pkt_size & 0xffff);
+ pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * @addr: Address of the SETUP buffer to be written to the register
+ */
+static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
+{
+ pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * @addr: Address of the data descriptor to be written to the register
+ */
+static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
+{
+ pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
+{
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
+}
+
+/**
+ * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
+{
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
+}
+
+/**
+ * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
+{
+ pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
+}
+
+/**
+ * pch_udc_set_dma() - Set the TDE or RDE bit of device control
+ * register depending on the direction specified
+ * @dev: Reference to structure of type pch_udc_regs
+ * @dir: whether Tx or Rx
+ * DMA_DIR_RX: Receive
+ * DMA_DIR_TX: Transmit
+ */
+static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
+{
+ if (dir == DMA_DIR_RX)
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
+ else if (dir == DMA_DIR_TX)
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
+}
+
+/**
+ * pch_udc_clear_dma() - Clear the TDE or RDE bit of device control
+ * register depending on the direction specified
+ * @dev: Reference to structure of type pch_udc_regs
+ * @dir: Whether Tx or Rx
+ * DMA_DIR_RX: Receive
+ * DMA_DIR_TX: Transmit
+ */
+static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
+{
+ if (dir == DMA_DIR_RX)
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
+ else if (dir == DMA_DIR_TX)
+ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
+}
+
+/**
+ * pch_udc_set_csr_done() - Set the device control register
+ * CSR done field (bit 13)
+ * @dev: reference to structure of type pch_udc_regs
+ */
+static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
+{
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
+}
+
+/**
+ * pch_udc_disable_interrupts() - Disables the specified interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * @mask: Mask to disable interrupts
+ */
+static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
+ u32 mask)
+{
+ pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_enable_interrupts() - Enable the specified interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * @mask: Mask to enable interrupts
+ */
+static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
+ u32 mask)
+{
+ pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * @mask: Mask to disable interrupts
+ */
+static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
+ u32 mask)
+{
+ pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * @mask: Mask to enable interrupts
+ */
+static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
+ u32 mask)
+{
+ pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
+}
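+
+/*
+ * In the IRQ mask registers a set bit masks (disables) the corresponding
+ * source, so the enable helpers above clear mask bits while the disable
+ * helpers set them. Illustrative use (sketch only):
+ *
+ * pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_ENUM);
+ * pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+ */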
+
+/**
+ * pch_udc_read_device_interrupts() - Read the device interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * Return: The device interrupts
+ */
+static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
+{
+ return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_write_device_interrupts() - Write device interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * @val: The value to be written to interrupt register
+ */
+static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
+ u32 val)
+{
+ pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * Return: The endpoint interrupts
+ */
+static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
+{
+ return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
+ * @dev: Reference to structure of type pch_udc_regs
+ * @val: The value to be written to interrupt register
+ */
+static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
+ u32 val)
+{
+ pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_device_status() - Read the device status
+ * @dev: Reference to structure of type pch_udc_regs
+ * Return: The device status
+ */
+static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
+{
+ return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_control() - Read the endpoint control
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * Return: The endpoint control register value
+ */
+static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
+{
+ return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_clear_ep_control() - Clear the endpoint control register
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
+{
+ pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_status() - Read the endpoint status
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * Return: The endpoint status
+ */
+static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
+{
+ return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
+}
+
+/**
+ * pch_udc_clear_ep_status() - Clear the endpoint status
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ * @stat: Endpoint status
+ */
+static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
+ u32 stat)
+{
+ pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
+ * of the endpoint control register
+ * @ep: Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
+{
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
+}
+
+/**
+ * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
+ * of the endpoint control register
+ * @ep: reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
+{
+ unsigned int loopcnt = 0;
+ struct pch_udc_dev *dev = ep->dev;
+
+ if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
+ return;
+ if (!ep->in) {
+ loopcnt = 10000;
+ while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
+ --loopcnt)
+ udelay(5);
+ if (!loopcnt)
+ dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
+ __func__);
+ }
+ loopcnt = 10000;
+ while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
+ udelay(5);
+ }
+ if (!loopcnt)
+ dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
+ __func__, ep->num, (ep->in ? "in" : "out"));
+}
+
+/**
+ * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
+ * @ep: reference to structure of type pch_udc_ep_regs
+ * @dir: direction of endpoint
+ * 0: endpoint is OUT
+ * !0: endpoint is IN
+ */
+static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
+{
+ if (dir) /* IN ep */
+ pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
+}
+
+/**
+ * pch_udc_ep_enable() - This API enables the endpoint
+ * @ep: reference to structure of type pch_udc_ep_regs
+ * @cfg: current configuration information
+ * @desc: endpoint descriptor
+ */
+static void pch_udc_ep_enable(struct pch_udc_ep *ep,
+ struct pch_udc_cfg_data *cfg,
+ const struct usb_endpoint_descriptor *desc)
+{
+ u32 val = 0;
+ u32 buff_size = 0;
+
+ pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
+ if (ep->in)
+ buff_size = UDC_EPIN_BUFF_SIZE;
+ else
+ buff_size = UDC_EPOUT_BUFF_SIZE;
+ pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
+ pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
+ pch_udc_ep_set_nak(ep);
+ pch_udc_ep_fifo_flush(ep, ep->in);
+ /* Configure the endpoint */
+ val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
+ ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
+ UDC_CSR_NE_TYPE_SHIFT) |
+ (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
+ (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
+ (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
+ usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
+
+ if (ep->in)
+ pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
+ else
+ pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
+}
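+
+/*
+ * Layout of the UDC_CSR endpoint NE word composed above, in the field
+ * order this driver uses: endpoint number, direction, transfer type,
+ * current configuration, interface, alternate setting and max packet
+ * size.
+ */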
+
+/**
+ * pch_udc_ep_disable() - This API disables the endpoint
+ * @ep: reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_disable(struct pch_udc_ep *ep)
+{
+ if (ep->in) {
+ /* flush the fifo */
+ pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
+ /* set NAK */
+ pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
+ pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
+ } else {
+ /* set NAK */
+ pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
+ }
+ /* reset desc pointer */
+ pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
+}
+
+/**
+ * pch_udc_wait_ep_stall() - Wait for the endpoint stall bit to clear.
+ * @ep: reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
+{
+ unsigned int count = 10000;
+
+ /* Wait till idle */
+ while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
+ udelay(5);
+ if (!count)
+ dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
+}
+
+/**
+ * pch_udc_init() - This API initializes the usb device controller
+ * @dev: Reference to pch_udc_regs structure
+ */
+static void pch_udc_init(struct pch_udc_dev *dev)
+{
+ if (!dev) {
+ pr_err("%s: Invalid address\n", __func__);
+ return;
+ }
+ /* Soft Reset and Reset PHY */
+ pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+ pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
+ mdelay(1);
+ pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+ pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
+ mdelay(1);
+ /* mask and clear all device interrupts */
+ pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
+ pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
+
+ /* mask and clear all ep interrupts */
+ pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+ pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+
+ /* enable dynamic CSR programming, self powered and device speed */
+ if (speed_fs)
+ pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
+ UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
+ else /* default: high speed */
+ pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
+ UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
+ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
+ (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
+ (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
+ UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
+ UDC_DEVCTL_THE);
+}
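+
+/*
+ * speed_fs used above is this driver's module parameter that forces
+ * full-speed-only operation; high speed is the default.
+ */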
+
+/**
+ * pch_udc_exit() - This API exits the usb device controller
+ * @dev: Reference to pch_udc_regs structure
+ */
+static void pch_udc_exit(struct pch_udc_dev *dev)
+{
+ /* mask all device interrupts */
+ pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
+ /* mask all ep interrupts */
+ pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+ /* put device in disconnected state */
+ pch_udc_set_disconnect(dev);
+}
+
+/**
+ * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
+ * @gadget: Reference to the gadget driver
+ *
+ * Return codes:
+ * current frame number: Success
+ * -EINVAL: If the gadget passed is NULL
+ */
+static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
+{
+ struct pch_udc_dev *dev;
+
+ if (!gadget)
+ return -EINVAL;
+ dev = container_of(gadget, struct pch_udc_dev, gadget);
+ return pch_udc_get_frame(dev);
+}
+
+/**
+ * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
+ * @gadget: Reference to the gadget driver
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: If the gadget passed is NULL
+ */
+static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
+{
+ struct pch_udc_dev *dev;
+ unsigned long flags;
+
+ if (!gadget)
+ return -EINVAL;
+ dev = container_of(gadget, struct pch_udc_dev, gadget);
+ spin_lock_irqsave(&dev->lock, flags);
+ pch_udc_rmt_wakeup(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+/**
+ * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
+ * is self powered or not
+ * @gadget: Reference to the gadget driver
+ * @value: Specifies self powered or not
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: If the gadget passed is NULL
+ */
+static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
+{
+ struct pch_udc_dev *dev;
+
+ if (!gadget)
+ return -EINVAL;
+ gadget->is_selfpowered = (value != 0);
+ dev = container_of(gadget, struct pch_udc_dev, gadget);
+ if (value)
+ pch_udc_set_selfpowered(dev);
+ else
+ pch_udc_clear_selfpowered(dev);
+ return 0;
+}
+
+/**
+ * pch_udc_pcd_pullup() - This API is invoked to make the device
+ * visible/invisible to the host
+ * @gadget: Reference to the gadget driver
+ * @is_on: Specifies whether the pull up is made active or inactive
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: If the gadget passed is NULL
+ */
+static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct pch_udc_dev *dev;
+ unsigned long iflags;
+
+ if (!gadget)
+ return -EINVAL;
+
+ dev = container_of(gadget, struct pch_udc_dev, gadget);
+
+ spin_lock_irqsave(&dev->lock, iflags);
+ if (is_on) {
+ pch_udc_reconnect(dev);
+ } else {
+ if (dev->driver && dev->driver->disconnect) {
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ dev->driver->disconnect(&dev->gadget);
+ spin_lock_irqsave(&dev->lock, iflags);
+ }
+ pch_udc_set_disconnect(dev);
+ }
+ spin_unlock_irqrestore(&dev->lock, iflags);
+
+ return 0;
+}
+
+/**
+ * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
+ * transceiver (or GPIO) that
+ * detects a VBUS power session starting/ending
+ * @gadget: Reference to the gadget driver
+ * @is_active: specifies whether the session is starting or ending
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: If the gadget passed is NULL
+ */
+static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct pch_udc_dev *dev;
+
+ if (!gadget)
+ return -EINVAL;
+ dev = container_of(gadget, struct pch_udc_dev, gadget);
+ pch_udc_vbus_session(dev, is_active);
+ return 0;
+}
+
+/**
+ * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
+ * SET_CONFIGURATION calls to
+ * specify how much power the device can consume
+ * @gadget: Reference to the gadget driver
+ * @mA: specifies the current limit in 2mA unit
+ *
+ * Return codes:
+ * -EOPNOTSUPP: Always, as this operation is not supported
+ */
+static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
+{
+ return -EOPNOTSUPP;
+}
+
+static int pch_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int pch_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops pch_udc_ops = {
+ .get_frame = pch_udc_pcd_get_frame,
+ .wakeup = pch_udc_pcd_wakeup,
+ .set_selfpowered = pch_udc_pcd_selfpowered,
+ .pullup = pch_udc_pcd_pullup,
+ .vbus_session = pch_udc_pcd_vbus_session,
+ .vbus_draw = pch_udc_pcd_vbus_draw,
+ .udc_start = pch_udc_start,
+ .udc_stop = pch_udc_stop,
+};
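+
+/*
+ * Illustrative sketch (not part of this patch): the UDC core dispatches
+ * into the ops table above once the gadget has been registered from the
+ * probe path, roughly:
+ *
+ * dev->gadget.ops = &pch_udc_ops;
+ * retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ */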
+
+/**
+ * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
+ * @dev: Reference to the driver structure
+ *
+ * Return value:
+ * 1: VBUS is high
+ * 0: VBUS is low
+ * -1: VBUS detection via GPIO is not enabled
+ */
+static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
+{
+ int vbus = 0;
+
+ if (dev->vbus_gpio.port)
+ vbus = gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0;
+ else
+ vbus = -1;
+
+ return vbus;
+}
+
+/**
+ * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming low.
+ * If VBUS is low, disconnect is processed
+ * @irq_work: Structure for WorkQueue
+ *
+ */
+static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
+{
+ struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
+ struct pch_vbus_gpio_data, irq_work_fall);
+ struct pch_udc_dev *dev =
+ container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
+ int vbus_saved = -1;
+ int vbus;
+ int count;
+
+ if (!dev->vbus_gpio.port)
+ return;
+
+ for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
+ count++) {
+ vbus = pch_vbus_gpio_get_value(dev);
+
+ if ((vbus_saved == vbus) && (vbus == 0)) {
+ dev_dbg(&dev->pdev->dev, "VBUS fell");
+ if (dev->driver
+ && dev->driver->disconnect) {
+ dev->driver->disconnect(
+ &dev->gadget);
+ }
+ if (dev->vbus_gpio.intr)
+ pch_udc_init(dev);
+ else
+ pch_udc_reconnect(dev);
+ return;
+ }
+ vbus_saved = vbus;
+ mdelay(PCH_VBUS_INTERVAL);
+ }
+}
+
+/**
+ * pch_vbus_gpio_work_rise() - This API checks whether VBUS is high.
+ * If VBUS is high, connect is processed
+ * @irq_work: Structure for WorkQueue
+ *
+ */
+static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
+{
+ struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
+ struct pch_vbus_gpio_data, irq_work_rise);
+ struct pch_udc_dev *dev =
+ container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
+ int vbus;
+
+ if (!dev->vbus_gpio.port)
+ return;
+
+ mdelay(PCH_VBUS_INTERVAL);
+ vbus = pch_vbus_gpio_get_value(dev);
+
+ if (vbus == 1) {
+ dev_dbg(&dev->pdev->dev, "VBUS rose\n");
+ pch_udc_reconnect(dev);
+ }
+}
+
+/**
+ * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
+ * @irq: Interrupt request number
+ * @data: Reference to the device structure
+ *
+ * Return codes:
+ * IRQ_HANDLED: The interrupt was serviced
+ * IRQ_NONE: The interrupt did not come from the VBUS GPIO
+ */
+static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
+{
+ struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
+
+ if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
+ return IRQ_NONE;
+
+ if (pch_vbus_gpio_get_value(dev))
+ schedule_work(&dev->vbus_gpio.irq_work_rise);
+ else
+ schedule_work(&dev->vbus_gpio.irq_work_fall);
+
+ return IRQ_HANDLED;
+}
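+
+/*
+ * The handler above only schedules work: the rise/fall debounce paths
+ * poll with mdelay() and may call back into the gadget driver, neither
+ * of which is suitable in hard-IRQ context.
+ */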
+
+/**
+ * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
+ * @dev: Reference to the driver structure
+ *
+ * Return codes:
+ * 0: Success
+ * negative error: The VBUS GPIO line could not be requested
+ */
+static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
+{
+ struct device *d = &dev->pdev->dev;
+ int err;
+ int irq_num = 0;
+ struct gpio_desc *gpiod;
+
+ dev->vbus_gpio.port = NULL;
+ dev->vbus_gpio.intr = 0;
+
+ /* Retrieve the GPIO line from the USB gadget device */
+ gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "pch_vbus");
+
+ dev->vbus_gpio.port = gpiod;
+ INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
+
+ irq_num = gpiod_to_irq(gpiod);
+ if (irq_num > 0) {
+ irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
+ err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
+ "vbus_detect", dev);
+ if (!err) {
+ dev->vbus_gpio.intr = irq_num;
+ INIT_WORK(&dev->vbus_gpio.irq_work_rise,
+ pch_vbus_gpio_work_rise);
+ } else {
+ pr_err("%s: can't request irq %d, err: %d\n",
+ __func__, irq_num, err);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pch_vbus_gpio_free() - This API frees resources of GPIO port
+ * @dev: Reference to the driver structure
+ */
+static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
+{
+ if (dev->vbus_gpio.intr)
+ free_irq(dev->vbus_gpio.intr, dev);
+}
+
+/**
+ * complete_req() - This API is invoked from the driver when processing
+ * of a request is complete
+ * @ep: Reference to the endpoint structure
+ * @req: Reference to the request structure
+ * @status: Indicates the success/failure of completion
+ */
+static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
+ int status)
+ __releases(&dev->lock)
+ __acquires(&dev->lock)
+{
+ struct pch_udc_dev *dev;
+ unsigned halted = ep->halted;
+
+ list_del_init(&req->queue);
+
+ /* set new status if pending */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
+ ep->halted = 1;
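+ /*
+ * Drop the lock across the completion callback: the gadget driver may
+ * queue or dequeue requests from its handler, re-entering ep ops that
+ * take dev->lock themselves.
+ */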
+ spin_unlock(&dev->lock);
+ if (!ep->in)
+ pch_udc_ep_clear_rrdy(ep);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->halted = halted;
+}
+
+/**
+ * empty_req_queue() - This API empties the request queue of an endpoint
+ * @ep: Reference to the endpoint structure
+ */
+static void empty_req_queue(struct pch_udc_ep *ep)
+{
+ struct pch_udc_request *req;
+
+ ep->halted = 1;
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+ complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
+ }
+}
+
+/**
+ * pch_udc_free_dma_chain() - This function frees the DMA chain created
+ * for the request
+ * @dev: Reference to the driver structure
+ * @req: Reference to the request to be freed
+ */
+static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
+ struct pch_udc_request *req)
+{
+ struct pch_udc_data_dma_desc *td = req->td_data;
+ unsigned i = req->chain_len;
+
+ dma_addr_t addr2;
+ dma_addr_t addr = (dma_addr_t)td->next;
+ td->next = 0x00;
+ for (; i > 1; --i) {
+ /* do not free first desc., will be done by free for request */
+ td = phys_to_virt(addr);
+ addr2 = (dma_addr_t)td->next;
+ dma_pool_free(dev->data_requests, td, addr);
+ addr = addr2;
+ }
+ req->chain_len = 1;
+}
+
+/**
+ * pch_udc_create_dma_chain() - This function creates or reinitializes
+ * a DMA chain
+ * @ep: Reference to the endpoint structure
+ * @req: Reference to the request
+ * @buf_len: The buffer length
+ * @gfp_flags: Flags to be used while mapping the data buffer
+ *
+ * Return codes:
+ * 0: success,
+ * -ENOMEM: dma_pool_alloc invocation fails
+ */
+static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
+ struct pch_udc_request *req,
+ unsigned long buf_len,
+ gfp_t gfp_flags)
+{
+ struct pch_udc_data_dma_desc *td = req->td_data, *last;
+ unsigned long bytes = req->req.length, i = 0;
+ dma_addr_t dma_addr;
+ unsigned len = 1;
+
+ if (req->chain_len > 1)
+ pch_udc_free_dma_chain(ep->dev, req);
+
+ td->dataptr = req->req.dma;
+ td->status = PCH_UDC_BS_HST_BSY;
+
+ for (; ; bytes -= buf_len, ++len) {
+ td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
+ if (bytes <= buf_len)
+ break;
+ last = td;
+ td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
+ &dma_addr);
+ if (!td)
+ goto nomem;
+ i += buf_len;
+ td->dataptr = req->td_data->dataptr + i;
+ last->next = dma_addr;
+ }
+
+ req->td_data_last = td;
+ td->status |= PCH_UDC_DMA_LAST;
+ td->next = req->td_data_phys;
+ req->chain_len = len;
+ return 0;
+
+nomem:
+ if (len > 1) {
+ req->chain_len = len;
+ pch_udc_free_dma_chain(ep->dev, req);
+ }
+ req->chain_len = 1;
+ return -ENOMEM;
+}
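+
+/*
+ * The chain built above is a singly linked ring: each descriptor covers
+ * at most buf_len bytes of the request buffer, the last one carries
+ * PCH_UDC_DMA_LAST, and its next pointer wraps back to the first
+ * descriptor (td_data_phys).
+ */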
+
+/**
+ * prepare_dma() - This function creates and initializes the DMA chain
+ * for the request
+ * @ep: Reference to the endpoint structure
+ * @req: Reference to the request
+ * @gfp: Flag to be used while mapping the data buffer
+ *
+ * Return codes:
+ * 0: Success
+ * Nonzero: Linux error number on failure
+ */
+static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
+ gfp_t gfp)
+{
+ int retval;
+
+ /* Allocate and create a DMA chain */
+ retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
+ if (retval) {
+ pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
+ return retval;
+ }
+ if (ep->in)
+ req->td_data->status = (req->td_data->status &
+ ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
+ return 0;
+}
+
+/**
+ * process_zlp() - This function processes zero length packets
+ * from the gadget driver
+ * @ep: Reference to the endpoint structure
+ * @req: Reference to the request
+ */
+static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
+{
+ struct pch_udc_dev *dev = ep->dev;
+
+ /* IN zlp's are handled by hardware */
+ complete_req(ep, req, 0);
+
+ /* if set_config or set_intf is waiting for ack by zlp
+ * then set CSR_DONE
+ */
+ if (dev->set_cfg_not_acked) {
+ pch_udc_set_csr_done(dev);
+ dev->set_cfg_not_acked = 0;
+ }
+ /* setup command is ACK'ed now by zlp */
+ if (!dev->stall && dev->waiting_zlp_ack) {
+ pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
+ dev->waiting_zlp_ack = 0;
+ }
+}
+
+/**
+ * pch_udc_start_rxrequest() - This function starts a receive request.
+ * @ep: Reference to the endpoint structure
+ * @req: Reference to the request structure
+ */
+static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
+ struct pch_udc_request *req)
+{
+ struct pch_udc_data_dma_desc *td_data;
+
+ pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+ td_data = req->td_data;
+ /* Set the status bits for all descriptors */
+ while (1) {
+ td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
+ PCH_UDC_BS_HST_RDY;
+ if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
+ break;
+ td_data = phys_to_virt(td_data->next);
+ }
+ /* Write the descriptor pointer */
+ pch_udc_ep_set_ddptr(ep, req->td_data_phys);
+ req->dma_going = 1;
+ pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
+ pch_udc_set_dma(ep->dev, DMA_DIR_RX);
+ pch_udc_ep_clear_nak(ep);
+ pch_udc_ep_set_rrdy(ep);
+}
+
+/**
+ * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
+ * from gadget driver
+ * @usbep: Reference to the USB endpoint structure
+ * @desc: Reference to the USB endpoint descriptor structure
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: If the endpoint or descriptor passed is invalid
+ * -ESHUTDOWN: If the driver is not bound or the device speed is unknown
+ */
+static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct pch_udc_ep *ep;
+ struct pch_udc_dev *dev;
+ unsigned long iflags;
+
+ if (!usbep || (usbep->name == ep0_string) || !desc ||
+ (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
+ return -EINVAL;
+
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ dev = ep->dev;
+ if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+ spin_lock_irqsave(&dev->lock, iflags);
+ ep->ep.desc = desc;
+ ep->halted = 0;
+ pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
+ pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ return 0;
+}
+
+/**
+ * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
+ * from gadget driver
+ * @usbep: Reference to the USB endpoint structure
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: If the endpoint passed is invalid or is ep0
+ */
+static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
+{
+ struct pch_udc_ep *ep;
+ unsigned long iflags;
+
+ if (!usbep)
+ return -EINVAL;
+
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ if ((usbep->name == ep0_string) || !ep->ep.desc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, iflags);
+ empty_req_queue(ep);
+ ep->halted = 1;
+ pch_udc_ep_disable(ep);
+ pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ ep->ep.desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+ spin_unlock_irqrestore(&ep->dev->lock, iflags);
+ return 0;
+}
+
+/**
+ * pch_udc_alloc_request() - This function allocates request structure.
+ * It is called by gadget driver
+ * @usbep: Reference to the USB endpoint structure
+ * @gfp: Flag to be used while allocating memory
+ *
+ * Return codes:
+ * NULL: Failure
+ * Allocated address: Success
+ */
+static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
+ gfp_t gfp)
+{
+ struct pch_udc_request *req;
+ struct pch_udc_ep *ep;
+ struct pch_udc_data_dma_desc *dma_desc;
+
+ if (!usbep)
+ return NULL;
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ req = kzalloc(sizeof *req, gfp);
+ if (!req)
+ return NULL;
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+ if (!ep->dev->dma_addr)
+ return &req->req;
+ /* ep0 in requests are allocated from data pool here */
+ dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
+ &req->td_data_phys);
+ if (!dma_desc) {
+ kfree(req);
+ return NULL;
+ }
+ /* prevent the descriptor from being used - set HOST BUSY */
+ dma_desc->status |= PCH_UDC_BS_HST_BSY;
+ dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
+ req->td_data = dma_desc;
+ req->td_data_last = dma_desc;
+ req->chain_len = 1;
+ return &req->req;
+}
+
+/**
+ * pch_udc_free_request() - This function frees request structure.
+ * It is called by gadget driver
+ * @usbep: Reference to the USB endpoint structure
+ * @usbreq: Reference to the USB request
+ */
+static void pch_udc_free_request(struct usb_ep *usbep,
+ struct usb_request *usbreq)
+{
+ struct pch_udc_ep *ep;
+ struct pch_udc_request *req;
+ struct pch_udc_dev *dev;
+
+ if (!usbep || !usbreq)
+ return;
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ req = container_of(usbreq, struct pch_udc_request, req);
+ dev = ep->dev;
+ if (!list_empty(&req->queue))
+ dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
+ __func__, usbep->name, req);
+ if (req->td_data != NULL) {
+ if (req->chain_len > 1)
+ pch_udc_free_dma_chain(ep->dev, req);
+ dma_pool_free(ep->dev->data_requests, req->td_data,
+ req->td_data_phys);
+ }
+ kfree(req);
+}
+
+/**
+ * pch_udc_pcd_queue() - This function queues a request packet. It is called
+ * by gadget driver
+ * @usbep: Reference to the USB endpoint structure
+ * @usbreq: Reference to the USB request
+ * @gfp: Flag to be used while mapping the data buffer
+ *
+ * Return codes:
+ * 0: Success
+ * linux error number: Failure
+ */
+static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
+ gfp_t gfp)
+{
+ int retval = 0;
+ struct pch_udc_ep *ep;
+ struct pch_udc_dev *dev;
+ struct pch_udc_request *req;
+ unsigned long iflags;
+
+ if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
+ return -EINVAL;
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ dev = ep->dev;
+ if (!ep->ep.desc && ep->num)
+ return -EINVAL;
+ req = container_of(usbreq, struct pch_udc_request, req);
+ if (!list_empty(&req->queue))
+ return -EINVAL;
+ if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+ spin_lock_irqsave(&dev->lock, iflags);
+ /* map the buffer for dma */
+ retval = usb_gadget_map_request(&dev->gadget, usbreq, ep->in);
+ if (retval)
+ goto probe_end;
+ if (usbreq->length > 0) {
+ retval = prepare_dma(ep, req, GFP_ATOMIC);
+ if (retval)
+ goto probe_end;
+ }
+ usbreq->actual = 0;
+ usbreq->status = -EINPROGRESS;
+ req->dma_done = 0;
+ if (list_empty(&ep->queue) && !ep->halted) {
+ /* no pending transfer, so start this req */
+ if (!usbreq->length) {
+ process_zlp(ep, req);
+ retval = 0;
+ goto probe_end;
+ }
+ if (!ep->in) {
+ pch_udc_start_rxrequest(ep, req);
+ } else {
+ /*
+ * For IN transfers the descriptors will be programmed and
+ * the P bit will be set when we get an IN token
+ */
+ pch_udc_wait_ep_stall(ep);
+ pch_udc_ep_clear_nak(ep);
+ pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
+ }
+ }
+ /* Now add this request to the ep's pending requests */
+ if (req != NULL)
+ list_add_tail(&req->queue, &ep->queue);
+
+probe_end:
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ return retval;
+}
+
+/**
+ * pch_udc_pcd_dequeue() - This function de-queues a request packet.
+ * It is called by gadget driver
+ * @usbep: Reference to the USB endpoint structure
+ * @usbreq: Reference to the USB request
+ *
+ * Return codes:
+ * 0: Success
+ * linux error number: Failure
+ */
+static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
+ struct usb_request *usbreq)
+{
+ struct pch_udc_ep *ep;
+ struct pch_udc_request *req;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
+ return ret;
+ req = container_of(usbreq, struct pch_udc_request, req);
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == usbreq) {
+ pch_udc_ep_set_nak(ep);
+ if (!list_empty(&req->queue))
+ complete_req(ep, req, -ECONNRESET);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return ret;
+}
+
+/**
+ * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
+ * feature
+ * @usbep: Reference to the USB endpoint structure
+ * @halt: Specifies whether to set or clear the feature
+ *
+ * Return codes:
+ * 0: Success
+ * linux error number: Failure
+ */
+static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
+{
+ struct pch_udc_ep *ep;
+ unsigned long iflags;
+ int ret;
+
+ if (!usbep)
+ return -EINVAL;
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ if (!ep->ep.desc && !ep->num)
+ return -EINVAL;
+ if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+ spin_lock_irqsave(&udc_stall_spinlock, iflags);
+ if (list_empty(&ep->queue)) {
+ if (halt) {
+ if (ep->num == PCH_UDC_EP0)
+ ep->dev->stall = 1;
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(
+ ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ } else {
+ pch_udc_ep_clear_stall(ep);
+ }
+ ret = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+ return ret;
+}
+
+/**
+ * pch_udc_pcd_set_wedge() - This function sets the endpoint halt feature
+ * and marks the endpoint as wedged
+ * @usbep: Reference to the USB endpoint structure
+ *
+ * Return codes:
+ * 0: Success
+ * linux error number: Failure
+ */
+static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
+{
+ struct pch_udc_ep *ep;
+ unsigned long iflags;
+ int ret;
+
+ if (!usbep)
+ return -EINVAL;
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ if (!ep->ep.desc && !ep->num)
+ return -EINVAL;
+ if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
+ return -ESHUTDOWN;
+ spin_lock_irqsave(&udc_stall_spinlock, iflags);
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ } else {
+ if (ep->num == PCH_UDC_EP0)
+ ep->dev->stall = 1;
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ ep->dev->prot_stall = 1;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+ return ret;
+}
+
+/**
+ * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
+ * @usbep: Reference to the USB endpoint structure
+ */
+static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
+{
+ struct pch_udc_ep *ep;
+
+ if (!usbep)
+ return;
+
+ ep = container_of(usbep, struct pch_udc_ep, ep);
+ if (ep->ep.desc || !ep->num)
+ pch_udc_ep_fifo_flush(ep, ep->in);
+}
+
+static const struct usb_ep_ops pch_udc_ep_ops = {
+ .enable = pch_udc_pcd_ep_enable,
+ .disable = pch_udc_pcd_ep_disable,
+ .alloc_request = pch_udc_alloc_request,
+ .free_request = pch_udc_free_request,
+ .queue = pch_udc_pcd_queue,
+ .dequeue = pch_udc_pcd_dequeue,
+ .set_halt = pch_udc_pcd_set_halt,
+ .set_wedge = pch_udc_pcd_set_wedge,
+ .fifo_status = NULL,
+ .fifo_flush = pch_udc_pcd_fifo_flush,
+};
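+
+/*
+ * Illustrative gadget-side usage of the ops above through the standard
+ * usb_ep API (sketch only; ep, buf, len and my_complete are assumed to
+ * exist in the function driver):
+ *
+ * usb_ep_enable(ep);
+ * req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ * req->buf = buf;
+ * req->length = len;
+ * req->complete = my_complete;
+ * usb_ep_queue(ep, req, GFP_ATOMIC);
+ */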
+
+/**
+ * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
+ * @td_stp: Reference to the SETUP buffer structure
+ */
+static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
+{
+ static u32 pky_marker;
+
+ if (!td_stp)
+ return;
+ td_stp->reserved = ++pky_marker;
+ memset(&td_stp->request, 0xFF, sizeof td_stp->request);
+ td_stp->status = PCH_UDC_BS_HST_RDY;
+}
+
+/**
+ * pch_udc_start_next_txrequest() - This function starts
+ * the next transmit request
+ * @ep: Reference to the endpoint structure
+ */
+static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
+{
+ struct pch_udc_request *req;
+ struct pch_udc_data_dma_desc *td_data;
+
+ if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
+ return;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ /* next request */
+ req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+ if (req->dma_going)
+ return;
+ if (!req->td_data)
+ return;
+ pch_udc_wait_ep_stall(ep);
+ req->dma_going = 1;
+ pch_udc_ep_set_ddptr(ep, 0);
+ td_data = req->td_data;
+ while (1) {
+ td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
+ PCH_UDC_BS_HST_RDY;
+ if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
+ break;
+ td_data = phys_to_virt(td_data->next);
+ }
+ pch_udc_ep_set_ddptr(ep, req->td_data_phys);
+ pch_udc_set_dma(ep->dev, DMA_DIR_TX);
+ pch_udc_ep_set_pd(ep);
+ pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ pch_udc_ep_clear_nak(ep);
+}
+
+/**
+ * pch_udc_complete_transfer() - This function completes a transfer
+ * @ep: Reference to the endpoint structure
+ */
+static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
+{
+ struct pch_udc_request *req;
+ struct pch_udc_dev *dev = ep->dev;
+
+ if (list_empty(&ep->queue))
+ return;
+ req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+ if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+ PCH_UDC_BS_DMA_DONE)
+ return;
+ if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
+ PCH_UDC_RTS_SUCC) {
+ dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
+ "epstatus=0x%08x\n",
+ (req->td_data_last->status & PCH_UDC_RXTX_STS),
+ (int)(ep->epsts));
+ return;
+ }
+
+ req->req.actual = req->req.length;
+ req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
+ req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
+ complete_req(ep, req, 0);
+ req->dma_going = 0;
+ if (!list_empty(&ep->queue)) {
+ pch_udc_wait_ep_stall(ep);
+ pch_udc_ep_clear_nak(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ } else {
+ pch_udc_disable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ }
+}
+
+/**
+ * pch_udc_complete_receiver() - This function completes a receive transfer
+ * @ep: Reference to the endpoint structure
+ */
+static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
+{
+ struct pch_udc_request *req;
+ struct pch_udc_dev *dev = ep->dev;
+ unsigned int count;
+ struct pch_udc_data_dma_desc *td;
+ dma_addr_t addr;
+
+ if (list_empty(&ep->queue))
+ return;
+ /* next request */
+ req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+ pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+ pch_udc_ep_set_ddptr(ep, 0);
+ if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
+ PCH_UDC_BS_DMA_DONE)
+ td = req->td_data_last;
+ else
+ td = req->td_data;
+
+ while (1) {
+ if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
+ dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
+ "epstatus=0x%08x\n",
+ (req->td_data->status & PCH_UDC_RXTX_STS),
+ (int)(ep->epsts));
+ return;
+ }
+ if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE &&
+ (td->status & PCH_UDC_DMA_LAST)) {
+ count = td->status & PCH_UDC_RXTX_BYTES;
+ break;
+ }
+ if (td == req->td_data_last) {
+ dev_err(&dev->pdev->dev, "Not complete RX descriptor");
+ return;
+ }
+ addr = (dma_addr_t)td->next;
+ td = phys_to_virt(addr);
+ }
+ /* on 64k packets the RXBYTES field is zero */
+ if (!count && (req->req.length == UDC_DMA_MAXPACKET))
+ count = UDC_DMA_MAXPACKET;
+ req->td_data->status |= PCH_UDC_DMA_LAST;
+ td->status |= PCH_UDC_BS_HST_BSY;
+
+ req->dma_going = 0;
+ req->req.actual = count;
+ complete_req(ep, req, 0);
+ /* If there are new/failed requests, try them now */
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+ pch_udc_start_rxrequest(ep, req);
+ }
+}
+
+/**
+ * pch_udc_svc_data_in() - This function processes endpoint interrupts
+ * for IN endpoints
+ * @dev: Reference to the device structure
+ * @ep_num: Endpoint that generated the interrupt
+ */
+static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
+{
+ u32 epsts;
+ struct pch_udc_ep *ep;
+
+ ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+ epsts = ep->epsts;
+ ep->epsts = 0;
+
+ if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
+ UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
+ UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
+ return;
+ if (epsts & UDC_EPSTS_BNA)
+ return;
+ if (epsts & UDC_EPSTS_HE)
+ return;
+ if (epsts & UDC_EPSTS_RSS) {
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ }
+ if (epsts & UDC_EPSTS_RCS) {
+ if (!dev->prot_stall) {
+ pch_udc_ep_clear_stall(ep);
+ } else {
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ }
+ }
+ if (epsts & UDC_EPSTS_TDC)
+ pch_udc_complete_transfer(ep);
+ /* On IN interrupt, provide data if we have any */
+ if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
+ !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
+ pch_udc_start_next_txrequest(ep);
+}
+
+/**
+ * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
+ * @dev: Reference to the device structure
+ * @ep_num: Endpoint that generated the interrupt
+ */
+static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
+{
+ u32 epsts;
+ struct pch_udc_ep *ep;
+ struct pch_udc_request *req = NULL;
+
+ ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
+ epsts = ep->epsts;
+ ep->epsts = 0;
+
+ if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
+ /* next request */
+ req = list_entry(ep->queue.next, struct pch_udc_request,
+ queue);
+ if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+ PCH_UDC_BS_DMA_DONE) {
+ if (!req->dma_going)
+ pch_udc_start_rxrequest(ep, req);
+ return;
+ }
+ }
+ if (epsts & UDC_EPSTS_HE)
+ return;
+ if (epsts & UDC_EPSTS_RSS) {
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ }
+ if (epsts & UDC_EPSTS_RCS) {
+ if (!dev->prot_stall) {
+ pch_udc_ep_clear_stall(ep);
+ } else {
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ }
+ }
+ if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+ UDC_EPSTS_OUT_DATA) {
+ if (ep->dev->prot_stall == 1) {
+ pch_udc_ep_set_stall(ep);
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ } else {
+ pch_udc_complete_receiver(ep);
+ }
+ }
+ if (list_empty(&ep->queue))
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+}
+
+static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
+ __must_hold(&dev->lock)
+{
+ int rc;
+
+ /* In some cases we can get an interrupt before the driver is set up */
+ if (!dev->driver)
+ return -ESHUTDOWN;
+
+ spin_unlock(&dev->lock);
+ rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
+ spin_lock(&dev->lock);
+ return rc;
+}
+
+/**
+ * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
+ * @dev: Reference to the device structure
+ */
+static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
+{
+ u32 epsts;
+ struct pch_udc_ep *ep;
+ struct pch_udc_ep *ep_out;
+
+ ep = &dev->ep[UDC_EP0IN_IDX];
+ ep_out = &dev->ep[UDC_EP0OUT_IDX];
+ epsts = ep->epsts;
+ ep->epsts = 0;
+
+ if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
+ UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
+ UDC_EPSTS_XFERDONE)))
+ return;
+ if (epsts & UDC_EPSTS_BNA)
+ return;
+ if (epsts & UDC_EPSTS_HE)
+ return;
+ if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
+ pch_udc_complete_transfer(ep);
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
+ ep_out->td_data->status = (ep_out->td_data->status &
+ ~PCH_UDC_BUFF_STS) |
+ PCH_UDC_BS_HST_RDY;
+ pch_udc_ep_clear_nak(ep_out);
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_set_rrdy(ep_out);
+ }
+ /* On IN interrupt, provide data if we have any */
+ if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
+ !(epsts & UDC_EPSTS_TXEMPTY))
+ pch_udc_start_next_txrequest(ep);
+}
+
+/**
+ * pch_udc_svc_control_out() - Routine that handles Control
+ * OUT endpoint interrupts
+ * @dev: Reference to the device structure
+ */
+static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
+ __releases(&dev->lock)
+ __acquires(&dev->lock)
+{
+ u32 stat;
+ int setup_supported;
+ struct pch_udc_ep *ep;
+
+ ep = &dev->ep[UDC_EP0OUT_IDX];
+ stat = ep->epsts;
+ ep->epsts = 0;
+
+ /* If setup data */
+ if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+ UDC_EPSTS_OUT_SETUP) {
+ dev->stall = 0;
+ dev->ep[UDC_EP0IN_IDX].halted = 0;
+ dev->ep[UDC_EP0OUT_IDX].halted = 0;
+ dev->setup_data = ep->td_stp->request;
+ pch_udc_init_setup_buff(ep->td_stp);
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
+ dev->ep[UDC_EP0IN_IDX].in);
+ if (dev->setup_data.bRequestType & USB_DIR_IN)
+ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+ else /* OUT */
+ dev->gadget.ep0 = &ep->ep;
+ /* If Mass storage Reset */
+ if ((dev->setup_data.bRequestType == 0x21) &&
+ (dev->setup_data.bRequest == 0xFF))
+ dev->prot_stall = 0;
+ /* call gadget with setup data received */
+ setup_supported = pch_udc_gadget_setup(dev);
+
+ if (dev->setup_data.bRequestType & USB_DIR_IN) {
+ ep->td_data->status = (ep->td_data->status &
+ ~PCH_UDC_BUFF_STS) |
+ PCH_UDC_BS_HST_RDY;
+ pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+ }
+ /* ep0 in returns data on IN phase */
+ if (setup_supported >= 0 && setup_supported <
+ UDC_EP0IN_MAX_PKT_SIZE) {
+ pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
+ /* Gadget would have queued a request when
+ * we called the setup */
+ if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_clear_nak(ep);
+ }
+ } else if (setup_supported < 0) {
+ /* if unsupported request, then stall */
+ pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
+ pch_udc_enable_ep_interrupts(ep->dev,
+ PCH_UDC_EPINT(ep->in, ep->num));
+ dev->stall = 0;
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ } else {
+ dev->waiting_zlp_ack = 1;
+ }
+ } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+ UDC_EPSTS_OUT_DATA) && !dev->stall) {
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_set_ddptr(ep, 0);
+ if (!list_empty(&ep->queue)) {
+ ep->epsts = stat;
+ pch_udc_svc_data_out(dev, PCH_UDC_EP0);
+ }
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ }
+ pch_udc_ep_set_rrdy(ep);
+}
+
+
+/**
+ * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
+ * and clears NAK status
+ * @dev: Reference to the device structure
+ * @ep_num: End point number
+ */
+static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
+{
+ struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+ if (list_empty(&ep->queue))
+ return;
+ pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ pch_udc_ep_clear_nak(ep);
+}
+
+/**
+ * pch_udc_read_all_epstatus() - This function reads all endpoint status
+ * @dev: Reference to the device structure
+ * @ep_intr: Status of endpoint interrupt
+ */
+static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
+{
+ int i;
+ struct pch_udc_ep *ep;
+
+ for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
+ /* IN */
+ if (ep_intr & (0x1 << i)) {
+ ep = &dev->ep[UDC_EPIN_IDX(i)];
+ ep->epsts = pch_udc_read_ep_status(ep);
+ pch_udc_clear_ep_status(ep, ep->epsts);
+ }
+ /* OUT */
+ if (ep_intr & (0x10000 << i)) {
+ ep = &dev->ep[UDC_EPOUT_IDX(i)];
+ ep->epsts = pch_udc_read_ep_status(ep);
+ pch_udc_clear_ep_status(ep, ep->epsts);
+ }
+ }
+}
+
+/**
+ * pch_udc_activate_control_ep() - This function enables the control endpoints
+ * for traffic after a reset
+ * @dev: Reference to the device structure
+ */
+static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
+{
+ struct pch_udc_ep *ep;
+ u32 val;
+
+ /* Setup the IN endpoint */
+ ep = &dev->ep[UDC_EP0IN_IDX];
+ pch_udc_clear_ep_control(ep);
+ pch_udc_ep_fifo_flush(ep, ep->in);
+ pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
+ pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
+ /* Initialize the IN EP Descriptor */
+ ep->td_data = NULL;
+ ep->td_stp = NULL;
+ ep->td_data_phys = 0;
+ ep->td_stp_phys = 0;
+
+ /* Setup the OUT endpoint */
+ ep = &dev->ep[UDC_EP0OUT_IDX];
+ pch_udc_clear_ep_control(ep);
+ pch_udc_ep_fifo_flush(ep, ep->in);
+ pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
+ pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
+ val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
+ pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
+
+ /* Initialize the SETUP buffer */
+ pch_udc_init_setup_buff(ep->td_stp);
+ /* Write the pointer address of the setup descriptor */
+ pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
+ /* Write the pointer address of the data descriptor */
+ pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+
+ /* Initialize the dma descriptor */
+ ep->td_data->status = PCH_UDC_DMA_LAST;
+ ep->td_data->dataptr = dev->dma_addr;
+ ep->td_data->next = ep->td_data_phys;
+
+ pch_udc_ep_clear_nak(ep);
+}
+
+
+/**
+ * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
+ * @dev: Reference to driver structure
+ */
+static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
+{
+ struct pch_udc_ep *ep;
+ int i;
+
+ pch_udc_clear_dma(dev, DMA_DIR_TX);
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
+ /* Mask all endpoint interrupts */
+ pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+ /* clear all endpoint interrupts */
+ pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+ for (i = 0; i < PCH_UDC_EP_NUM; i++) {
+ ep = &dev->ep[i];
+ pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
+ pch_udc_clear_ep_control(ep);
+ pch_udc_ep_set_ddptr(ep, 0);
+ pch_udc_write_csr(ep->dev, 0x00, i);
+ }
+ dev->stall = 0;
+ dev->prot_stall = 0;
+ dev->waiting_zlp_ack = 0;
+ dev->set_cfg_not_acked = 0;
+
+ /* disable endpoints to empty their request queues */
+ for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
+ ep = &dev->ep[i];
+ pch_udc_ep_set_nak(ep);
+ pch_udc_ep_fifo_flush(ep, ep->in);
+ /* Complete request queue */
+ empty_req_queue(ep);
+ }
+ if (dev->driver) {
+ spin_unlock(&dev->lock);
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ spin_lock(&dev->lock);
+ }
+}
+
+/**
+ * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
+ * done interrupt
+ * @dev: Reference to driver structure
+ */
+static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
+{
+ u32 dev_stat, dev_speed;
+ u32 speed = USB_SPEED_FULL;
+
+ dev_stat = pch_udc_read_device_status(dev);
+ dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
+ UDC_DEVSTS_ENUM_SPEED_SHIFT;
+ switch (dev_speed) {
+ case UDC_DEVSTS_ENUM_SPEED_HIGH:
+ speed = USB_SPEED_HIGH;
+ break;
+ case UDC_DEVSTS_ENUM_SPEED_FULL:
+ speed = USB_SPEED_FULL;
+ break;
+ case UDC_DEVSTS_ENUM_SPEED_LOW:
+ speed = USB_SPEED_LOW;
+ break;
+ default:
+ BUG();
+ }
+ dev->gadget.speed = speed;
+ pch_udc_activate_control_ep(dev);
+ pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
+ pch_udc_set_dma(dev, DMA_DIR_TX);
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
+
+ /* enable device interrupts */
+ pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
+ UDC_DEVINT_ES | UDC_DEVINT_ENUM |
+ UDC_DEVINT_SI | UDC_DEVINT_SC);
+}
+
+/**
+ * pch_udc_svc_intf_interrupt() - This function handles a set interface
+ * interrupt
+ * @dev: Reference to driver structure
+ */
+static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
+{
+ u32 reg, dev_stat = 0;
+ int i;
+
+ dev_stat = pch_udc_read_device_status(dev);
+ dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
+ UDC_DEVSTS_INTF_SHIFT;
+ dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
+ UDC_DEVSTS_ALT_SHIFT;
+ dev->set_cfg_not_acked = 1;
+ /* Construct the usb request for gadget driver and inform it */
+ memset(&dev->setup_data, 0, sizeof dev->setup_data);
+ dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
+ dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
+ dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
+ dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
+ /* program the endpoint Cfg registers */
+ /* Only one endpoint cfg register */
+ reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
+ reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
+ (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
+ reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
+ (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
+ pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
+ for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
+ /* clear stall bits */
+ pch_udc_ep_clear_stall(&(dev->ep[i]));
+ dev->ep[i].halted = 0;
+ }
+ dev->stall = 0;
+ pch_udc_gadget_setup(dev);
+}
+
+/**
+ * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
+ * interrupt
+ * @dev: Reference to driver structure
+ */
+static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
+{
+ int i;
+ u32 reg, dev_stat = 0;
+
+ dev_stat = pch_udc_read_device_status(dev);
+ dev->set_cfg_not_acked = 1;
+ dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
+ UDC_DEVSTS_CFG_SHIFT;
+ /* make usb request for gadget driver */
+ memset(&dev->setup_data, 0, sizeof dev->setup_data);
+ dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
+ dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
+ /* program the NE registers */
+ /* Only one endpoint cfg register */
+ reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
+ reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
+ (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
+ pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
+ for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
+ /* clear stall bits */
+ pch_udc_ep_clear_stall(&(dev->ep[i]));
+ dev->ep[i].halted = 0;
+ }
+ dev->stall = 0;
+
+ /* call gadget zero with setup data received */
+ pch_udc_gadget_setup(dev);
+}
+
+/**
+ * pch_udc_dev_isr() - This function services device interrupts
+ * by invoking appropriate routines.
+ * @dev: Reference to the device structure
+ * @dev_intr: The Device interrupt status.
+ */
+static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
+{
+ int vbus;
+
+ /* USB Reset Interrupt */
+ if (dev_intr & UDC_DEVINT_UR) {
+ pch_udc_svc_ur_interrupt(dev);
+ dev_dbg(&dev->pdev->dev, "USB_RESET\n");
+ }
+ /* Enumeration Done Interrupt */
+ if (dev_intr & UDC_DEVINT_ENUM) {
+ pch_udc_svc_enum_interrupt(dev);
+ dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
+ }
+ /* Set Interface Interrupt */
+ if (dev_intr & UDC_DEVINT_SI)
+ pch_udc_svc_intf_interrupt(dev);
+ /* Set Config Interrupt */
+ if (dev_intr & UDC_DEVINT_SC)
+ pch_udc_svc_cfg_interrupt(dev);
+ /* USB Suspend interrupt */
+ if (dev_intr & UDC_DEVINT_US) {
+ if (dev->driver && dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+ dev->driver->suspend(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
+ vbus = pch_vbus_gpio_get_value(dev);
+ if ((dev->vbus_session == 0) && (vbus != 1)) {
+ if (dev->driver && dev->driver->disconnect) {
+ spin_unlock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ pch_udc_reconnect(dev);
+ } else if ((dev->vbus_session == 0) && (vbus == 1) &&
+ !dev->vbus_gpio.intr)
+ schedule_work(&dev->vbus_gpio.irq_work_fall);
+
+ dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
+ }
+ /* Clear the SOF interrupt, if enabled */
+ if (dev_intr & UDC_DEVINT_SOF)
+ dev_dbg(&dev->pdev->dev, "SOF\n");
+ /* ES interrupt, IDLE > 3ms on the USB */
+ if (dev_intr & UDC_DEVINT_ES)
+ dev_dbg(&dev->pdev->dev, "ES\n");
+ /* RWKP interrupt */
+ if (dev_intr & UDC_DEVINT_RWKP)
+ dev_dbg(&dev->pdev->dev, "RWKP\n");
+}
+
+/**
+ * pch_udc_isr() - This function handles interrupts from the PCH USB Device
+ * @irq: Interrupt request number
+ * @pdev: Reference to the device structure
+ */
+static irqreturn_t pch_udc_isr(int irq, void *pdev)
+{
+ struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
+ u32 dev_intr, ep_intr;
+ int i;
+
+ dev_intr = pch_udc_read_device_interrupts(dev);
+ ep_intr = pch_udc_read_ep_interrupts(dev);
+
+ /* On hot plug, detect whether the controller has hung up. */
+ if (dev_intr == ep_intr)
+ if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
+ dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
+ /* The controller is reset */
+ pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+ return IRQ_HANDLED;
+ }
+ if (dev_intr)
+ /* Clear device interrupts */
+ pch_udc_write_device_interrupts(dev, dev_intr);
+ if (ep_intr)
+ /* Clear ep interrupts */
+ pch_udc_write_ep_interrupts(dev, ep_intr);
+ if (!dev_intr && !ep_intr)
+ return IRQ_NONE;
+ spin_lock(&dev->lock);
+ if (dev_intr)
+ pch_udc_dev_isr(dev, dev_intr);
+ if (ep_intr) {
+ pch_udc_read_all_epstatus(dev, ep_intr);
+ /* Process Control In interrupts, if present */
+ if (ep_intr & UDC_EPINT_IN_EP0) {
+ pch_udc_svc_control_in(dev);
+ pch_udc_postsvc_epinters(dev, 0);
+ }
+ /* Process Control Out interrupts, if present */
+ if (ep_intr & UDC_EPINT_OUT_EP0)
+ pch_udc_svc_control_out(dev);
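+ /*
+ * ep_intr carries one bit per endpoint: IN endpoints from bit 0
+ * upwards, OUT endpoints starting at UDC_EPINT_OUT_SHIFT.
+ */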
+ /* Process data in end point interrupts */
+ for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
+ if (ep_intr & (1 << i)) {
+ pch_udc_svc_data_in(dev, i);
+ pch_udc_postsvc_epinters(dev, i);
+ }
+ }
+ /* Process data out end point interrupts */
+ for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
+ PCH_UDC_USED_EP_NUM); i++)
+ if (ep_intr & (1 << i))
+ pch_udc_svc_data_out(dev, i -
+ UDC_EPINT_OUT_SHIFT);
+ }
+ spin_unlock(&dev->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * pch_udc_setup_ep0() - This function enables the control endpoint for traffic
+ * @dev: Reference to the device structure
+ */
+static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
+{
+ /* enable ep0 interrupts */
+ pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
+ UDC_EPINT_OUT_EP0);
+ /* enable device interrupts */
+ pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
+ UDC_DEVINT_ES | UDC_DEVINT_ENUM |
+ UDC_DEVINT_SI | UDC_DEVINT_SC);
+}
+
+/**
+ * pch_udc_pcd_reinit() - This API initializes the endpoint structures
+ * @dev: Reference to the driver structure
+ */
+static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
+{
+ const char *const ep_string[] = {
+ ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
+ "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
+ "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
+ "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
+ "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
+ "ep15in", "ep15out",
+ };
+ int i;
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+ /* Initialize the endpoints structures */
+ memset(dev->ep, 0, sizeof dev->ep);
+ for (i = 0; i < PCH_UDC_EP_NUM; i++) {
+ struct pch_udc_ep *ep = &dev->ep[i];
+ ep->dev = dev;
+ ep->halted = 1;
+ ep->num = i / 2;
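+ /* even indices are IN endpoints, odd are OUT (ep0in, ep0out, ep1in, ...) */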
+ ep->in = ~i & 1;
+ ep->ep.name = ep_string[i];
+ ep->ep.ops = &pch_udc_ep_ops;
+ if (ep->in) {
+ ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
+ ep->ep.caps.dir_in = true;
+ } else {
+ ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
+ UDC_EP_REG_SHIFT;
+ ep->ep.caps.dir_out = true;
+ }
+ if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+ /* need to set ep->ep.maxpacket and set Default Configuration? */
+ usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ }
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
+
+ /* remove ep0 in and out from the list; they have their own pointers */
+ list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
+ list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
+
+ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+}
+
+/**
+ * pch_udc_pcd_init() - This API initializes the driver structure
+ * @dev: Reference to the driver structure
+ *
+ * Return codes:
+ * 0: Success
+ * -ERRNO: All kinds of errors when retrieving the VBUS GPIO
+ */
+static int pch_udc_pcd_init(struct pch_udc_dev *dev)
+{
+ int ret;
+
+ pch_udc_init(dev);
+ pch_udc_pcd_reinit(dev);
+
+ ret = pch_vbus_gpio_init(dev);
+ if (ret)
+ pch_udc_exit(dev);
+ return ret;
+}
+
+/**
+ * init_dma_pools() - create dma pools during initialization
+ * @dev: Reference to the driver structure
+ *
+ * Return: 0 on success, or a negative errno on failure
+ */
+static int init_dma_pools(struct pch_udc_dev *dev)
+{
+ struct pch_udc_stp_dma_desc *td_stp;
+ struct pch_udc_data_dma_desc *td_data;
+ void *ep0out_buf;
+
+ /* DMA setup */
+ dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
+ sizeof(struct pch_udc_data_dma_desc), 0, 0);
+ if (!dev->data_requests) {
+ dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* dma desc for setup data */
+ dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
+ sizeof(struct pch_udc_stp_dma_desc), 0, 0);
+ if (!dev->stp_requests) {
+ dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
+ __func__);
+ return -ENOMEM;
+ }
+ /* setup */
+ td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+ &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
+ if (!td_stp) {
+ dev_err(&dev->pdev->dev,
+ "%s: can't allocate setup dma descriptor\n", __func__);
+ return -ENOMEM;
+ }
+ dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
+
+ /* data: 0 packets !? */
+ td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
+ &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
+ if (!td_data) {
+ dev_err(&dev->pdev->dev,
+ "%s: can't allocate data dma descriptor\n", __func__);
+ return -ENOMEM;
+ }
+ dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
+ dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
+ dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
+ dev->ep[UDC_EP0IN_IDX].td_data = NULL;
+ dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+
+ ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
+ GFP_KERNEL);
+ if (!ep0out_buf)
+ return -ENOMEM;
+ dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
+ UDC_EP0OUT_BUFF_SIZE * 4,
+ DMA_FROM_DEVICE);
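+ /* dma_mapping_error() returns 0 on success and -ENOMEM on a failed mapping */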
+ return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
+}
+
+static int pch_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct pch_udc_dev *dev = to_pch_udc(g);
+
+ dev->driver = driver;
+
+ /* get ready for ep0 traffic */
+ pch_udc_setup_ep0(dev);
+
+ /* clear SD */
+ if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
+ pch_udc_clear_disconnect(dev);
+
+ dev->connected = 1;
+ return 0;
+}
+
+static int pch_udc_stop(struct usb_gadget *g)
+{
+ struct pch_udc_dev *dev = to_pch_udc(g);
+
+ pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+
+ /* Ensure that no requests are pending against this driver */
+ dev->driver = NULL;
+ dev->connected = 0;
+
+ /* set SD */
+ pch_udc_set_disconnect(dev);
+
+ return 0;
+}
+
+static void pch_vbus_gpio_remove_table(void *table)
+{
+ gpiod_remove_lookup_table(table);
+}
+
+static int pch_vbus_gpio_add_table(struct device *d, void *table)
+{
+ gpiod_add_lookup_table(table);
+ return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, table);
+}
+
+static struct gpiod_lookup_table pch_udc_minnow_vbus_gpio_table = {
+ .dev_id = "0000:02:02.4",
+ .table = {
+ GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
+ {}
+ },
+};
+
+static int pch_udc_minnow_platform_init(struct device *d)
+{
+ return pch_vbus_gpio_add_table(d, &pch_udc_minnow_vbus_gpio_table);
+}
+
+static int pch_udc_quark_platform_init(struct device *d)
+{
+ struct pch_udc_dev *dev = dev_get_drvdata(d);
+
+ dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
+ return 0;
+}
+
+static void pch_udc_shutdown(struct pci_dev *pdev)
+{
+ struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+
+ pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+ pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+ /* disable the pullup so the host will think we're gone */
+ pch_udc_set_disconnect(dev);
+}
+
+static void pch_udc_remove(struct pci_dev *pdev)
+{
+ struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&dev->gadget);
+
+ /* gadget driver must not be registered */
+ if (dev->driver)
+ dev_err(&pdev->dev,
+ "%s: gadget driver still bound!!!\n", __func__);
+ /* dma pool cleanup */
+ dma_pool_destroy(dev->data_requests);
+
+ if (dev->stp_requests) {
+ /* cleanup DMA desc's for ep0in */
+ if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
+ dma_pool_free(dev->stp_requests,
+ dev->ep[UDC_EP0OUT_IDX].td_stp,
+ dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
+ }
+ if (dev->ep[UDC_EP0OUT_IDX].td_data) {
+ dma_pool_free(dev->stp_requests,
+ dev->ep[UDC_EP0OUT_IDX].td_data,
+ dev->ep[UDC_EP0OUT_IDX].td_data_phys);
+ }
+ dma_pool_destroy(dev->stp_requests);
+ }
+
+ if (dev->dma_addr)
+ dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
+ UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
+
+ pch_vbus_gpio_free(dev);
+
+ pch_udc_exit(dev);
+}
+
+static int __maybe_unused pch_udc_suspend(struct device *d)
+{
+ struct pch_udc_dev *dev = dev_get_drvdata(d);
+
+ pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+ pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+ return 0;
+}
+
+static int __maybe_unused pch_udc_resume(struct device *d)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
+
+typedef int (*platform_init_fn)(struct device *);
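+/* per-variant init hook, carried in pci_device_id.driver_data by the match table below */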
+
+static int pch_udc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ platform_init_fn platform_init = (platform_init_fn)id->driver_data;
+ int retval;
+ struct pch_udc_dev *dev;
+
+ /* init */
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* pci setup */
+ retval = pcim_enable_device(pdev);
+ if (retval)
+ return retval;
+
+ dev->bar = PCH_UDC_PCI_BAR;
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ /* Platform specific hook */
+ if (platform_init) {
+ retval = platform_init(&pdev->dev);
+ if (retval)
+ return retval;
+ }
+
+ /* PCI resource allocation */
+ retval = pcim_iomap_regions(pdev, BIT(dev->bar), pci_name(pdev));
+ if (retval)
+ return retval;
+
+ dev->base_addr = pcim_iomap_table(pdev)[dev->bar];
+
+ /* initialize the hardware */
+ retval = pch_udc_pcd_init(dev);
+ if (retval)
+ return retval;
+
+ pci_enable_msi(pdev);
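+ /* MSI is best-effort here; if it fails, pdev->irq keeps the legacy INTx vector */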
+
+ retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (retval) {
+ dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
+ pdev->irq);
+ goto finished;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ /* device struct setup */
+ spin_lock_init(&dev->lock);
+ dev->gadget.ops = &pch_udc_ops;
+
+ retval = init_dma_pools(dev);
+ if (retval)
+ goto finished;
+
+ dev->gadget.name = KBUILD_MODNAME;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
+
+ /* Put the device in disconnected state till a driver is bound */
+ pch_udc_set_disconnect(dev);
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (retval)
+ goto finished;
+ return 0;
+
+finished:
+ pch_udc_remove(pdev);
+ return retval;
+}
+
+static const struct pci_device_id pch_udc_pcidev_id[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ .driver_data = (kernel_ulong_t)&pch_udc_quark_platform_init,
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC,
+ PCI_VENDOR_ID_CIRCUITCO, PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ .driver_data = (kernel_ulong_t)&pch_udc_minnow_platform_init,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
+
+static struct pci_driver pch_udc_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_udc_pcidev_id,
+ .probe = pch_udc_probe,
+ .remove = pch_udc_remove,
+ .shutdown = pch_udc_shutdown,
+ .driver = {
+ .pm = &pch_udc_pm,
+ },
+};
+
+module_pci_driver(pch_udc_driver);
+
+MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
+MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
new file mode 100644
index 000000000..9e01ddf2b
--- /dev/null
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -0,0 +1,2550 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
+ * Copyright (C) 2003 Robert Schwebel, Pengutronix
+ * Copyright (C) 2003 Benedikt Spranger, Pengutronix
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003 Joshua Wise
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/platform_data/pxa2xx_udc.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
+
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/mach-types.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+
+#define UDCCR 0x0000 /* UDC Control Register */
+#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
+#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
+#define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */
+#define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */
+#define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */
+#define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */
+#define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */
+#define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */
+#define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */
+#define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */
+#define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */
+#define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */
+#define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */
+#define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */
+#define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */
+#define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */
+#define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */
+#define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */
+#define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */
+#define UFNRH 0x0060 /* UDC Frame Number Register High */
+#define UFNRL 0x0064 /* UDC Frame Number Register Low */
+#define UBCR2 0x0068 /* UDC Byte Count Reg 2 */
+#define UBCR4 0x006c /* UDC Byte Count Reg 4 */
+#define UBCR7 0x0070 /* UDC Byte Count Reg 7 */
+#define UBCR9 0x0074 /* UDC Byte Count Reg 9 */
+#define UBCR12 0x0078 /* UDC Byte Count Reg 12 */
+#define UBCR14 0x007c /* UDC Byte Count Reg 14 */
+#define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */
+#define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */
+#define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */
+#define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */
+#define UDDR4 0x0400 /* UDC Endpoint 4 Data Register */
+#define UDDR5 0x00A0 /* UDC Endpoint 5 Data Register */
+#define UDDR6 0x0600 /* UDC Endpoint 6 Data Register */
+#define UDDR7 0x0680 /* UDC Endpoint 7 Data Register */
+#define UDDR8 0x0700 /* UDC Endpoint 8 Data Register */
+#define UDDR9 0x0900 /* UDC Endpoint 9 Data Register */
+#define UDDR10 0x00C0 /* UDC Endpoint 10 Data Register */
+#define UDDR11 0x0B00 /* UDC Endpoint 11 Data Register */
+#define UDDR12 0x0B80 /* UDC Endpoint 12 Data Register */
+#define UDDR13 0x0C00 /* UDC Endpoint 13 Data Register */
+#define UDDR14 0x0E00 /* UDC Endpoint 14 Data Register */
+#define UDDR15 0x00E0 /* UDC Endpoint 15 Data Register */
+
+#define UICR0 0x0050 /* UDC Interrupt Control Register 0 */
+#define UICR1 0x0054 /* UDC Interrupt Control Register 1 */
+
+#define USIR0 0x0058 /* UDC Status Interrupt Register 0 */
+#define USIR1 0x005C /* UDC Status Interrupt Register 1 */
+
+#define UDCCR_UDE (1 << 0) /* UDC enable */
+#define UDCCR_UDA (1 << 1) /* UDC active */
+#define UDCCR_RSM (1 << 2) /* Device resume */
+#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
+#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
+#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
+#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
+#define UDCCR_REM (1 << 7) /* Reset interrupt mask */
+
+#define UDCCS0_OPR (1 << 0) /* OUT packet ready */
+#define UDCCS0_IPR (1 << 1) /* IN packet ready */
+#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
+#define UDCCS0_SST (1 << 4) /* Sent stall */
+#define UDCCS0_FST (1 << 5) /* Force stall */
+#define UDCCS0_RNE (1 << 6) /* Receive FIFO not empty */
+#define UDCCS0_SA (1 << 7) /* Setup active */
+
+#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
+#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
+#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
+#define UDCCS_BI_SST (1 << 4) /* Sent stall */
+#define UDCCS_BI_FST (1 << 5) /* Force stall */
+#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
+
+#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
+#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
+#define UDCCS_BO_DME (1 << 3) /* DMA enable */
+#define UDCCS_BO_SST (1 << 4) /* Sent stall */
+#define UDCCS_BO_FST (1 << 5) /* Force stall */
+#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
+#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
+
+#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
+#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
+#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
+#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
+
+#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
+#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
+#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right? The datasheet says '2' */
+#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */
+#endif
+#ifdef CONFIG_ARCH_PXA
+#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */
+#endif
+#define UDCCS_IO_DME (1 << 3) /* DMA enable */
+#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
+#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
+
+#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
+#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
+#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
+#define UDCCS_INT_SST (1 << 4) /* Sent stall */
+#define UDCCS_INT_FST (1 << 5) /* Force stall */
+#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
+
+#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
+#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
+#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
+#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
+#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
+#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
+#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
+#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
+
+#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
+#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
+#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
+#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
+#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
+#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
+#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
+#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
+
+#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
+#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
+#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
+#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
+#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
+#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
+#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
+#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
+
+#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
+#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
+#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
+#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
+#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
+#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
+#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
+#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
+
+/*
+ * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
+ * series processors. The UDC for the IXP 4xx series is very similar.
+ * There are fifteen endpoints, in addition to ep0.
+ *
+ * Such controller drivers work with a gadget driver. The gadget driver
+ * returns descriptors, implements configuration and data protocols used
+ * by the host to interact with this device, and allocates endpoints to
+ * the different protocol interfaces. The controller driver virtualizes
+ * usb hardware so that the gadget drivers will be more portable.
+ *
+ * This UDC hardware wants to implement a bit too much USB protocol, so
+ * it constrains the sorts of USB configuration change events that work.
+ * The errata for these chips are misleading; some "fixed" bugs from
+ * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
+ *
+ * Note that the UDC hardware supports DMA (except on IXP) but that's
+ * not used here. IN-DMA (to host) is simple enough, when the data is
+ * suitably aligned (16 bytes) ... the network stack doesn't do that,
+ * other software can. OUT-DMA is buggy in most chip versions, as well
+ * as poorly designed (data toggle not automatic). So this driver won't
+ * bother using DMA. (Mostly-working IN-DMA support was available in
+ * kernels before 2.6.23, but was never enabled or well tested.)
+ */
+
+#define DRIVER_VERSION "30-June-2007"
+#define DRIVER_DESC "PXA 25x USB Device Controller driver"
+
+
+static const char driver_name [] = "pxa25x_udc";
+
+static const char ep0name [] = "ep0";
+
+
+#ifdef CONFIG_ARCH_IXP4XX
+
+/* cpu-specific register addresses are compiled into this code */
+#ifdef CONFIG_ARCH_PXA
+#error "Can't configure both IXP and PXA"
+#endif
+
+/* IXP doesn't yet support <linux/clk.h> */
+#define clk_get(dev,name) NULL
+#define clk_enable(clk) do { } while (0)
+#define clk_disable(clk) do { } while (0)
+#define clk_put(clk) do { } while (0)
+
+#endif
+
+#include "pxa25x_udc.h"
+
+
+#ifdef CONFIG_USB_PXA25X_SMALL
+#define SIZE_STR " (small)"
+#else
+#define SIZE_STR ""
+#endif
+
+/* ---------------------------------------------------------------------------
+ * endpoint related parts of the api to the usb controller hardware,
+ * used by gadget driver; and the inner talker-to-hardware core.
+ * ---------------------------------------------------------------------------
+ */
+
+static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
+static void nuke (struct pxa25x_ep *, int status);
+
+/* one GPIO should control a D+ pullup, so host sees this device (or not) */
+static void pullup_off(void)
+{
+ struct pxa2xx_udc_mach_info *mach = the_controller->mach;
+ int off_level = mach->gpio_pullup_inverted;
+
+ if (gpio_is_valid(mach->gpio_pullup))
+ gpio_set_value(mach->gpio_pullup, off_level);
+ else if (mach->udc_command)
+ mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
+}
+
+static void pullup_on(void)
+{
+ struct pxa2xx_udc_mach_info *mach = the_controller->mach;
+ int on_level = !mach->gpio_pullup_inverted;
+
+ if (gpio_is_valid(mach->gpio_pullup))
+ gpio_set_value(mach->gpio_pullup, on_level);
+ else if (mach->udc_command)
+ mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
+}
+
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+/*
+ * IXP4xx has its buses wired up in a way that relies on never doing any
+ * byte swaps, independent of whether it runs in big-endian or little-endian
+ * mode, as explained by Krzysztof Hałasa.
+ *
+ * We only support pxa25x in little-endian mode, but it is very likely
+ * that it works the same way.
+ */
+static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
+{
+ iowrite32be(val, dev->regs + reg);
+}
+
+static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
+{
+ return ioread32be(dev->regs + reg);
+}
+#else
+static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
+{
+ writel(val, dev->regs + reg);
+}
+
+static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
+{
+ return readl(dev->regs + reg);
+}
+#endif
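+
+/* all MMIO below goes through the accessors above; note that udc_set_reg()
+ * takes the register offset first and the value to write second.
+ */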
+
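+/* the UICR0/UICR1 bits are interrupt masks: clearing a bit enables that
+ * endpoint's irq, setting it masks the irq off.
+ */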
+static void pio_irq_enable(struct pxa25x_ep *ep)
+{
+ u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
+
+ if (bEndpointAddress < 8)
+ udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) &
+ ~(1 << bEndpointAddress));
+ else {
+ bEndpointAddress -= 8;
+ udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) &
+ ~(1 << bEndpointAddress));
+ }
+}
+
+static void pio_irq_disable(struct pxa25x_ep *ep)
+{
+ u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
+
+ if (bEndpointAddress < 8)
+ udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) |
+ (1 << bEndpointAddress));
+ else {
+ bEndpointAddress -= 8;
+ udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) |
+ (1 << bEndpointAddress));
+ }
+}
+
+/* The UDCCR reg contains mask and interrupt status bits,
+ * so using '|=' isn't safe as it may ack an interrupt.
+ */
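+/*
+ * For illustration: a naive read-modify-write such as
+ *
+ *	udc_set_reg(dev, UDCCR, udc_get_reg(dev, UDCCR) | UDCCR_SRM);
+ *
+ * would write back any pending RSTIR/SUSIR/RESIR status bits and ack
+ * them by accident, since those bits are write-one-to-clear. The
+ * helpers below therefore mask everything but UDCCR_MASK_BITS first.
+ */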
+#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
+
+static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
+{
+ u32 udccr = udc_get_reg(dev, UDCCR);
+
+ udc_set_reg(dev, UDCCR, (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
+}
+
+static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
+{
+ u32 udccr = udc_get_reg(dev, UDCCR);
+
+ udc_set_reg(dev, UDCCR, (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
+}
+
+static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
+{
+ /* udccr contains the bits we don't want to change */
+ u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;
+
+ udc_set_reg(dev, UDCCR, udccr | (mask & ~UDCCR_MASK_BITS));
+}
+
+static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
+{
+ return udc_get_reg(ep->dev, ep->regoff_udccs);
+}
+
+static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
+{
+ udc_set_reg(ep->dev, ep->regoff_udccs, data);
+}
+
+static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
+{
+ return udc_get_reg(dev, UDCCS0);
+}
+
+static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
+{
+ udc_set_reg(dev, UDCCS0, data);
+}
+
+static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
+{
+ return udc_get_reg(ep->dev, ep->regoff_uddr);
+}
+
+static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
+{
+ udc_set_reg(ep->dev, ep->regoff_uddr, data);
+}
+
+static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
+{
+ return udc_get_reg(ep->dev, ep->regoff_ubcr);
+}
+
+/*
+ * endpoint enable/disable
+ *
+ * we need to verify the descriptors used to enable endpoints. since pxa25x
+ * endpoint configurations are fixed, and are pretty much always enabled,
+ * there's not a lot to manage here.
+ *
+ * because pxa25x can't selectively initialize bulk (or interrupt) endpoints,
+ * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
+ * for a single interface (with only the default altsetting) and for gadget
+ * drivers that don't halt endpoints (not reset by set_interface). that also
+ * means that if you use ISO, you must violate the USB spec rule that all
+ * iso endpoints must be in non-default altsettings.
+ */
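+/*
+ * For illustration (hypothetical values): a bulk descriptor that passes
+ * the checks in pxa25x_ep_enable() must advertise the fixed 64-byte
+ * bulk FIFO size, e.g.
+ *
+ *	.bEndpointAddress = USB_DIR_IN | 1,
+ *	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
+ *	.wMaxPacketSize   = cpu_to_le16(BULK_FIFO_SIZE),
+ */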
+static int pxa25x_ep_enable (struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct pxa25x_ep *ep;
+ struct pxa25x_udc *dev;
+
+ ep = container_of (_ep, struct pxa25x_ep, ep);
+ if (!_ep || !desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT
+ || ep->bEndpointAddress != desc->bEndpointAddress
+ || ep->fifo_size < usb_endpoint_maxp (desc)) {
+ DMSG("%s, bad ep or descriptor\n", __func__);
+ return -EINVAL;
+ }
+
+ /* xfer types must match, except that interrupt ~= bulk */
+ if (ep->bmAttributes != desc->bmAttributes
+ && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
+ && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
+ DMSG("%s, %s type mismatch\n", __func__, _ep->name);
+ return -EINVAL;
+ }
+
+ /* hardware _could_ do smaller, but driver doesn't */
+ if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+ && usb_endpoint_maxp (desc)
+ != BULK_FIFO_SIZE)
+ || !desc->wMaxPacketSize) {
+ DMSG("%s, bad %s maxpacket\n", __func__, _ep->name);
+ return -ERANGE;
+ }
+
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ DMSG("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+
+ ep->ep.desc = desc;
+ ep->stopped = 0;
+ ep->pio_irqs = 0;
+ ep->ep.maxpacket = usb_endpoint_maxp (desc);
+
+ /* flush fifo (mostly for OUT buffers) */
+ pxa25x_ep_fifo_flush (_ep);
+
+ /* ... reset halt state too, if we could ... */
+
+ DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
+ return 0;
+}
+
+static int pxa25x_ep_disable (struct usb_ep *_ep)
+{
+ struct pxa25x_ep *ep;
+ unsigned long flags;
+
+ ep = container_of (_ep, struct pxa25x_ep, ep);
+ if (!_ep || !ep->ep.desc) {
+ DMSG("%s, %s not enabled\n", __func__,
+ _ep ? ep->ep.name : NULL);
+ return -EINVAL;
+ }
+ local_irq_save(flags);
+
+ nuke (ep, -ESHUTDOWN);
+
+ /* flush fifo (mostly for IN buffers) */
+ pxa25x_ep_fifo_flush (_ep);
+
+ ep->ep.desc = NULL;
+ ep->stopped = 1;
+
+ local_irq_restore(flags);
+ DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers
+ * must still pass correctly initialized endpoints, since other controller
+ * drivers may care about how it's currently set up (dma issues etc).
+ */
+
+/*
+ * pxa25x_ep_alloc_request - allocate a request data structure
+ */
+static struct usb_request *
+pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct pxa25x_request *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD (&req->queue);
+ return &req->req;
+}
+
+
+/*
+ * pxa25x_ep_free_request - deallocate a request data structure
+ */
+static void
+pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct pxa25x_request *req;
+
+ req = container_of (_req, struct pxa25x_request, req);
+ WARN_ON(!list_empty (&req->queue));
+ kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * done - retire a request; caller blocked irqs
+ */
+static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
+{
+ unsigned stopped = ep->stopped;
+
+ list_del_init(&req->queue);
+
+ if (likely (req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (status && status != -ESHUTDOWN)
+ DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ ep->stopped = stopped;
+}
+
+
+static inline void ep0_idle (struct pxa25x_udc *dev)
+{
+ dev->ep0state = EP0_IDLE;
+}
+
+static int
+write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max)
+{
+ u8 *buf;
+ unsigned length, count;
+
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ /* how big will this packet be? */
+ length = min(req->req.length - req->req.actual, max);
+ req->req.actual += length;
+
+ count = length;
+ while (likely(count--))
+ udc_ep_set_UDDR(ep, *buf++);
+
+ return length;
+}
+
+/*
+ * write to an IN endpoint fifo, as many packets as possible.
+ * irqs will use this to write the rest later.
+ * caller guarantees at least one packet buffer is ready (or a zlp).
+ */
+static int
+write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+ unsigned max;
+
+ max = usb_endpoint_maxp(ep->ep.desc);
+ do {
+ unsigned count;
+ int is_last, is_short;
+
+ count = write_packet(ep, req, max);
+
+ /* last packet is usually short (or a zlp) */
+ if (unlikely (count != max))
+ is_last = is_short = 1;
+ else {
+ if (likely(req->req.length != req->req.actual)
+ || req->req.zero)
+ is_last = 0;
+ else
+ is_last = 1;
+ /* interrupt/iso maxpacket may not fill the fifo */
+ is_short = unlikely (max < ep->fifo_size);
+ }
+
+ DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
+ ep->ep.name, count,
+ is_last ? "/L" : "", is_short ? "/S" : "",
+ req->req.length - req->req.actual, req);
+
+ /* let loose that packet. maybe try writing another one,
+ * double buffering might work. TSP, TPC, and TFS
+ * bit values are the same for all normal IN endpoints.
+ */
+ udc_ep_set_UDCCS(ep, UDCCS_BI_TPC);
+ if (is_short)
+ udc_ep_set_UDCCS(ep, UDCCS_BI_TSP);
+
+ /* requests complete when all IN data is in the FIFO */
+ if (is_last) {
+ done (ep, req, 0);
+ if (list_empty(&ep->queue))
+ pio_irq_disable(ep);
+ return 1;
+ }
+
+ // TODO experiment: how robust can fifo mode tweaking be?
+ // double buffering is off in the default fifo mode, which
+ // prevents TFS from being set here.
+
+ } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS);
+ return 0;
+}
+
+/* caller asserts req->pending (ep0 irq status nyet cleared); starts
+ * ep0 data stage. these chips want very simple state transitions.
+ */
+static inline
+void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
+{
+ udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR);
+ udc_set_reg(dev, USIR0, USIR0_IR0);
+ dev->req_pending = 0;
+ DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
+ __func__, tag, udc_ep0_get_UDCCS(dev), flags);
+}
+
+static int
+write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+ struct pxa25x_udc *dev = ep->dev;
+ unsigned count;
+ int is_short;
+
+ count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE);
+ ep->dev->stats.write.bytes += count;
+
+ /* last packet "must be" short (or a zlp) */
+ is_short = (count != EP0_FIFO_SIZE);
+
+ DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
+ req->req.length - req->req.actual, req);
+
+ if (unlikely (is_short)) {
+ if (ep->dev->req_pending)
+ ep0start(ep->dev, UDCCS0_IPR, "short IN");
+ else
+ udc_ep0_set_UDCCS(dev, UDCCS0_IPR);
+
+ count = req->req.length;
+ done (ep, req, 0);
+ ep0_idle(ep->dev);
+#ifndef CONFIG_ARCH_IXP4XX
+#if 1
+ /* This seems to get rid of lost status irqs in some cases:
+ * host responds quickly, or next request involves config
+ * change automagic, or should have been hidden, or ...
+ *
+ * FIXME get rid of all udelays possible...
+ */
+ if (count >= EP0_FIFO_SIZE) {
+ count = 100;
+ do {
+ if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) {
+ /* clear OPR, generate ack */
+ udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
+ break;
+ }
+ count--;
+ udelay(1);
+ } while (count);
+ }
+#endif
+#endif
+ } else if (ep->dev->req_pending)
+ ep0start(ep->dev, 0, "IN");
+ return is_short;
+}
+
+
+/*
+ * read_fifo - unload packet(s) from the fifo we use for usb OUT
+ * transfers and put them into the request. caller should have made
+ * sure there's at least one packet ready.
+ *
+ * returns true if the request completed because of short packet or the
+ * request buffer having filled (and maybe overran till end-of-packet).
+ */
+static int
+read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+ for (;;) {
+ u32 udccs;
+ u8 *buf;
+ unsigned bufferspace, count, is_short;
+
+ /* make sure there's a packet in the FIFO.
+ * UDCCS_{BO,IO}_RPC are all the same bit value.
+ * UDCCS_{BO,IO}_RNE are all the same bit value.
+ */
+ udccs = udc_ep_get_UDCCS(ep);
+ if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
+ break;
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+ bufferspace = req->req.length - req->req.actual;
+
+ /* read all bytes from this packet */
+ if (likely (udccs & UDCCS_BO_RNE)) {
+ count = 1 + (0x0ff & udc_ep_get_UBCR(ep));
+ req->req.actual += min (count, bufferspace);
+ } else /* zlp */
+ count = 0;
+ is_short = (count < ep->ep.maxpacket);
+ DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
+ ep->ep.name, udccs, count,
+ is_short ? "/S" : "",
+ req, req->req.actual, req->req.length);
+ while (likely (count-- != 0)) {
+ u8 byte = (u8) udc_ep_get_UDDR(ep);
+
+ if (unlikely (bufferspace == 0)) {
+ /* this happens when the driver's buffer
+ * is smaller than what the host sent.
+ * discard the extra data.
+ */
+ if (req->req.status != -EOVERFLOW)
+ DMSG("%s overflow %d\n",
+ ep->ep.name, count);
+ req->req.status = -EOVERFLOW;
+ } else {
+ *buf++ = byte;
+ bufferspace--;
+ }
+ }
+ udc_ep_set_UDCCS(ep, UDCCS_BO_RPC);
+ /* RPC/RSP/RNE could now reflect the other packet buffer */
+
+ /* iso is one request per packet */
+ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (udccs & UDCCS_IO_ROF)
+ req->req.status = -EHOSTUNREACH;
+ /* more like "is_done" */
+ is_short = 1;
+ }
+
+ /* completion */
+ if (is_short || req->req.actual == req->req.length) {
+ done (ep, req, 0);
+ if (list_empty(&ep->queue))
+ pio_irq_disable(ep);
+ return 1;
+ }
+
+ /* finished that packet. the next one may be waiting... */
+ }
+ return 0;
+}
+
+/*
+ * special ep0 version of the above. no UBCR0 or double buffering; status
+ * handshaking is magic. most device protocols don't need control-OUT.
+ * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
+ * protocols do use them.
+ */
+static int
+read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
+{
+ u8 *buf, byte;
+ unsigned bufferspace;
+
+ buf = req->req.buf + req->req.actual;
+ bufferspace = req->req.length - req->req.actual;
+
+ while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
+ byte = (u8) udc_get_reg(ep->dev, UDDR0);
+
+ if (unlikely (bufferspace == 0)) {
+ /* this happens when the driver's buffer
+ * is smaller than what the host sent.
+ * discard the extra data.
+ */
+ if (req->req.status != -EOVERFLOW)
+ DMSG("%s overflow\n", ep->ep.name);
+ req->req.status = -EOVERFLOW;
+ } else {
+ *buf++ = byte;
+ req->req.actual++;
+ bufferspace--;
+ }
+ }
+
+ udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);
+
+ /* completion */
+ if (req->req.actual >= req->req.length)
+ return 1;
+
+ /* finished that packet. the next one may be waiting... */
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct pxa25x_request *req;
+ struct pxa25x_ep *ep;
+ struct pxa25x_udc *dev;
+ unsigned long flags;
+
+ req = container_of(_req, struct pxa25x_request, req);
+ if (unlikely (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue))) {
+ DMSG("%s, bad params\n", __func__);
+ return -EINVAL;
+ }
+
+ ep = container_of(_ep, struct pxa25x_ep, ep);
+ if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
+ DMSG("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+
+ dev = ep->dev;
+ if (unlikely (!dev->driver
+ || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+ DMSG("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+
+ /* iso is always one packet per request, that's the only way
+ * we can report per-packet status. that also helps with dma.
+ */
+ if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+ && req->req.length > usb_endpoint_maxp(ep->ep.desc)))
+ return -EMSGSIZE;
+
+ DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
+ _ep->name, _req, _req->length, _req->buf);
+
+ local_irq_save(flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ if (ep->ep.desc == NULL /* ep0 */) {
+ unsigned length = _req->length;
+
+ switch (dev->ep0state) {
+ case EP0_IN_DATA_PHASE:
+ dev->stats.write.ops++;
+ if (write_ep0_fifo(ep, req))
+ req = NULL;
+ break;
+
+ case EP0_OUT_DATA_PHASE:
+ dev->stats.read.ops++;
+ /* messy ... */
+ if (dev->req_config) {
+ DBG(DBG_VERBOSE, "ep0 config ack%s\n",
+ dev->has_cfr ? "" : " raced");
+ if (dev->has_cfr)
+ udc_set_reg(dev, UDCCFR, UDCCFR_AREN |
+ UDCCFR_ACM | UDCCFR_MB1);
+ done(ep, req, 0);
+ dev->ep0state = EP0_END_XFER;
+ local_irq_restore (flags);
+ return 0;
+ }
+ if (dev->req_pending)
+ ep0start(dev, UDCCS0_IPR, "OUT");
+ if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0
+ && read_ep0_fifo(ep, req))) {
+ ep0_idle(dev);
+ done(ep, req, 0);
+ req = NULL;
+ }
+ break;
+
+ default:
+ DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
+ local_irq_restore (flags);
+ return -EL2HLT;
+ }
+ /* can the FIFO satisfy the request immediately? */
+ } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
+ if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0
+ && write_fifo(ep, req))
+ req = NULL;
+ } else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0
+ && read_fifo(ep, req)) {
+ req = NULL;
+ }
+
+ if (likely(req && ep->ep.desc))
+ pio_irq_enable(ep);
+ }
+
+ /* pio or dma irq handler advances the queue. */
+ if (likely(req != NULL))
+ list_add_tail(&req->queue, &ep->queue);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+/*
+ * nuke - dequeue ALL requests
+ */
+static void nuke(struct pxa25x_ep *ep, int status)
+{
+ struct pxa25x_request *req;
+
+ /* called with irqs blocked */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct pxa25x_request,
+ queue);
+ done(ep, req, status);
+ }
+ if (ep->ep.desc)
+ pio_irq_disable(ep);
+}
+
+
+/* dequeue JUST ONE request */
+static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct pxa25x_ep *ep;
+ struct pxa25x_request *req = NULL;
+ struct pxa25x_request *iter;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct pxa25x_ep, ep);
+ if (!_ep || ep->ep.name == ep0name)
+ return -EINVAL;
+
+ local_irq_save(flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ local_irq_restore(flags);
+ return -EINVAL;
+ }
+
+ done(ep, req, -ECONNRESET);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct pxa25x_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct pxa25x_ep, ep);
+ if (unlikely (!_ep
+ || (!ep->ep.desc && ep->ep.name != ep0name))
+ || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ DMSG("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ if (value == 0) {
+ /* this path (reset toggle+halt) is needed to implement
+ * SET_INTERFACE on normal hardware. but it can't be
+ * done from software on the PXA UDC, and the hardware
+ * forgets to do it as part of SET_INTERFACE automagic.
+ */
+ DMSG("only host can clear %s halt\n", _ep->name);
+ return -EROFS;
+ }
+
+ local_irq_save(flags);
+
+ if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+ && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0
+ || !list_empty(&ep->queue))) {
+ local_irq_restore(flags);
+ return -EAGAIN;
+ }
+
+ /* FST bit is the same for control, bulk in, bulk out, interrupt in */
+ udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF);
+
+ /* ep0 needs special care */
+ if (!ep->ep.desc) {
+ start_watchdog(ep->dev);
+ ep->dev->req_pending = 0;
+ ep->dev->ep0state = EP0_STALL;
+
+ /* and bulk/intr endpoints like dropping stalls too */
+ } else {
+ unsigned i;
+ for (i = 0; i < 1000; i += 20) {
+ if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST)
+ break;
+ udelay(20);
+ }
+ }
+ local_irq_restore(flags);
+
+ DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
+ return 0;
+}
+
+static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
+{
+ struct pxa25x_ep *ep;
+
+ ep = container_of(_ep, struct pxa25x_ep, ep);
+ if (!_ep) {
+ DMSG("%s, bad ep\n", __func__);
+ return -ENODEV;
+ }
+ /* pxa can't report unclaimed bytes from IN fifos */
+ if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
+ return -EOPNOTSUPP;
+ if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
+ || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0)
+ return 0;
+ else
+ return (udc_ep_get_UBCR(ep) & 0xfff) + 1;
+}
+
+static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct pxa25x_ep *ep;
+
+ ep = container_of(_ep, struct pxa25x_ep, ep);
+ if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
+ DMSG("%s, bad ep\n", __func__);
+ return;
+ }
+
+ /* toggle and halt bits stay unchanged */
+
+ /* for OUT, just read and discard the FIFO contents. */
+ if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
+ while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0)
+ (void)udc_ep_get_UDDR(ep);
+ return;
+ }
+
+ /* most IN status is the same, but ISO can't stall */
+ udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
+ | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
+ ? 0 : UDCCS_BI_SST));
+}
+
+
+static const struct usb_ep_ops pxa25x_ep_ops = {
+ .enable = pxa25x_ep_enable,
+ .disable = pxa25x_ep_disable,
+
+ .alloc_request = pxa25x_ep_alloc_request,
+ .free_request = pxa25x_ep_free_request,
+
+ .queue = pxa25x_ep_queue,
+ .dequeue = pxa25x_ep_dequeue,
+
+ .set_halt = pxa25x_ep_set_halt,
+ .fifo_status = pxa25x_ep_fifo_status,
+ .fifo_flush = pxa25x_ep_fifo_flush,
+};
+
+
+/* ---------------------------------------------------------------------------
+ * device-scoped parts of the api to the usb controller hardware
+ * ---------------------------------------------------------------------------
+ */
+
+static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
+{
+ struct pxa25x_udc *dev;
+
+ dev = container_of(_gadget, struct pxa25x_udc, gadget);
+ return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) |
+ (udc_get_reg(dev, UFNRL) & 0xff);
+}
+
+static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
+{
+ struct pxa25x_udc *udc;
+
+ udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
+ /* host may not have enabled remote wakeup */
+ if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0)
+ return -EHOSTUNREACH;
+ udc_set_mask_UDCCR(udc, UDCCR_RSM);
+ return 0;
+}
+
+static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *);
+static void udc_enable (struct pxa25x_udc *);
+static void udc_disable(struct pxa25x_udc *);
+
+/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
+ * in active use.
+ */
+static int pullup(struct pxa25x_udc *udc)
+{
+ int is_active = udc->vbus && udc->pullup && !udc->suspended;
+ DMSG("%s\n", is_active ? "active" : "inactive");
+ if (is_active) {
+ if (!udc->active) {
+ udc->active = 1;
+ /* Enable clock for USB device */
+ clk_enable(udc->clk);
+ udc_enable(udc);
+ }
+ } else {
+ if (udc->active) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ DMSG("disconnect %s\n", udc->driver
+ ? udc->driver->driver.name
+ : "(no driver)");
+ stop_activity(udc, udc->driver);
+ }
+ udc_disable(udc);
+ /* Disable clock for USB device */
+ clk_disable(udc->clk);
+ udc->active = 0;
+ }
+
+ }
+ return 0;
+}
+
+/* VBUS reporting logically comes from a transceiver */
+static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+ struct pxa25x_udc *udc;
+
+ udc = container_of(_gadget, struct pxa25x_udc, gadget);
+ udc->vbus = is_active;
+ DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
+ pullup(udc);
+ return 0;
+}
+
+/* drivers may have software control over D+ pullup */
+static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ struct pxa25x_udc *udc;
+
+ udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
+ /* not all boards support pullup control */
+ if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
+ return -EOPNOTSUPP;
+
+ udc->pullup = (is_active != 0);
+ pullup(udc);
+ return 0;
+}
+
+/* boards may consume current from VBUS, up to 100-500mA based on config.
+ * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
+ * violate USB specs.
+ */
+static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+ struct pxa25x_udc *udc;
+
+ udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ return usb_phy_set_power(udc->transceiver, mA);
+ return -EOPNOTSUPP;
+}
+
+static int pxa25x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int pxa25x_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops pxa25x_udc_ops = {
+ .get_frame = pxa25x_udc_get_frame,
+ .wakeup = pxa25x_udc_wakeup,
+ .vbus_session = pxa25x_udc_vbus_session,
+ .pullup = pxa25x_udc_pullup,
+ .vbus_draw = pxa25x_udc_vbus_draw,
+ .udc_start = pxa25x_udc_start,
+ .udc_stop = pxa25x_udc_stop,
+};
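+
+/*
+ * These ops are invoked by the UDC core rather than called directly by
+ * gadget drivers; e.g. usb_gadget_connect() ends up in pxa25x_udc_pullup(g, 1).
+ */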
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+
+static int udc_debug_show(struct seq_file *m, void *_d)
+{
+ struct pxa25x_udc *dev = m->private;
+ unsigned long flags;
+ int i;
+ u32 tmp;
+
+ local_irq_save(flags);
+
+ /* basic device status */
+ seq_printf(m, DRIVER_DESC "\n"
+ "%s version: %s\nGadget driver: %s\nHost %s\n\n",
+ driver_name, DRIVER_VERSION SIZE_STR "(pio)",
+ dev->driver ? dev->driver->driver.name : "(none)",
+ dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected");
+
+ /* registers for device and ep0 */
+ seq_printf(m,
+ "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
+ udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0),
+ udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0),
+ udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL));
+
+ tmp = udc_get_reg(dev, UDCCR);
+ seq_printf(m,
+ "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
+ (tmp & UDCCR_REM) ? " rem" : "",
+ (tmp & UDCCR_RSTIR) ? " rstir" : "",
+ (tmp & UDCCR_SRM) ? " srm" : "",
+ (tmp & UDCCR_SUSIR) ? " susir" : "",
+ (tmp & UDCCR_RESIR) ? " resir" : "",
+ (tmp & UDCCR_RSM) ? " rsm" : "",
+ (tmp & UDCCR_UDA) ? " uda" : "",
+ (tmp & UDCCR_UDE) ? " ude" : "");
+
+ tmp = udc_ep0_get_UDCCS(dev);
+ seq_printf(m,
+ "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
+ (tmp & UDCCS0_SA) ? " sa" : "",
+ (tmp & UDCCS0_RNE) ? " rne" : "",
+ (tmp & UDCCS0_FST) ? " fst" : "",
+ (tmp & UDCCS0_SST) ? " sst" : "",
+ (tmp & UDCCS0_DRWF) ? " drwf" : "",
+ (tmp & UDCCS0_FTF) ? " ftf" : "",
+ (tmp & UDCCS0_IPR) ? " ipr" : "",
+ (tmp & UDCCS0_OPR) ? " opr" : "");
+
+ if (dev->has_cfr) {
+ tmp = udc_get_reg(dev, UDCCFR);
+ seq_printf(m,
+ "udccfr %02X =%s%s\n", tmp,
+ (tmp & UDCCFR_AREN) ? " aren" : "",
+ (tmp & UDCCFR_ACM) ? " acm" : "");
+ }
+
+ if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver)
+ goto done;
+
+ seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
+ dev->stats.write.bytes, dev->stats.write.ops,
+ dev->stats.read.bytes, dev->stats.read.ops,
+ dev->stats.irqs);
+
+ /* dump endpoint queues */
+ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+ struct pxa25x_ep *ep = &dev->ep [i];
+ struct pxa25x_request *req;
+
+ if (i != 0) {
+ const struct usb_endpoint_descriptor *desc;
+
+ desc = ep->ep.desc;
+ if (!desc)
+ continue;
+ tmp = udc_ep_get_UDCCS(&dev->ep[i]);
+ seq_printf(m,
+ "%s max %d %s udccs %02x irqs %lu\n",
+ ep->ep.name, usb_endpoint_maxp(desc),
+ "pio", tmp, ep->pio_irqs);
+ /* TODO translate all five groups of udccs bits! */
+
+ } else /* ep0 should only have one transfer queued */
+ seq_printf(m, "ep0 max 16 pio irqs %lu\n",
+ ep->pio_irqs);
+
+ if (list_empty(&ep->queue)) {
+ seq_printf(m, "\t(nothing queued)\n");
+ continue;
+ }
+ list_for_each_entry(req, &ep->queue, queue) {
+ seq_printf(m,
+ "\treq %p len %d/%d buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ }
+ }
+
+done:
+ local_irq_restore(flags);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(udc_debug);
+
+#define create_debug_files(dev) \
+ do { \
+ debugfs_create_file(dev->gadget.name, \
+ S_IRUGO, NULL, dev, &udc_debug_fops); \
+ } while (0)
+#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
+
+#else /* !CONFIG_USB_GADGET_DEBUG_FS */
+
+#define create_debug_files(dev) do {} while (0)
+#define remove_debug_files(dev) do {} while (0)
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FS */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * udc_disable - disable USB device controller
+ */
+static void udc_disable(struct pxa25x_udc *dev)
+{
+ /* block all irqs */
+ udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM);
+ udc_set_reg(dev, UICR0, 0xff);
+ udc_set_reg(dev, UICR1, 0xff);
+ udc_set_reg(dev, UFNRH, UFNRH_SIM);
+
+ /* if hardware supports it, disconnect from usb */
+ pullup_off();
+
+ udc_clear_mask_UDCCR(dev, UDCCR_UDE);
+
+ ep0_idle (dev);
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+
+/*
+ * udc_reinit - initialize software state
+ */
+static void udc_reinit(struct pxa25x_udc *dev)
+{
+ u32 i;
+
+ /* device/ep0 records init */
+ INIT_LIST_HEAD (&dev->gadget.ep_list);
+ INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+ dev->ep0state = EP0_IDLE;
+ dev->gadget.quirk_altset_not_supp = 1;
+
+ /* basic endpoint records init */
+ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+ struct pxa25x_ep *ep = &dev->ep[i];
+
+ if (i != 0)
+ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+
+ ep->ep.desc = NULL;
+ ep->stopped = 0;
+ INIT_LIST_HEAD (&ep->queue);
+ ep->pio_irqs = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
+ }
+
+ /* the rest was statically initialized, and is read-only */
+}
+
+/* until it's enabled, this UDC should be completely invisible
+ * to any USB host.
+ */
+static void udc_enable (struct pxa25x_udc *dev)
+{
+ udc_clear_mask_UDCCR(dev, UDCCR_UDE);
+
+ /* try to clear these bits before we enable the udc */
+ udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
+
+ ep0_idle(dev);
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ dev->stats.irqs = 0;
+
+ /*
+ * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
+ * - enable UDC
+ * - if RESET is already in progress, ack interrupt
+ * - unmask reset interrupt
+ */
+ udc_set_mask_UDCCR(dev, UDCCR_UDE);
+ if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA))
+ udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
+
+ if (dev->has_cfr /* UDC_RES2 is defined */) {
+ /* pxa255 (a0+) can avoid a set_config race that could
+ * prevent gadget drivers from configuring correctly
+ */
+ udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1);
+ } else {
+ /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
+ * which could result in missing packets and interrupts.
+ * supposedly one bit per endpoint, controlling whether it
+ * double buffers or not; ACM/AREN bits fit into the holes.
+ * zero bits (like USIR0_IRx) disable double buffering.
+ */
+ udc_set_reg(dev, UDC_RES1, 0x00);
+ udc_set_reg(dev, UDC_RES2, 0x00);
+ }
+
+ /* enable suspend/resume and reset irqs */
+ udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM);
+
+ /* enable ep0 irqs */
+ udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0);
+
+ /* if hardware supports it, pullup D+ and wait for reset */
+ pullup_on();
+}
+
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests. then usb traffic follows until a
+ * disconnect is reported. then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int pxa25x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct pxa25x_udc *dev = to_pxa25x(g);
+ int retval;
+
+ /* first hook up the driver ... */
+ dev->driver = driver;
+ dev->pullup = 1;
+
+ /* ... then enable host detection and ep0; and we're ready
+ * for set_configuration as well as eventual disconnect.
+ */
+ /* connect to bus through transceiver */
+ if (!IS_ERR_OR_NULL(dev->transceiver)) {
+ retval = otg_set_peripheral(dev->transceiver->otg,
+ &dev->gadget);
+ if (retval)
+ goto bind_fail;
+ }
+
+ dump_state(dev);
+ return 0;
+bind_fail:
+ return retval;
+}
+
+static void
+reset_gadget(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
+{
+ int i;
+
+ /* don't disconnect drivers more than once */
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ /* prevent new request submissions, kill any outstanding requests */
+ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+ struct pxa25x_ep *ep = &dev->ep[i];
+
+ ep->stopped = 1;
+ nuke(ep, -ESHUTDOWN);
+ }
+ del_timer_sync(&dev->timer);
+
+ /* report reset; the driver is already quiesced */
+ if (driver)
+ usb_gadget_udc_reset(&dev->gadget, driver);
+
+ /* re-init driver-visible data structures */
+ udc_reinit(dev);
+}
+
+static void
+stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
+{
+ int i;
+
+ /* don't disconnect drivers more than once */
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ /* prevent new request submissions, kill any outstanding requests */
+ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+ struct pxa25x_ep *ep = &dev->ep[i];
+
+ ep->stopped = 1;
+ nuke(ep, -ESHUTDOWN);
+ }
+ del_timer_sync(&dev->timer);
+
+ /* report disconnect; the driver is already quiesced */
+ if (driver)
+ driver->disconnect(&dev->gadget);
+
+ /* re-init driver-visible data structures */
+ udc_reinit(dev);
+}
+
+static int pxa25x_udc_stop(struct usb_gadget *g)
+{
+ struct pxa25x_udc *dev = to_pxa25x(g);
+
+ local_irq_disable();
+ dev->pullup = 0;
+ stop_activity(dev, NULL);
+ local_irq_enable();
+
+ if (!IS_ERR_OR_NULL(dev->transceiver))
+ (void) otg_set_peripheral(dev->transceiver->otg, NULL);
+
+ dev->driver = NULL;
+
+ dump_state(dev);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_LUBBOCK
+
+/* Lubbock has separate connect and disconnect irqs. More typical designs
+ * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
+ */
+
+static irqreturn_t
+lubbock_vbus_irq(int irq, void *_dev)
+{
+ struct pxa25x_udc *dev = _dev;
+ int vbus;
+
+ dev->stats.irqs++;
+ if (irq == dev->usb_irq) {
+ vbus = 1;
+ disable_irq(dev->usb_irq);
+ enable_irq(dev->usb_disc_irq);
+ } else if (irq == dev->usb_disc_irq) {
+ vbus = 0;
+ disable_irq(dev->usb_disc_irq);
+ enable_irq(dev->usb_irq);
+ } else {
+ return IRQ_NONE;
+ }
+
+ pxa25x_udc_vbus_session(&dev->gadget, vbus);
+ return IRQ_HANDLED;
+}
+
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline void clear_ep_state (struct pxa25x_udc *dev)
+{
+ unsigned i;
+
+ /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
+ * fifos, and pending transactions mustn't be continued in any case.
+ */
+ for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
+ nuke(&dev->ep[i], -ECONNABORTED);
+}
+
+static void udc_watchdog(struct timer_list *t)
+{
+ struct pxa25x_udc *dev = from_timer(dev, t, timer);
+
+ local_irq_disable();
+ if (dev->ep0state == EP0_STALL
+ && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0
+ && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) {
+ udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF);
+ DBG(DBG_VERBOSE, "ep0 re-stall\n");
+ start_watchdog(dev);
+ }
+ local_irq_enable();
+}
+
+static void handle_ep0 (struct pxa25x_udc *dev)
+{
+ u32 udccs0 = udc_ep0_get_UDCCS(dev);
+ struct pxa25x_ep *ep = &dev->ep [0];
+ struct pxa25x_request *req;
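+ /* the 8-byte SETUP packet is viewed three ways: as a
+ * usb_ctrlrequest, as raw bytes while draining the fifo,
+ * and as two words for the erratum 131 sanity checks
+ */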
+ union {
+ struct usb_ctrlrequest r;
+ u8 raw [8];
+ u32 word [2];
+ } u;
+
+ if (list_empty(&ep->queue))
+ req = NULL;
+ else
+ req = list_entry(ep->queue.next, struct pxa25x_request, queue);
+
+ /* clear stall status */
+ if (udccs0 & UDCCS0_SST) {
+ nuke(ep, -EPIPE);
+ udc_ep0_set_UDCCS(dev, UDCCS0_SST);
+ del_timer(&dev->timer);
+ ep0_idle(dev);
+ }
+
+ /* previous request unfinished? non-error iff back-to-back ... */
+ if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
+ nuke(ep, 0);
+ del_timer(&dev->timer);
+ ep0_idle(dev);
+ }
+
+ switch (dev->ep0state) {
+ case EP0_IDLE:
+ /* late-breaking status? */
+ udccs0 = udc_ep0_get_UDCCS(dev);
+
+ /* start control request? */
+ if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
+ == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
+ int i;
+
+ nuke (ep, -EPROTO);
+
+ /* read SETUP packet */
+ for (i = 0; i < 8; i++) {
+ if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) {
+bad_setup:
+ DMSG("SETUP %d!\n", i);
+ goto stall;
+ }
+ u.raw [i] = (u8) UDDR0;
+ }
+ if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0))
+ goto bad_setup;
+
+got_setup:
+ DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ u.r.bRequestType, u.r.bRequest,
+ le16_to_cpu(u.r.wValue),
+ le16_to_cpu(u.r.wIndex),
+ le16_to_cpu(u.r.wLength));
+
+ /* cope with automagic for some standard requests. */
+ dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD;
+ dev->req_config = 0;
+ dev->req_pending = 1;
+ switch (u.r.bRequest) {
+ /* hardware restricts gadget drivers here! */
+ case USB_REQ_SET_CONFIGURATION:
+ if (u.r.bRequestType == USB_RECIP_DEVICE) {
+ /* reflect hardware's automagic
+ * up to the gadget driver.
+ */
+config_change:
+ dev->req_config = 1;
+ clear_ep_state(dev);
+ /* if !has_cfr, there's no synch
+ * else use AREN (later) not SA|OPR
+ * USIR0_IR0 acts edge sensitive
+ */
+ }
+ break;
+ /* ... and here, even more ... */
+ case USB_REQ_SET_INTERFACE:
+ if (u.r.bRequestType == USB_RECIP_INTERFACE) {
+ /* udc hardware is broken by design:
+ * - altsetting may only be zero;
+ * - hw resets all interfaces' eps;
+ * - ep reset doesn't include halt(?).
+ */
+ DMSG("broken set_interface (%d/%d)\n",
+ le16_to_cpu(u.r.wIndex),
+ le16_to_cpu(u.r.wValue));
+ goto config_change;
+ }
+ break;
+ /* hardware was supposed to hide this */
+ case USB_REQ_SET_ADDRESS:
+ if (u.r.bRequestType == USB_RECIP_DEVICE) {
+ ep0start(dev, 0, "address");
+ return;
+ }
+ break;
+ }
+
+ if (u.r.bRequestType & USB_DIR_IN)
+ dev->ep0state = EP0_IN_DATA_PHASE;
+ else
+ dev->ep0state = EP0_OUT_DATA_PHASE;
+
+ i = dev->driver->setup(&dev->gadget, &u.r);
+ if (i < 0) {
+ /* hardware automagic preventing STALL... */
+ if (dev->req_config) {
+ /* hardware sometimes neglects to tell
+ * us about config change events, so
+ * later ones may fail...
+ */
+ WARNING("config change %02x fail %d?\n",
+ u.r.bRequest, i);
+ return;
+ /* TODO experiment: if has_cfr,
+ * hardware didn't ACK; maybe we
+ * could actually STALL!
+ */
+ }
+ DBG(DBG_VERBOSE, "protocol STALL, "
+ "%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
+stall:
+ /* the watchdog timer helps deal with cases
+ * where udc seems to clear FST wrongly, and
+ * then NAKs instead of STALLing.
+ */
+ ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
+ start_watchdog(dev);
+ dev->ep0state = EP0_STALL;
+
+ /* deferred i/o == no response yet */
+ } else if (dev->req_pending) {
+ if (likely(dev->ep0state == EP0_IN_DATA_PHASE
+ || dev->req_std || u.r.wLength))
+ ep0start(dev, 0, "defer");
+ else
+ ep0start(dev, UDCCS0_IPR, "defer/IPR");
+ }
+
+ /* expect at least one data or status stage irq */
+ return;
+
+ } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
+ == (UDCCS0_OPR|UDCCS0_SA))) {
+ unsigned i;
+
+ /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
+ * still observed on a pxa255 a0.
+ */
+ DBG(DBG_VERBOSE, "e131\n");
+ nuke(ep, -EPROTO);
+
+ /* read SETUP data, but don't trust it too much */
+ for (i = 0; i < 8; i++)
+ u.raw [i] = (u8) UDDR0;
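+ /* sanity-check what was read: a recipient code beyond
+ * "other" or an all-zeroes packet means fifo garbage
+ * rather than a real SETUP, so stall
+ */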
+ if ((u.r.bRequestType & USB_RECIP_MASK)
+ > USB_RECIP_OTHER)
+ goto stall;
+ if (u.word [0] == 0 && u.word [1] == 0)
+ goto stall;
+ goto got_setup;
+ } else {
+ /* some random early IRQ:
+ * - we acked FST
+ * - IPR cleared
+ * - OPR got set, without SA (likely status stage)
+ */
+ udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR));
+ }
+ break;
+ case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
+ if (udccs0 & UDCCS0_OPR) {
+ udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF);
+ DBG(DBG_VERBOSE, "ep0in premature status\n");
+ if (req)
+ done(ep, req, 0);
+ ep0_idle(dev);
+ } else /* irq was IPR clearing */ {
+ if (req) {
+ /* this IN packet might finish the request */
+ (void) write_ep0_fifo(ep, req);
+ } /* else IN token before response was written */
+ }
+ break;
+ case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
+ if (udccs0 & UDCCS0_OPR) {
+ if (req) {
+ /* this OUT packet might finish the request */
+ if (read_ep0_fifo(ep, req))
+ done(ep, req, 0);
+ /* else more OUT packets expected */
+ } /* else OUT token before read was issued */
+ } else /* irq was IPR clearing */ {
+ DBG(DBG_VERBOSE, "ep0out premature status\n");
+ if (req)
+ done(ep, req, 0);
+ ep0_idle(dev);
+ }
+ break;
+ case EP0_END_XFER:
+ if (req)
+ done(ep, req, 0);
+ /* ack control-IN status (maybe in-zlp was skipped)
+ * also appears after some config change events.
+ */
+ if (udccs0 & UDCCS0_OPR)
+ udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
+ ep0_idle(dev);
+ break;
+ case EP0_STALL:
+ udc_ep0_set_UDCCS(dev, UDCCS0_FST);
+ break;
+ }
+ udc_set_reg(dev, USIR0, USIR0_IR0);
+}
+
+static void handle_ep(struct pxa25x_ep *ep)
+{
+ struct pxa25x_request *req;
+ int is_in = ep->bEndpointAddress & USB_DIR_IN;
+ int completed;
+ u32 udccs, tmp;
+
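+ /* loop while requests complete, so back-to-back packets
+ * already sitting in the fifo are all serviced from this
+ * one interrupt
+ */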
+ do {
+ completed = 0;
+ if (likely (!list_empty(&ep->queue)))
+ req = list_entry(ep->queue.next,
+ struct pxa25x_request, queue);
+ else
+ req = NULL;
+
+ // TODO check FST handling
+
+ udccs = udc_ep_get_UDCCS(ep);
+ if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
+ tmp = UDCCS_BI_TUR;
+ if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
+ tmp |= UDCCS_BI_SST;
+ tmp &= udccs;
+ if (likely (tmp))
+ udc_ep_set_UDCCS(ep, tmp);
+ if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
+ completed = write_fifo(ep, req);
+
+ } else { /* irq from RPC (or for ISO, ROF) */
+ if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
+ tmp = UDCCS_BO_SST | UDCCS_BO_DME;
+ else
+ tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
+ tmp &= udccs;
+ if (likely(tmp))
+ udc_ep_set_UDCCS(ep, tmp);
+
+ /* fifos can hold packets, ready for reading... */
+ if (likely(req)) {
+ completed = read_fifo(ep, req);
+ } else
+ pio_irq_disable(ep);
+ }
+ ep->pio_irqs++;
+ } while (completed);
+}
+
+/*
+ * pxa25x_udc_irq - interrupt handler
+ *
+ * avoid delays in ep0 processing. the control handshaking isn't always
+ * under software control (pxa250c0 and the pxa255 are better), and delays
+ * could cause usb protocol errors.
+ */
+static irqreturn_t
+pxa25x_udc_irq(int irq, void *_dev)
+{
+ struct pxa25x_udc *dev = _dev;
+ int handled;
+
+ dev->stats.irqs++;
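+ /* rescan until no handled source remains; device events
+ * (UDCCR) and endpoint events (USIR0/1) share this irq
+ */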
+ do {
+ u32 udccr = udc_get_reg(dev, UDCCR);
+
+ handled = 0;
+
+ /* SUSpend Interrupt Request */
+ if (unlikely(udccr & UDCCR_SUSIR)) {
+ udc_ack_int_UDCCR(dev, UDCCR_SUSIR);
+ handled = 1;
+ DBG(DBG_VERBOSE, "USB suspend\n");
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+ ep0_idle (dev);
+ }
+
+ /* RESume Interrupt Request */
+ if (unlikely(udccr & UDCCR_RESIR)) {
+ udc_ack_int_UDCCR(dev, UDCCR_RESIR);
+ handled = 1;
+ DBG(DBG_VERBOSE, "USB resume\n");
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ }
+
+ /* ReSeT Interrupt Request - USB reset */
+ if (unlikely(udccr & UDCCR_RSTIR)) {
+ udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
+ handled = 1;
+
+ if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) {
+ DBG(DBG_VERBOSE, "USB reset start\n");
+
+ /* reset driver and endpoints,
+ * in case that's not yet done
+ */
+ reset_gadget(dev, dev->driver);
+
+ } else {
+ DBG(DBG_VERBOSE, "USB reset end\n");
+ dev->gadget.speed = USB_SPEED_FULL;
+ memset(&dev->stats, 0, sizeof dev->stats);
+ /* driver and endpoints are still reset */
+ }
+
+ } else {
+ u32 usir0 = udc_get_reg(dev, USIR0) &
+ ~udc_get_reg(dev, UICR0);
+ u32 usir1 = udc_get_reg(dev, USIR1) &
+ ~udc_get_reg(dev, UICR1);
+ int i;
+
+ if (unlikely (!usir0 && !usir1))
+ continue;
+
+ DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
+
+ /* control traffic */
+ if (usir0 & USIR0_IR0) {
+ dev->ep[0].pio_irqs++;
+ handle_ep0(dev);
+ handled = 1;
+ }
+
+ /* endpoint data transfers */
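+ /* USIR bits are write-one-to-clear: writing the serviced
+ * bit back acknowledges that endpoint's interrupt
+ */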
+ for (i = 0; i < 8; i++) {
+ u32 tmp = 1 << i;
+
+ if (i && (usir0 & tmp)) {
+ handle_ep(&dev->ep[i]);
+ udc_set_reg(dev, USIR0,
+ udc_get_reg(dev, USIR0) | tmp);
+ handled = 1;
+ }
+#ifndef CONFIG_USB_PXA25X_SMALL
+ if (usir1 & tmp) {
+ handle_ep(&dev->ep[i+8]);
+ udc_set_reg(dev, USIR1,
+ udc_get_reg(dev, USIR1) | tmp);
+ handled = 1;
+ }
+#endif
+ }
+ }
+
+ /* we could also ask for 1 msec SOF (SIR) interrupts */
+
+ } while (handled);
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void nop_release (struct device *dev)
+{
+ DMSG("%s %s\n", __func__, dev_name(dev));
+}
+
+/* this uses load-time allocation and initialization (instead of
+ * doing it at run-time) to save code, eliminate fault paths, and
+ * be more obviously correct.
+ */
+static struct pxa25x_udc memory = {
+ .gadget = {
+ .ops = &pxa25x_udc_ops,
+ .ep0 = &memory.ep[0].ep,
+ .name = driver_name,
+ .dev = {
+ .init_name = "gadget",
+ .release = nop_release,
+ },
+ },
+
+ /* control endpoint */
+ .ep[0] = {
+ .ep = {
+ .name = ep0name,
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = EP0_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .dev = &memory,
+ .regoff_udccs = UDCCS0,
+ .regoff_uddr = UDDR0,
+ },
+
+ /* first group of endpoints */
+ .ep[1] = {
+ .ep = {
+ .name = "ep1in-bulk",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = BULK_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 1,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .regoff_udccs = UDCCS1,
+ .regoff_uddr = UDDR1,
+ },
+ .ep[2] = {
+ .ep = {
+ .name = "ep2out-bulk",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_OUT),
+ },
+ .dev = &memory,
+ .fifo_size = BULK_FIFO_SIZE,
+ .bEndpointAddress = 2,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .regoff_udccs = UDCCS2,
+ .regoff_ubcr = UBCR2,
+ .regoff_uddr = UDDR2,
+ },
+#ifndef CONFIG_USB_PXA25X_SMALL
+ .ep[3] = {
+ .ep = {
+ .name = "ep3in-iso",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = ISO_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 3,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .regoff_udccs = UDCCS3,
+ .regoff_uddr = UDDR3,
+ },
+ .ep[4] = {
+ .ep = {
+ .name = "ep4out-iso",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_OUT),
+ },
+ .dev = &memory,
+ .fifo_size = ISO_FIFO_SIZE,
+ .bEndpointAddress = 4,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .regoff_udccs = UDCCS4,
+ .regoff_ubcr = UBCR4,
+ .regoff_uddr = UDDR4,
+ },
+ .ep[5] = {
+ .ep = {
+ .name = "ep5in-int",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = INT_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = INT_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 5,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .regoff_udccs = UDCCS5,
+ .regoff_uddr = UDDR5,
+ },
+
+ /* second group of endpoints */
+ .ep[6] = {
+ .ep = {
+ .name = "ep6in-bulk",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = BULK_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 6,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .regoff_udccs = UDCCS6,
+ .regoff_uddr = UDDR6,
+ },
+ .ep[7] = {
+ .ep = {
+ .name = "ep7out-bulk",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_OUT),
+ },
+ .dev = &memory,
+ .fifo_size = BULK_FIFO_SIZE,
+ .bEndpointAddress = 7,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .regoff_udccs = UDCCS7,
+ .regoff_ubcr = UBCR7,
+ .regoff_uddr = UDDR7,
+ },
+ .ep[8] = {
+ .ep = {
+ .name = "ep8in-iso",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = ISO_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 8,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .regoff_udccs = UDCCS8,
+ .regoff_uddr = UDDR8,
+ },
+ .ep[9] = {
+ .ep = {
+ .name = "ep9out-iso",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_OUT),
+ },
+ .dev = &memory,
+ .fifo_size = ISO_FIFO_SIZE,
+ .bEndpointAddress = 9,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .regoff_udccs = UDCCS9,
+ .regoff_ubcr = UBCR9,
+ .regoff_uddr = UDDR9,
+ },
+ .ep[10] = {
+ .ep = {
+ .name = "ep10in-int",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = INT_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = INT_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 10,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .regoff_udccs = UDCCS10,
+ .regoff_uddr = UDDR10,
+ },
+
+ /* third group of endpoints */
+ .ep[11] = {
+ .ep = {
+ .name = "ep11in-bulk",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = BULK_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 11,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .regoff_udccs = UDCCS11,
+ .regoff_uddr = UDDR11,
+ },
+ .ep[12] = {
+ .ep = {
+ .name = "ep12out-bulk",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = BULK_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_OUT),
+ },
+ .dev = &memory,
+ .fifo_size = BULK_FIFO_SIZE,
+ .bEndpointAddress = 12,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .regoff_udccs = UDCCS12,
+ .regoff_ubcr = UBCR12,
+ .regoff_uddr = UDDR12,
+ },
+ .ep[13] = {
+ .ep = {
+ .name = "ep13in-iso",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = ISO_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 13,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .regoff_udccs = UDCCS13,
+ .regoff_uddr = UDDR13,
+ },
+ .ep[14] = {
+ .ep = {
+ .name = "ep14out-iso",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = ISO_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_OUT),
+ },
+ .dev = &memory,
+ .fifo_size = ISO_FIFO_SIZE,
+ .bEndpointAddress = 14,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .regoff_udccs = UDCCS14,
+ .regoff_ubcr = UBCR14,
+ .regoff_uddr = UDDR14,
+ },
+ .ep[15] = {
+ .ep = {
+ .name = "ep15in-int",
+ .ops = &pxa25x_ep_ops,
+ .maxpacket = INT_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_IN),
+ },
+ .dev = &memory,
+ .fifo_size = INT_FIFO_SIZE,
+ .bEndpointAddress = USB_DIR_IN | 15,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .regoff_udccs = UDCCS15,
+ .regoff_uddr = UDDR15,
+ },
+#endif /* !CONFIG_USB_PXA25X_SMALL */
+};
+
+#define CP15R0_VENDOR_MASK 0xffffe000
+
+#if defined(CONFIG_ARCH_PXA)
+#define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
+
+#elif defined(CONFIG_ARCH_IXP4XX)
+#define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
+
+#endif
+
+#define CP15R0_PROD_MASK 0x000003f0
+#define PXA25x 0x00000100 /* and PXA26x */
+#define PXA210 0x00000120
+
+#define CP15R0_REV_MASK 0x0000000f
+
+#define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
+
+#define PXA255_A0 0x00000106 /* or PXA260_B1 */
+#define PXA250_C0 0x00000105 /* or PXA26x_B0 */
+#define PXA250_B2 0x00000104
+#define PXA250_B1 0x00000103 /* or PXA260_A0 */
+#define PXA250_B0 0x00000102
+#define PXA250_A1 0x00000101
+#define PXA250_A0 0x00000100
+
+#define PXA210_C0 0x00000125
+#define PXA210_B2 0x00000124
+#define PXA210_B1 0x00000123
+#define PXA210_B0 0x00000122
+#define IXP425_A0 0x000001c1
+#define IXP425_B0 0x000001f1
+#define IXP465_AD 0x00000200
+
+/*
+ * probe - binds to the platform device
+ */
+static int pxa25x_udc_probe(struct platform_device *pdev)
+{
+ struct pxa25x_udc *dev = &memory;
+ int retval, irq;
+ u32 chiprev;
+
+ pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
+
+ /* insist on Intel/ARM/XScale */
+ asm("mrc p15, 0, %0, c0, c0" : "=r" (chiprev));
+ if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
+ pr_err("%s: not XScale!\n", driver_name);
+ return -ENODEV;
+ }
+
+ /* trigger chiprev-specific logic */
+ switch (chiprev & CP15R0_PRODREV_MASK) {
+#if defined(CONFIG_ARCH_PXA)
+ case PXA255_A0:
+ dev->has_cfr = 1;
+ break;
+ case PXA250_A0:
+ case PXA250_A1:
+ /* A0/A1 "not released"; ep 13, 15 unusable */
+ fallthrough;
+ case PXA250_B2: case PXA210_B2:
+ case PXA250_B1: case PXA210_B1:
+ case PXA250_B0: case PXA210_B0:
+ /* OUT-DMA is broken ... */
+ fallthrough;
+ case PXA250_C0: case PXA210_C0:
+ break;
+#elif defined(CONFIG_ARCH_IXP4XX)
+ case IXP425_A0:
+ case IXP425_B0:
+ case IXP465_AD:
+ dev->has_cfr = 1;
+ break;
+#endif
+ default:
+ pr_err("%s: unrecognized processor: %08x\n",
+ driver_name, chiprev);
+ /* iop3xx, ixp4xx, ... */
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->regs))
+ return PTR_ERR(dev->regs);
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+
+ pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
+ dev->has_cfr ? "" : " (!cfr)",
+ SIZE_STR "(pio)"
+ );
+
+ /* other non-static parts of init */
+ dev->dev = &pdev->dev;
+ dev->mach = dev_get_platdata(&pdev->dev);
+
+ dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+
+ if (gpio_is_valid(dev->mach->gpio_pullup)) {
+ retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup,
+ "pca25x_udc GPIO PULLUP");
+ if (retval) {
+ dev_dbg(&pdev->dev,
+ "can't get pullup gpio %d, err: %d\n",
+ dev->mach->gpio_pullup, retval);
+ goto err;
+ }
+ gpio_direction_output(dev->mach->gpio_pullup, 0);
+ }
+
+ timer_setup(&dev->timer, udc_watchdog, 0);
+
+ the_controller = dev;
+ platform_set_drvdata(pdev, dev);
+
+ udc_disable(dev);
+ udc_reinit(dev);
+
+ dev->vbus = 0;
+
+ /* irq setup after old hardware state is cleaned up */
+ retval = devm_request_irq(&pdev->dev, irq, pxa25x_udc_irq, 0,
+ driver_name, dev);
+ if (retval != 0) {
+ pr_err("%s: can't get irq %d, err %d\n",
+ driver_name, irq, retval);
+ goto err;
+ }
+ dev->got_irq = 1;
+
+#ifdef CONFIG_ARCH_LUBBOCK
+ if (machine_is_lubbock()) {
+ dev->usb_irq = platform_get_irq(pdev, 1);
+ if (dev->usb_irq < 0)
+ return dev->usb_irq;
+
+ dev->usb_disc_irq = platform_get_irq(pdev, 2);
+ if (dev->usb_disc_irq < 0)
+ return dev->usb_disc_irq;
+
+ retval = devm_request_irq(&pdev->dev, dev->usb_disc_irq,
+ lubbock_vbus_irq, 0, driver_name,
+ dev);
+ if (retval != 0) {
+ pr_err("%s: can't get irq %i, err %d\n",
+ driver_name, dev->usb_disc_irq, retval);
+ goto err;
+ }
+ retval = devm_request_irq(&pdev->dev, dev->usb_irq,
+ lubbock_vbus_irq, 0, driver_name,
+ dev);
+ if (retval != 0) {
+ pr_err("%s: can't get irq %i, err %d\n",
+ driver_name, dev->usb_irq, retval);
+ goto err;
+ }
+ } else
+#endif
+ create_debug_files(dev);
+
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (!retval)
+ return retval;
+
+ remove_debug_files(dev);
+ err:
+ if (!IS_ERR_OR_NULL(dev->transceiver))
+ dev->transceiver = NULL;
+ return retval;
+}
+
+static void pxa25x_udc_shutdown(struct platform_device *_dev)
+{
+ pullup_off();
+}
+
+static int pxa25x_udc_remove(struct platform_device *pdev)
+{
+ struct pxa25x_udc *dev = platform_get_drvdata(pdev);
+
+ if (dev->driver)
+ return -EBUSY;
+
+ usb_del_gadget_udc(&dev->gadget);
+ dev->pullup = 0;
+ pullup(dev);
+
+ remove_debug_files(dev);
+
+ if (!IS_ERR_OR_NULL(dev->transceiver))
+ dev->transceiver = NULL;
+
+ the_controller = NULL;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+
+/* USB suspend (controlled by the host) and system suspend (controlled
+ * by the PXA) don't necessarily work well together. If USB is active,
+ * the 48 MHz clock is required; so the system can't enter 33 MHz idle
+ * mode, or any deeper PM saving state.
+ *
+ * For now, we punt and forcibly disconnect from the USB host when PXA
+ * enters any suspend state. While we're disconnected, we always disable
+ * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
+ * Boards without software pullup control shouldn't use those states.
+ * VBUS IRQs should probably be ignored so that the PXA device just acts
+ * "dead" to USB hosts until system resume.
+ */
+static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct pxa25x_udc *udc = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
+ WARNING("USB host won't detect disconnect!\n");
+ udc->suspended = 1;
+
+ local_irq_save(flags);
+ pullup(udc);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pxa25x_udc_resume(struct platform_device *dev)
+{
+ struct pxa25x_udc *udc = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ udc->suspended = 0;
+ local_irq_save(flags);
+ pullup(udc);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+#else
+#define pxa25x_udc_suspend NULL
+#define pxa25x_udc_resume NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+ .shutdown = pxa25x_udc_shutdown,
+ .probe = pxa25x_udc_probe,
+ .remove = pxa25x_udc_remove,
+ .suspend = pxa25x_udc_suspend,
+ .resume = pxa25x_udc_resume,
+ .driver = {
+ .name = "pxa25x-udc",
+ },
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa25x-udc");
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
new file mode 100644
index 000000000..6ab6047ed
--- /dev/null
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Intel PXA25x on-chip full speed USB device controller
+ *
+ * Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
+ * Copyright (C) 2003 David Brownell
+ */
+
+#ifndef __LINUX_USB_GADGET_PXA25X_H
+#define __LINUX_USB_GADGET_PXA25X_H
+
+#include <linux/types.h>
+
+/*-------------------------------------------------------------------------*/
+
+/* pxa25x has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
+#define UFNRH_SIR (1 << 7) /* SOF interrupt request */
+#define UFNRH_SIM (1 << 6) /* SOF interrupt mask */
+#define UFNRH_IPE14 (1 << 5) /* ISO packet error, ep14 */
+#define UFNRH_IPE9 (1 << 4) /* ISO packet error, ep9 */
+#define UFNRH_IPE4 (1 << 3) /* ISO packet error, ep4 */
+
+/* pxa255 has this (move to include/asm-arm/arch-pxa/pxa-regs.h) */
+#define UDCCFR UDC_RES2 /* UDC Control Function Register */
+#define UDCCFR_AREN (1 << 7) /* ACK response enable (now) */
+#define UDCCFR_ACM (1 << 2) /* ACK control mode (wait for AREN) */
+
+/* latest pxa255 errata define new "must be one" bits in UDCCFR */
+#define UDCCFR_MB1 (0xff & ~(UDCCFR_AREN|UDCCFR_ACM))
+
+/*-------------------------------------------------------------------------*/
+
+struct pxa25x_udc;
+
+struct pxa25x_ep {
+ struct usb_ep ep;
+ struct pxa25x_udc *dev;
+
+ struct list_head queue;
+ unsigned long pio_irqs;
+
+ unsigned short fifo_size;
+ u8 bEndpointAddress;
+ u8 bmAttributes;
+
+ unsigned stopped : 1;
+ unsigned dma_fixup : 1;
+
+ /* UDCCS = UDC Control/Status for this EP
+ * UBCR = UDC Byte Count Remaining (contents of OUT fifo)
+ * UDDR = UDC Endpoint Data Register (the fifo)
+ * DRCM = DMA Request Channel Map
+ */
+ u32 regoff_udccs;
+ u32 regoff_ubcr;
+ u32 regoff_uddr;
+};
+
+struct pxa25x_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+enum ep0_state {
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE,
+ EP0_OUT_DATA_PHASE,
+ EP0_END_XFER,
+ EP0_STALL,
+};
+
+#define EP0_FIFO_SIZE ((unsigned)16)
+#define BULK_FIFO_SIZE ((unsigned)64)
+#define ISO_FIFO_SIZE ((unsigned)256)
+#define INT_FIFO_SIZE ((unsigned)8)
+
+struct udc_stats {
+ struct ep0stats {
+ unsigned long ops;
+ unsigned long bytes;
+ } read, write;
+ unsigned long irqs;
+};
+
+#ifdef CONFIG_USB_PXA25X_SMALL
+/* when memory's tight, SMALL config saves code+data. */
+#define PXA_UDC_NUM_ENDPOINTS 3
+#endif
+
+#ifndef PXA_UDC_NUM_ENDPOINTS
+#define PXA_UDC_NUM_ENDPOINTS 16
+#endif
+
+struct pxa25x_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ enum ep0_state ep0state;
+ struct udc_stats stats;
+ unsigned got_irq : 1,
+ vbus : 1,
+ pullup : 1,
+ has_cfr : 1,
+ req_pending : 1,
+ req_std : 1,
+ req_config : 1,
+ suspended : 1,
+ active : 1;
+
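+/* the ep0 re-stall watchdog fires roughly 5 ms out (HZ/200 jiffies) */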
+#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
+ struct timer_list timer;
+
+ struct device *dev;
+ struct clk *clk;
+ struct pxa2xx_udc_mach_info *mach;
+ struct usb_phy *transceiver;
+ u64 dma_mask;
+ struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];
+ void __iomem *regs;
+ int usb_irq;
+ int usb_disc_irq;
+};
+#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
+
+/*-------------------------------------------------------------------------*/
+
+static struct pxa25x_udc *the_controller;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Debugging support vanishes in non-debug builds. DBG_NORMAL should be
+ * mostly silent during normal use/testing, with no timing side-effects.
+ */
+#define DBG_NORMAL 1 /* error paths, device state transitions */
+#define DBG_VERBOSE 2 /* add some success path trace info */
+#define DBG_NOISY 3 /* ... even more: request level */
+#define DBG_VERY_NOISY 4 /* ... even more: packet level */
+
+#define DMSG(stuff...) pr_debug("udc: " stuff)
+
+#ifdef DEBUG
+
+static const char *state_name[] = {
+ "EP0_IDLE",
+ "EP0_IN_DATA_PHASE", "EP0_OUT_DATA_PHASE",
+ "EP0_END_XFER", "EP0_STALL"
+};
+
+#ifdef VERBOSE_DEBUG
+# define UDC_DEBUG DBG_VERBOSE
+#else
+# define UDC_DEBUG DBG_NORMAL
+#endif
+
+static void __maybe_unused
+dump_udccr(const char *label)
+{
+ u32 udccr = UDCCR;
+ DMSG("%s %02X =%s%s%s%s%s%s%s%s\n",
+ label, udccr,
+ (udccr & UDCCR_REM) ? " rem" : "",
+ (udccr & UDCCR_RSTIR) ? " rstir" : "",
+ (udccr & UDCCR_SRM) ? " srm" : "",
+ (udccr & UDCCR_SUSIR) ? " susir" : "",
+ (udccr & UDCCR_RESIR) ? " resir" : "",
+ (udccr & UDCCR_RSM) ? " rsm" : "",
+ (udccr & UDCCR_UDA) ? " uda" : "",
+ (udccr & UDCCR_UDE) ? " ude" : "");
+}
+
+static void __maybe_unused
+dump_udccs0(const char *label)
+{
+ u32 udccs0 = UDCCS0;
+
+ DMSG("%s %s %02X =%s%s%s%s%s%s%s%s\n",
+ label, state_name[the_controller->ep0state], udccs0,
+ (udccs0 & UDCCS0_SA) ? " sa" : "",
+ (udccs0 & UDCCS0_RNE) ? " rne" : "",
+ (udccs0 & UDCCS0_FST) ? " fst" : "",
+ (udccs0 & UDCCS0_SST) ? " sst" : "",
+ (udccs0 & UDCCS0_DRWF) ? " dwrf" : "",
+ (udccs0 & UDCCS0_FTF) ? " ftf" : "",
+ (udccs0 & UDCCS0_IPR) ? " ipr" : "",
+ (udccs0 & UDCCS0_OPR) ? " opr" : "");
+}
+
+static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *);
+
+static void __maybe_unused
+dump_state(struct pxa25x_udc *dev)
+{
+ u32 tmp;
+ unsigned i;
+
+ DMSG("%s, uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
+ state_name[dev->ep0state],
+ UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
+ dump_udccr("udccr");
+ if (dev->has_cfr) {
+ tmp = UDCCFR;
+ DMSG("udccfr %02X =%s%s\n", tmp,
+ (tmp & UDCCFR_AREN) ? " aren" : "",
+ (tmp & UDCCFR_ACM) ? " acm" : "");
+ }
+
+ if (!dev->driver) {
+ DMSG("no gadget driver bound\n");
+ return;
+ } else
+ DMSG("ep0 driver '%s'\n", dev->driver->driver.name);
+
+ dump_udccs0 ("udccs0");
+ DMSG("ep0 IN %lu/%lu, OUT %lu/%lu\n",
+ dev->stats.write.bytes, dev->stats.write.ops,
+ dev->stats.read.bytes, dev->stats.read.ops);
+
+ for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
+ if (dev->ep[i].ep.desc == NULL)
+ continue;
+ DMSG ("udccs%d = %02x\n", i, udc_ep_get_UDCCS(&dev->ep[i]));
+ }
+}
+
+#else
+
+#define dump_udccr(x) do{}while(0)
+#define dump_udccs0(x) do{}while(0)
+#define dump_state(x) do{}while(0)
+
+#define UDC_DEBUG ((unsigned)0)
+
+#endif
+
+#define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
+
+#define ERR(stuff...) pr_err("udc: " stuff)
+#define WARNING(stuff...) pr_warn("udc: " stuff)
+#define INFO(stuff...) pr_info("udc: " stuff)
+
+
+#endif /* __LINUX_USB_GADGET_PXA25X_H */
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
new file mode 100644
index 000000000..0ecdfd2ba
--- /dev/null
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -0,0 +1,2562 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Handles the Intel 27x USB Device Controller (UDC)
+ *
+ * Inspired by original driver by Frank Becker, David Brownell, and others.
+ * Copyright (C) 2008 Robert Jarzmik
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/byteorder/generic.h>
+#include <linux/platform_data/pxa2xx_udc.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
+
+#include "pxa27x_udc.h"
+
+/*
+ * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
+ * series processors.
+ *
+ * Such controller drivers work with a gadget driver. The gadget driver
+ * returns descriptors, implements configuration and data protocols used
+ * by the host to interact with this device, and allocates endpoints to
+ * the different protocol interfaces. The controller driver virtualizes
+ * usb hardware so that the gadget drivers will be more portable.
+ *
+ * This UDC hardware wants to implement a bit too much USB protocol. The
+ * biggest issues are: that the endpoints have to be set up before the
+ * controller can be enabled (minor, and not uncommon); and each endpoint
+ * can only have one configuration, interface and alternative interface
+ * number (major, and very unusual). Once set up, these cannot be changed
+ * without a controller reset.
+ *
+ * The workaround is to set up, statically in the pxa_udc structure, all the
+ * combinations needed by the gadgets that will work with this driver.
+ * See pxa_udc, udc_usb_ep versus pxa_ep, and the matching function
+ * find_pxa_ep. (You could modify this if needed. Some drivers have a
+ * "fifo_mode" module parameter to facilitate such changes.)
+ *
+ * The combinations have been tested with these gadgets:
+ * - zero gadget
+ * - file storage gadget
+ * - ether gadget
+ *
+ * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
+ * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
+ *
+ * All requests are handled the same way:
+ * - the driver first tries to service the request with direct fifo IO
+ * - if the fifo is not big enough, the remainder is sent/received from
+ * the interrupt handler.
+ */
+
+#define DRIVER_VERSION "2008-04-18"
+#define DRIVER_DESC "PXA 27x USB Device Controller driver"
+
+static const char driver_name[] = "pxa27x_udc";
+static struct pxa_udc *the_controller;
+
+static void handle_ep(struct pxa_ep *ep);
+
+/*
+ * Debug filesystem
+ */
+#ifdef CONFIG_USB_GADGET_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+static int state_dbg_show(struct seq_file *s, void *p)
+{
+ struct pxa_udc *udc = s->private;
+ u32 tmp;
+
+ if (!udc->driver)
+ return -ENODEV;
+
+ /* basic device status */
+ seq_printf(s, DRIVER_DESC "\n"
+ "%s version: %s\n"
+ "Gadget driver: %s\n",
+ driver_name, DRIVER_VERSION,
+ udc->driver ? udc->driver->driver.name : "(none)");
+
+ tmp = udc_readl(udc, UDCCR);
+ seq_printf(s,
+ "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), con=%d,inter=%d,altinter=%d\n",
+ tmp,
+ (tmp & UDCCR_OEN) ? " oen":"",
+ (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
+ (tmp & UDCCR_AHNP) ? " rem" : "",
+ (tmp & UDCCR_BHNP) ? " rstir" : "",
+ (tmp & UDCCR_DWRE) ? " dwre" : "",
+ (tmp & UDCCR_SMAC) ? " smac" : "",
+ (tmp & UDCCR_EMCE) ? " emce" : "",
+ (tmp & UDCCR_UDR) ? " udr" : "",
+ (tmp & UDCCR_UDA) ? " uda" : "",
+ (tmp & UDCCR_UDE) ? " ude" : "",
+ (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
+ (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
+ (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
+ /* registers for device and ep0 */
+ seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
+ udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
+ seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
+ udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
+ seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
+ seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, reconfig=%lu\n",
+ udc->stats.irqs_reset, udc->stats.irqs_suspend,
+ udc->stats.irqs_resume, udc->stats.irqs_reconfig);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(state_dbg);
+
+static int queues_dbg_show(struct seq_file *s, void *p)
+{
+ struct pxa_udc *udc = s->private;
+ struct pxa_ep *ep;
+ struct pxa27x_request *req;
+ int i, maxpkt;
+
+ if (!udc->driver)
+ return -ENODEV;
+
+ /* dump endpoint queues */
+ for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
+ ep = &udc->pxa_ep[i];
+ maxpkt = ep->fifo_size;
+ seq_printf(s, "%-12s max_pkt=%d %s\n",
+ EPNAME(ep), maxpkt, "pio");
+
+ if (list_empty(&ep->queue)) {
+ seq_puts(s, "\t(nothing queued)\n");
+ continue;
+ }
+
+ list_for_each_entry(req, &ep->queue, queue) {
+ seq_printf(s, "\treq %p len %d/%d buf %p\n",
+ &req->req, req->req.actual,
+ req->req.length, req->req.buf);
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(queues_dbg);
+
+static int eps_dbg_show(struct seq_file *s, void *p)
+{
+ struct pxa_udc *udc = s->private;
+ struct pxa_ep *ep;
+ int i;
+ u32 tmp;
+
+ if (!udc->driver)
+ return -ENODEV;
+
+ ep = &udc->pxa_ep[0];
+ tmp = udc_ep_readl(ep, UDCCSR);
+ seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n",
+ tmp,
+ (tmp & UDCCSR0_SA) ? " sa" : "",
+ (tmp & UDCCSR0_RNE) ? " rne" : "",
+ (tmp & UDCCSR0_FST) ? " fst" : "",
+ (tmp & UDCCSR0_SST) ? " sst" : "",
+ (tmp & UDCCSR0_DME) ? " dme" : "",
+ (tmp & UDCCSR0_IPR) ? " ipr" : "",
+ (tmp & UDCCSR0_OPC) ? " opc" : "");
+ for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
+ ep = &udc->pxa_ep[i];
+ tmp = i ? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
+ seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n",
+ EPNAME(ep),
+ ep->stats.in_bytes, ep->stats.in_ops,
+ ep->stats.out_bytes, ep->stats.out_ops,
+ ep->stats.irqs,
+ tmp, udc_ep_readl(ep, UDCCSR),
+ udc_ep_readl(ep, UDCBCR));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(eps_dbg);
+
+static void pxa_init_debugfs(struct pxa_udc *udc)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
+ debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
+ debugfs_create_file("queues", 0400, root, udc, &queues_dbg_fops);
+ debugfs_create_file("epstate", 0400, root, udc, &eps_dbg_fops);
+}
+
+static void pxa_cleanup_debugfs(struct pxa_udc *udc)
+{
+ debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
+}
+
+#else
+static inline void pxa_init_debugfs(struct pxa_udc *udc)
+{
+}
+
+static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
+{
+}
+#endif
+
+/**
+ * is_match_usb_pxa - check if usb_ep and pxa_ep match
+ * @udc_usb_ep: usb endpoint
+ * @ep: pxa endpoint
+ * @config: configuration required in pxa_ep
+ * @interface: interface required in pxa_ep
+ * @altsetting: altsetting required in pxa_ep
+ *
+ * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
+ */
+static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
+ int config, int interface, int altsetting)
+{
+ if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
+ return 0;
+ if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
+ return 0;
+ if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
+ return 0;
+ if ((ep->config != config) || (ep->interface != interface)
+ || (ep->alternate != altsetting))
+ return 0;
+ return 1;
+}
+
+/**
+ * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
+ * @udc: pxa udc
+ * @udc_usb_ep: udc_usb_ep structure
+ *
+ * Match udc_usb_ep against all available pxa_ep, to see if one matches.
+ * This is necessary because of a strong pxa hardware restriction: once the
+ * pxa endpoints are initialized, their configuration is frozen, and no
+ * change can be made to their address, direction, or to the configuration,
+ * interface or altsetting in which they are active ... unlike more usual
+ * models, where endpoints are roughly just addressable fifos and all
+ * configuration events (like control messages) are left to gadget drivers.
+ *
+ * Note that one point is still blurred here:
+ * - we rely on the UDCCR register's "active interface" and "active
+ * altsetting" fields. That makes no sense with regard to the USB spec,
+ * where multiple interfaces are active at the same time.
+ * - if we knew for sure that the pxa can handle multiple interfaces at the
+ * same time, assuming Intel's Developer Guide is wrong, this function
+ * should be reviewed, and a cache of (interface, altsetting) pairs should
+ * be kept in the pxa_udc structure. This function would then match against
+ * that cache instead of the "last altsetting" set up.
+ *
+ * Returns the matched pxa_ep structure or NULL if none found
+ */
+static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
+ struct udc_usb_ep *udc_usb_ep)
+{
+ int i;
+ struct pxa_ep *ep;
+ int cfg = udc->config;
+ int iface = udc->last_interface;
+ int alt = udc->last_alternate;
+
+ if (udc_usb_ep == &udc->udc_usb_ep[0])
+ return &udc->pxa_ep[0];
+
+ for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
+ ep = &udc->pxa_ep[i];
+ if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
+ return ep;
+ }
+ return NULL;
+}
+
+/**
+ * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
+ * @udc: pxa udc
+ *
+ * Context: interrupt handler
+ *
+ * Updates the pxa_ep field in each udc_usb_ep structure, if that field was
+ * previously set up (and is not NULL). The update is necessary if a
+ * configuration change or altsetting change was issued by the USB host.
+ */
+static void update_pxa_ep_matches(struct pxa_udc *udc)
+{
+ int i;
+ struct udc_usb_ep *udc_usb_ep;
+
+ for (i = 1; i < NR_USB_ENDPOINTS; i++) {
+ udc_usb_ep = &udc->udc_usb_ep[i];
+ if (udc_usb_ep->pxa_ep)
+ udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
+ }
+}
+
+/**
+ * pio_irq_enable - Enables irq generation for one endpoint
+ * @ep: udc endpoint
+ */
+static void pio_irq_enable(struct pxa_ep *ep)
+{
+ struct pxa_udc *udc = ep->dev;
+ int index = EPIDX(ep);
+ u32 udcicr0 = udc_readl(udc, UDCICR0);
+ u32 udcicr1 = udc_readl(udc, UDCICR1);
+
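+ /* each endpoint owns a two-bit enable field (packet-complete
+ * and fifo-error interrupts) in UDCICR0/1, hence the "3 <<" masks
+ */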
+ if (index < 16)
+ udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
+ else
+ udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
+}
+
+/**
+ * pio_irq_disable - Disables irq generation for one endpoint
+ * @ep: udc endpoint
+ */
+static void pio_irq_disable(struct pxa_ep *ep)
+{
+ struct pxa_udc *udc = ep->dev;
+ int index = EPIDX(ep);
+ u32 udcicr0 = udc_readl(udc, UDCICR0);
+ u32 udcicr1 = udc_readl(udc, UDCICR1);
+
+ if (index < 16)
+ udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
+ else
+ udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
+}
+
+/**
+ * udc_set_mask_UDCCR - set bits in UDCCR
+ * @udc: udc device
+ * @mask: bits to set in UDCCR
+ *
+ * Sets bits in UDCCR, leaving DME and FST bits as they were.
+ */
+static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
+{
+ u32 udccr = udc_readl(udc, UDCCR);
+ udc_writel(udc, UDCCR,
+ (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
+}
+
+/**
+ * udc_clear_mask_UDCCR - clears bits in UDCCR
+ * @udc: udc device
+ * @mask: bit to clear in UDCCR
+ *
+ * Clears bits in UDCCR, leaving DME and FST bits as they were.
+ */
+static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
+{
+ u32 udccr = udc_readl(udc, UDCCR);
+ udc_writel(udc, UDCCR,
+ (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
+}
+
+/**
+ * ep_write_UDCCSR - set bits in UDCCSR
+ * @ep: udc endpoint
+ * @mask: bits to set in UDCCSR
+ *
+ * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
+ *
+ * A special case applies to ep0: the ACM bit is always set to 1, for
+ * SET_INTERFACE and SET_CONFIGURATION.
+ */
+static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
+{
+ if (is_ep0(ep))
+ mask |= UDCCSR0_ACM;
+ udc_ep_writel(ep, UDCCSR, mask);
+}
+
+/**
+ * ep_count_bytes_remain - get how many bytes remain in the endpoint fifo
+ * @ep: udc endpoint
+ *
+ * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
+ */
+static int ep_count_bytes_remain(struct pxa_ep *ep)
+{
+ if (ep->dir_in)
+ return -EOPNOTSUPP;
+ return udc_ep_readl(ep, UDCBCR) & 0x3ff;
+}
+
+/**
+ * ep_is_empty - checks if ep has bytes ready for reading
+ * @ep: udc endpoint
+ *
+ * If endpoint is the control endpoint, checks if there are bytes in the
+ * control endpoint fifo. If endpoint is a data endpoint, checks if bytes
+ * are ready for reading on OUT endpoint.
+ *
+ * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
+ */
+static int ep_is_empty(struct pxa_ep *ep)
+{
+ int ret;
+
+ if (!is_ep0(ep) && ep->dir_in)
+ return -EOPNOTSUPP;
+ if (is_ep0(ep))
+ ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
+ else
+ ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
+ return ret;
+}
+
+/**
+ * ep_is_full - checks if ep has room to write bytes
+ * @ep: udc endpoint
+ *
+ * If endpoint is not the control endpoint and is an IN endpoint, checks if
+ * there is room to write bytes into the endpoint.
+ *
+ * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
+ */
+static int ep_is_full(struct pxa_ep *ep)
+{
+ if (is_ep0(ep))
+ return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
+ if (!ep->dir_in)
+ return -EOPNOTSUPP;
+ return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
+}
+
+/**
+ * epout_has_pkt - checks if OUT endpoint fifo has a packet available
+ * @ep: pxa endpoint
+ *
+ * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
+ */
+static int epout_has_pkt(struct pxa_ep *ep)
+{
+ if (!is_ep0(ep) && ep->dir_in)
+ return -EOPNOTSUPP;
+ if (is_ep0(ep))
+ return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
+ return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
+}
+
+/**
+ * set_ep0state - Set ep0 automata state
+ * @udc: udc device
+ * @state: state
+ */
+static void set_ep0state(struct pxa_udc *udc, int state)
+{
+ struct pxa_ep *ep = &udc->pxa_ep[0];
+ char *old_stname = EP0_STNAME(udc);
+
+ udc->ep0state = state;
+ ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
+ EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
+ udc_ep_readl(ep, UDCBCR));
+}
+
+/**
+ * ep0_idle - Put control endpoint into idle state
+ * @dev: udc device
+ */
+static void ep0_idle(struct pxa_udc *dev)
+{
+ set_ep0state(dev, WAIT_FOR_SETUP);
+}
+
+/**
+ * inc_ep_stats_reqs - Update ep stats counts
+ * @ep: physical endpoint
+ * @is_in: ep direction (USB_DIR_IN or 0)
+ *
+ */
+static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
+{
+ if (is_in)
+ ep->stats.in_ops++;
+ else
+ ep->stats.out_ops++;
+}
+
+/**
+ * inc_ep_stats_bytes - Update ep stats counts
+ * @ep: physical endpoint
+ * @count: bytes transferred on endpoint
+ * @is_in: ep direction (USB_DIR_IN or 0)
+ */
+static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
+{
+ if (is_in)
+ ep->stats.in_bytes += count;
+ else
+ ep->stats.out_bytes += count;
+}
+
+/**
+ * pxa_ep_setup - Sets up a usb physical endpoint
+ * @ep: pxa27x physical endpoint
+ *
+ * Program the pxa27x physical ep's UDCCR from its static configuration
+ */
+static void pxa_ep_setup(struct pxa_ep *ep)
+{
+ u32 new_udccr;
+
+ new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
+ | ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
+ | ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
+ | ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
+ | ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
+ | ((ep->dir_in) ? UDCCONR_ED : 0)
+ | ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
+ | UDCCONR_EE;
+
+ udc_ep_writel(ep, UDCCR, new_udccr);
+}
+
+/**
+ * pxa_eps_setup - Sets up all usb physical endpoints
+ * @dev: udc device
+ *
+ * Setup all pxa physical endpoints, except ep0
+ */
+static void pxa_eps_setup(struct pxa_udc *dev)
+{
+ unsigned int i;
+
+ dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
+
+ for (i = 1; i < NR_PXA_ENDPOINTS; i++)
+ pxa_ep_setup(&dev->pxa_ep[i]);
+}
+
+/**
+ * pxa_ep_alloc_request - Allocate usb request
+ * @_ep: usb endpoint
+ * @gfp_flags: memory allocation flags
+ *
+ * For the pxa27x, these can just wrap kmalloc/kfree. gadget drivers
+ * must still pass correctly initialized endpoints, since other controller
+ * drivers may care about how it's currently set up (dma issues etc).
+ */
+static struct usb_request *
+pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct pxa27x_request *req;
+
+ req = kzalloc(sizeof *req, gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ req->in_use = 0;
+ req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+
+ return &req->req;
+}
+
+/**
+ * pxa_ep_free_request - Free usb request
+ * @_ep: usb endpoint
+ * @_req: usb request
+ *
+ * Wrapper around kfree to free _req
+ */
+static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct pxa27x_request *req;
+
+ req = container_of(_req, struct pxa27x_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+/**
+ * ep_add_request - add a request to the endpoint's queue
+ * @ep: usb endpoint
+ * @req: usb request
+ *
+ * Context: ep->lock held
+ *
+ * Queues the request in the endpoint's queue, and enables the interrupts
+ * on the endpoint.
+ */
+static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ if (unlikely(!req))
+ return;
+ ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
+ req->req.length, udc_ep_readl(ep, UDCCSR));
+
+ req->in_use = 1;
+ list_add_tail(&req->queue, &ep->queue);
+ pio_irq_enable(ep);
+}
+
+/**
+ * ep_del_request - removes a request from the endpoint's queue
+ * @ep: usb endpoint
+ * @req: usb request
+ *
+ * Context: ep->lock held
+ *
+ * Unqueue the request from the endpoint's queue. If there are no more requests
+ * on the endpoint, and if it's not the control endpoint, interrupts are
+ * disabled on the endpoint.
+ */
+static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ if (unlikely(!req))
+ return;
+ ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
+ req->req.length, udc_ep_readl(ep, UDCCSR));
+
+ list_del_init(&req->queue);
+ req->in_use = 0;
+ if (!is_ep0(ep) && list_empty(&ep->queue))
+ pio_irq_disable(ep);
+}
+
+/**
+ * req_done - Complete a usb request
+ * @ep: pxa physical endpoint
+ * @req: pxa request
+ * @status: usb request status sent to gadget API
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held if flags not NULL, else ep->lock released
+ *
+ * Retire a pxa27x usb request. Endpoint must be locked.
+ */
+static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
+ unsigned long *pflags)
+{
+ unsigned long flags;
+
+ ep_del_request(ep, req);
+ if (likely(req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ if (status && status != -ESHUTDOWN)
+ ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
+ &req->req, status,
+ req->req.actual, req->req.length);
+
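+ /* release the caller's ep lock (if held) around the gadget's
+ * complete() callback, but run the callback itself with irqs
+ * disabled
+ */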
+ if (pflags)
+ spin_unlock_irqrestore(&ep->lock, *pflags);
+ local_irq_save(flags);
+ usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req);
+ local_irq_restore(flags);
+ if (pflags)
+ spin_lock_irqsave(&ep->lock, *pflags);
+}
+
+/**
+ * ep_end_out_req - Ends endpoint OUT request
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends endpoint OUT request (completes usb request).
+ */
+static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
+{
+ inc_ep_stats_reqs(ep, !USB_DIR_IN);
+ req_done(ep, req, 0, pflags);
+}
+
+/**
+ * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends control endpoint OUT request (completes usb request), and puts
+ * control endpoint into idle state
+ */
+static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
+{
+ set_ep0state(ep->dev, OUT_STATUS_STAGE);
+ ep_end_out_req(ep, req, pflags);
+ ep0_idle(ep->dev);
+}
+
+/**
+ * ep_end_in_req - Ends endpoint IN request
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends endpoint IN request (completes usb request).
+ */
+static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
+{
+ inc_ep_stats_reqs(ep, USB_DIR_IN);
+ req_done(ep, req, 0, pflags);
+}
+
+/**
+ * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
+ * @ep: physical endpoint
+ * @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
+ *
+ * Context: ep->lock held or released (see req_done())
+ *
+ * Ends control endpoint IN request (completes usb request), and puts
+ * control endpoint into status state
+ */
+static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
+{
+ set_ep0state(ep->dev, IN_STATUS_STAGE);
+ ep_end_in_req(ep, req, pflags);
+}
+
+/**
+ * nuke - Dequeue all requests
+ * @ep: pxa endpoint
+ * @status: usb request status
+ *
+ * Context: ep->lock released
+ *
+ * Dequeues all requests on an endpoint. As a side effect, interrupts will be
+ * disabled on that endpoint (because no more requests).
+ */
+static void nuke(struct pxa_ep *ep, int status)
+{
+ struct pxa27x_request *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct pxa27x_request, queue);
+ req_done(ep, req, status, &flags);
+ }
+ spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+/**
+ * read_packet - transfer 1 packet from an OUT endpoint into request
+ * @ep: pxa physical endpoint
+ * @req: usb request
+ *
+ * Takes bytes from OUT endpoint and transfers them into the usb request.
+ * If there is less space in request than bytes received in OUT endpoint,
+ * bytes are left in the OUT endpoint.
+ *
+ * Returns how many bytes were actually transferred
+ */
+static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ u32 *buf;
+ int bytes_ep, bufferspace, count, i;
+
+ bytes_ep = ep_count_bytes_remain(ep);
+ bufferspace = req->req.length - req->req.actual;
+
+ buf = (u32 *)(req->req.buf + req->req.actual);
+ prefetchw(buf);
+
+ if (likely(!ep_is_empty(ep)))
+ count = min(bytes_ep, bufferspace);
+ else /* zlp */
+ count = 0;
+
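+ /* drain the fifo one 32-bit word at a time; a trailing partial
+ * word is still transferred as a whole register read
+ */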
+ for (i = count; i > 0; i -= 4)
+ *buf++ = udc_ep_readl(ep, UDCDR);
+ req->req.actual += count;
+
+ ep_write_UDCCSR(ep, UDCCSR_PC);
+
+ return count;
+}
+
+/**
+ * write_packet - transfer 1 packet from request into an IN endpoint
+ * @ep: pxa physical endpoint
+ * @req: usb request
+ * @max: max bytes that fit into endpoint
+ *
+ * Takes bytes from usb request, and transfers them into the physical
+ * endpoint. If there are no bytes to transfer, doesn't write anything
+ * to physical endpoint.
+ *
+ * Returns how many bytes were actually transferred.
+ */
+static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned int max)
+{
+ int length, count, remain, i;
+ u32 *buf;
+ u8 *buf_8;
+
+ buf = (u32 *)(req->req.buf + req->req.actual);
+ prefetch(buf);
+
+ length = min(req->req.length - req->req.actual, max);
+ req->req.actual += length;
+
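+ /* write the fifo with 32-bit accesses, then push the trailing
+ * length % 4 bytes through the byte-wide register
+ */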
+ remain = length & 0x3;
+ count = length & ~(0x3);
+ for (i = count; i > 0 ; i -= 4)
+ udc_ep_writel(ep, UDCDR, *buf++);
+
+ buf_8 = (u8 *)buf;
+ for (i = remain; i > 0; i--)
+ udc_ep_writeb(ep, UDCDR, *buf_8++);
+
+ ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
+ udc_ep_readl(ep, UDCCSR));
+
+ return length;
+}
+
+/**
+ * read_fifo - Transfer packets from OUT endpoint into usb request
+ * @ep: pxa physical endpoint
+ * @req: usb request
+ *
+ * Context: interrupt handler
+ *
+ * Unload as many packets as possible from the fifo we use for usb OUT
+ * transfers and put them into the request. Caller should have made sure
+ * there's at least one packet ready.
+ * Doesn't complete the request; that's the caller's job.
+ *
+ * Returns 1 if the request completed, 0 otherwise
+ */
+static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ int count, is_short, completed = 0;
+
+ while (epout_has_pkt(ep)) {
+ count = read_packet(ep, req);
+ inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
+
+ is_short = (count < ep->fifo_size);
+ ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
+ udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
+ &req->req, req->req.actual, req->req.length);
+
+ /* completion */
+ if (is_short || req->req.actual == req->req.length) {
+ completed = 1;
+ break;
+ }
+ /* finished that packet. the next one may be waiting... */
+ }
+ return completed;
+}
+
+/**
+ * write_fifo - transfer packets from usb request into an IN endpoint
+ * @ep: pxa physical endpoint
+ * @req: pxa usb request
+ *
+ * Writes as many packets as possible to an IN endpoint fifo.
+ * The irq handler will use this to write the rest later.
+ * Caller guarantees at least one packet buffer is ready (or a zlp).
+ * Doesn't complete the request; that's the caller's job.
+ *
+ * Returns 1 if request fully transferred, 0 if partial transfer
+ */
+static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ unsigned max;
+ int count, is_short, is_last = 0, completed = 0, totcount = 0;
+ u32 udccsr;
+
+ max = ep->fifo_size;
+ do {
+ udccsr = udc_ep_readl(ep, UDCCSR);
+ if (udccsr & UDCCSR_PC) {
+ ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
+ udccsr);
+ ep_write_UDCCSR(ep, UDCCSR_PC);
+ }
+ if (udccsr & UDCCSR_TRN) {
+ ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
+ udccsr);
+ ep_write_UDCCSR(ep, UDCCSR_TRN);
+ }
+
+ count = write_packet(ep, req, max);
+ inc_ep_stats_bytes(ep, count, USB_DIR_IN);
+ totcount += count;
+
+ /* last packet is usually short (or a zlp) */
+ if (unlikely(count < max)) {
+ is_last = 1;
+ is_short = 1;
+ } else {
+ if (likely(req->req.length > req->req.actual)
+ || req->req.zero)
+ is_last = 0;
+ else
+ is_last = 1;
+ /* interrupt/iso maxpacket may not fill the fifo */
+ is_short = unlikely(max < ep->fifo_size);
+ }
+
+ if (is_short)
+ ep_write_UDCCSR(ep, UDCCSR_SP);
+
+ /* requests complete when all IN data is in the FIFO */
+ if (is_last) {
+ completed = 1;
+ break;
+ }
+ } while (!ep_is_full(ep));
+
+ ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
+ totcount, is_last ? "/L" : "", is_short ? "/S" : "",
+ req->req.length - req->req.actual, &req->req);
+
+ return completed;
+}
+
+/**
+ * read_ep0_fifo - Transfer packets from control endpoint into usb request
+ * @ep: control endpoint
+ * @req: pxa usb request
+ *
+ * Special ep0 version of the above read_fifo. Reads as many bytes from control
+ * endpoint as can be read, and stores them into usb request (limited by request
+ * maximum length).
+ *
+ * Returns 0 if usb request only partially filled, 1 if fully filled
+ */
+static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ int count, is_short, completed = 0;
+
+ while (epout_has_pkt(ep)) {
+ count = read_packet(ep, req);
+ ep_write_UDCCSR(ep, UDCCSR0_OPC);
+ inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
+
+ is_short = (count < ep->fifo_size);
+ ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
+ udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
+ &req->req, req->req.actual, req->req.length);
+
+ if (is_short || req->req.actual >= req->req.length) {
+ completed = 1;
+ break;
+ }
+ }
+
+ return completed;
+}
+
+/**
+ * write_ep0_fifo - Send a request to control endpoint (ep0 in)
+ * @ep: control endpoint
+ * @req: request
+ *
+ * Context: interrupt handler
+ *
+ * Sends a request (or a part of the request) to the control endpoint (ep0 in).
+ * If the request doesn't fit, the remaining part will be sent from irq.
+ * The request is considered fully written only if either:
+ * - last write transferred all remaining bytes, but fifo was not fully filled
+ * - last write was a 0 length write
+ *
+ * Returns 1 if request fully written, 0 if request only partially sent
+ */
+static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
+{
+ unsigned count;
+ int is_last, is_short;
+
+ count = write_packet(ep, req, EP0_FIFO_SIZE);
+ inc_ep_stats_bytes(ep, count, USB_DIR_IN);
+
+ is_short = (count < EP0_FIFO_SIZE);
+ is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
+
+ /* Sends either a short packet or a 0 length packet */
+ if (unlikely(is_short))
+ ep_write_UDCCSR(ep, UDCCSR0_IPR);
+
+ ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
+ count, is_short ? "/S" : "", is_last ? "/L" : "",
+ req->req.length - req->req.actual,
+ &req->req, udc_ep_readl(ep, UDCCSR));
+
+ return is_last;
+}
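+
+/*
+ * Illustrative note (not part of the driver): sending a 20 byte descriptor
+ * on ep0 (EP0_FIFO_SIZE = 16) takes two passes through write_ep0_fifo():
+ *
+ *	pass 1: count = 16, is_short = 0, is_last = 0	(wait for irq)
+ *	pass 2: count = 4,  is_short = 1, is_last = 1	(UDCCSR0_IPR set)
+ *
+ * after which the caller moves ep0 to the status stage.
+ */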
+
+/**
+ * pxa_ep_queue - Queue a request into an endpoint (IN or OUT)
+ * @_ep: usb endpoint
+ * @_req: usb request
+ * @gfp_flags: flags
+ *
+ * Context: thread context or from the interrupt handler in the
+ * special case of ep0 setup:
+ * (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
+ *
+ * Returns 0 if successful, error otherwise
+ */
+static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct udc_usb_ep *udc_usb_ep;
+ struct pxa_ep *ep;
+ struct pxa27x_request *req;
+ struct pxa_udc *dev;
+ unsigned long flags;
+ int rc = 0;
+ int is_first_req;
+ unsigned length;
+ int recursion_detected;
+
+	if (unlikely(!_ep || !_req || !_req->complete || !_req->buf))
+		return -EINVAL;
+
+	req = container_of(_req, struct pxa27x_request, req);
+	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+
+ ep = udc_usb_ep->pxa_ep;
+ if (unlikely(!ep))
+ return -EINVAL;
+
+ dev = ep->dev;
+ if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+ ep_dbg(ep, "bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ /* iso is always one packet per request, that's the only way
+ * we can report per-packet status. that also helps with dma.
+ */
+ if (unlikely(EPXFERTYPE_is_ISO(ep)
+ && req->req.length > ep->fifo_size))
+ return -EMSGSIZE;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ recursion_detected = ep->in_handle_ep;
+
+ is_first_req = list_empty(&ep->queue);
+ ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
+ _req, is_first_req ? "yes" : "no",
+ _req->length, _req->buf);
+
+ if (!ep->enabled) {
+ _req->status = -ESHUTDOWN;
+ rc = -ESHUTDOWN;
+ goto out_locked;
+ }
+
+ if (req->in_use) {
+ ep_err(ep, "refusing to queue req %p (already queued)\n", req);
+ goto out_locked;
+ }
+
+ length = _req->length;
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ ep_add_request(ep, req);
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ if (is_ep0(ep)) {
+ switch (dev->ep0state) {
+ case WAIT_ACK_SET_CONF_INTERF:
+ if (length == 0) {
+ ep_end_in_req(ep, req, NULL);
+ } else {
+ ep_err(ep, "got a request of %d bytes while"
+ "in state WAIT_ACK_SET_CONF_INTERF\n",
+ length);
+ ep_del_request(ep, req);
+ rc = -EL2HLT;
+ }
+ ep0_idle(ep->dev);
+ break;
+ case IN_DATA_STAGE:
+ if (!ep_is_full(ep))
+ if (write_ep0_fifo(ep, req))
+ ep0_end_in_req(ep, req, NULL);
+ break;
+ case OUT_DATA_STAGE:
+ if ((length == 0) || !epout_has_pkt(ep))
+ if (read_ep0_fifo(ep, req))
+ ep0_end_out_req(ep, req, NULL);
+ break;
+ default:
+ ep_err(ep, "odd state %s to send me a request\n",
+ EP0_STNAME(ep->dev));
+ ep_del_request(ep, req);
+ rc = -EL2HLT;
+ break;
+ }
+ } else {
+ if (!recursion_detected)
+ handle_ep(ep);
+ }
+
+out:
+ return rc;
+out_locked:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ goto out;
+}
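+
+/*
+ * A minimal sketch (not part of the driver) of how a gadget driver reaches
+ * pxa_ep_queue() through the generic gadget API. 'ep', 'buf', 'len' and
+ * my_complete() are hypothetical, owned by the gadget driver:
+ *
+ *	struct usb_request *req;
+ *
+ *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *	req->buf = buf;
+ *	req->length = len;
+ *	req->complete = my_complete;
+ *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ */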
+
+/**
+ * pxa_ep_dequeue - Dequeue one request
+ * @_ep: usb endpoint
+ * @_req: usb request
+ *
+ * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
+ */
+static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct pxa_ep *ep;
+ struct udc_usb_ep *udc_usb_ep;
+ struct pxa27x_request *req = NULL, *iter;
+ unsigned long flags;
+ int rc = -EINVAL;
+
+ if (!_ep)
+ return rc;
+ udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+ ep = udc_usb_ep->pxa_ep;
+ if (!ep || is_ep0(ep))
+ return rc;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ req = iter;
+ rc = 0;
+ break;
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ if (!rc)
+ req_done(ep, req, -ECONNRESET, NULL);
+ return rc;
+}
+
+/**
+ * pxa_ep_set_halt - Halts operations on one endpoint
+ * @_ep: usb endpoint
+ * @value: 1 to force a halt (stall), 0 to clear it
+ *
+ * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
+ */
+static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct pxa_ep *ep;
+ struct udc_usb_ep *udc_usb_ep;
+ unsigned long flags;
+ int rc;
+
+
+ if (!_ep)
+ return -EINVAL;
+ udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+ ep = udc_usb_ep->pxa_ep;
+ if (!ep || is_ep0(ep))
+ return -EINVAL;
+
+ if (value == 0) {
+ /*
+ * This path (reset toggle+halt) is needed to implement
+ * SET_INTERFACE on normal hardware, but it can't be
+ * done from software on the PXA UDC, and the hardware
+ * forgets to do it as part of SET_INTERFACE automagic.
+ */
+ ep_dbg(ep, "only host can clear halt\n");
+ return -EROFS;
+ }
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ rc = -EAGAIN;
+ if (ep->dir_in && (ep_is_full(ep) || !list_empty(&ep->queue)))
+ goto out;
+
+ /* FST, FEF bits are the same for control and non control endpoints */
+ rc = 0;
+ ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
+ if (is_ep0(ep))
+ set_ep0state(ep->dev, STALL);
+
+out:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ return rc;
+}
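+
+/*
+ * Usage sketch (not part of the driver): a gadget driver stalls an endpoint
+ * through the generic wrapper, and on this UDC must rely on the host's
+ * CLEAR_FEATURE(ENDPOINT_HALT) to unstall it:
+ *
+ *	usb_ep_set_halt(ep);	reaches pxa_ep_set_halt(_ep, 1)
+ *	usb_ep_clear_halt(ep);	returns -EROFS on this hardware
+ */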
+
+/**
+ * pxa_ep_fifo_status - Get how many bytes in physical endpoint
+ * @_ep: usb endpoint
+ *
+ * Returns number of bytes in OUT fifos. Broken for IN fifos.
+ */
+static int pxa_ep_fifo_status(struct usb_ep *_ep)
+{
+ struct pxa_ep *ep;
+ struct udc_usb_ep *udc_usb_ep;
+
+ if (!_ep)
+ return -ENODEV;
+ udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+ ep = udc_usb_ep->pxa_ep;
+ if (!ep || is_ep0(ep))
+ return -ENODEV;
+
+ if (ep->dir_in)
+ return -EOPNOTSUPP;
+ if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
+ return 0;
+ else
+ return ep_count_bytes_remain(ep) + 1;
+}
+
+/**
+ * pxa_ep_fifo_flush - Flushes one endpoint
+ * @_ep: usb endpoint
+ *
+ * Discards all data in one endpoint (IN or OUT), except the control endpoint.
+ */
+static void pxa_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct pxa_ep *ep;
+ struct udc_usb_ep *udc_usb_ep;
+ unsigned long flags;
+
+ if (!_ep)
+ return;
+ udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+ ep = udc_usb_ep->pxa_ep;
+ if (!ep || is_ep0(ep))
+ return;
+
+ spin_lock_irqsave(&ep->lock, flags);
+
+ if (unlikely(!list_empty(&ep->queue)))
+ ep_dbg(ep, "called while queue list not empty\n");
+ ep_dbg(ep, "called\n");
+
+ /* for OUT, just read and discard the FIFO contents. */
+ if (!ep->dir_in) {
+ while (!ep_is_empty(ep))
+ udc_ep_readl(ep, UDCDR);
+ } else {
+ /* most IN status is the same, but ISO can't stall */
+ ep_write_UDCCSR(ep,
+ UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
+ | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
+ }
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+/**
+ * pxa_ep_enable - Enables usb endpoint
+ * @_ep: usb endpoint
+ * @desc: usb endpoint descriptor
+ *
+ * Nothing much to do here, as ep configuration is done once and for all
+ * before udc is enabled. After udc enable, no physical endpoint configuration
+ * can be changed.
+ * Function makes sanity checks and flushes the endpoint.
+ */
+static int pxa_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct pxa_ep *ep;
+ struct udc_usb_ep *udc_usb_ep;
+ struct pxa_udc *udc;
+
+ if (!_ep || !desc)
+ return -EINVAL;
+
+ udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+ if (udc_usb_ep->pxa_ep) {
+ ep = udc_usb_ep->pxa_ep;
+ ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
+ _ep->name);
+ } else {
+ ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
+ }
+
+ if (!ep || is_ep0(ep)) {
+ dev_err(udc_usb_ep->dev->dev,
+ "unable to match pxa_ep for ep %s\n",
+ _ep->name);
+ return -EINVAL;
+ }
+
+ if ((desc->bDescriptorType != USB_DT_ENDPOINT)
+ || (ep->type != usb_endpoint_type(desc))) {
+ ep_err(ep, "type mismatch\n");
+ return -EINVAL;
+ }
+
+ if (ep->fifo_size < usb_endpoint_maxp(desc)) {
+ ep_err(ep, "bad maxpacket\n");
+ return -ERANGE;
+ }
+
+ udc_usb_ep->pxa_ep = ep;
+ udc = ep->dev;
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ ep_err(ep, "bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ ep->enabled = 1;
+
+ /* flush fifo (mostly for OUT buffers) */
+ pxa_ep_fifo_flush(_ep);
+
+ ep_dbg(ep, "enabled\n");
+ return 0;
+}
+
+/**
+ * pxa_ep_disable - Disable usb endpoint
+ * @_ep: usb endpoint
+ *
+ * Same as for pxa_ep_enable, no physical endpoint configuration can be
+ * changed.
+ * Function flushes the endpoint and related requests.
+ */
+static int pxa_ep_disable(struct usb_ep *_ep)
+{
+ struct pxa_ep *ep;
+ struct udc_usb_ep *udc_usb_ep;
+
+ if (!_ep)
+ return -EINVAL;
+
+ udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
+ ep = udc_usb_ep->pxa_ep;
+ if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
+ return -EINVAL;
+
+ ep->enabled = 0;
+ nuke(ep, -ESHUTDOWN);
+
+ pxa_ep_fifo_flush(_ep);
+ udc_usb_ep->pxa_ep = NULL;
+
+ ep_dbg(ep, "disabled\n");
+ return 0;
+}
+
+static const struct usb_ep_ops pxa_ep_ops = {
+ .enable = pxa_ep_enable,
+ .disable = pxa_ep_disable,
+
+ .alloc_request = pxa_ep_alloc_request,
+ .free_request = pxa_ep_free_request,
+
+ .queue = pxa_ep_queue,
+ .dequeue = pxa_ep_dequeue,
+
+ .set_halt = pxa_ep_set_halt,
+ .fifo_status = pxa_ep_fifo_status,
+ .fifo_flush = pxa_ep_fifo_flush,
+};
+
+/**
+ * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
+ * @udc: udc device
+ * @on: 0 to disconnect the pullup resistor, 1 to connect it
+ * Context: any
+ *
+ * Handle D+ pullup resistor, make the device visible to the usb bus, and
+ * declare it as a full speed usb device
+ */
+static void dplus_pullup(struct pxa_udc *udc, int on)
+{
+ if (udc->gpiod) {
+ gpiod_set_value(udc->gpiod, on);
+ } else if (udc->udc_command) {
+ if (on)
+ udc->udc_command(PXA2XX_UDC_CMD_CONNECT);
+ else
+ udc->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
+ }
+ udc->pullup_on = on;
+}
+
+/**
+ * pxa_udc_get_frame - Returns usb frame number
+ * @_gadget: usb gadget
+ */
+static int pxa_udc_get_frame(struct usb_gadget *_gadget)
+{
+ struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+ return (udc_readl(udc, UDCFNR) & 0x7ff);
+}
+
+/**
+ * pxa_udc_wakeup - Force udc device out of suspend
+ * @_gadget: usb gadget
+ *
+ * Returns 0 if successful, error code otherwise
+ */
+static int pxa_udc_wakeup(struct usb_gadget *_gadget)
+{
+ struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+ /* host may not have enabled remote wakeup */
+ if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
+ return -EHOSTUNREACH;
+ udc_set_mask_UDCCR(udc, UDCCR_UDR);
+ return 0;
+}
+
+static void udc_enable(struct pxa_udc *udc);
+static void udc_disable(struct pxa_udc *udc);
+
+/**
+ * should_enable_udc - Tells if UDC should be enabled
+ * @udc: udc device
+ * Context: any
+ *
+ * The UDC should be enabled if:
+ * - the pullup resistor is connected
+ * - and a gadget driver is bound
+ * - and vbus is sensed (or no vbus sense is available)
+ *
+ * Returns 1 if UDC should be enabled, 0 otherwise
+ */
+static int should_enable_udc(struct pxa_udc *udc)
+{
+ int put_on;
+
+ put_on = ((udc->pullup_on) && (udc->driver));
+ put_on &= ((udc->vbus_sensed) || (IS_ERR_OR_NULL(udc->transceiver)));
+ return put_on;
+}
+
+/**
+ * should_disable_udc - Tells if UDC should be disabled
+ * @udc: udc device
+ * Context: any
+ *
+ * The UDC should be disabled if:
+ * - the pullup resistor is not connected
+ * - or no gadget driver is bound
+ * - or no vbus is sensed (when vbus sensing is available)
+ *
+ * Returns 1 if UDC should be disabled
+ */
+static int should_disable_udc(struct pxa_udc *udc)
+{
+ int put_off;
+
+ put_off = ((!udc->pullup_on) || (!udc->driver));
+ put_off |= ((!udc->vbus_sensed) && (!IS_ERR_OR_NULL(udc->transceiver)));
+ return put_off;
+}
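+
+/*
+ * Summary (illustrative): with a transceiver present, the two helpers above
+ * reduce to:
+ *
+ *	enable  =  pullup_on &&  driver &&  vbus_sensed;
+ *	disable = !pullup_on || !driver || !vbus_sensed;
+ *
+ * Without a transceiver, the vbus_sensed term drops out of both.
+ */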
+
+/**
+ * pxa_udc_pullup - Offer manual D+ pullup control
+ * @_gadget: usb gadget using the control
+ * @is_active: 0 if disconnect, else connect D+ pullup resistor
+ *
+ * Context: task context, might sleep
+ *
+ * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
+ */
+static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
+{
+ struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+ if (!udc->gpiod && !udc->udc_command)
+ return -EOPNOTSUPP;
+
+ dplus_pullup(udc, is_active);
+
+ if (should_enable_udc(udc))
+ udc_enable(udc);
+ if (should_disable_udc(udc))
+ udc_disable(udc);
+ return 0;
+}
+
+/**
+ * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
+ * @_gadget: usb gadget
+ * @is_active: 0 if should disable the udc, 1 if should enable
+ *
+ * Enables the udc and optionally activates the D+ pullup resistor, or
+ * disables the udc and deactivates the D+ pullup resistor.
+ *
+ * Returns 0
+ */
+static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+ struct pxa_udc *udc = to_gadget_udc(_gadget);
+
+ udc->vbus_sensed = is_active;
+ if (should_enable_udc(udc))
+ udc_enable(udc);
+ if (should_disable_udc(udc))
+ udc_disable(udc);
+
+ return 0;
+}
+
+/**
+ * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
+ * @_gadget: usb gadget
+ * @mA: current drawn
+ *
+ * Context: task context, might sleep
+ *
+ * Called after a configuration was chosen by a USB host, to inform how much
+ * current can be drawn by the device from VBus line.
+ *
+ * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
+ */
+static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+ struct pxa_udc *udc;
+
+ udc = to_gadget_udc(_gadget);
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ return usb_phy_set_power(udc->transceiver, mA);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * pxa_udc_phy_event - Called by phy upon VBus event
+ * @nb: notifier block
+ * @action: phy action, is vbus connect or disconnect
+ * @data: the usb_gadget structure in pxa_udc
+ *
+ * Called by the USB Phy when a cable connect or disconnect is sensed.
+ *
+ * Returns 0
+ */
+static int pxa_udc_phy_event(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct usb_gadget *gadget = data;
+
+ switch (action) {
+ case USB_EVENT_VBUS:
+ usb_gadget_vbus_connect(gadget);
+ return NOTIFY_OK;
+ case USB_EVENT_NONE:
+ usb_gadget_vbus_disconnect(gadget);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block pxa27x_udc_phy = {
+ .notifier_call = pxa_udc_phy_event,
+};
+
+static int pxa27x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int pxa27x_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops pxa_udc_ops = {
+ .get_frame = pxa_udc_get_frame,
+ .wakeup = pxa_udc_wakeup,
+ .pullup = pxa_udc_pullup,
+ .vbus_session = pxa_udc_vbus_session,
+ .vbus_draw = pxa_udc_vbus_draw,
+ .udc_start = pxa27x_udc_start,
+ .udc_stop = pxa27x_udc_stop,
+};
+
+/**
+ * udc_disable - disable udc device controller
+ * @udc: udc device
+ * Context: any
+ *
+ * Disables the udc device: disables clocks, udc interrupts, control endpoint
+ * interrupts.
+ */
+static void udc_disable(struct pxa_udc *udc)
+{
+ if (!udc->enabled)
+ return;
+
+ udc_writel(udc, UDCICR0, 0);
+ udc_writel(udc, UDCICR1, 0);
+
+ udc_clear_mask_UDCCR(udc, UDCCR_UDE);
+
+ ep0_idle(udc);
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ clk_disable(udc->clk);
+
+ udc->enabled = 0;
+}
+
+/**
+ * udc_init_data - Initialize udc device data structures
+ * @dev: udc device
+ *
+ * Initializes the gadget endpoint list and endpoint locks. No action is taken
+ * on the hardware.
+ */
+static void udc_init_data(struct pxa_udc *dev)
+{
+ int i;
+ struct pxa_ep *ep;
+
+ /* device/ep0 records init */
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+ dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
+ dev->gadget.quirk_altset_not_supp = 1;
+ ep0_idle(dev);
+
+ /* PXA endpoints init */
+ for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
+ ep = &dev->pxa_ep[i];
+
+ ep->enabled = is_ep0(ep);
+ INIT_LIST_HEAD(&ep->queue);
+ spin_lock_init(&ep->lock);
+ }
+
+ /* USB endpoints init */
+ for (i = 1; i < NR_USB_ENDPOINTS; i++) {
+ list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
+ &dev->gadget.ep_list);
+ usb_ep_set_maxpacket_limit(&dev->udc_usb_ep[i].usb_ep,
+ dev->udc_usb_ep[i].usb_ep.maxpacket);
+ }
+}
+
+/**
+ * udc_enable - Enables the udc device
+ * @udc: udc device
+ *
+ * Enables the udc device: enables clocks, udc interrupts, and control endpoint
+ * interrupts, sets usb as UDC client and sets up endpoints.
+ */
+static void udc_enable(struct pxa_udc *udc)
+{
+ if (udc->enabled)
+ return;
+
+ clk_enable(udc->clk);
+ udc_writel(udc, UDCICR0, 0);
+ udc_writel(udc, UDCICR1, 0);
+ udc_clear_mask_UDCCR(udc, UDCCR_UDE);
+
+ ep0_idle(udc);
+ udc->gadget.speed = USB_SPEED_FULL;
+ memset(&udc->stats, 0, sizeof(udc->stats));
+
+ pxa_eps_setup(udc);
+ udc_set_mask_UDCCR(udc, UDCCR_UDE);
+ ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
+ udelay(2);
+ if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
+ dev_err(udc->dev, "Configuration errors, udc disabled\n");
+
+ /*
+ * Caller must be able to sleep in order to cope with startup transients
+ */
+ msleep(100);
+
+ /* enable suspend/resume and reset irqs */
+ udc_writel(udc, UDCICR1,
+ UDCICR1_IECC | UDCICR1_IERU
+ | UDCICR1_IESU | UDCICR1_IERS);
+
+ /* enable ep0 irqs */
+ pio_irq_enable(&udc->pxa_ep[0]);
+
+ udc->enabled = 1;
+}
+
+/**
+ * pxa27x_udc_start - Register gadget driver
+ * @g: gadget
+ * @driver: gadget driver
+ *
+ * When a driver is successfully registered, it will receive control requests
+ * including set_configuration(), which enables non-control requests. Then
+ * usb traffic follows until a disconnect is reported. Then a host may connect
+ * again, or the driver might get unbound.
+ *
+ * Note that the udc is not automatically enabled. Check function
+ * should_enable_udc().
+ *
+ * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
+ */
+static int pxa27x_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct pxa_udc *udc = to_pxa(g);
+ int retval;
+
+ /* first hook up the driver ... */
+ udc->driver = driver;
+
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ retval = otg_set_peripheral(udc->transceiver->otg,
+ &udc->gadget);
+ if (retval) {
+ dev_err(udc->dev, "can't bind to transceiver\n");
+ goto fail;
+ }
+ }
+
+ if (should_enable_udc(udc))
+ udc_enable(udc);
+ return 0;
+
+fail:
+ udc->driver = NULL;
+ return retval;
+}
+
+/**
+ * stop_activity - Stops udc endpoints
+ * @udc: udc device
+ *
+ * Disables all udc endpoints (even control endpoint), report disconnect to
+ * the gadget user.
+ */
+static void stop_activity(struct pxa_udc *udc)
+{
+ int i;
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+
+ for (i = 0; i < NR_USB_ENDPOINTS; i++)
+ pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
+}
+
+/**
+ * pxa27x_udc_stop - Unregister the gadget driver
+ * @g: gadget
+ *
+ * Returns 0 if no error, -ENODEV, -EINVAL otherwise
+ */
+static int pxa27x_udc_stop(struct usb_gadget *g)
+{
+ struct pxa_udc *udc = to_pxa(g);
+
+ stop_activity(udc);
+ udc_disable(udc);
+
+ udc->driver = NULL;
+
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ return otg_set_peripheral(udc->transceiver->otg, NULL);
+ return 0;
+}
+
+/**
+ * handle_ep0_ctrl_req - handle control endpoint control request
+ * @udc: udc device
+ * @req: control request
+ */
+static void handle_ep0_ctrl_req(struct pxa_udc *udc,
+ struct pxa27x_request *req)
+{
+ struct pxa_ep *ep = &udc->pxa_ep[0];
+ union {
+ struct usb_ctrlrequest r;
+ u32 word[2];
+ } u;
+ int i;
+ int have_extrabytes = 0;
+ unsigned long flags;
+
+ nuke(ep, -EPROTO);
+ spin_lock_irqsave(&ep->lock, flags);
+
+ /*
+	 * The PXA320 manual, in its section about Back-to-Back setup
+	 * packets, describes this situation. The solution is to set OPC to
+ * get rid of the status packet, and then continue with the setup
+ * packet. Generalize to pxa27x CPUs.
+ */
+ if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
+ ep_write_UDCCSR(ep, UDCCSR0_OPC);
+
+ /* read SETUP packet */
+ for (i = 0; i < 2; i++) {
+ if (unlikely(ep_is_empty(ep)))
+ goto stall;
+ u.word[i] = udc_ep_readl(ep, UDCDR);
+ }
+
+ have_extrabytes = !ep_is_empty(ep);
+ while (!ep_is_empty(ep)) {
+ i = udc_ep_readl(ep, UDCDR);
+ ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
+ }
+
+ ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ u.r.bRequestType, u.r.bRequest,
+ le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
+ le16_to_cpu(u.r.wLength));
+ if (unlikely(have_extrabytes))
+ goto stall;
+
+ if (u.r.bRequestType & USB_DIR_IN)
+ set_ep0state(udc, IN_DATA_STAGE);
+ else
+ set_ep0state(udc, OUT_DATA_STAGE);
+
+ /* Tell UDC to enter Data Stage */
+ ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ i = udc->driver->setup(&udc->gadget, &u.r);
+ spin_lock_irqsave(&ep->lock, flags);
+ if (i < 0)
+ goto stall;
+out:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ return;
+stall:
+ ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
+ udc_ep_readl(ep, UDCCSR), i);
+ ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
+ set_ep0state(udc, STALL);
+ goto out;
+}
+
+/**
+ * handle_ep0 - Handle control endpoint data transfers
+ * @udc: udc device
+ * @fifo_irq: 1 if triggered by fifo service type irq
+ * @opc_irq: 1 if triggered by output packet complete type irq
+ *
+ * Context : interrupt handler
+ *
+ * Tries to transfer all pending request data into the endpoint and/or
+ * transfer all pending data in the endpoint into usb requests.
+ * Handles states of ep0 automata.
+ *
+ * PXA27x hardware handles several standard usb control requests without
+ * driver notification. The requests fully handled by hardware are :
+ * SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
+ * GET_STATUS
+ * The requests handled by hardware, but with irq notification are :
+ * SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
+ * The remaining standard requests really handled by handle_ep0 are :
+ * GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
+ * Requests standardized outside of USB 2.0 chapter 9 are handled more
+ * uniformly, by gadget drivers.
+ *
+ * The control endpoint state machine is _not_ USB spec compliant; it's
+ * hardly even compliant with the Intel PXA270 developer's guide.
+ * The key points which shaped this state machine are:
+ * - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
+ * software.
+ * - on every OUT packet received, UDCCSR0_OPC is raised and held until
+ * cleared by software.
+ * - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
+ * before reading ep0.
+ * This is true only for PXA27x. This is not true anymore for PXA3xx family
+ * (check Back-to-Back setup packet in developers guide).
+ * - irq can be called on a "packet complete" event (opc_irq=1), while
+ * UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
+ * from experimentation).
+ * - as UDCCSR0_SA can be activated while in irq handling, and clearing
+ * UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
+ * => we never actually read the "status stage" packet of an IN data stage
+ * => this is not documented in Intel documentation
+ * - hardware has no idea of a STATUS STAGE, it only handles the SETUP STAGE
+ *   and DATA STAGE. The driver adds a STATUS STAGE to send the last zero
+ *   length packet in OUT_STATUS_STAGE.
+ * - special attention was needed for IN_STATUS_STAGE. If a packet complete
+ *   event is detected, we terminate the status stage without acknowledging
+ *   the packet (so as not to risk losing a potential SETUP packet)
+ */
+static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
+{
+ u32 udccsr0;
+ struct pxa_ep *ep = &udc->pxa_ep[0];
+ struct pxa27x_request *req = NULL;
+ int completed = 0;
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next, struct pxa27x_request, queue);
+
+ udccsr0 = udc_ep_readl(ep, UDCCSR);
+ ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
+ EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
+ (fifo_irq << 1 | opc_irq));
+
+ if (udccsr0 & UDCCSR0_SST) {
+ ep_dbg(ep, "clearing stall status\n");
+ nuke(ep, -EPIPE);
+ ep_write_UDCCSR(ep, UDCCSR0_SST);
+ ep0_idle(udc);
+ }
+
+ if (udccsr0 & UDCCSR0_SA) {
+ nuke(ep, 0);
+ set_ep0state(udc, SETUP_STAGE);
+ }
+
+ switch (udc->ep0state) {
+ case WAIT_FOR_SETUP:
+ /*
+		 * Hardware bug: beware, we cannot clear OPC, since we would
+ * miss a potential OPC irq for a setup packet.
+ * So, we only do ... nothing, and hope for a next irq with
+ * UDCCSR0_SA set.
+ */
+ break;
+ case SETUP_STAGE:
+ udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
+ if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
+ handle_ep0_ctrl_req(udc, req);
+ break;
+ case IN_DATA_STAGE: /* GET_DESCRIPTOR */
+ if (epout_has_pkt(ep))
+ ep_write_UDCCSR(ep, UDCCSR0_OPC);
+ if (req && !ep_is_full(ep))
+ completed = write_ep0_fifo(ep, req);
+ if (completed)
+ ep0_end_in_req(ep, req, NULL);
+ break;
+ case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
+ if (epout_has_pkt(ep) && req)
+ completed = read_ep0_fifo(ep, req);
+ if (completed)
+ ep0_end_out_req(ep, req, NULL);
+ break;
+ case STALL:
+ ep_write_UDCCSR(ep, UDCCSR0_FST);
+ break;
+ case IN_STATUS_STAGE:
+ /*
+		 * Hardware bug: beware, we cannot clear OPC, since we would
+ * miss a potential PC irq for a setup packet.
+ * So, we only put the ep0 into WAIT_FOR_SETUP state.
+ */
+ if (opc_irq)
+ ep0_idle(udc);
+ break;
+ case OUT_STATUS_STAGE:
+ case WAIT_ACK_SET_CONF_INTERF:
+ ep_warn(ep, "should never get in %s state here!!!\n",
+ EP0_STNAME(ep->dev));
+ ep0_idle(udc);
+ break;
+ }
+}
+
+/**
+ * handle_ep - Handle endpoint data transfers
+ * @ep: pxa physical endpoint
+ *
+ * Tries to transfer all pending request data into the endpoint and/or
+ * transfer all pending data in the endpoint into usb requests.
+ *
+ * Is always called from the interrupt handler. ep->lock must not be held.
+ */
+static void handle_ep(struct pxa_ep *ep)
+{
+ struct pxa27x_request *req;
+ int completed;
+ u32 udccsr;
+ int is_in = ep->dir_in;
+ int loop = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ if (ep->in_handle_ep)
+ goto recursion_detected;
+ ep->in_handle_ep = 1;
+
+ do {
+ completed = 0;
+ udccsr = udc_ep_readl(ep, UDCCSR);
+
+ if (likely(!list_empty(&ep->queue)))
+ req = list_entry(ep->queue.next,
+ struct pxa27x_request, queue);
+ else
+ req = NULL;
+
+ ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
+ req, udccsr, loop++);
+
+ if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
+ udc_ep_writel(ep, UDCCSR,
+ udccsr & (UDCCSR_SST | UDCCSR_TRN));
+ if (!req)
+ break;
+
+ if (unlikely(is_in)) {
+ if (likely(!ep_is_full(ep)))
+ completed = write_fifo(ep, req);
+ } else {
+ if (likely(epout_has_pkt(ep)))
+ completed = read_fifo(ep, req);
+ }
+
+ if (completed) {
+ if (is_in)
+ ep_end_in_req(ep, req, &flags);
+ else
+ ep_end_out_req(ep, req, &flags);
+ }
+ } while (completed);
+
+ ep->in_handle_ep = 0;
+recursion_detected:
+ spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+/**
+ * pxa27x_change_configuration - Handle SET_CONF usb request notification
+ * @udc: udc device
+ * @config: usb configuration
+ *
+ * Posts the request to the upper level.
+ * Doesn't use any pxa specific hardware configuration capabilities.
+ */
+static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
+{
+	struct usb_ctrlrequest req;
+
+ dev_dbg(udc->dev, "config=%d\n", config);
+
+ udc->config = config;
+ udc->last_interface = 0;
+ udc->last_alternate = 0;
+
+ req.bRequestType = 0;
+ req.bRequest = USB_REQ_SET_CONFIGURATION;
+ req.wValue = config;
+ req.wIndex = 0;
+ req.wLength = 0;
+
+ set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
+ udc->driver->setup(&udc->gadget, &req);
+ ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
+}
+
+/**
+ * pxa27x_change_interface - Handle SET_INTERF usb request notification
+ * @udc: udc device
+ * @iface: interface number
+ * @alt: alternate setting number
+ *
+ * Posts the request to the upper level.
+ * Doesn't use any pxa specific hardware configuration capabilities.
+ */
+static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
+{
+ struct usb_ctrlrequest req;
+
+ dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
+
+ udc->last_interface = iface;
+ udc->last_alternate = alt;
+
+ req.bRequestType = USB_RECIP_INTERFACE;
+ req.bRequest = USB_REQ_SET_INTERFACE;
+ req.wValue = alt;
+ req.wIndex = iface;
+ req.wLength = 0;
+
+ set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
+ udc->driver->setup(&udc->gadget, &req);
+ ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
+}
+
+/*
+ * irq_handle_data - Handle data transfer
+ * @irq: irq number
+ * @udc: pxa_udc device structure
+ *
+ * Called from the irq handler, transfers data between the endpoint fifos and
+ * the queued requests
+ */
+static void irq_handle_data(int irq, struct pxa_udc *udc)
+{
+ int i;
+ struct pxa_ep *ep;
+ u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
+ u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
+
+ if (udcisr0 & UDCISR_INT_MASK) {
+ udc->pxa_ep[0].stats.irqs++;
+ udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
+ handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
+ !!(udcisr0 & UDCICR_PKTCOMPL));
+ }
+
+ udcisr0 >>= 2;
+ for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
+ if (!(udcisr0 & UDCISR_INT_MASK))
+ continue;
+
+ udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
+
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
+ }
+
+ for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
+ udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
+ if (!(udcisr1 & UDCISR_INT_MASK))
+ continue;
+
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
+ }
+
+}
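+
+/*
+ * Illustrative note (not part of the driver): UDCISR0/UDCISR1 pack two
+ * status bits per endpoint, so endpoint 3's bits sit at UDCISR0[7:6]:
+ *
+ *	fifoerr  = udcisr0 & UDCISR_INT(3, UDCICR_FIFOERR);	(bit 7)
+ *	pktcompl = udcisr0 & UDCISR_INT(3, UDCICR_PKTCOMPL);	(bit 6)
+ */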
+
+/**
+ * irq_udc_suspend - Handle IRQ "UDC Suspend"
+ * @udc: udc device
+ */
+static void irq_udc_suspend(struct pxa_udc *udc)
+{
+ udc_writel(udc, UDCISR1, UDCISR1_IRSU);
+ udc->stats.irqs_suspend++;
+
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN
+ && udc->driver && udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+ ep0_idle(udc);
+}
+
+/**
+ * irq_udc_resume - Handle IRQ "UDC Resume"
+ * @udc: udc device
+ */
+static void irq_udc_resume(struct pxa_udc *udc)
+{
+ udc_writel(udc, UDCISR1, UDCISR1_IRRU);
+ udc->stats.irqs_resume++;
+
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN
+ && udc->driver && udc->driver->resume)
+ udc->driver->resume(&udc->gadget);
+}
+
+/**
+ * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
+ * @udc: udc device
+ */
+static void irq_udc_reconfig(struct pxa_udc *udc)
+{
+ unsigned config, interface, alternate, config_change;
+ u32 udccr = udc_readl(udc, UDCCR);
+
+ udc_writel(udc, UDCISR1, UDCISR1_IRCC);
+ udc->stats.irqs_reconfig++;
+
+ config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
+ config_change = (config != udc->config);
+ pxa27x_change_configuration(udc, config);
+
+ interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
+ alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
+ pxa27x_change_interface(udc, interface, alternate);
+
+ if (config_change)
+ update_pxa_ep_matches(udc);
+ udc_set_mask_UDCCR(udc, UDCCR_SMAC);
+}
+
+/**
+ * irq_udc_reset - Handle IRQ "UDC Reset"
+ * @udc: udc device
+ */
+static void irq_udc_reset(struct pxa_udc *udc)
+{
+ u32 udccr = udc_readl(udc, UDCCR);
+ struct pxa_ep *ep = &udc->pxa_ep[0];
+
+ dev_info(udc->dev, "USB reset\n");
+ udc_writel(udc, UDCISR1, UDCISR1_IRRS);
+ udc->stats.irqs_reset++;
+
+ if ((udccr & UDCCR_UDA) == 0) {
+ dev_dbg(udc->dev, "USB reset start\n");
+ stop_activity(udc);
+ }
+ udc->gadget.speed = USB_SPEED_FULL;
+ memset(&udc->stats, 0, sizeof udc->stats);
+
+ nuke(ep, -EPROTO);
+ ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
+ ep0_idle(udc);
+}
+
+/**
+ * pxa_udc_irq - Main irq handler
+ * @irq: irq number
+ * @_dev: udc device
+ *
+ * Handles all udc interrupts
+ */
+static irqreturn_t pxa_udc_irq(int irq, void *_dev)
+{
+ struct pxa_udc *udc = _dev;
+ u32 udcisr0 = udc_readl(udc, UDCISR0);
+ u32 udcisr1 = udc_readl(udc, UDCISR1);
+ u32 udccr = udc_readl(udc, UDCCR);
+ u32 udcisr1_spec;
+
+ dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
+ "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
+
+ udcisr1_spec = udcisr1 & 0xf8000000;
+ if (unlikely(udcisr1_spec & UDCISR1_IRSU))
+ irq_udc_suspend(udc);
+ if (unlikely(udcisr1_spec & UDCISR1_IRRU))
+ irq_udc_resume(udc);
+ if (unlikely(udcisr1_spec & UDCISR1_IRCC))
+ irq_udc_reconfig(udc);
+ if (unlikely(udcisr1_spec & UDCISR1_IRRS))
+ irq_udc_reset(udc);
+
+ if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
+ irq_handle_data(irq, udc);
+
+ return IRQ_HANDLED;
+}
+
+static struct pxa_udc memory = {
+ .gadget = {
+ .ops = &pxa_udc_ops,
+ .ep0 = &memory.udc_usb_ep[0].usb_ep,
+ .name = driver_name,
+ .dev = {
+ .init_name = "gadget",
+ },
+ },
+
+ .udc_usb_ep = {
+ USB_EP_CTRL,
+ USB_EP_OUT_BULK(1),
+ USB_EP_IN_BULK(2),
+ USB_EP_IN_ISO(3),
+ USB_EP_OUT_ISO(4),
+ USB_EP_IN_INT(5),
+ },
+
+ .pxa_ep = {
+ PXA_EP_CTRL,
+ /* Endpoints for gadget zero */
+ PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
+ PXA_EP_IN_BULK(2, 2, 3, 0, 0),
+ /* Endpoints for ether gadget, file storage gadget */
+ PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
+ PXA_EP_IN_BULK(4, 2, 1, 0, 0),
+ PXA_EP_IN_ISO(5, 3, 1, 0, 0),
+ PXA_EP_OUT_ISO(6, 4, 1, 0, 0),
+ PXA_EP_IN_INT(7, 5, 1, 0, 0),
+ /* Endpoints for RNDIS, serial */
+ PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
+ PXA_EP_IN_BULK(9, 2, 2, 0, 0),
+ PXA_EP_IN_INT(10, 5, 2, 0, 0),
+ /*
+		 * All the following endpoints are only here for completeness.
+		 * They will never work, as multiple interfaces are really
+		 * broken on the pxa.
+ */
+ PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
+ PXA_EP_IN_BULK(12, 2, 2, 1, 0),
+ /* Endpoint for CDC Ether */
+ PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
+ PXA_EP_IN_BULK(14, 2, 1, 1, 1),
+ }
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id udc_pxa_dt_ids[] = {
+ { .compatible = "marvell,pxa270-udc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
+#endif
+
+/**
+ * pxa_udc_probe - probes the udc device
+ * @pdev: platform device
+ *
+ * Perform basic init : allocates udc clock, creates sysfs files, requests
+ * irq.
+ */
+static int pxa_udc_probe(struct platform_device *pdev)
+{
+ struct pxa_udc *udc = &memory;
+ int retval = 0, gpio;
+ struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
+ unsigned long gpio_flags;
+
+ if (mach) {
+ gpio_flags = mach->gpio_pullup_inverted ? GPIOF_ACTIVE_LOW : 0;
+ gpio = mach->gpio_pullup;
+ if (gpio_is_valid(gpio)) {
+ retval = devm_gpio_request_one(&pdev->dev, gpio,
+ gpio_flags,
+ "USB D+ pullup");
+ if (retval)
+ return retval;
+ udc->gpiod = gpio_to_desc(mach->gpio_pullup);
+ }
+ udc->udc_command = mach->udc_command;
+ } else {
+ udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
+ }
+
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(udc->regs))
+ return PTR_ERR(udc->regs);
+ udc->irq = platform_get_irq(pdev, 0);
+ if (udc->irq < 0)
+ return udc->irq;
+
+ udc->dev = &pdev->dev;
+ if (of_have_populated_dt()) {
+ udc->transceiver =
+ devm_usb_get_phy_by_phandle(udc->dev, "phys", 0);
+ if (IS_ERR(udc->transceiver))
+ return PTR_ERR(udc->transceiver);
+ } else {
+ udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ }
+
+ if (IS_ERR(udc->gpiod)) {
+ dev_err(&pdev->dev, "Couldn't find or request D+ gpio : %ld\n",
+ PTR_ERR(udc->gpiod));
+ return PTR_ERR(udc->gpiod);
+ }
+ if (udc->gpiod)
+ gpiod_direction_output(udc->gpiod, 0);
+
+ udc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(udc->clk))
+ return PTR_ERR(udc->clk);
+
+ retval = clk_prepare(udc->clk);
+ if (retval)
+ return retval;
+
+ udc->vbus_sensed = 0;
+
+ the_controller = udc;
+ platform_set_drvdata(pdev, udc);
+ udc_init_data(udc);
+
+ /* irq setup after old hardware state is cleaned up */
+ retval = devm_request_irq(&pdev->dev, udc->irq, pxa_udc_irq,
+ IRQF_SHARED, driver_name, udc);
+ if (retval != 0) {
+ dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
+ driver_name, udc->irq, retval);
+ goto err;
+ }
+
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_register_notifier(udc->transceiver, &pxa27x_udc_phy);
+ retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (retval)
+ goto err_add_gadget;
+
+ pxa_init_debugfs(udc);
+ if (should_enable_udc(udc))
+ udc_enable(udc);
+ return 0;
+
+err_add_gadget:
+ if (!IS_ERR_OR_NULL(udc->transceiver))
+ usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
+err:
+ clk_unprepare(udc->clk);
+ return retval;
+}
+
+/**
+ * pxa_udc_remove - removes the udc device driver
+ * @_dev: platform device
+ */
+static int pxa_udc_remove(struct platform_device *_dev)
+{
+ struct pxa_udc *udc = platform_get_drvdata(_dev);
+
+ usb_del_gadget_udc(&udc->gadget);
+ pxa_cleanup_debugfs(udc);
+
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
+ usb_put_phy(udc->transceiver);
+ }
+
+ udc->transceiver = NULL;
+ the_controller = NULL;
+ clk_unprepare(udc->clk);
+
+ return 0;
+}
+
+static void pxa_udc_shutdown(struct platform_device *_dev)
+{
+ struct pxa_udc *udc = platform_get_drvdata(_dev);
+
+ if (udc_readl(udc, UDCCR) & UDCCR_UDE)
+ udc_disable(udc);
+}
+
+#ifdef CONFIG_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph() do {} while (0)
+#endif
+
+#ifdef CONFIG_PM
+/**
+ * pxa_udc_suspend - Suspend udc device
+ * @_dev: platform device
+ * @state: suspend state
+ *
+ * Suspends udc: saves configuration registers (UDCCR*), then disables the udc
+ * device.
+ */
+static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
+{
+ struct pxa_udc *udc = platform_get_drvdata(_dev);
+ struct pxa_ep *ep;
+
+ ep = &udc->pxa_ep[0];
+ udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
+
+ udc_disable(udc);
+ udc->pullup_resume = udc->pullup_on;
+ dplus_pullup(udc, 0);
+
+ if (udc->driver)
+ udc->driver->disconnect(&udc->gadget);
+
+ return 0;
+}
+
+/**
+ * pxa_udc_resume - Resume udc device
+ * @_dev: platform device
+ *
+ * Resumes udc: restores configuration registers (UDCCR*), then enables the udc
+ * device.
+ */
+static int pxa_udc_resume(struct platform_device *_dev)
+{
+ struct pxa_udc *udc = platform_get_drvdata(_dev);
+ struct pxa_ep *ep;
+
+ ep = &udc->pxa_ep[0];
+ udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
+
+ dplus_pullup(udc, udc->pullup_resume);
+ if (should_enable_udc(udc))
+ udc_enable(udc);
+ /*
+ * We do not handle OTG yet.
+ *
+ * OTGPH bit is set when sleep mode is entered.
+ * it indicates that OTG pad is retaining its state.
+ * Upon exit from sleep mode and before clearing OTGPH,
+ * Software must configure the USB OTG pad, UDC, and UHC
+ * to the state they were in before entering sleep mode.
+ */
+ pxa27x_clear_otgph();
+
+ return 0;
+}
+#endif
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:pxa27x-udc");
+
+static struct platform_driver udc_driver = {
+ .driver = {
+ .name = "pxa27x-udc",
+ .of_match_table = of_match_ptr(udc_pxa_dt_ids),
+ },
+ .probe = pxa_udc_probe,
+ .remove = pxa_udc_remove,
+ .shutdown = pxa_udc_shutdown,
+#ifdef CONFIG_PM
+ .suspend = pxa_udc_suspend,
+ .resume = pxa_udc_resume
+#endif
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Robert Jarzmik");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
new file mode 100644
index 000000000..31bf79ce9
--- /dev/null
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * linux/drivers/usb/gadget/pxa27x_udc.h
+ * Intel PXA27x on-chip full speed USB device controller
+ *
+ * Inspired by original driver by Frank Becker, David Brownell, and others.
+ * Copyright (C) 2008 Robert Jarzmik
+ */
+
+#ifndef __LINUX_USB_GADGET_PXA27X_H
+#define __LINUX_USB_GADGET_PXA27X_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/usb/otg.h>
+
+/*
+ * Register definitions
+ */
+/* Offsets */
+#define UDCCR 0x0000 /* UDC Control Register */
+#define UDCICR0 0x0004 /* UDC Interrupt Control Register0 */
+#define UDCICR1 0x0008 /* UDC Interrupt Control Register1 */
+#define UDCISR0 0x000C /* UDC Interrupt Status Register 0 */
+#define UDCISR1 0x0010 /* UDC Interrupt Status Register 1 */
+#define UDCFNR 0x0014 /* UDC Frame Number Register */
+#define UDCOTGICR 0x0018 /* UDC On-The-Go interrupt control */
+#define UP2OCR 0x0020 /* USB Port 2 Output Control register */
+#define UP3OCR 0x0024 /* USB Port 3 Output Control register */
+#define UDCCSRn(x) (0x0100 + ((x)<<2)) /* UDC Control/Status register */
+#define UDCBCRn(x) (0x0200 + ((x)<<2)) /* UDC Byte Count Register */
+#define UDCDRn(x) (0x0300 + ((x)<<2)) /* UDC Data Register */
+#define UDCCRn(x) (0x0400 + ((x)<<2)) /* UDC Control Register */
+
+#define UDCCR_OEN (1 << 31) /* On-the-Go Enable */
+#define UDCCR_AALTHNP (1 << 30) /* A-device Alternate Host Negotiation
+ Protocol Port Support */
+#define UDCCR_AHNP (1 << 29) /* A-device Host Negotiation Protocol
+ Support */
+#define UDCCR_BHNP (1 << 28) /* B-device Host Negotiation Protocol
+ Enable */
+#define UDCCR_DWRE (1 << 16) /* Device Remote Wake-up Enable */
+#define UDCCR_ACN (0x03 << 11) /* Active UDC configuration Number */
+#define UDCCR_ACN_S 11
+#define UDCCR_AIN (0x07 << 8) /* Active UDC interface Number */
+#define UDCCR_AIN_S 8
+#define UDCCR_AAISN (0x07 << 5) /* Active UDC Alternate Interface
+ Setting Number */
+#define UDCCR_AAISN_S 5
+#define UDCCR_SMAC (1 << 4) /* Switch Endpoint Memory to Active
+ Configuration */
+#define UDCCR_EMCE (1 << 3) /* Endpoint Memory Configuration
+ Error */
+#define UDCCR_UDR (1 << 2) /* UDC Resume */
+#define UDCCR_UDA (1 << 1) /* UDC Active */
+#define UDCCR_UDE (1 << 0) /* UDC Enable */
+
+#define UDCICR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2))
+#define UDCICR1_IECC (1 << 31) /* IntEn - Configuration Change */
+#define UDCICR1_IESOF (1 << 30) /* IntEn - Start of Frame */
+#define UDCICR1_IERU (1 << 29) /* IntEn - Resume */
+#define UDCICR1_IESU (1 << 28) /* IntEn - Suspend */
+#define UDCICR1_IERS (1 << 27) /* IntEn - Reset */
+#define UDCICR_FIFOERR (1 << 1) /* FIFO Error interrupt for EP */
+#define UDCICR_PKTCOMPL (1 << 0) /* Packet Complete interrupt for EP */
+#define UDCICR_INT_MASK (UDCICR_FIFOERR | UDCICR_PKTCOMPL)
+
+#define UDCISR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2))
+#define UDCISR1_IRCC (1 << 31) /* IntReq - Configuration Change */
+#define UDCISR1_IRSOF (1 << 30) /* IntReq - Start of Frame */
+#define UDCISR1_IRRU (1 << 29) /* IntReq - Resume */
+#define UDCISR1_IRSU (1 << 28) /* IntReq - Suspend */
+#define UDCISR1_IRRS (1 << 27) /* IntReq - Reset */
+#define UDCISR_INT_MASK (UDCICR_FIFOERR | UDCICR_PKTCOMPL)
+
+#define UDCOTGICR_IESF (1 << 24) /* OTG SET_FEATURE command recvd */
+#define UDCOTGICR_IEXR (1 << 17) /* Extra Transceiver Interrupt
+ Rising Edge Interrupt Enable */
+#define UDCOTGICR_IEXF (1 << 16) /* Extra Transceiver Interrupt
+ Falling Edge Interrupt Enable */
+#define UDCOTGICR_IEVV40R (1 << 9) /* OTG Vbus Valid 4.0V Rising Edge
+ Interrupt Enable */
+#define UDCOTGICR_IEVV40F (1 << 8) /* OTG Vbus Valid 4.0V Falling Edge
+ Interrupt Enable */
+#define UDCOTGICR_IEVV44R (1 << 7) /* OTG Vbus Valid 4.4V Rising Edge
+ Interrupt Enable */
+#define UDCOTGICR_IEVV44F (1 << 6) /* OTG Vbus Valid 4.4V Falling Edge
+ Interrupt Enable */
+#define UDCOTGICR_IESVR (1 << 5) /* OTG Session Valid Rising Edge
+ Interrupt Enable */
+#define UDCOTGICR_IESVF (1 << 4) /* OTG Session Valid Falling Edge
+ Interrupt Enable */
+#define UDCOTGICR_IESDR (1 << 3) /* OTG A-Device SRP Detect Rising
+ Edge Interrupt Enable */
+#define UDCOTGICR_IESDF (1 << 2) /* OTG A-Device SRP Detect Falling
+ Edge Interrupt Enable */
+#define UDCOTGICR_IEIDR (1 << 1) /* OTG ID Change Rising Edge
+ Interrupt Enable */
+#define UDCOTGICR_IEIDF (1 << 0) /* OTG ID Change Falling Edge
+ Interrupt Enable */
+
+/* Host Port 2 field bits */
+#define UP2OCR_CPVEN (1 << 0) /* Charge Pump Vbus Enable */
+#define UP2OCR_CPVPE (1 << 1) /* Charge Pump Vbus Pulse Enable */
+ /* Transceiver enablers */
+#define UP2OCR_DPPDE (1 << 2) /* D+ Pull Down Enable */
+#define UP2OCR_DMPDE (1 << 3) /* D- Pull Down Enable */
+#define UP2OCR_DPPUE (1 << 4) /* D+ Pull Up Enable */
+#define UP2OCR_DMPUE (1 << 5) /* D- Pull Up Enable */
+#define UP2OCR_DPPUBE (1 << 6) /* D+ Pull Up Bypass Enable */
+#define UP2OCR_DMPUBE (1 << 7) /* D- Pull Up Bypass Enable */
+#define UP2OCR_EXSP (1 << 8) /* External Transceiver Speed Control */
+#define UP2OCR_EXSUS (1 << 9) /* External Transceiver Speed Enable */
+#define UP2OCR_IDON (1 << 10) /* OTG ID Read Enable */
+#define UP2OCR_HXS (1 << 16) /* Transceiver Output Select */
+#define UP2OCR_HXOE (1 << 17) /* Transceiver Output Enable */
+#define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */
+
+#define UDCCSR0_ACM (1 << 9) /* Ack Control Mode */
+#define UDCCSR0_AREN (1 << 8) /* Ack Response Enable */
+#define UDCCSR0_SA (1 << 7) /* Setup Active */
+#define UDCCSR0_RNE (1 << 6) /* Receive FIFO Not Empty */
+#define UDCCSR0_FST (1 << 5) /* Force Stall */
+#define UDCCSR0_SST (1 << 4) /* Sent Stall */
+#define UDCCSR0_DME (1 << 3) /* DMA Enable */
+#define UDCCSR0_FTF (1 << 2) /* Flush Transmit FIFO */
+#define UDCCSR0_IPR (1 << 1) /* IN Packet Ready */
+#define UDCCSR0_OPC (1 << 0) /* OUT Packet Complete */
+
+#define UDCCSR_DPE (1 << 9) /* Data Packet Error */
+#define UDCCSR_FEF (1 << 8) /* Flush Endpoint FIFO */
+#define UDCCSR_SP (1 << 7) /* Short Packet Control/Status */
+#define UDCCSR_BNE (1 << 6) /* Buffer Not Empty (IN endpoints) */
+#define UDCCSR_BNF (1 << 6) /* Buffer Not Full (OUT endpoints) */
+#define UDCCSR_FST (1 << 5) /* Force STALL */
+#define UDCCSR_SST (1 << 4) /* Sent STALL */
+#define UDCCSR_DME (1 << 3) /* DMA Enable */
+#define UDCCSR_TRN (1 << 2) /* Tx/Rx NAK */
+#define UDCCSR_PC (1 << 1) /* Packet Complete */
+#define UDCCSR_FS (1 << 0) /* FIFO needs service */
+
+#define UDCCONR_CN (0x03 << 25) /* Configuration Number */
+#define UDCCONR_CN_S 25
+#define UDCCONR_IN (0x07 << 22) /* Interface Number */
+#define UDCCONR_IN_S 22
+#define UDCCONR_AISN (0x07 << 19) /* Alternate Interface Number */
+#define UDCCONR_AISN_S 19
+#define UDCCONR_EN (0x0f << 15) /* Endpoint Number */
+#define UDCCONR_EN_S 15
+#define UDCCONR_ET (0x03 << 13) /* Endpoint Type: */
+#define UDCCONR_ET_S 13
+#define UDCCONR_ET_INT (0x03 << 13) /* Interrupt */
+#define UDCCONR_ET_BULK (0x02 << 13) /* Bulk */
+#define UDCCONR_ET_ISO (0x01 << 13) /* Isochronous */
+#define UDCCONR_ET_NU (0x00 << 13) /* Not used */
+#define UDCCONR_ED (1 << 12) /* Endpoint Direction */
+#define UDCCONR_MPS (0x3ff << 2) /* Maximum Packet Size */
+#define UDCCONR_MPS_S 2
+#define UDCCONR_DE (1 << 1) /* Double Buffering Enable */
+#define UDCCONR_EE (1 << 0) /* Endpoint Enable */
+
+#define UDCCR_MASK_BITS (UDCCR_OEN | UDCCR_SMAC | UDCCR_UDR | UDCCR_UDE)
+#define UDCCSR_WR_MASK (UDCCSR_DME | UDCCSR_FST)
+#define UDC_FNR_MASK (0x7ff)
+#define UDC_BCR_MASK (0x3ff)
+
+/*
+ * UDCCR = UDC Endpoint Configuration Registers
+ * UDCCSR = UDC Control/Status Register for this EP
+ * UDCBCR = UDC Byte Count Remaining (contents of OUT fifo)
+ * UDCDR = UDC Endpoint Data Register (the fifo)
+ */
+#define ofs_UDCCR(ep) (UDCCRn(ep->idx))
+#define ofs_UDCCSR(ep) (UDCCSRn(ep->idx))
+#define ofs_UDCBCR(ep) (UDCBCRn(ep->idx))
+#define ofs_UDCDR(ep) (UDCDRn(ep->idx))
+
+/* Register access macros */
+#define udc_ep_readl(ep, reg) \
+ __raw_readl((ep)->dev->regs + ofs_##reg(ep))
+#define udc_ep_writel(ep, reg, value) \
+ __raw_writel((value), ep->dev->regs + ofs_##reg(ep))
+#define udc_ep_readb(ep, reg) \
+ __raw_readb((ep)->dev->regs + ofs_##reg(ep))
+#define udc_ep_writeb(ep, reg, value) \
+ __raw_writeb((value), ep->dev->regs + ofs_##reg(ep))
+#define udc_readl(dev, reg) \
+ __raw_readl((dev)->regs + (reg))
+#define udc_writel(udc, reg, value) \
+ __raw_writel((value), (udc)->regs + (reg))
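+
+/*
+ * Expansion example (illustrative): the token pasting above turns
+ *
+ *	udc_ep_readl(ep, UDCCSR)
+ * into
+ *	__raw_readl((ep)->dev->regs + UDCCSRn(ep->idx))
+ *
+ * i.e. a read of this endpoint's control/status register at offset
+ * 0x0100 + (idx << 2).
+ */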
+
+#define UDCCSR_MASK (UDCCSR_FST | UDCCSR_DME)
+#define UDCCISR0_EP_MASK ~0
+#define UDCCISR1_EP_MASK 0xffff
+#define UDCCSR0_CTRL_REQ_MASK (UDCCSR0_OPC | UDCCSR0_SA | UDCCSR0_RNE)
+
+#define EPIDX(ep) (ep->idx)
+#define EPADDR(ep) (ep->addr)
+#define EPXFERTYPE(ep) (ep->type)
+#define EPNAME(ep) (ep->name)
+#define is_ep0(ep) (!ep->idx)
+#define EPXFERTYPE_is_ISO(ep) (EPXFERTYPE(ep) == USB_ENDPOINT_XFER_ISOC)
+
+/*
+ * Endpoint definitions
+ *
+ * Once enabled, pxa endpoint configuration is frozen, and cannot change
+ * unless a reset happens or the udc is disabled.
+ * Therefore, we must define all the potential pxa endpoint definitions needed
+ * for all gadgets and set them up before the udc is enabled.
+ *
+ * As the architecture chosen is fully static, meaning the pxa endpoint
+ * configurations are set up once and for all, we must provide a way to match
+ * one usb endpoint (usb_ep) to several pxa endpoints. The reason is that gadget
+ * layer autoconf doesn't choose the usb_ep endpoint on (config, interface, alt)
+ * criteria, while the pxa architecture requires that.
+ *
+ * The solution is to define several pxa endpoints matching one usb_ep. Ex:
+ * - "ep1-in" matches pxa endpoint EPA (which is an IN ep at addr 1, when
+ * the udc talks on (config=3, interface=0, alt=0)
+ * - "ep1-in" matches pxa endpoint EPB (which is an IN ep at addr 1, when
+ * the udc talks on (config=3, interface=0, alt=1)
+ * - "ep1-in" matches pxa endpoint EPC (which is an IN ep at addr 1, when
+ * the udc talks on (config=2, interface=0, alt=0)
+ *
+ * We'll define the pxa endpoint by its index (EPA => idx=1, EPB => idx=2, ...)
+ */
+
+/*
+ * Endpoint definition helpers
+ */
+#define USB_EP_DEF(addr, bname, dir, type, maxpkt, ctype, cdir) \
+{ .usb_ep = { .name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, \
+ .caps = USB_EP_CAPS(ctype, cdir), }, \
+ .desc = { .bEndpointAddress = addr | (dir ? USB_DIR_IN : 0), \
+ .bmAttributes = USB_ENDPOINT_XFER_ ## type, \
+ .wMaxPacketSize = maxpkt, }, \
+ .dev = &memory \
+}
+#define USB_EP_BULK(addr, bname, dir, cdir) \
+ USB_EP_DEF(addr, bname, dir, BULK, BULK_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_BULK, cdir)
+#define USB_EP_ISO(addr, bname, dir, cdir) \
+ USB_EP_DEF(addr, bname, dir, ISOC, ISO_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_ISO, cdir)
+#define USB_EP_INT(addr, bname, dir, cdir) \
+ USB_EP_DEF(addr, bname, dir, INT, INT_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_INT, cdir)
+#define USB_EP_IN_BULK(n) USB_EP_BULK(n, "ep" #n "in-bulk", 1, \
+ USB_EP_CAPS_DIR_IN)
+#define USB_EP_OUT_BULK(n) USB_EP_BULK(n, "ep" #n "out-bulk", 0, \
+ USB_EP_CAPS_DIR_OUT)
+#define USB_EP_IN_ISO(n) USB_EP_ISO(n, "ep" #n "in-iso", 1, \
+ USB_EP_CAPS_DIR_IN)
+#define USB_EP_OUT_ISO(n) USB_EP_ISO(n, "ep" #n "out-iso", 0, \
+ USB_EP_CAPS_DIR_OUT)
+#define USB_EP_IN_INT(n) USB_EP_INT(n, "ep" #n "in-int", 1, \
+ USB_EP_CAPS_DIR_IN)
+#define USB_EP_CTRL USB_EP_DEF(0, "ep0", 0, CONTROL, EP0_FIFO_SIZE, \
+ USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)
+
+#define PXA_EP_DEF(_idx, _addr, dir, _type, maxpkt, _config, iface, altset) \
+{ \
+ .dev = &memory, \
+ .name = "ep" #_idx, \
+ .idx = _idx, .enabled = 0, \
+ .dir_in = dir, .addr = _addr, \
+ .config = _config, .interface = iface, .alternate = altset, \
+ .type = _type, .fifo_size = maxpkt, \
+}
+#define PXA_EP_BULK(_idx, addr, dir, config, iface, alt) \
+ PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE, \
+ config, iface, alt)
+#define PXA_EP_ISO(_idx, addr, dir, config, iface, alt) \
+ PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE, \
+ config, iface, alt)
+#define PXA_EP_INT(_idx, addr, dir, config, iface, alt) \
+ PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE, \
+ config, iface, alt)
+#define PXA_EP_IN_BULK(i, adr, c, f, a) PXA_EP_BULK(i, adr, 1, c, f, a)
+#define PXA_EP_OUT_BULK(i, adr, c, f, a) PXA_EP_BULK(i, adr, 0, c, f, a)
+#define PXA_EP_IN_ISO(i, adr, c, f, a) PXA_EP_ISO(i, adr, 1, c, f, a)
+#define PXA_EP_OUT_ISO(i, adr, c, f, a) PXA_EP_ISO(i, adr, 0, c, f, a)
+#define PXA_EP_IN_INT(i, adr, c, f, a) PXA_EP_INT(i, adr, 1, c, f, a)
+#define PXA_EP_CTRL PXA_EP_DEF(0, 0, 0, 0, EP0_FIFO_SIZE, 0, 0, 0)
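+
+/*
+ * Expansion example (illustrative): the table entry
+ *
+ *	PXA_EP_OUT_BULK(1, 1, 3, 0, 0)
+ *
+ * defines pxa endpoint EPA as .idx = 1, .addr = 1, .dir_in = 0, bulk type,
+ * with a 64 byte fifo (BULK_FIFO_SIZE), active only while the host has
+ * selected (config=3, interface=0, alt=0).
+ */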
+
+struct pxa27x_udc;
+
+struct stats {
+ unsigned long in_ops;
+ unsigned long out_ops;
+ unsigned long in_bytes;
+ unsigned long out_bytes;
+ unsigned long irqs;
+};
+
+/**
+ * struct udc_usb_ep - container of each usb_ep structure
+ * @usb_ep: usb endpoint
+ * @desc: usb descriptor, especially type and address
+ * @dev: udc managing this endpoint
+ * @pxa_ep: matching pxa_ep (cache of find_pxa_ep() call)
+ */
+struct udc_usb_ep {
+ struct usb_ep usb_ep;
+ struct usb_endpoint_descriptor desc;
+ struct pxa_udc *dev;
+ struct pxa_ep *pxa_ep;
+};
+
+/**
+ * struct pxa_ep - pxa endpoint
+ * @dev: udc device
+ * @queue: requests queue
+ * @lock: lock to pxa_ep data (queues and stats)
+ * @enabled: true when endpoint enabled (not stopped by gadget layer)
+ * @in_handle_ep: number of recursions of handle_ep() function
+ * Prevents deadlocks or infinite recursions of the following types:
+ * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
+ * or
+ * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
+ * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
+ * @name: endpoint name (for trace/debug purpose)
+ * @dir_in: 1 if IN endpoint, 0 if OUT endpoint
+ * @addr: usb endpoint number
+ * @config: configuration in which this endpoint is active
+ * @interface: interface in which this endpoint is active
+ * @alternate: altsetting in which this endpoint is active
+ * @fifo_size: max packet size in the endpoint fifo
+ * @type: endpoint type (bulk, iso, int, ...)
+ * @udccsr_value: save register of UDCCSR0 for suspend/resume
+ * @udccr_value: save register of UDCCR for suspend/resume
+ * @stats: endpoint statistics
+ *
+ * The *PROBLEM* is that pxa's endpoint configuration scheme is both misdesigned
+ * (cares about config/interface/altsetting, thus placing needless limits on
+ * device capability) and full of implementation bugs forcing it to be set up
+ * for use more or less like a pxa255.
+ *
+ * As the pxa_ep are defined statically, we must provide in advance every
+ * pxa_ep any gadget may need when working with this udc driver.
+ */
+struct pxa_ep {
+ struct pxa_udc *dev;
+
+ struct list_head queue;
+ spinlock_t lock; /* Protects this structure */
+ /* (queues, stats) */
+ unsigned enabled:1;
+ unsigned in_handle_ep:1;
+
+ unsigned idx:5;
+ char *name;
+
+ /*
+ * Specific pxa endpoint data, needed for hardware initialization
+ */
+ unsigned dir_in:1;
+ unsigned addr:4;
+ unsigned config:2;
+ unsigned interface:3;
+ unsigned alternate:3;
+ unsigned fifo_size;
+ unsigned type;
+
+#ifdef CONFIG_PM
+ u32 udccsr_value;
+ u32 udccr_value;
+#endif
+ struct stats stats;
+};
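+
+/*
+ * Sketch of the guard @in_handle_ep enables (assumed shape; the real
+ * handle_ep() lives in pxa27x_udc.c):
+ *
+ *   static void handle_ep(struct pxa_ep *ep)
+ *   {
+ *           if (ep->in_handle_ep)
+ *                   return;      <- breaks the recursion chain
+ *           ep->in_handle_ep = 1;
+ *           ... complete requests; req.complete() may re-enter through
+ *               pxa_ep_queue() -> handle_ep(), which now returns early ...
+ *           ep->in_handle_ep = 0;
+ *   }
+ */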
+
+/**
+ * struct pxa27x_request - container of each usb_request structure
+ * @req: usb request
+ * @udc_usb_ep: usb endpoint the request was submitted on
+ * @in_use: sanity check that the request is not already queued on a pxa_ep
+ * @queue: linked list of requests, linked on pxa_ep->queue
+ */
+struct pxa27x_request {
+ struct usb_request req;
+ struct udc_usb_ep *udc_usb_ep;
+ unsigned in_use:1;
+ struct list_head queue;
+};
+
+enum ep0_state {
+ WAIT_FOR_SETUP,
+ SETUP_STAGE,
+ IN_DATA_STAGE,
+ OUT_DATA_STAGE,
+ IN_STATUS_STAGE,
+ OUT_STATUS_STAGE,
+ STALL,
+ WAIT_ACK_SET_CONF_INTERF
+};
+
+static char *ep0_state_name[] = {
+ "WAIT_FOR_SETUP", "SETUP_STAGE", "IN_DATA_STAGE", "OUT_DATA_STAGE",
+ "IN_STATUS_STAGE", "OUT_STATUS_STAGE", "STALL",
+ "WAIT_ACK_SET_CONF_INTERF"
+};
+#define EP0_STNAME(udc) ep0_state_name[(udc)->ep0state]
+
+#define EP0_FIFO_SIZE 16U
+#define BULK_FIFO_SIZE 64U
+#define ISO_FIFO_SIZE 256U
+#define INT_FIFO_SIZE 16U
+
+struct udc_stats {
+ unsigned long irqs_reset;
+ unsigned long irqs_suspend;
+ unsigned long irqs_resume;
+ unsigned long irqs_reconfig;
+};
+
+#define NR_USB_ENDPOINTS (1 + 5) /* ep0 + ep1in-bulk + .. + ep3in-iso */
+#define NR_PXA_ENDPOINTS (1 + 14) /* ep0 + epA + epB + .. + epX */
+
+/**
+ * struct pxa_udc - udc structure
+ * @regs: mapped IO space
+ * @irq: udc irq
+ * @clk: udc clock
+ * @gadget: udc gadget structure
+ * @driver: bound gadget (zero, g_ether, g_mass_storage, ...)
+ * @dev: device
+ * @udc_command: machine specific function to activate D+ pullup
+ * @gpiod: gpio descriptor of gpio for D+ pullup (or NULL if none)
+ * @transceiver: external transceiver to handle vbus sense and D+ pullup
+ * @ep0state: control endpoint state machine state
+ * @stats: statistics on udc usage
+ * @udc_usb_ep: array of usb endpoints offered by the gadget
+ * @pxa_ep: array of pxa available endpoints
+ * @enabled: UDC was enabled by a previous udc_enable()
+ * @pullup_on: if pullup resistor connected to D+ pin
+ * @pullup_resume: if pullup resistor should be connected to D+ pin on resume
+ * @vbus_sensed: if vbus presence was sensed on the last vbus session event
+ * @config: UDC active configuration
+ * @last_interface: UDC interface of the last SET_INTERFACE host request
+ * @last_alternate: UDC altsetting of the last SET_INTERFACE host request
+ * @udccsr0: save of udccsr0 in case of suspend
+ */
+struct pxa_udc {
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct device *dev;
+ void (*udc_command)(int);
+ struct gpio_desc *gpiod;
+ struct usb_phy *transceiver;
+
+ enum ep0_state ep0state;
+ struct udc_stats stats;
+
+ struct udc_usb_ep udc_usb_ep[NR_USB_ENDPOINTS];
+ struct pxa_ep pxa_ep[NR_PXA_ENDPOINTS];
+
+ unsigned enabled:1;
+ unsigned pullup_on:1;
+ unsigned pullup_resume:1;
+ unsigned vbus_sensed:1;
+ unsigned config:2;
+ unsigned last_interface:3;
+ unsigned last_alternate:3;
+
+#ifdef CONFIG_PM
+ unsigned udccsr0;
+#endif
+};
+#define to_pxa(g) (container_of((g), struct pxa_udc, gadget))
+
+static inline struct pxa_udc *to_gadget_udc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct pxa_udc, gadget);
+}
+
+/*
+ * Debugging/message support
+ */
+#define ep_dbg(ep, fmt, arg...) \
+ dev_dbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_vdbg(ep, fmt, arg...) \
+ dev_vdbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_err(ep, fmt, arg...) \
+ dev_err(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_info(ep, fmt, arg...) \
+ dev_info(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+#define ep_warn(ep, fmt, arg...) \
+ dev_warn(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
+
+#endif /* __LINUX_USB_GADGET_PXA27X_H */
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
new file mode 100644
index 000000000..38e4d6b50
--- /dev/null
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -0,0 +1,1980 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A66597 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "r8a66597-udc.h"
+
+#define DRIVER_VERSION "2011-09-26"
+
+static const char udc_name[] = "r8a66597_udc";
+static const char *r8a66597_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
+ "ep8", "ep9",
+};
+
+static void init_controller(struct r8a66597 *r8a66597);
+static void disable_controller(struct r8a66597 *r8a66597);
+static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
+static void irq_packet_write(struct r8a66597_ep *ep,
+ struct r8a66597_request *req);
+static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags);
+
+static void transfer_complete(struct r8a66597_ep *ep,
+ struct r8a66597_request *req, int status);
+
+/*-------------------------------------------------------------------------*/
+static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
+{
+ return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
+}
+
+static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, INTENB0);
+ r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
+ INTENB0);
+ r8a66597_bset(r8a66597, (1 << pipenum), reg);
+ r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, INTENB0);
+ r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
+ INTENB0);
+ r8a66597_bclr(r8a66597, (1 << pipenum), reg);
+ r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
+{
+ r8a66597_bset(r8a66597, CTRE, INTENB0);
+ r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);
+
+ r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
+}
+
+static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+ r8a66597_bclr(r8a66597, CTRE, INTENB0);
+ r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
+ r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+
+ r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock(&r8a66597->lock);
+ r8a66597->driver->disconnect(&r8a66597->gadget);
+ spin_lock(&r8a66597->lock);
+
+ disable_controller(r8a66597);
+ init_controller(r8a66597);
+ r8a66597_bset(r8a66597, VBSE, INTENB0);
+ INIT_LIST_HEAD(&r8a66597->ep[0].queue);
+}
+
+static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ u16 pid = 0;
+ unsigned long offset;
+
+ if (pipenum == 0) {
+ pid = r8a66597_read(r8a66597, DCPCTR) & PID;
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ pid = r8a66597_read(r8a66597, offset) & PID;
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
+
+ return pid;
+}
+
+static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 pid)
+{
+ unsigned long offset;
+
+ if (pipenum == 0) {
+ r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ r8a66597_mdfy(r8a66597, pid, PID, offset);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
+}
+
+static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ control_reg_set_pid(r8a66597, pipenum, PID_BUF);
+}
+
+static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ control_reg_set_pid(r8a66597, pipenum, PID_NAK);
+}
+
+static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ control_reg_set_pid(r8a66597, pipenum, PID_STALL);
+}
+
+static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ u16 ret = 0;
+ unsigned long offset;
+
+ if (pipenum == 0) {
+ ret = r8a66597_read(r8a66597, DCPCTR);
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ ret = r8a66597_read(r8a66597, offset);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
+
+ return ret;
+}
+
+static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ unsigned long offset;
+
+ pipe_stop(r8a66597, pipenum);
+
+ if (pipenum == 0) {
+ r8a66597_bset(r8a66597, SQCLR, DCPCTR);
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ r8a66597_bset(r8a66597, SQCLR, offset);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
+ pipenum);
+ }
+}
+
+static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ unsigned long offset;
+
+ pipe_stop(r8a66597, pipenum);
+
+ if (pipenum == 0) {
+ r8a66597_bset(r8a66597, SQSET, DCPCTR);
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ r8a66597_bset(r8a66597, SQSET, offset);
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "unexpect pipe num(%d)\n", pipenum);
+ }
+}
+
+static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ unsigned long offset;
+
+ if (pipenum == 0) {
+ return r8a66597_read(r8a66597, DCPCTR) & SQMON;
+ } else if (pipenum < R8A66597_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ return r8a66597_read(r8a66597, offset) & SQMON;
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "unexpect pipe num(%d)\n", pipenum);
+ }
+
+ return 0;
+}
+
+static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ return control_reg_sqmon(r8a66597, pipenum);
+}
+
+static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 toggle)
+{
+ if (toggle)
+ control_reg_sqset(r8a66597, pipenum);
+ else
+ control_reg_sqclr(r8a66597, pipenum);
+}
+
+static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ u16 tmp;
+ int size;
+
+ if (pipenum == 0) {
+ tmp = r8a66597_read(r8a66597, DCPCFG);
+ if ((tmp & R8A66597_CNTMD) != 0)
+ size = 256;
+ else {
+ tmp = r8a66597_read(r8a66597, DCPMAXP);
+ size = tmp & MAXP;
+ }
+ } else {
+ r8a66597_write(r8a66597, pipenum, PIPESEL);
+ tmp = r8a66597_read(r8a66597, PIPECFG);
+ if ((tmp & R8A66597_CNTMD) != 0) {
+ tmp = r8a66597_read(r8a66597, PIPEBUF);
+ size = ((tmp >> 10) + 1) * 64;
+ } else {
+ tmp = r8a66597_read(r8a66597, PIPEMAXP);
+ size = tmp & MXPS;
+ }
+ }
+
+ return size;
+}
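+
+/*
+ * Worked example for the continuous-mode branch above (values assumed for
+ * illustration): with PIPEBUF programmed to buf_bsize = 7, as
+ * pipe_buffer_setting() below does for bulk pipes, the reported buffer
+ * size is ((7) + 1) * 64 = 512 bytes.
+ */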
+
+static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
+{
+ if (r8a66597->pdata->on_chip)
+ return MBW_32;
+ else
+ return MBW_16;
+}
+
+static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 isel, u16 fifosel)
+{
+ u16 tmp, mask, loop;
+ int i = 0;
+
+ if (!pipenum) {
+ mask = ISEL | CURPIPE;
+ loop = isel;
+ } else {
+ mask = CURPIPE;
+ loop = pipenum;
+ }
+ r8a66597_mdfy(r8a66597, loop, mask, fifosel);
+
+ do {
+ tmp = r8a66597_read(r8a66597, fifosel);
+ if (i++ > 1000000) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "r8a66597: register%x, loop %x "
+ "is timeout\n", fifosel, loop);
+ break;
+ }
+ ndelay(1);
+ } while ((tmp & mask) != loop);
+}
+
+static void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
+
+ if (ep->use_dma)
+ r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
+
+ r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
+
+ ndelay(450);
+
+ if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
+ r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
+ else
+ r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
+
+ if (ep->use_dma)
+ r8a66597_bset(r8a66597, DREQE, ep->fifosel);
+}
+
+static int pipe_buffer_setting(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe_info *info)
+{
+ u16 bufnum = 0, buf_bsize = 0;
+ u16 pipecfg = 0;
+
+ if (info->pipe == 0)
+ return -EINVAL;
+
+ r8a66597_write(r8a66597, info->pipe, PIPESEL);
+
+ if (info->dir_in)
+ pipecfg |= R8A66597_DIR;
+ pipecfg |= info->type;
+ pipecfg |= info->epnum;
+ switch (info->type) {
+ case R8A66597_INT:
+ bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
+ buf_bsize = 0;
+ break;
+ case R8A66597_BULK:
+ /* isochronous pipes may be used as bulk pipes */
+ if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
+ bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
+ else
+ bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
+
+ bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
+ buf_bsize = 7;
+ pipecfg |= R8A66597_DBLB;
+ if (!info->dir_in)
+ pipecfg |= R8A66597_SHTNAK;
+ break;
+ case R8A66597_ISO:
+ bufnum = R8A66597_BASE_BUFNUM +
+ (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
+ buf_bsize = 7;
+ break;
+ }
+
+ if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
+ pr_err("r8a66597 pipe memory is insufficient\n");
+ return -ENOMEM;
+ }
+
+ r8a66597_write(r8a66597, pipecfg, PIPECFG);
+ r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
+ r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
+ if (info->interval)
+ info->interval--;
+ r8a66597_write(r8a66597, info->interval, PIPEPERI);
+
+ return 0;
+}
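+
+/*
+ * Buffer map implied by the allocation above (a worked reading of the
+ * constants in r8a66597-udc.h, not extra logic): the first bulk pipe
+ * (pipe 3) gets bufnum = R8A66597_BASE_BUFNUM + 0 * 16 = 6, the second
+ * (pipe 4) bufnum = 22, and so on in 16-block steps; with buf_bsize = 7
+ * each claim is 8 * 64 = 512 bytes of double-buffered FIFO memory, and the
+ * (bufnum + 16) >= R8A66597_MAX_BUFNUM check rejects a claim that would
+ * run past the 0x4F-block pipe RAM.
+ */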
+
+static void pipe_buffer_release(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe_info *info)
+{
+ if (info->pipe == 0)
+ return;
+
+ if (is_bulk_pipe(info->pipe)) {
+ r8a66597->bulk--;
+ } else if (is_interrupt_pipe(info->pipe)) {
+ r8a66597->interrupt--;
+ } else if (is_isoc_pipe(info->pipe)) {
+ r8a66597->isochronous--;
+ if (info->type == R8A66597_BULK)
+ r8a66597->bulk--;
+ } else {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "ep_release: unexpect pipenum (%d)\n", info->pipe);
+ }
+}
+
+static void pipe_initialize(struct r8a66597_ep *ep)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+
+ r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);
+
+ r8a66597_write(r8a66597, ACLRM, ep->pipectr);
+ r8a66597_write(r8a66597, 0, ep->pipectr);
+ r8a66597_write(r8a66597, SQCLR, ep->pipectr);
+ if (ep->use_dma) {
+ r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);
+
+ ndelay(450);
+
+ r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
+ }
+}
+
+static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
+ struct r8a66597_ep *ep,
+ const struct usb_endpoint_descriptor *desc,
+ u16 pipenum, int dma)
+{
+ ep->use_dma = 0;
+ ep->fifoaddr = CFIFO;
+ ep->fifosel = CFIFOSEL;
+ ep->fifoctr = CFIFOCTR;
+
+ ep->pipectr = get_pipectr_addr(pipenum);
+ if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
+ ep->pipetre = get_pipetre_addr(pipenum);
+ ep->pipetrn = get_pipetrn_addr(pipenum);
+ } else {
+ ep->pipetre = 0;
+ ep->pipetrn = 0;
+ }
+ ep->pipenum = pipenum;
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
+ r8a66597->pipenum2ep[pipenum] = ep;
+ r8a66597->epaddr2ep[usb_endpoint_num(desc)] = ep;
+ INIT_LIST_HEAD(&ep->queue);
+}
+
+static void r8a66597_ep_release(struct r8a66597_ep *ep)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+ u16 pipenum = ep->pipenum;
+
+ if (pipenum == 0)
+ return;
+
+ if (ep->use_dma)
+ r8a66597->num_dma--;
+ ep->pipenum = 0;
+ ep->busy = 0;
+ ep->use_dma = 0;
+}
+
+static int alloc_pipe_config(struct r8a66597_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+ struct r8a66597_pipe_info info;
+ int dma = 0;
+ unsigned char *counter;
+ int ret;
+
+ ep->ep.desc = desc;
+
+ if (ep->pipenum) /* already allocated pipe */
+ return 0;
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
+ if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "bulk pipe is insufficient\n");
+ return -ENODEV;
+ } else {
+ info.pipe = R8A66597_BASE_PIPENUM_ISOC
+ + r8a66597->isochronous;
+ counter = &r8a66597->isochronous;
+ }
+ } else {
+ info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
+ counter = &r8a66597->bulk;
+ }
+ info.type = R8A66597_BULK;
+ dma = 1;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "interrupt pipe is insufficient\n");
+ return -ENODEV;
+ }
+ info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
+ info.type = R8A66597_INT;
+ counter = &r8a66597->interrupt;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "isochronous pipe is insufficient\n");
+ return -ENODEV;
+ }
+ info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
+ info.type = R8A66597_ISO;
+ counter = &r8a66597->isochronous;
+ break;
+ default:
+ dev_err(r8a66597_to_dev(r8a66597), "unexpect xfer type\n");
+ return -EINVAL;
+ }
+ ep->type = info.type;
+
+ info.epnum = usb_endpoint_num(desc);
+ info.maxpacket = usb_endpoint_maxp(desc);
+ info.interval = desc->bInterval;
+ if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ info.dir_in = 1;
+ else
+ info.dir_in = 0;
+
+ ret = pipe_buffer_setting(r8a66597, &info);
+ if (ret < 0) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "pipe_buffer_setting fail\n");
+ return ret;
+ }
+
+ (*counter)++;
+ if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
+ r8a66597->bulk++;
+
+ r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
+ pipe_initialize(ep);
+
+ return 0;
+}
+
+static int free_pipe_config(struct r8a66597_ep *ep)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+ struct r8a66597_pipe_info info;
+
+ info.pipe = ep->pipenum;
+ info.type = ep->type;
+ pipe_buffer_release(r8a66597, &info);
+ r8a66597_ep_release(ep);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ enable_irq_ready(r8a66597, pipenum);
+ enable_irq_nrdy(r8a66597, pipenum);
+}
+
+static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ disable_irq_ready(r8a66597, pipenum);
+ disable_irq_nrdy(r8a66597, pipenum);
+}
+
+/* if ccpl is set, the gadget driver's complete function is not called */
+static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
+{
+ r8a66597->ep[0].internal_ccpl = ccpl;
+ pipe_start(r8a66597, 0);
+ r8a66597_bset(r8a66597, CCPL, DCPCTR);
+}
+
+static void start_ep0_write(struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+
+ pipe_change(r8a66597, ep->pipenum);
+ r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
+ r8a66597_write(r8a66597, BCLR, ep->fifoctr);
+ if (req->req.length == 0) {
+ r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
+ pipe_start(r8a66597, 0);
+ transfer_complete(ep, req, 0);
+ } else {
+ r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
+ irq_ep0_write(ep, req);
+ }
+}
+
+static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
+ u16 fifosel)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
+ if (tmp == pipenum)
+ r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
+}
+
+static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
+ int enable)
+{
+ struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
+ u16 tmp, toggle;
+
+ /* check current BFRE bit */
+ r8a66597_write(r8a66597, pipenum, PIPESEL);
+ tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
+ if ((enable && tmp) || (!enable && !tmp))
+ return;
+
+ /* change BFRE bit */
+ pipe_stop(r8a66597, pipenum);
+ disable_fifosel(r8a66597, pipenum, CFIFOSEL);
+ disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
+ disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
+
+ toggle = save_usb_toggle(r8a66597, pipenum);
+
+ r8a66597_write(r8a66597, pipenum, PIPESEL);
+ if (enable)
+ r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
+ else
+ r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
+
+ /* initialize for internal BFRE flag */
+ r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
+ r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
+
+ restore_usb_toggle(r8a66597, pipenum, toggle);
+}
+
+static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
+ struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ struct r8a66597_dma *dma;
+
+ if (!r8a66597_is_sudmac(r8a66597))
+ return -ENODEV;
+
+ /* Check transfer type */
+ if (!is_bulk_pipe(ep->pipenum))
+ return -EIO;
+
+ if (r8a66597->dma.used)
+ return -EBUSY;
+
+ /* set SUDMAC parameters */
+ dma = &r8a66597->dma;
+ dma->used = 1;
+ if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) {
+ dma->dir = 1;
+ } else {
+ dma->dir = 0;
+ change_bfre_mode(r8a66597, ep->pipenum, 1);
+ }
+
+ /* set r8a66597_ep parameters */
+ ep->use_dma = 1;
+ ep->dma = dma;
+ ep->fifoaddr = D0FIFO;
+ ep->fifosel = D0FIFOSEL;
+ ep->fifoctr = D0FIFOCTR;
+
+ /* dma mapping */
+ return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
+}
+
+static void sudmac_free_channel(struct r8a66597 *r8a66597,
+ struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ if (!r8a66597_is_sudmac(r8a66597))
+ return;
+
+ usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);
+
+ r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
+ r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
+
+ ep->dma->used = 0;
+ ep->use_dma = 0;
+ ep->fifoaddr = CFIFO;
+ ep->fifosel = CFIFOSEL;
+ ep->fifoctr = CFIFOCTR;
+}
+
+static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ BUG_ON(req->req.length == 0);
+
+ r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
+ r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
+ r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
+ r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
+
+ r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
+}
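+
+/*
+ * Summary of the SUDMAC kick sequence above: configure channel 0 (LBA_WAIT
+ * into CH0CFG), program the base address (CH0BA) and byte count (CH0BBC),
+ * unmask the transfer-end interrupt (CH0ENDE in DINTCTRL), then set DEN in
+ * CH0DEN to start the transfer; completion is picked up by
+ * r8a66597_sudmac_irq() and finished in sudmac_finish().
+ */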
+
+static void start_packet_write(struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+ u16 tmp;
+
+ pipe_change(r8a66597, ep->pipenum);
+ disable_irq_empty(r8a66597, ep->pipenum);
+ pipe_start(r8a66597, ep->pipenum);
+
+ if (req->req.length == 0) {
+ transfer_complete(ep, req, 0);
+ } else {
+ r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
+ if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
+ /* PIO mode */
+ pipe_change(r8a66597, ep->pipenum);
+ disable_irq_empty(r8a66597, ep->pipenum);
+ pipe_start(r8a66597, ep->pipenum);
+ tmp = r8a66597_read(r8a66597, ep->fifoctr);
+ if (unlikely((tmp & FRDY) == 0))
+ pipe_irq_enable(r8a66597, ep->pipenum);
+ else
+ irq_packet_write(ep, req);
+ } else {
+ /* DMA mode */
+ pipe_change(r8a66597, ep->pipenum);
+ disable_irq_nrdy(r8a66597, ep->pipenum);
+ pipe_start(r8a66597, ep->pipenum);
+ enable_irq_nrdy(r8a66597, ep->pipenum);
+ sudmac_start(r8a66597, ep, req);
+ }
+ }
+}
+
+static void start_packet_read(struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+ u16 pipenum = ep->pipenum;
+
+ if (ep->pipenum == 0) {
+ r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
+ r8a66597_write(r8a66597, BCLR, ep->fifoctr);
+ pipe_start(r8a66597, pipenum);
+ pipe_irq_enable(r8a66597, pipenum);
+ } else {
+ pipe_stop(r8a66597, pipenum);
+ if (ep->pipetre) {
+ enable_irq_nrdy(r8a66597, pipenum);
+ r8a66597_write(r8a66597, TRCLR, ep->pipetre);
+ r8a66597_write(r8a66597,
+ DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
+ ep->pipetrn);
+ r8a66597_bset(r8a66597, TRENB, ep->pipetre);
+ }
+
+ if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
+ /* PIO mode */
+ change_bfre_mode(r8a66597, ep->pipenum, 0);
+ pipe_start(r8a66597, pipenum); /* trigger once */
+ pipe_irq_enable(r8a66597, pipenum);
+ } else {
+ pipe_change(r8a66597, pipenum);
+ sudmac_start(r8a66597, ep, req);
+ pipe_start(r8a66597, pipenum); /* trigger once */
+ }
+ }
+}
+
+static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
+{
+ if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
+ start_packet_write(ep, req);
+ else
+ start_packet_read(ep, req);
+}
+
+static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
+{
+ u16 ctsq;
+
+ ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;
+
+ switch (ctsq) {
+ case CS_RDDS:
+ start_ep0_write(ep, req);
+ break;
+ case CS_WRDS:
+ start_packet_read(ep, req);
+ break;
+
+ case CS_WRND:
+ control_end(ep->r8a66597, 0);
+ break;
+ default:
+ dev_err(r8a66597_to_dev(ep->r8a66597),
+ "start_ep0: unexpect ctsq(%x)\n", ctsq);
+ break;
+ }
+}
+
+static void init_controller(struct r8a66597 *r8a66597)
+{
+ u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+ u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+ u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
+
+ if (r8a66597->pdata->on_chip) {
+ if (r8a66597->pdata->buswait)
+ r8a66597_write(r8a66597, r8a66597->pdata->buswait,
+ SYSCFG1);
+ else
+ r8a66597_write(r8a66597, 0x0f, SYSCFG1);
+ r8a66597_bset(r8a66597, HSE, SYSCFG0);
+
+ r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+ r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+ r8a66597_bset(r8a66597, USBE, SYSCFG0);
+
+ r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+
+ r8a66597_bset(r8a66597, irq_sense, INTENB1);
+ r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
+ DMA0CFG);
+ } else {
+ r8a66597_bset(r8a66597, vif | endian, PINCFG);
+ r8a66597_bset(r8a66597, HSE, SYSCFG0); /* High spd */
+ r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
+ XTAL, SYSCFG0);
+
+ r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+ r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+ r8a66597_bset(r8a66597, USBE, SYSCFG0);
+
+ r8a66597_bset(r8a66597, XCKE, SYSCFG0);
+
+ mdelay(3);
+
+ r8a66597_bset(r8a66597, PLLC, SYSCFG0);
+
+ mdelay(1);
+
+ r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+
+ r8a66597_bset(r8a66597, irq_sense, INTENB1);
+ r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
+ DMA0CFG);
+ }
+}
+
+static void disable_controller(struct r8a66597 *r8a66597)
+{
+ if (r8a66597->pdata->on_chip) {
+ r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+ r8a66597_bclr(r8a66597, UTST, TESTMODE);
+
+ /* disable interrupts */
+ r8a66597_write(r8a66597, 0, INTENB0);
+ r8a66597_write(r8a66597, 0, INTENB1);
+ r8a66597_write(r8a66597, 0, BRDYENB);
+ r8a66597_write(r8a66597, 0, BEMPENB);
+ r8a66597_write(r8a66597, 0, NRDYENB);
+
+ /* clear status */
+ r8a66597_write(r8a66597, 0, BRDYSTS);
+ r8a66597_write(r8a66597, 0, NRDYSTS);
+ r8a66597_write(r8a66597, 0, BEMPSTS);
+
+ r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+ r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
+
+ } else {
+ r8a66597_bclr(r8a66597, UTST, TESTMODE);
+ r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
+ udelay(1);
+ r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
+ udelay(1);
+ udelay(1);
+ r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
+ }
+}
+
+static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
+{
+ u16 tmp;
+
+ if (!r8a66597->pdata->on_chip) {
+ tmp = r8a66597_read(r8a66597, SYSCFG0);
+ if (!(tmp & XCKE))
+ r8a66597_bset(r8a66597, XCKE, SYSCFG0);
+ }
+}
+
+static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
+{
+ return list_entry(ep->queue.next, struct r8a66597_request, queue);
+}
+
+/*-------------------------------------------------------------------------*/
+static void transfer_complete(struct r8a66597_ep *ep,
+ struct r8a66597_request *req, int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+ int restart = 0;
+
+ if (unlikely(ep->pipenum == 0)) {
+ if (ep->internal_ccpl) {
+ ep->internal_ccpl = 0;
+ return;
+ }
+ }
+
+ list_del_init(&req->queue);
+ if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
+ req->req.status = -ESHUTDOWN;
+ else
+ req->req.status = status;
+
+ if (!list_empty(&ep->queue))
+ restart = 1;
+
+ if (ep->use_dma)
+ sudmac_free_channel(ep->r8a66597, ep, req);
+
+ spin_unlock(&ep->r8a66597->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->r8a66597->lock);
+
+ if (restart) {
+ req = get_request_from_ep(ep);
+ if (ep->ep.desc)
+ start_packet(ep, req);
+ }
+}
+
+static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
+{
+ int i;
+ u16 tmp;
+ unsigned bufsize;
+ size_t size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+
+ pipe_change(r8a66597, pipenum);
+ r8a66597_bset(r8a66597, ISEL, ep->fifosel);
+
+ i = 0;
+ do {
+ tmp = r8a66597_read(r8a66597, ep->fifoctr);
+ if (i++ > 100000) {
+ dev_err(r8a66597_to_dev(r8a66597),
+ "pipe0 is busy. maybe cpu i/o bus "
+ "conflict. please power off this controller.");
+ return;
+ }
+ ndelay(1);
+ } while ((tmp & FRDY) == 0);
+
+ /* prepare parameters */
+ bufsize = get_buffer_size(r8a66597, pipenum);
+ buf = req->req.buf + req->req.actual;
+ size = min(bufsize, req->req.length - req->req.actual);
+
+ /* write fifo */
+ if (req->req.buf) {
+ if (size > 0)
+ r8a66597_write_fifo(r8a66597, ep, buf, size);
+ if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
+ r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
+ }
+
+ /* update parameters */
+ req->req.actual += size;
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ disable_irq_ready(r8a66597, pipenum);
+ disable_irq_empty(r8a66597, pipenum);
+ } else {
+ disable_irq_ready(r8a66597, pipenum);
+ enable_irq_empty(r8a66597, pipenum);
+ }
+ pipe_start(r8a66597, pipenum);
+}
+
+static void irq_packet_write(struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ u16 tmp;
+ unsigned bufsize;
+ size_t size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+
+ pipe_change(r8a66597, pipenum);
+ tmp = r8a66597_read(r8a66597, ep->fifoctr);
+ if (unlikely((tmp & FRDY) == 0)) {
+ pipe_stop(r8a66597, pipenum);
+ pipe_irq_disable(r8a66597, pipenum);
+ dev_err(r8a66597_to_dev(r8a66597),
+ "write fifo not ready. pipnum=%d\n", pipenum);
+ return;
+ }
+
+ /* prepare parameters */
+ bufsize = get_buffer_size(r8a66597, pipenum);
+ buf = req->req.buf + req->req.actual;
+ size = min(bufsize, req->req.length - req->req.actual);
+
+ /* write fifo */
+ if (req->req.buf) {
+ r8a66597_write_fifo(r8a66597, ep, buf, size);
+ if ((size == 0)
+ || ((size % ep->ep.maxpacket) != 0)
+ || ((bufsize != ep->ep.maxpacket)
+ && (bufsize > size)))
+ r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
+ }
+
+ /* update parameters */
+ req->req.actual += size;
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ disable_irq_ready(r8a66597, pipenum);
+ enable_irq_empty(r8a66597, pipenum);
+ } else {
+ disable_irq_empty(r8a66597, pipenum);
+ pipe_irq_enable(r8a66597, pipenum);
+ }
+}
+
+static void irq_packet_read(struct r8a66597_ep *ep,
+ struct r8a66597_request *req)
+{
+ u16 tmp;
+ int rcv_len, bufsize, req_len;
+ int size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct r8a66597 *r8a66597 = ep->r8a66597;
+ int finish = 0;
+
+ pipe_change(r8a66597, pipenum);
+ tmp = r8a66597_read(r8a66597, ep->fifoctr);
+ if (unlikely((tmp & FRDY) == 0)) {
+ req->req.status = -EPIPE;
+ pipe_stop(r8a66597, pipenum);
+ pipe_irq_disable(r8a66597, pipenum);
+ dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
+ return;
+ }
+
+ /* prepare parameters */
+ rcv_len = tmp & DTLN;
+ bufsize = get_buffer_size(r8a66597, pipenum);
+
+ buf = req->req.buf + req->req.actual;
+ req_len = req->req.length - req->req.actual;
+ if (rcv_len < bufsize)
+ size = min(rcv_len, req_len);
+ else
+ size = min(bufsize, req_len);
+
+ /* update parameters */
+ req->req.actual += size;
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ pipe_stop(r8a66597, pipenum);
+ pipe_irq_disable(r8a66597, pipenum);
+ finish = 1;
+ }
+
+ /* read fifo */
+ if (req->req.buf) {
+ if (size == 0)
+ r8a66597_write(r8a66597, BCLR, ep->fifoctr);
+ else
+ r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
+
+ }
+
+ if ((ep->pipenum != 0) && finish)
+ transfer_complete(ep, req, 0);
+}
+
+static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
+{
+ u16 check;
+ u16 pipenum;
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+
+ if ((status & BRDY0) && (enb & BRDY0)) {
+ r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
+ r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);
+
+ ep = &r8a66597->ep[0];
+ req = get_request_from_ep(ep);
+ irq_packet_read(ep, req);
+ } else {
+ for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if ((status & check) && (enb & check)) {
+ r8a66597_write(r8a66597, ~check, BRDYSTS);
+ ep = r8a66597->pipenum2ep[pipenum];
+ req = get_request_from_ep(ep);
+ if (ep->ep.desc->bEndpointAddress & USB_DIR_IN)
+ irq_packet_write(ep, req);
+ else
+ irq_packet_read(ep, req);
+ }
+ }
+ }
+}
+
+static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
+{
+ u16 tmp;
+ u16 check;
+ u16 pipenum;
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+
+ if ((status & BEMP0) && (enb & BEMP0)) {
+ r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
+
+ ep = &r8a66597->ep[0];
+ req = get_request_from_ep(ep);
+ irq_ep0_write(ep, req);
+ } else {
+ for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if ((status & check) && (enb & check)) {
+ r8a66597_write(r8a66597, ~check, BEMPSTS);
+ tmp = control_reg_get(r8a66597, pipenum);
+ if ((tmp & INBUFM) == 0) {
+ disable_irq_empty(r8a66597, pipenum);
+ pipe_irq_disable(r8a66597, pipenum);
+ pipe_stop(r8a66597, pipenum);
+ ep = r8a66597->pipenum2ep[pipenum];
+ req = get_request_from_ep(ep);
+ if (!list_empty(&ep->queue))
+ transfer_complete(ep, req, 0);
+ }
+ }
+ }
+ }
+}
+
+static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+ struct r8a66597_ep *ep;
+ u16 pid;
+ u16 status = 0;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = r8a66597->device_status;
+ break;
+ case USB_RECIP_INTERFACE:
+ status = 0;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pid = control_reg_get_pid(r8a66597, ep->pipenum);
+ if (pid == PID_STALL)
+ status = 1 << USB_ENDPOINT_HALT;
+ else
+ status = 0;
+ break;
+ default:
+ pipe_stall(r8a66597, 0);
+ return; /* exit */
+ }
+
+ r8a66597->ep0_data = cpu_to_le16(status);
+ r8a66597->ep0_req->buf = &r8a66597->ep0_data;
+ r8a66597->ep0_req->length = 2;
+ /* AV: what happens if we get called again before that gets through? */
+ spin_unlock(&r8a66597->lock);
+ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
+ spin_lock(&r8a66597->lock);
+}
+
+static void clear_feature(struct r8a66597 *r8a66597,
+ struct usb_ctrlrequest *ctrl)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ control_end(r8a66597, 1);
+ break;
+ case USB_RECIP_INTERFACE:
+ control_end(r8a66597, 1);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ if (!ep->wedge) {
+ pipe_stop(r8a66597, ep->pipenum);
+ control_reg_sqclr(r8a66597, ep->pipenum);
+ spin_unlock(&r8a66597->lock);
+ usb_ep_clear_halt(&ep->ep);
+ spin_lock(&r8a66597->lock);
+ }
+
+ control_end(r8a66597, 1);
+
+ req = get_request_from_ep(ep);
+ if (ep->busy) {
+ ep->busy = 0;
+ if (list_empty(&ep->queue))
+ break;
+ start_packet(ep, req);
+ } else if (!list_empty(&ep->queue))
+ pipe_start(r8a66597, ep->pipenum);
+ }
+ break;
+ default:
+ pipe_stall(r8a66597, 0);
+ break;
+ }
+}
+
+static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+{
+ u16 tmp;
+ int timeout = 3000;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ control_end(r8a66597, 1);
+ /* Wait for the completion of status stage */
+ do {
+ tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
+ udelay(1);
+ } while (tmp != CS_IDST && timeout-- > 0);
+
+ if (tmp == CS_IDST)
+ r8a66597_bset(r8a66597,
+ le16_to_cpu(ctrl->wIndex) >> 8,
+ TESTMODE);
+ break;
+ default:
+ pipe_stall(r8a66597, 0);
+ break;
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ control_end(r8a66597, 1);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ struct r8a66597_ep *ep;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pipe_stall(r8a66597, ep->pipenum);
+
+ control_end(r8a66597, 1);
+ }
+ break;
+ default:
+ pipe_stall(r8a66597, 0);
+ break;
+ }
+}
+
+/* if return value is true, call class driver's setup() */
+static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
+{
+ u16 *p = (u16 *)ctrl;
+ unsigned long offset = USBREQ;
+ int i, ret = 0;
+
+ /* read fifo */
+ r8a66597_write(r8a66597, ~VALID, INTSTS0);
+
+ for (i = 0; i < 4; i++)
+ p[i] = r8a66597_read(r8a66597, offset + i*2);
+
+ /* check request */
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ get_status(r8a66597, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ clear_feature(r8a66597, ctrl);
+ break;
+ case USB_REQ_SET_FEATURE:
+ set_feature(r8a66597, ctrl);
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ } else
+ ret = 1;
+ return ret;
+}
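+
+/*
+ * Note on the fifo read above: the 8-byte SETUP packet is mirrored in four
+ * 16-bit registers starting at USBREQ, so the loop's four reads at offsets
+ * 0, 2, 4 and 6 reassemble the usb_ctrlrequest in place.
+ */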
+
+static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
+{
+ u16 speed = get_usb_speed(r8a66597);
+
+ switch (speed) {
+ case HSMODE:
+ r8a66597->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case FSMODE:
+ r8a66597->gadget.speed = USB_SPEED_FULL;
+ break;
+ default:
+ r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
+ dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
+ }
+}
+
+static void irq_device_state(struct r8a66597 *r8a66597)
+{
+ u16 dvsq;
+
+ dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
+ r8a66597_write(r8a66597, ~DVST, INTSTS0);
+
+ if (dvsq == DS_DFLT) {
+ /* bus reset */
+ spin_unlock(&r8a66597->lock);
+ usb_gadget_udc_reset(&r8a66597->gadget, r8a66597->driver);
+ spin_lock(&r8a66597->lock);
+ r8a66597_update_usb_speed(r8a66597);
+ }
+ if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
+ r8a66597_update_usb_speed(r8a66597);
+ if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
+ && r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
+ r8a66597_update_usb_speed(r8a66597);
+
+ r8a66597->old_dvsq = dvsq;
+}
+
+static void irq_control_stage(struct r8a66597 *r8a66597)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+ struct usb_ctrlrequest ctrl;
+ u16 ctsq;
+
+ ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
+ r8a66597_write(r8a66597, ~CTRT, INTSTS0);
+
+ switch (ctsq) {
+ case CS_IDST: {
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+ ep = &r8a66597->ep[0];
+ req = get_request_from_ep(ep);
+ transfer_complete(ep, req, 0);
+ }
+ break;
+
+ case CS_RDDS:
+ case CS_WRDS:
+ case CS_WRND:
+ if (setup_packet(r8a66597, &ctrl)) {
+ spin_unlock(&r8a66597->lock);
+ if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
+ < 0)
+ pipe_stall(r8a66597, 0);
+ spin_lock(&r8a66597->lock);
+ }
+ break;
+ case CS_RDSS:
+ case CS_WRSS:
+ control_end(r8a66597, 0);
+ break;
+ default:
+ dev_err(r8a66597_to_dev(r8a66597),
+ "ctrl_stage: unexpect ctsq(%x)\n", ctsq);
+ break;
+ }
+}
+
+static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
+{
+ u16 pipenum;
+ struct r8a66597_request *req;
+ u32 len;
+ int i = 0;
+
+ pipenum = ep->pipenum;
+ pipe_change(r8a66597, pipenum);
+
+ while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
+ udelay(1);
+ if (unlikely(i++ >= 10000)) { /* timeout = 10 msec */
+ dev_err(r8a66597_to_dev(r8a66597),
+ "%s: FRDY was not set (%d)\n",
+ __func__, pipenum);
+ return;
+ }
+ }
+
+ r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
+ req = get_request_from_ep(ep);
+
+ /* prepare parameters */
+ len = r8a66597_sudmac_read(r8a66597, CH0CBC);
+ req->req.actual += len;
+
+ /* clear */
+ r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (len % ep->ep.maxpacket)) {
+ if (ep->dma->dir) {
+ disable_irq_ready(r8a66597, pipenum);
+ enable_irq_empty(r8a66597, pipenum);
+ } else {
+ /* Clear the interrupt flag for next transfer */
+ r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
+ transfer_complete(ep, req, 0);
+ }
+ }
+}
+
+static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
+{
+ u32 irqsts;
+ struct r8a66597_ep *ep;
+ u16 pipenum;
+
+ irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
+ if (irqsts & CH0ENDS) {
+ r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
+ pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
+ ep = r8a66597->pipenum2ep[pipenum];
+ sudmac_finish(r8a66597, ep);
+ }
+}
+
+static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
+{
+ struct r8a66597 *r8a66597 = _r8a66597;
+ u16 intsts0;
+ u16 intenb0;
+ u16 savepipe;
+ u16 mask0;
+
+ spin_lock(&r8a66597->lock);
+
+ if (r8a66597_is_sudmac(r8a66597))
+ r8a66597_sudmac_irq(r8a66597);
+
+ intsts0 = r8a66597_read(r8a66597, INTSTS0);
+ intenb0 = r8a66597_read(r8a66597, INTENB0);
+
+ savepipe = r8a66597_read(r8a66597, CFIFOSEL);
+
+ mask0 = intsts0 & intenb0;
+ if (mask0) {
+ u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
+ u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
+ u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
+ u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
+
+ if (mask0 & VBINT) {
+ r8a66597_write(r8a66597, 0xffff & ~VBINT,
+ INTSTS0);
+ r8a66597_start_xclock(r8a66597);
+
+ /* start vbus sampling */
+ r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
+ & VBSTS;
+ r8a66597->scount = R8A66597_MAX_SAMPLING;
+
+ mod_timer(&r8a66597->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ if (intsts0 & DVSQ)
+ irq_device_state(r8a66597);
+
+ if ((intsts0 & BRDY) && (intenb0 & BRDYE)
+ && (brdysts & brdyenb))
+ irq_pipe_ready(r8a66597, brdysts, brdyenb);
+ if ((intsts0 & BEMP) && (intenb0 & BEMPE)
+ && (bempsts & bempenb))
+ irq_pipe_empty(r8a66597, bempsts, bempenb);
+
+ if (intsts0 & CTRT)
+ irq_control_stage(r8a66597);
+ }
+
+ r8a66597_write(r8a66597, savepipe, CFIFOSEL);
+
+ spin_unlock(&r8a66597->lock);
+ return IRQ_HANDLED;
+}
+
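+/*
+ * Vbus debounce: the timer below samples VBSTS every 50 ms and only takes
+ * the connect/disconnect decision after R8A66597_MAX_SAMPLING identical
+ * samples in a row; any change restarts the count, so a bouncing vbus line
+ * cannot trigger spurious connect/disconnect events.
+ */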
+static void r8a66597_timer(struct timer_list *t)
+{
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
+ unsigned long flags;
+ u16 tmp;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ tmp = r8a66597_read(r8a66597, SYSCFG0);
+ if (r8a66597->scount > 0) {
+ tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
+ if (tmp == r8a66597->old_vbus) {
+ r8a66597->scount--;
+ if (r8a66597->scount == 0) {
+ if (tmp == VBSTS)
+ r8a66597_usb_connect(r8a66597);
+ else
+ r8a66597_usb_disconnect(r8a66597);
+ } else {
+ mod_timer(&r8a66597->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ } else {
+ r8a66597->scount = R8A66597_MAX_SAMPLING;
+ r8a66597->old_vbus = tmp;
+ mod_timer(&r8a66597->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ }
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+static int r8a66597_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct r8a66597_ep *ep;
+
+ ep = container_of(_ep, struct r8a66597_ep, ep);
+ return alloc_pipe_config(ep, desc);
+}
+
+static int r8a66597_disable(struct usb_ep *_ep)
+{
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct r8a66597_ep, ep);
+ BUG_ON(!ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = get_request_from_ep(ep);
+ spin_lock_irqsave(&ep->r8a66597->lock, flags);
+ transfer_complete(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+ }
+
+ pipe_irq_disable(ep->r8a66597, ep->pipenum);
+ return free_pipe_config(ep);
+}
+
+static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct r8a66597_request *req;
+
+ req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct r8a66597_request *req;
+
+ req = container_of(_req, struct r8a66597_request, req);
+ kfree(req);
+}
+
+static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+ unsigned long flags;
+ int request = 0;
+
+ ep = container_of(_ep, struct r8a66597_ep, ep);
+ req = container_of(_req, struct r8a66597_request, req);
+
+ if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&ep->r8a66597->lock, flags);
+
+ if (list_empty(&ep->queue))
+ request = 1;
+
+ list_add_tail(&req->queue, &ep->queue);
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+
+ if (ep->ep.desc == NULL) /* control */
+ start_ep0(ep, req);
+ else {
+ if (request && !ep->busy)
+ start_packet(ep, req);
+ }
+
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+
+ return 0;
+}
+
+static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct r8a66597_ep *ep;
+ struct r8a66597_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct r8a66597_ep, ep);
+ req = container_of(_req, struct r8a66597_request, req);
+
+ spin_lock_irqsave(&ep->r8a66597->lock, flags);
+ if (!list_empty(&ep->queue))
+ transfer_complete(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+
+ return 0;
+}
+
+static int r8a66597_set_halt(struct usb_ep *_ep, int value)
+{
+ struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ep->r8a66597->lock, flags);
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ } else if (value) {
+ ep->busy = 1;
+ pipe_stall(ep->r8a66597, ep->pipenum);
+ } else {
+ ep->busy = 0;
+ ep->wedge = 0;
+ pipe_stop(ep->r8a66597, ep->pipenum);
+ }
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+ return ret;
+}
+
+static int r8a66597_set_wedge(struct usb_ep *_ep)
+{
+ struct r8a66597_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct r8a66597_ep, ep);
+
+ if (!ep || !ep->ep.desc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->r8a66597->lock, flags);
+ ep->wedge = 1;
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+
+ return usb_ep_set_halt(_ep);
+}
+
+static void r8a66597_fifo_flush(struct usb_ep *_ep)
+{
+ struct r8a66597_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct r8a66597_ep, ep);
+ spin_lock_irqsave(&ep->r8a66597->lock, flags);
+ if (list_empty(&ep->queue) && !ep->busy) {
+ pipe_stop(ep->r8a66597, ep->pipenum);
+ r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
+ r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
+ r8a66597_write(ep->r8a66597, 0, ep->pipectr);
+ }
+ spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
+}
+
+static const struct usb_ep_ops r8a66597_ep_ops = {
+ .enable = r8a66597_enable,
+ .disable = r8a66597_disable,
+
+ .alloc_request = r8a66597_alloc_request,
+ .free_request = r8a66597_free_request,
+
+ .queue = r8a66597_queue,
+ .dequeue = r8a66597_dequeue,
+
+ .set_halt = r8a66597_set_halt,
+ .set_wedge = r8a66597_set_wedge,
+ .fifo_flush = r8a66597_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+static int r8a66597_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+
+ if (!driver
+ || driver->max_speed < USB_SPEED_HIGH
+ || !driver->setup)
+ return -EINVAL;
+ if (!r8a66597)
+ return -ENODEV;
+
+ /* hook up the driver */
+ r8a66597->driver = driver;
+
+ init_controller(r8a66597);
+ r8a66597_bset(r8a66597, VBSE, INTENB0);
+ if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
+ r8a66597_start_xclock(r8a66597);
+ /* start vbus sampling */
+ r8a66597->old_vbus = r8a66597_read(r8a66597,
+ INTSTS0) & VBSTS;
+ r8a66597->scount = R8A66597_MAX_SAMPLING;
+ mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
+ }
+
+ return 0;
+}
+
+static int r8a66597_stop(struct usb_gadget *gadget)
+{
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ r8a66597_bclr(r8a66597, VBSE, INTENB0);
+ disable_controller(r8a66597);
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+ r8a66597->driver = NULL;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static int r8a66597_get_frame(struct usb_gadget *_gadget)
+{
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
+ return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
+}
+
+static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ if (is_on)
+ r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
+ else
+ r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+ return 0;
+}
+
+static int r8a66597_set_selfpowered(struct usb_gadget *gadget, int is_self)
+{
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+
+ gadget->is_selfpowered = (is_self != 0);
+ if (is_self)
+ r8a66597->device_status |= 1 << USB_DEVICE_SELF_POWERED;
+ else
+ r8a66597->device_status &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops r8a66597_gadget_ops = {
+ .get_frame = r8a66597_get_frame,
+ .udc_start = r8a66597_start,
+ .udc_stop = r8a66597_stop,
+ .pullup = r8a66597_pullup,
+ .set_selfpowered = r8a66597_set_selfpowered,
+};
+
+static int r8a66597_remove(struct platform_device *pdev)
+{
+ struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&r8a66597->gadget);
+ del_timer_sync(&r8a66597->timer);
+ r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
+
+ if (r8a66597->pdata->on_chip)
+ clk_disable_unprepare(r8a66597->clk);
+
+ return 0;
+}
+
+static void nop_completion(struct usb_ep *ep, struct usb_request *r)
+{
+}
+
+static int r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
+ struct platform_device *pdev)
+{
+ r8a66597->sudmac_reg =
+ devm_platform_ioremap_resource_byname(pdev, "sudmac");
+ return PTR_ERR_OR_ZERO(r8a66597->sudmac_reg);
+}
+
+static int r8a66597_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ char clk_name[8];
+ struct resource *ires;
+ int irq;
+ void __iomem *reg = NULL;
+ struct r8a66597 *r8a66597 = NULL;
+ int ret = 0;
+ int i;
+ unsigned long irq_trigger;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires)
+ return -EINVAL;
+ irq = ires->start;
+ irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+
+ if (irq < 0) {
+ dev_err(dev, "platform_get_irq error.\n");
+ return -ENODEV;
+ }
+
+ /* initialize udc */
+ r8a66597 = devm_kzalloc(dev, sizeof(struct r8a66597), GFP_KERNEL);
+ if (r8a66597 == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&r8a66597->lock);
+ platform_set_drvdata(pdev, r8a66597);
+ r8a66597->pdata = dev_get_platdata(dev);
+ r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
+
+ r8a66597->gadget.ops = &r8a66597_gadget_ops;
+ r8a66597->gadget.max_speed = USB_SPEED_HIGH;
+ r8a66597->gadget.name = udc_name;
+
+ timer_setup(&r8a66597->timer, r8a66597_timer, 0);
+ r8a66597->reg = reg;
+
+ if (r8a66597->pdata->on_chip) {
+ snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
+ r8a66597->clk = devm_clk_get(dev, clk_name);
+ if (IS_ERR(r8a66597->clk)) {
+ dev_err(dev, "cannot get clock \"%s\"\n", clk_name);
+ return PTR_ERR(r8a66597->clk);
+ }
+ clk_prepare_enable(r8a66597->clk);
+ }
+
+ if (r8a66597->pdata->sudmac) {
+ ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
+ if (ret < 0)
+ goto clean_up2;
+ }
+
+ disable_controller(r8a66597); /* make sure controller is disabled */
+
+ ret = devm_request_irq(dev, irq, r8a66597_irq, IRQF_SHARED,
+ udc_name, r8a66597);
+ if (ret < 0) {
+ dev_err(dev, "request_irq error (%d)\n", ret);
+ goto clean_up2;
+ }
+
+ INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
+ r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
+ INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
+ for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
+ struct r8a66597_ep *ep = &r8a66597->ep[i];
+
+ if (i != 0) {
+ INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
+ list_add_tail(&r8a66597->ep[i].ep.ep_list,
+ &r8a66597->gadget.ep_list);
+ }
+ ep->r8a66597 = r8a66597;
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.name = r8a66597_ep_name[i];
+ ep->ep.ops = &r8a66597_ep_ops;
+ usb_ep_set_maxpacket_limit(&ep->ep, 512);
+
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+ }
+ usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
+ r8a66597->ep[0].pipenum = 0;
+ r8a66597->ep[0].fifoaddr = CFIFO;
+ r8a66597->ep[0].fifosel = CFIFOSEL;
+ r8a66597->ep[0].fifoctr = CFIFOCTR;
+ r8a66597->ep[0].pipectr = get_pipectr_addr(0);
+ r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
+ r8a66597->epaddr2ep[0] = &r8a66597->ep[0];
+
+ r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
+ GFP_KERNEL);
+ if (r8a66597->ep0_req == NULL) {
+ ret = -ENOMEM;
+ goto clean_up2;
+ }
+ r8a66597->ep0_req->complete = nop_completion;
+
+ ret = usb_add_gadget_udc(dev, &r8a66597->gadget);
+ if (ret)
+ goto clean_up2;
+
+ dev_info(dev, "version %s\n", DRIVER_VERSION);
+ return 0;
+
+clean_up2:
+ if (r8a66597->pdata->on_chip)
+ clk_disable_unprepare(r8a66597->clk);
+
+ if (r8a66597->ep0_req)
+ r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver r8a66597_driver = {
+ .remove = r8a66597_remove,
+ .driver = {
+ .name = udc_name,
+ },
+};
+
+module_platform_driver_probe(r8a66597_driver, r8a66597_probe);
+
+MODULE_DESCRIPTION("R8A66597 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:r8a66597_udc");
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.h b/drivers/usb/gadget/udc/r8a66597-udc.h
new file mode 100644
index 000000000..9a115caba
--- /dev/null
+++ b/drivers/usb/gadget/udc/r8a66597-udc.h
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A66597 UDC
+ *
+ * Copyright (C) 2007-2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ */
+
+#ifndef __R8A66597_H__
+#define __R8A66597_H__
+
+#include <linux/clk.h>
+#include <linux/usb/r8a66597.h>
+
+#define R8A66597_MAX_SAMPLING 10
+
+#define R8A66597_MAX_NUM_PIPE 8
+#define R8A66597_MAX_NUM_BULK 3
+#define R8A66597_MAX_NUM_ISOC 2
+#define R8A66597_MAX_NUM_INT 2
+
+#define R8A66597_BASE_PIPENUM_BULK 3
+#define R8A66597_BASE_PIPENUM_ISOC 1
+#define R8A66597_BASE_PIPENUM_INT 6
+
+#define R8A66597_BASE_BUFNUM 6
+#define R8A66597_MAX_BUFNUM 0x4F
+
+#define is_bulk_pipe(pipenum) \
+ ((pipenum >= R8A66597_BASE_PIPENUM_BULK) && \
+ (pipenum < (R8A66597_BASE_PIPENUM_BULK + R8A66597_MAX_NUM_BULK)))
+#define is_interrupt_pipe(pipenum) \
+ ((pipenum >= R8A66597_BASE_PIPENUM_INT) && \
+ (pipenum < (R8A66597_BASE_PIPENUM_INT + R8A66597_MAX_NUM_INT)))
+#define is_isoc_pipe(pipenum) \
+ ((pipenum >= R8A66597_BASE_PIPENUM_ISOC) && \
+ (pipenum < (R8A66597_BASE_PIPENUM_ISOC + R8A66597_MAX_NUM_ISOC)))
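+
+/*
+ * Resulting fixed pipe map (R8A66597_MAX_NUM_PIPE = 8, including pipe 0):
+ *   pipe 0    : control (CFIFO)
+ *   pipes 1-2 : isochronous
+ *   pipes 3-5 : bulk
+ *   pipes 6-7 : interrupt
+ */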
+
+#define r8a66597_is_sudmac(r8a66597) (r8a66597->pdata->sudmac)
+struct r8a66597_pipe_info {
+ u16 pipe;
+ u16 epnum;
+ u16 maxpacket;
+ u16 type;
+ u16 interval;
+ u16 dir_in;
+};
+
+struct r8a66597_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct r8a66597_ep {
+ struct usb_ep ep;
+ struct r8a66597 *r8a66597;
+ struct r8a66597_dma *dma;
+
+ struct list_head queue;
+ unsigned busy:1;
+ unsigned wedge:1;
+ unsigned internal_ccpl:1; /* control pipe only */
+
+ /* these members are valid only after r8a66597_enable() */
+ unsigned use_dma:1;
+ u16 pipenum;
+ u16 type;
+
+ /* register address */
+ unsigned char fifoaddr;
+ unsigned char fifosel;
+ unsigned char fifoctr;
+ unsigned char pipectr;
+ unsigned char pipetre;
+ unsigned char pipetrn;
+};
+
+struct r8a66597_dma {
+ unsigned used:1;
+ unsigned dir:1; /* 1 = IN(write), 0 = OUT(read) */
+};
+
+struct r8a66597 {
+ spinlock_t lock;
+ void __iomem *reg;
+ void __iomem *sudmac_reg;
+
+ struct clk *clk;
+ struct r8a66597_platdata *pdata;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct r8a66597_ep ep[R8A66597_MAX_NUM_PIPE];
+ struct r8a66597_ep *pipenum2ep[R8A66597_MAX_NUM_PIPE];
+ struct r8a66597_ep *epaddr2ep[16];
+ struct r8a66597_dma dma;
+
+ struct timer_list timer;
+ struct usb_request *ep0_req; /* for internal request */
+ u16 ep0_data; /* for internal request */
+ u16 old_vbus;
+ u16 scount;
+ u16 old_dvsq;
+ u16 device_status; /* for GET_STATUS */
+
+ /* pipe config */
+ unsigned char bulk;
+ unsigned char interrupt;
+ unsigned char isochronous;
+ unsigned char num_dma;
+
+ unsigned irq_sense_low:1;
+};
+
+#define gadget_to_r8a66597(_gadget) \
+ container_of(_gadget, struct r8a66597, gadget)
+#define r8a66597_to_gadget(r8a66597) (&r8a66597->gadget)
+#define r8a66597_to_dev(r8a66597) (r8a66597->gadget.dev.parent)
+
+static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset)
+{
+ return ioread16(r8a66597->reg + offset);
+}
+
+static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597,
+ unsigned long offset,
+ unsigned char *buf,
+ int len)
+{
+ void __iomem *fifoaddr = r8a66597->reg + offset;
+ unsigned int data = 0;
+ int i;
+
+ if (r8a66597->pdata->on_chip) {
+ /* 32-bit accesses for on_chip controllers */
+
+ /* aligned buf case */
+ if (len >= 4 && !((unsigned long)buf & 0x03)) {
+ ioread32_rep(fifoaddr, buf, len / 4);
+ buf += len & ~0x03;
+ len &= 0x03;
+ }
+
+ /* unaligned buf case */
+ for (i = 0; i < len; i++) {
+ if (!(i & 0x03))
+ data = ioread32(fifoaddr);
+
+ buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
+ }
+ } else {
+ /* 16-bit accesses for external controllers */
+
+ /* aligned buf case */
+ if (len >= 2 && !((unsigned long)buf & 0x01)) {
+ ioread16_rep(fifoaddr, buf, len / 2);
+ buf += len & ~0x01;
+ len &= 0x01;
+ }
+
+ /* unaligned buf case */
+ for (i = 0; i < len; i++) {
+ if (!(i & 0x01))
+ data = ioread16(fifoaddr);
+
+ buf[i] = (data >> ((i & 0x01) * 8)) & 0xff;
+ }
+ }
+}
+
+static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
+ unsigned long offset)
+{
+ iowrite16(val, r8a66597->reg + offset);
+}
+
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+ u16 val, u16 pat, unsigned long offset)
+{
+ u16 tmp;
+ tmp = r8a66597_read(r8a66597, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, val, 0, offset)
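+
+/*
+ * r8a66597_mdfy() clears the bits in @pat and then ORs in @val, so
+ * r8a66597_bclr() clears the given bits and r8a66597_bset() sets them, e.g.
+ *	r8a66597_bset(r8a66597, MBW_16, ep->fifosel);
+ * sets the MBW_16 bit in the pipe's FIFOSEL register.
+ */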
+
+static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
+ struct r8a66597_ep *ep,
+ unsigned char *buf,
+ int len)
+{
+ void __iomem *fifoaddr = r8a66597->reg + ep->fifoaddr;
+ int adj = 0;
+ int i;
+
+ if (r8a66597->pdata->on_chip) {
+ /* 32-bit access only if buf is 32-bit aligned */
+ if (len >= 4 && !((unsigned long)buf & 0x03)) {
+ iowrite32_rep(fifoaddr, buf, len / 4);
+ buf += len & ~0x03;
+ len &= 0x03;
+ }
+ } else {
+ /* 16-bit access only if buf is 16-bit aligned */
+ if (len >= 2 && !((unsigned long)buf & 0x01)) {
+ iowrite16_rep(fifoaddr, buf, len / 2);
+ buf += len & ~0x01;
+ len &= 0x01;
+ }
+ }
+
+ /* adjust fifo address in the little endian case */
+ if (!(r8a66597_read(r8a66597, CFIFOSEL) & BIGEND)) {
+ if (r8a66597->pdata->on_chip)
+ adj = 0x03; /* 32-bit wide */
+ else
+ adj = 0x01; /* 16-bit wide */
+ }
+
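+ /*
+ * For the remaining bytes, "adj - (i & adj)" reverses the byte order
+ * within each bus word in little-endian mode (e.g. bytes 0-3 of buf go
+ * to port offsets 3-0 on a 32-bit port).
+ */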
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bclr(r8a66597, MBW_16, ep->fifosel);
+ for (i = 0; i < len; i++)
+ iowrite8(buf[i], fifoaddr + adj - (i & adj));
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bset(r8a66597, MBW_16, ep->fifosel); /* restore 16-bit width */
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+ u16 clock = 0;
+
+ switch (pdata->xtal) {
+ case R8A66597_PLATDATA_XTAL_12MHZ:
+ clock = XTAL12;
+ break;
+ case R8A66597_PLATDATA_XTAL_24MHZ:
+ clock = XTAL24;
+ break;
+ case R8A66597_PLATDATA_XTAL_48MHZ:
+ clock = XTAL48;
+ break;
+ default:
+ pr_err("r8a66597: invalid platdata xtal setting\n");
+ break;
+ }
+
+ return clock;
+}
+
+static inline u32 r8a66597_sudmac_read(struct r8a66597 *r8a66597,
+ unsigned long offset)
+{
+ return ioread32(r8a66597->sudmac_reg + offset);
+}
+
+static inline void r8a66597_sudmac_write(struct r8a66597 *r8a66597, u32 val,
+ unsigned long offset)
+{
+ iowrite32(val, r8a66597->sudmac_reg + offset);
+}
+
+#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2)
+#define get_pipetre_addr(pipenum) (PIPE1TRE + (pipenum - 1) * 4)
+#define get_pipetrn_addr(pipenum) (PIPE1TRN + (pipenum - 1) * 4)
+
+#define enable_irq_ready(r8a66597, pipenum) \
+ enable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define disable_irq_ready(r8a66597, pipenum) \
+ disable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define enable_irq_empty(r8a66597, pipenum) \
+ enable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define disable_irq_empty(r8a66597, pipenum) \
+ disable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define enable_irq_nrdy(r8a66597, pipenum) \
+ enable_pipe_irq(r8a66597, pipenum, NRDYENB)
+#define disable_irq_nrdy(r8a66597, pipenum) \
+ disable_pipe_irq(r8a66597, pipenum, NRDYENB)
+
+#endif /* __R8A66597_H__ */
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
new file mode 100644
index 000000000..32c9e3692
--- /dev/null
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -0,0 +1,2999 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas USB3.0 Peripheral driver (USB gadget)
+ *
+ * Copyright (C) 2015-2017 Renesas Electronics Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/extcon-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+#include <linux/uaccess.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/usb/role.h>
+
+/* register definitions */
+#define USB3_AXI_INT_STA 0x008
+#define USB3_AXI_INT_ENA 0x00c
+#define USB3_DMA_INT_STA 0x010
+#define USB3_DMA_INT_ENA 0x014
+#define USB3_DMA_CH0_CON(n) (0x030 + ((n) - 1) * 0x10) /* n = 1 to 4 */
+#define USB3_DMA_CH0_PRD_ADR(n) (0x034 + ((n) - 1) * 0x10) /* n = 1 to 4 */
+#define USB3_USB_COM_CON 0x200
+#define USB3_USB20_CON 0x204
+#define USB3_USB30_CON 0x208
+#define USB3_USB_STA 0x210
+#define USB3_DRD_CON(p) ((p)->is_rzv2m ? 0x400 : 0x218)
+#define USB3_USB_INT_STA_1 0x220
+#define USB3_USB_INT_STA_2 0x224
+#define USB3_USB_INT_ENA_1 0x228
+#define USB3_USB_INT_ENA_2 0x22c
+#define USB3_STUP_DAT_0 0x230
+#define USB3_STUP_DAT_1 0x234
+#define USB3_USB_OTG_STA(p) ((p)->is_rzv2m ? 0x410 : 0x268)
+#define USB3_USB_OTG_INT_STA(p) ((p)->is_rzv2m ? 0x414 : 0x26c)
+#define USB3_USB_OTG_INT_ENA(p) ((p)->is_rzv2m ? 0x418 : 0x270)
+#define USB3_P0_MOD 0x280
+#define USB3_P0_CON 0x288
+#define USB3_P0_STA 0x28c
+#define USB3_P0_INT_STA 0x290
+#define USB3_P0_INT_ENA 0x294
+#define USB3_P0_LNG 0x2a0
+#define USB3_P0_READ 0x2a4
+#define USB3_P0_WRITE 0x2a8
+#define USB3_PIPE_COM 0x2b0
+#define USB3_PN_MOD 0x2c0
+#define USB3_PN_RAMMAP 0x2c4
+#define USB3_PN_CON 0x2c8
+#define USB3_PN_STA 0x2cc
+#define USB3_PN_INT_STA 0x2d0
+#define USB3_PN_INT_ENA 0x2d4
+#define USB3_PN_LNG 0x2e0
+#define USB3_PN_READ 0x2e4
+#define USB3_PN_WRITE 0x2e8
+#define USB3_SSIFCMD 0x340
+
+/* AXI_INT_ENA and AXI_INT_STA */
+#define AXI_INT_DMAINT BIT(31)
+#define AXI_INT_EPCINT BIT(30)
+/* For the PRD macros below, n ranges from 1 to 4 */
+#define AXI_INT_PRDEN_CLR_STA_SHIFT(n) (16 + (n) - 1)
+#define AXI_INT_PRDERR_STA_SHIFT(n) (0 + (n) - 1)
+#define AXI_INT_PRDEN_CLR_STA(n) (1 << AXI_INT_PRDEN_CLR_STA_SHIFT(n))
+#define AXI_INT_PRDERR_STA(n) (1 << AXI_INT_PRDERR_STA_SHIFT(n))
+
+/* DMA_INT_ENA and DMA_INT_STA */
+#define DMA_INT(n) BIT(n)
+
+/* DMA_CH0_CONn */
+#define DMA_CON_PIPE_DIR BIT(15) /* 1: In Transfer */
+#define DMA_CON_PIPE_NO_SHIFT 8
+#define DMA_CON_PIPE_NO_MASK GENMASK(12, DMA_CON_PIPE_NO_SHIFT)
+#define DMA_COM_PIPE_NO(n) (((n) << DMA_CON_PIPE_NO_SHIFT) & \
+ DMA_CON_PIPE_NO_MASK)
+#define DMA_CON_PRD_EN BIT(0)
+
+/* LCLKSEL */
+#define LCLKSEL_LSEL BIT(18)
+
+/* USB_COM_CON */
+#define USB_COM_CON_CONF BIT(24)
+#define USB_COM_CON_PN_WDATAIF_NL BIT(23)
+#define USB_COM_CON_PN_RDATAIF_NL BIT(22)
+#define USB_COM_CON_PN_LSTTR_PP BIT(21)
+#define USB_COM_CON_SPD_MODE BIT(17)
+#define USB_COM_CON_EP0_EN BIT(16)
+#define USB_COM_CON_DEV_ADDR_SHIFT 8
+#define USB_COM_CON_DEV_ADDR_MASK GENMASK(14, USB_COM_CON_DEV_ADDR_SHIFT)
+#define USB_COM_CON_DEV_ADDR(n) (((n) << USB_COM_CON_DEV_ADDR_SHIFT) & \
+ USB_COM_CON_DEV_ADDR_MASK)
+#define USB_COM_CON_RX_DETECTION BIT(1)
+#define USB_COM_CON_PIPE_CLR BIT(0)
+
+/* USB20_CON */
+#define USB20_CON_B2_PUE BIT(31)
+#define USB20_CON_B2_SUSPEND BIT(24)
+#define USB20_CON_B2_CONNECT BIT(17)
+#define USB20_CON_B2_TSTMOD_SHIFT 8
+#define USB20_CON_B2_TSTMOD_MASK GENMASK(10, USB20_CON_B2_TSTMOD_SHIFT)
+#define USB20_CON_B2_TSTMOD(n) (((n) << USB20_CON_B2_TSTMOD_SHIFT) & \
+ USB20_CON_B2_TSTMOD_MASK)
+#define USB20_CON_B2_TSTMOD_EN BIT(0)
+
+/* USB30_CON */
+#define USB30_CON_POW_SEL_SHIFT 24
+#define USB30_CON_POW_SEL_MASK GENMASK(26, USB30_CON_POW_SEL_SHIFT)
+#define USB30_CON_POW_SEL_IN_U3 BIT(26)
+#define USB30_CON_POW_SEL_IN_DISCON 0
+#define USB30_CON_POW_SEL_P2_TO_P0 BIT(25)
+#define USB30_CON_POW_SEL_P0_TO_P3 BIT(24)
+#define USB30_CON_POW_SEL_P0_TO_P2 0
+#define USB30_CON_B3_PLLWAKE BIT(23)
+#define USB30_CON_B3_CONNECT BIT(17)
+#define USB30_CON_B3_HOTRST_CMP BIT(1)
+
+/* USB_STA */
+#define USB_STA_SPEED_MASK (BIT(2) | BIT(1))
+#define USB_STA_SPEED_HS BIT(2)
+#define USB_STA_SPEED_FS BIT(1)
+#define USB_STA_SPEED_SS 0
+#define USB_STA_VBUS_STA BIT(0)
+
+/* DRD_CON */
+#define DRD_CON_PERI_RST BIT(31) /* rzv2m only */
+#define DRD_CON_HOST_RST BIT(30) /* rzv2m only */
+#define DRD_CON_PERI_CON BIT(24)
+#define DRD_CON_VBOUT BIT(0)
+
+/* USB_INT_ENA_1 and USB_INT_STA_1 */
+#define USB_INT_1_B3_PLLWKUP BIT(31)
+#define USB_INT_1_B3_LUPSUCS BIT(30)
+#define USB_INT_1_B3_DISABLE BIT(27)
+#define USB_INT_1_B3_WRMRST BIT(21)
+#define USB_INT_1_B3_HOTRST BIT(20)
+#define USB_INT_1_B2_USBRST BIT(12)
+#define USB_INT_1_B2_L1SPND BIT(11)
+#define USB_INT_1_B2_SPND BIT(9)
+#define USB_INT_1_B2_RSUM BIT(8)
+#define USB_INT_1_SPEED BIT(1)
+#define USB_INT_1_VBUS_CNG BIT(0)
+
+/* USB_INT_ENA_2 and USB_INT_STA_2 */
+#define USB_INT_2_PIPE(n) BIT(n)
+
+/* USB_OTG_STA, USB_OTG_INT_STA and USB_OTG_INT_ENA */
+#define USB_OTG_IDMON(p) ((p)->is_rzv2m ? BIT(0) : BIT(4))
+
+/* P0_MOD */
+#define P0_MOD_DIR BIT(6)
+
+/* P0_CON and PN_CON */
+#define PX_CON_BYTE_EN_MASK (BIT(10) | BIT(9))
+#define PX_CON_BYTE_EN_SHIFT 9
+#define PX_CON_BYTE_EN_BYTES(n) (((n) << PX_CON_BYTE_EN_SHIFT) & \
+ PX_CON_BYTE_EN_MASK)
+#define PX_CON_SEND BIT(8)
+
+/* P0_CON */
+#define P0_CON_ST_RES_MASK (BIT(27) | BIT(26))
+#define P0_CON_ST_RES_FORCE_STALL BIT(27)
+#define P0_CON_ST_RES_NORMAL BIT(26)
+#define P0_CON_ST_RES_FORCE_NRDY 0
+#define P0_CON_OT_RES_MASK (BIT(25) | BIT(24))
+#define P0_CON_OT_RES_FORCE_STALL BIT(25)
+#define P0_CON_OT_RES_NORMAL BIT(24)
+#define P0_CON_OT_RES_FORCE_NRDY 0
+#define P0_CON_IN_RES_MASK (BIT(17) | BIT(16))
+#define P0_CON_IN_RES_FORCE_STALL BIT(17)
+#define P0_CON_IN_RES_NORMAL BIT(16)
+#define P0_CON_IN_RES_FORCE_NRDY 0
+#define P0_CON_RES_WEN BIT(7)
+#define P0_CON_BCLR BIT(1)
+
+/* P0_STA and PN_STA */
+#define PX_STA_BUFSTS BIT(0)
+
+/* P0_INT_ENA and P0_INT_STA */
+#define P0_INT_STSED BIT(18)
+#define P0_INT_STSST BIT(17)
+#define P0_INT_SETUP BIT(16)
+#define P0_INT_RCVNL BIT(8)
+#define P0_INT_ERDY BIT(7)
+#define P0_INT_FLOW BIT(6)
+#define P0_INT_STALL BIT(2)
+#define P0_INT_NRDY BIT(1)
+#define P0_INT_BFRDY BIT(0)
+#define P0_INT_ALL_BITS (P0_INT_STSED | P0_INT_SETUP | P0_INT_BFRDY)
+
+/* PN_MOD */
+#define PN_MOD_DIR BIT(6)
+#define PN_MOD_TYPE_SHIFT 4
+#define PN_MOD_TYPE_MASK GENMASK(5, PN_MOD_TYPE_SHIFT)
+#define PN_MOD_TYPE(n) (((n) << PN_MOD_TYPE_SHIFT) & \
+ PN_MOD_TYPE_MASK)
+#define PN_MOD_EPNUM_MASK GENMASK(3, 0)
+#define PN_MOD_EPNUM(n) ((n) & PN_MOD_EPNUM_MASK)
+
+/* PN_RAMMAP */
+#define PN_RAMMAP_RAMAREA_SHIFT 29
+#define PN_RAMMAP_RAMAREA_MASK GENMASK(31, PN_RAMMAP_RAMAREA_SHIFT)
+#define PN_RAMMAP_RAMAREA_16KB BIT(31)
+#define PN_RAMMAP_RAMAREA_8KB (BIT(30) | BIT(29))
+#define PN_RAMMAP_RAMAREA_4KB BIT(30)
+#define PN_RAMMAP_RAMAREA_2KB BIT(29)
+#define PN_RAMMAP_RAMAREA_1KB 0
+#define PN_RAMMAP_MPKT_SHIFT 16
+#define PN_RAMMAP_MPKT_MASK GENMASK(26, PN_RAMMAP_MPKT_SHIFT)
+#define PN_RAMMAP_MPKT(n) (((n) << PN_RAMMAP_MPKT_SHIFT) & \
+ PN_RAMMAP_MPKT_MASK)
+#define PN_RAMMAP_RAMIF_SHIFT 14
+#define PN_RAMMAP_RAMIF_MASK GENMASK(15, PN_RAMMAP_RAMIF_SHIFT)
+#define PN_RAMMAP_RAMIF(n) (((n) << PN_RAMMAP_RAMIF_SHIFT) & \
+ PN_RAMMAP_RAMIF_MASK)
+#define PN_RAMMAP_BASEAD_MASK GENMASK(13, 0)
+#define PN_RAMMAP_BASEAD(offs) (((offs) >> 3) & PN_RAMMAP_BASEAD_MASK)
+#define PN_RAMMAP_DATA(area, ramif, basead) ((PN_RAMMAP_##area) | \
+ (PN_RAMMAP_RAMIF(ramif)) | \
+ (PN_RAMMAP_BASEAD(basead)))
+
+/* PN_CON */
+#define PN_CON_EN BIT(31)
+#define PN_CON_DATAIF_EN BIT(30)
+#define PN_CON_RES_MASK (BIT(17) | BIT(16))
+#define PN_CON_RES_FORCE_STALL BIT(17)
+#define PN_CON_RES_NORMAL BIT(16)
+#define PN_CON_RES_FORCE_NRDY 0
+#define PN_CON_LAST BIT(11)
+#define PN_CON_RES_WEN BIT(7)
+#define PN_CON_CLR BIT(0)
+
+/* PN_INT_STA and PN_INT_ENA */
+#define PN_INT_LSTTR BIT(4)
+#define PN_INT_BFRDY BIT(0)
+
+/* USB3_SSIFCMD */
+#define SSIFCMD_URES_U2 BIT(9)
+#define SSIFCMD_URES_U1 BIT(8)
+#define SSIFCMD_UDIR_U2 BIT(7)
+#define SSIFCMD_UDIR_U1 BIT(6)
+#define SSIFCMD_UREQ_U2 BIT(5)
+#define SSIFCMD_UREQ_U1 BIT(4)
+
+#define USB3_EP0_SS_MAX_PACKET_SIZE 512
+#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
+#define USB3_EP0_BUF_SIZE 8
+#define USB3_MAX_NUM_PIPES(p) ((p)->is_rzv2m ? 16 : 6) /* This includes PIPE 0 */
+#define USB3_WAIT_US 3
+#define USB3_DMA_NUM_SETTING_AREA 4
+/*
+ * A transfer count of "0" would be ambiguous (65536 bytes transferred, or a
+ * zero-length packet received into a 65536-byte buffer), so this driver
+ * caps the size per PRD entry at 32768 bytes.
+ */
+#define USB3_DMA_MAX_XFER_SIZE 32768
+#define USB3_DMA_PRD_SIZE 4096
+
+struct renesas_usb3;
+
+/* Physical Region Descriptor Table */
+struct renesas_usb3_prd {
+ u32 word1;
+#define USB3_PRD1_E BIT(30) /* the end of chain */
+#define USB3_PRD1_U BIT(29) /* completion of transfer */
+#define USB3_PRD1_D BIT(28) /* Error occurred */
+#define USB3_PRD1_INT BIT(27) /* Interrupt occurred */
+#define USB3_PRD1_LST BIT(26) /* Last Packet */
+#define USB3_PRD1_B_INC BIT(24)
+#define USB3_PRD1_MPS_8 0
+#define USB3_PRD1_MPS_16 BIT(21)
+#define USB3_PRD1_MPS_32 BIT(22)
+#define USB3_PRD1_MPS_64 (BIT(22) | BIT(21))
+#define USB3_PRD1_MPS_512 BIT(23)
+#define USB3_PRD1_MPS_1024 (BIT(23) | BIT(21))
+#define USB3_PRD1_MPS_RESERVED (BIT(23) | BIT(22) | BIT(21))
+#define USB3_PRD1_SIZE_MASK GENMASK(15, 0)
+
+ u32 bap;
+};
+#define USB3_DMA_NUM_PRD_ENTRIES (USB3_DMA_PRD_SIZE / \
+ sizeof(struct renesas_usb3_prd))
+#define USB3_DMA_MAX_XFER_SIZE_ALL_PRDS (USB3_DMA_PRD_SIZE / \
+ sizeof(struct renesas_usb3_prd) * \
+ USB3_DMA_MAX_XFER_SIZE)
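+
+/*
+ * With a 4096-byte PRD page and 8-byte entries this works out to 512
+ * entries per setting area, i.e. at most 512 * 32768 bytes = 16 MiB per
+ * queued request.
+ */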
+
+struct renesas_usb3_dma {
+ struct renesas_usb3_prd *prd;
+ dma_addr_t prd_dma;
+ int num; /* Setting area number (from 1 to 4) */
+ bool used;
+};
+
+struct renesas_usb3_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+#define USB3_EP_NAME_SIZE 8
+struct renesas_usb3_ep {
+ struct usb_ep ep;
+ struct renesas_usb3 *usb3;
+ struct renesas_usb3_dma *dma;
+ int num;
+ char ep_name[USB3_EP_NAME_SIZE];
+ struct list_head queue;
+ u32 rammap_val;
+ bool dir_in;
+ bool halt;
+ bool wedge;
+ bool started;
+};
+
+struct renesas_usb3_priv {
+ int ramsize_per_ramif; /* unit = bytes */
+ int num_ramif;
+ int ramsize_per_pipe; /* unit = bytes */
+ bool workaround_for_vbus; /* if true, don't check vbus signal */
+ bool is_rzv2m; /* if true, RZ/V2M SoC */
+};
+
+struct renesas_usb3 {
+ void __iomem *reg;
+ struct reset_control *drd_rstc;
+ struct reset_control *usbp_rstc;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct extcon_dev *extcon;
+ struct work_struct extcon_work;
+ struct phy *phy;
+ struct dentry *dentry;
+
+ struct usb_role_switch *role_sw;
+ struct device *host_dev;
+ struct work_struct role_work;
+ enum usb_role role;
+
+ struct renesas_usb3_ep *usb3_ep;
+ int num_usb3_eps;
+
+ struct renesas_usb3_dma dma[USB3_DMA_NUM_SETTING_AREA];
+
+ spinlock_t lock;
+ int disabled_count;
+
+ struct usb_request *ep0_req;
+
+ enum usb_role connection_state;
+ u16 test_mode;
+ u8 ep0_buf[USB3_EP0_BUF_SIZE];
+ bool softconnect;
+ bool workaround_for_vbus;
+ bool extcon_host; /* check id and set EXTCON_USB_HOST */
+ bool extcon_usb; /* check vbus and set EXTCON_USB */
+ bool forced_b_device;
+ bool start_to_connect;
+ bool role_sw_by_connector;
+ bool is_rzv2m;
+};
+
+#define gadget_to_renesas_usb3(_gadget) \
+ container_of(_gadget, struct renesas_usb3, gadget)
+#define renesas_usb3_to_gadget(renesas_usb3) (&renesas_usb3->gadget)
+#define usb3_to_dev(_usb3) (_usb3->gadget.dev.parent)
+
+#define usb_ep_to_usb3_ep(_ep) container_of(_ep, struct renesas_usb3_ep, ep)
+#define usb3_ep_to_usb3(_usb3_ep) (_usb3_ep->usb3)
+#define usb_req_to_usb3_req(_req) container_of(_req, \
+ struct renesas_usb3_request, req)
+
+#define usb3_get_ep(usb3, n) ((usb3)->usb3_ep + (n))
+#define usb3_for_each_ep(usb3_ep, usb3, i) \
+ for ((i) = 0, usb3_ep = usb3_get_ep(usb3, (i)); \
+ (i) < (usb3)->num_usb3_eps; \
+ (i)++, usb3_ep = usb3_get_ep(usb3, (i)))
+
+#define usb3_get_dma(usb3, i) (&(usb3)->dma[i])
+#define usb3_for_each_dma(usb3, dma, i) \
+ for ((i) = 0, dma = usb3_get_dma((usb3), (i)); \
+ (i) < USB3_DMA_NUM_SETTING_AREA; \
+ (i)++, dma = usb3_get_dma((usb3), (i)))
+
+static const char udc_name[] = "renesas_usb3";
+
+static bool use_dma = true;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "use dedicated DMAC");
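+/* e.g. booting with renesas_usb3.use_dma=0 should force PIO-only transfers */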
+
+static void usb3_write(struct renesas_usb3 *usb3, u32 data, u32 offs)
+{
+ iowrite32(data, usb3->reg + offs);
+}
+
+static u32 usb3_read(struct renesas_usb3 *usb3, u32 offs)
+{
+ return ioread32(usb3->reg + offs);
+}
+
+static void usb3_set_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
+{
+ u32 val = usb3_read(usb3, offs);
+
+ val |= bits;
+ usb3_write(usb3, val, offs);
+}
+
+static void usb3_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
+{
+ u32 val = usb3_read(usb3, offs);
+
+ val &= ~bits;
+ usb3_write(usb3, val, offs);
+}
+
+static int usb3_wait(struct renesas_usb3 *usb3, u32 reg, u32 mask,
+ u32 expected)
+{
+ int i;
+
+ for (i = 0; i < USB3_WAIT_US; i++) {
+ if ((usb3_read(usb3, reg) & mask) == expected)
+ return 0;
+ udelay(1);
+ }
+
+ dev_dbg(usb3_to_dev(usb3), "%s: timed out (%08x, %08x, %08x)\n",
+ __func__, reg, mask, expected);
+
+ return -EBUSY;
+}
+
+static void renesas_usb3_extcon_work(struct work_struct *work)
+{
+ struct renesas_usb3 *usb3 = container_of(work, struct renesas_usb3,
+ extcon_work);
+
+ extcon_set_state_sync(usb3->extcon, EXTCON_USB_HOST, usb3->extcon_host);
+ extcon_set_state_sync(usb3->extcon, EXTCON_USB, usb3->extcon_usb);
+}
+
+static void usb3_enable_irq_1(struct renesas_usb3 *usb3, u32 bits)
+{
+ usb3_set_bit(usb3, bits, USB3_USB_INT_ENA_1);
+}
+
+static void usb3_disable_irq_1(struct renesas_usb3 *usb3, u32 bits)
+{
+ usb3_clear_bit(usb3, bits, USB3_USB_INT_ENA_1);
+}
+
+static void usb3_enable_pipe_irq(struct renesas_usb3 *usb3, int num)
+{
+ usb3_set_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2);
+}
+
+static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
+{
+ usb3_clear_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2);
+}
+
+static bool usb3_is_host(struct renesas_usb3 *usb3)
+{
+ return !(usb3_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
+}
+
+static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
+{
+ /* Set AXI_INT */
+ usb3_write(usb3, ~0, USB3_DMA_INT_STA);
+ usb3_write(usb3, 0, USB3_DMA_INT_ENA);
+ usb3_set_bit(usb3, AXI_INT_DMAINT | AXI_INT_EPCINT, USB3_AXI_INT_ENA);
+}
+
+static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
+{
+ usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
+ if (!usb3->workaround_for_vbus)
+ usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
+}
+
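+/*
+ * Returns true if the USB 2.0 PHY is already awake; otherwise starts a
+ * resume and returns false, with completion reported via the B2_RSUM
+ * interrupt.
+ */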
+static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
+{
+ if (!(usb3_read(usb3, USB3_USB20_CON) & USB20_CON_B2_SUSPEND))
+ return true; /* already awake */
+
+ usb3_clear_bit(usb3, USB20_CON_B2_SUSPEND, USB3_USB20_CON);
+ usb3_enable_irq_1(usb3, USB_INT_1_B2_RSUM);
+
+ return false;
+}
+
+static void usb3_usb2_pullup(struct renesas_usb3 *usb3, int pullup)
+{
+ u32 bits = USB20_CON_B2_PUE | USB20_CON_B2_CONNECT;
+
+ if (usb3->softconnect && pullup)
+ usb3_set_bit(usb3, bits, USB3_USB20_CON);
+ else
+ usb3_clear_bit(usb3, bits, USB3_USB20_CON);
+}
+
+static void usb3_set_test_mode(struct renesas_usb3 *usb3)
+{
+ u32 val = usb3_read(usb3, USB3_USB20_CON);
+
+ val &= ~USB20_CON_B2_TSTMOD_MASK;
+ val |= USB20_CON_B2_TSTMOD(usb3->test_mode);
+ usb3_write(usb3, val | USB20_CON_B2_TSTMOD_EN, USB3_USB20_CON);
+ if (!usb3->test_mode)
+ usb3_clear_bit(usb3, USB20_CON_B2_TSTMOD_EN, USB3_USB20_CON);
+}
+
+static void usb3_start_usb2_connection(struct renesas_usb3 *usb3)
+{
+ usb3->disabled_count++;
+ usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
+ usb3_set_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
+ usb3_usb2_pullup(usb3, 1);
+}
+
+static int usb3_is_usb3_phy_in_u3(struct renesas_usb3 *usb3)
+{
+ return usb3_read(usb3, USB3_USB30_CON) & USB30_CON_POW_SEL_IN_U3;
+}
+
+static bool usb3_wakeup_usb3_phy(struct renesas_usb3 *usb3)
+{
+ if (!usb3_is_usb3_phy_in_u3(usb3))
+ return true; /* already awake */
+
+ usb3_set_bit(usb3, USB30_CON_B3_PLLWAKE, USB3_USB30_CON);
+ usb3_enable_irq_1(usb3, USB_INT_1_B3_PLLWKUP);
+
+ return false;
+}
+
+static u16 usb3_feature_get_un_enabled(struct renesas_usb3 *usb3)
+{
+ u32 mask_u2 = SSIFCMD_UDIR_U2 | SSIFCMD_UREQ_U2;
+ u32 mask_u1 = SSIFCMD_UDIR_U1 | SSIFCMD_UREQ_U1;
+ u32 val = usb3_read(usb3, USB3_SSIFCMD);
+ u16 ret = 0;
+
+ /* U2/U1 are enabled when the respective UDIR and UREQ bits are 0 */
+ if (!(val & mask_u2))
+ ret |= 1 << USB_DEV_STAT_U2_ENABLED;
+ if (!(val & mask_u1))
+ ret |= 1 << USB_DEV_STAT_U1_ENABLED;
+
+ return ret;
+}
+
+static void usb3_feature_u2_enable(struct renesas_usb3 *usb3, bool enable)
+{
+ u32 bits = SSIFCMD_UDIR_U2 | SSIFCMD_UREQ_U2;
+
+ /* Enables U2 if the bits of UDIR and UREQ are set to 0 */
+ if (enable)
+ usb3_clear_bit(usb3, bits, USB3_SSIFCMD);
+ else
+ usb3_set_bit(usb3, bits, USB3_SSIFCMD);
+}
+
+static void usb3_feature_u1_enable(struct renesas_usb3 *usb3, bool enable)
+{
+ u32 bits = SSIFCMD_UDIR_U1 | SSIFCMD_UREQ_U1;
+
+ /* Enables U1 if the bits of UDIR and UREQ are set to 0 */
+ if (enable)
+ usb3_clear_bit(usb3, bits, USB3_SSIFCMD);
+ else
+ usb3_set_bit(usb3, bits, USB3_SSIFCMD);
+}
+
+static void usb3_start_operation_for_usb3(struct renesas_usb3 *usb3)
+{
+ usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
+ usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
+ usb3_set_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
+}
+
+static void usb3_start_usb3_connection(struct renesas_usb3 *usb3)
+{
+ usb3_start_operation_for_usb3(usb3);
+ usb3_set_bit(usb3, USB_COM_CON_RX_DETECTION, USB3_USB_COM_CON);
+
+ usb3_enable_irq_1(usb3, USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
+ USB_INT_1_SPEED);
+}
+
+static void usb3_stop_usb3_connection(struct renesas_usb3 *usb3)
+{
+ usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
+}
+
+static void usb3_transition_to_default_state(struct renesas_usb3 *usb3,
+ bool is_usb3)
+{
+ usb3_set_bit(usb3, USB_INT_2_PIPE(0), USB3_USB_INT_ENA_2);
+ usb3_write(usb3, P0_INT_ALL_BITS, USB3_P0_INT_STA);
+ usb3_set_bit(usb3, P0_INT_ALL_BITS, USB3_P0_INT_ENA);
+
+ if (is_usb3)
+ usb3_enable_irq_1(usb3, USB_INT_1_B3_WRMRST |
+ USB_INT_1_B3_HOTRST);
+ else
+ usb3_enable_irq_1(usb3, USB_INT_1_B2_SPND |
+ USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
+}
+
+static void usb3_connect(struct renesas_usb3 *usb3)
+{
+ if (usb3_wakeup_usb3_phy(usb3))
+ usb3_start_usb3_connection(usb3);
+}
+
+static void usb3_reset_epc(struct renesas_usb3 *usb3)
+{
+ usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
+ usb3_clear_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
+ usb3_set_bit(usb3, USB_COM_CON_PIPE_CLR, USB3_USB_COM_CON);
+ usb3->test_mode = 0;
+ usb3_set_test_mode(usb3);
+}
+
+static void usb3_disconnect(struct renesas_usb3 *usb3)
+{
+ usb3->disabled_count = 0;
+ usb3_usb2_pullup(usb3, 0);
+ usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
+ usb3_reset_epc(usb3);
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
+ USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
+ USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
+ USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
+ USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
+ usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
+ usb3_init_epc_registers(usb3);
+
+ if (usb3->driver)
+ usb3->driver->disconnect(&usb3->gadget);
+}
+
+static void usb3_check_vbus(struct renesas_usb3 *usb3)
+{
+ if (usb3->workaround_for_vbus) {
+ usb3_connect(usb3);
+ } else {
+ usb3->extcon_usb = !!(usb3_read(usb3, USB3_USB_STA) &
+ USB_STA_VBUS_STA);
+ if (usb3->extcon_usb)
+ usb3_connect(usb3);
+ else
+ usb3_disconnect(usb3);
+
+ schedule_work(&usb3->extcon_work);
+ }
+}
+
+static void renesas_usb3_role_work(struct work_struct *work)
+{
+ struct renesas_usb3 *usb3 =
+ container_of(work, struct renesas_usb3, role_work);
+
+ usb_role_switch_set_role(usb3->role_sw, usb3->role);
+}
+
+static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
+{
+ if (usb3->is_rzv2m) {
+ if (host) {
+ usb3_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ } else {
+ usb3_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ }
+ }
+
+ if (host)
+ usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
+ else
+ usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
+}
+
+static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
+{
+ if (usb3->role_sw) {
+ usb3->role = host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+ schedule_work(&usb3->role_work);
+ } else {
+ usb3_set_mode(usb3, host);
+ }
+}
+
+static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
+{
+ if (enable)
+ usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
+ else
+ usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
+}
+
+static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (!usb3->role_sw_by_connector ||
+ usb3->connection_state != USB_ROLE_NONE) {
+ usb3_set_mode_by_role_sw(usb3, host);
+ usb3_vbus_out(usb3, a_dev);
+ }
+ /* for A-Peripheral or forced B-device mode */
+ if ((!host && a_dev) || usb3->start_to_connect)
+ usb3_connect(usb3);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+}
+
+static bool usb3_is_a_device(struct renesas_usb3 *usb3)
+{
+ return !(usb3_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
+}
+
+static void usb3_check_id(struct renesas_usb3 *usb3)
+{
+ usb3->extcon_host = usb3_is_a_device(usb3);
+
+ if ((!usb3->role_sw_by_connector && usb3->extcon_host &&
+ !usb3->forced_b_device) || usb3->connection_state == USB_ROLE_HOST)
+ usb3_mode_config(usb3, true, true);
+ else
+ usb3_mode_config(usb3, false, false);
+
+ schedule_work(&usb3->extcon_work);
+}
+
+static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
+{
+ usb3_init_axi_bridge(usb3);
+ usb3_init_epc_registers(usb3);
+ usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
+ USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
+ USB3_USB_COM_CON);
+ usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
+ usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
+
+ usb3_check_id(usb3);
+ usb3_check_vbus(usb3);
+}
+
+static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
+{
+ usb3_disconnect(usb3);
+ usb3_write(usb3, 0, USB3_P0_INT_ENA);
+ usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
+ usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
+ usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
+ usb3_write(usb3, 0, USB3_AXI_INT_ENA);
+}
+
+static void usb3_irq_epc_int_1_pll_wakeup(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B3_PLLWKUP);
+ usb3_clear_bit(usb3, USB30_CON_B3_PLLWAKE, USB3_USB30_CON);
+ usb3_start_usb3_connection(usb3);
+}
+
+static void usb3_irq_epc_int_1_linkup_success(struct renesas_usb3 *usb3)
+{
+ usb3_transition_to_default_state(usb3, true);
+}
+
+static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM);
+ usb3_start_usb2_connection(usb3);
+ usb3_transition_to_default_state(usb3, false);
+}
+
+static void usb3_irq_epc_int_1_suspend(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_SPND);
+
+ if (usb3->gadget.speed != USB_SPEED_UNKNOWN &&
+ usb3->gadget.state != USB_STATE_NOTATTACHED) {
+ if (usb3->driver && usb3->driver->suspend)
+ usb3->driver->suspend(&usb3->gadget);
+ usb_gadget_set_state(&usb3->gadget, USB_STATE_SUSPENDED);
+ }
+}
+
+static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3)
+{
+ usb3_stop_usb3_connection(usb3);
+ if (usb3_wakeup_usb2_phy(usb3))
+ usb3_irq_epc_int_1_resume(usb3);
+}
+
+static void usb3_irq_epc_int_1_bus_reset(struct renesas_usb3 *usb3)
+{
+ usb3_reset_epc(usb3);
+ if (usb3->disabled_count < 3)
+ usb3_start_usb3_connection(usb3);
+ else
+ usb3_start_usb2_connection(usb3);
+}
+
+static void usb3_irq_epc_int_1_vbus_change(struct renesas_usb3 *usb3)
+{
+ usb3_check_vbus(usb3);
+}
+
+static void usb3_irq_epc_int_1_hot_reset(struct renesas_usb3 *usb3)
+{
+ usb3_reset_epc(usb3);
+ usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
+
+ /* This bit shall be set within 12ms from the start of HotReset */
+ usb3_set_bit(usb3, USB30_CON_B3_HOTRST_CMP, USB3_USB30_CON);
+}
+
+static void usb3_irq_epc_int_1_warm_reset(struct renesas_usb3 *usb3)
+{
+ usb3_reset_epc(usb3);
+ usb3_set_bit(usb3, USB_COM_CON_EP0_EN, USB3_USB_COM_CON);
+
+ usb3_start_operation_for_usb3(usb3);
+ usb3_enable_irq_1(usb3, USB_INT_1_SPEED);
+}
+
+static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
+{
+ u32 speed = usb3_read(usb3, USB3_USB_STA) & USB_STA_SPEED_MASK;
+
+ switch (speed) {
+ case USB_STA_SPEED_SS:
+ usb3->gadget.speed = USB_SPEED_SUPER;
+ usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
+ break;
+ case USB_STA_SPEED_HS:
+ usb3->gadget.speed = USB_SPEED_HIGH;
+ usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
+ break;
+ case USB_STA_SPEED_FS:
+ usb3->gadget.speed = USB_SPEED_FULL;
+ usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
+ break;
+ default:
+ usb3->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1)
+{
+ if (int_sta_1 & USB_INT_1_B3_PLLWKUP)
+ usb3_irq_epc_int_1_pll_wakeup(usb3);
+
+ if (int_sta_1 & USB_INT_1_B3_LUPSUCS)
+ usb3_irq_epc_int_1_linkup_success(usb3);
+
+ if (int_sta_1 & USB_INT_1_B3_HOTRST)
+ usb3_irq_epc_int_1_hot_reset(usb3);
+
+ if (int_sta_1 & USB_INT_1_B3_WRMRST)
+ usb3_irq_epc_int_1_warm_reset(usb3);
+
+ if (int_sta_1 & USB_INT_1_B3_DISABLE)
+ usb3_irq_epc_int_1_disable(usb3);
+
+ if (int_sta_1 & USB_INT_1_B2_USBRST)
+ usb3_irq_epc_int_1_bus_reset(usb3);
+
+ if (int_sta_1 & USB_INT_1_B2_RSUM)
+ usb3_irq_epc_int_1_resume(usb3);
+
+ if (int_sta_1 & USB_INT_1_B2_SPND)
+ usb3_irq_epc_int_1_suspend(usb3);
+
+ if (int_sta_1 & USB_INT_1_SPEED)
+ usb3_irq_epc_int_1_speed(usb3);
+
+ if (int_sta_1 & USB_INT_1_VBUS_CNG)
+ usb3_irq_epc_int_1_vbus_change(usb3);
+}
+
+static struct renesas_usb3_request *
+__usb3_get_request(struct renesas_usb3_ep *usb3_ep)
+{
+ return list_first_entry_or_null(&usb3_ep->queue,
+ struct renesas_usb3_request, queue);
+}
+
+static struct renesas_usb3_request *
+usb3_get_request(struct renesas_usb3_ep *usb3_ep)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ struct renesas_usb3_request *usb3_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ usb3_req = __usb3_get_request(usb3_ep);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
+ return usb3_req;
+}
+
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req,
+ int status)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+ dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
+ usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
+ status);
+ usb3_req->req.status = status;
+ usb3_ep->started = false;
+ list_del_init(&usb3_req->queue);
+ spin_unlock(&usb3->lock);
+ usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+ spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req, int status)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ __usb3_request_done(usb3_ep, usb3_req, status);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+}
+
+static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
+{
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
+ struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+
+ if (usb3_req)
+ usb3_request_done(usb3_ep, usb3_req, 0);
+ if (usb3->test_mode)
+ usb3_set_test_mode(usb3);
+}
+
+static void usb3_get_setup_data(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
+ u32 *data = (u32 *)ctrl;
+
+ *data++ = usb3_read(usb3, USB3_STUP_DAT_0);
+ *data = usb3_read(usb3, USB3_STUP_DAT_1);
+
+ /* update this driver's flag */
+ usb3_ep->dir_in = !!(ctrl->bRequestType & USB_DIR_IN);
+}
+
+static void usb3_set_p0_con_update_res(struct renesas_usb3 *usb3, u32 res)
+{
+ u32 val = usb3_read(usb3, USB3_P0_CON);
+
+ val &= ~(P0_CON_ST_RES_MASK | P0_CON_OT_RES_MASK | P0_CON_IN_RES_MASK);
+ val |= res | P0_CON_RES_WEN;
+ usb3_write(usb3, val, USB3_P0_CON);
+}
+
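+/*
+ * The helpers below program PIPE0's three response fields as a set: ST
+ * (status stage), OT (OUT data) and IN (IN data). Stages that must not
+ * proceed yet are forced to NRDY; illegal directions are forced to STALL.
+ */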
+static void usb3_set_p0_con_for_ctrl_read_data(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY |
+ P0_CON_OT_RES_FORCE_STALL |
+ P0_CON_IN_RES_NORMAL);
+}
+
+static void usb3_set_p0_con_for_ctrl_read_status(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL |
+ P0_CON_OT_RES_FORCE_STALL |
+ P0_CON_IN_RES_NORMAL);
+}
+
+static void usb3_set_p0_con_for_ctrl_write_data(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY |
+ P0_CON_OT_RES_NORMAL |
+ P0_CON_IN_RES_FORCE_STALL);
+}
+
+static void usb3_set_p0_con_for_ctrl_write_status(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL |
+ P0_CON_OT_RES_NORMAL |
+ P0_CON_IN_RES_FORCE_STALL);
+}
+
+static void usb3_set_p0_con_for_no_data(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_NORMAL |
+ P0_CON_OT_RES_FORCE_STALL |
+ P0_CON_IN_RES_FORCE_STALL);
+}
+
+static void usb3_set_p0_con_stall(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_STALL |
+ P0_CON_OT_RES_FORCE_STALL |
+ P0_CON_IN_RES_FORCE_STALL);
+}
+
+static void usb3_set_p0_con_stop(struct renesas_usb3 *usb3)
+{
+ usb3_set_p0_con_update_res(usb3, P0_CON_ST_RES_FORCE_NRDY |
+ P0_CON_OT_RES_FORCE_NRDY |
+ P0_CON_IN_RES_FORCE_NRDY);
+}
+
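+/*
+ * The PN_* registers form an indexed window: writing a pipe number to
+ * PIPE_COM selects which pipe PN_MOD/PN_CON/PN_... refer to, which is why
+ * callers hold usb3->lock across usb3_pn_change() and the accesses that
+ * follow it.
+ */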
+static int usb3_pn_change(struct renesas_usb3 *usb3, int num)
+{
+ if (num == 0 || num > usb3->num_usb3_eps)
+ return -ENXIO;
+
+ usb3_write(usb3, num, USB3_PIPE_COM);
+
+ return 0;
+}
+
+static void usb3_set_pn_con_update_res(struct renesas_usb3 *usb3, u32 res)
+{
+ u32 val = usb3_read(usb3, USB3_PN_CON);
+
+ val &= ~PN_CON_RES_MASK;
+ val |= res & PN_CON_RES_MASK;
+ val |= PN_CON_RES_WEN;
+ usb3_write(usb3, val, USB3_PN_CON);
+}
+
+static void usb3_pn_start(struct renesas_usb3 *usb3)
+{
+ usb3_set_pn_con_update_res(usb3, PN_CON_RES_NORMAL);
+}
+
+static void usb3_pn_stop(struct renesas_usb3 *usb3)
+{
+ usb3_set_pn_con_update_res(usb3, PN_CON_RES_FORCE_NRDY);
+}
+
+static void usb3_pn_stall(struct renesas_usb3 *usb3)
+{
+ usb3_set_pn_con_update_res(usb3, PN_CON_RES_FORCE_STALL);
+}
+
+static int usb3_pn_con_clear(struct renesas_usb3 *usb3)
+{
+ usb3_set_bit(usb3, PN_CON_CLR, USB3_PN_CON);
+
+ return usb3_wait(usb3, USB3_PN_CON, PN_CON_CLR, 0);
+}
+
+static bool usb3_is_transfer_complete(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct usb_request *req = &usb3_req->req;
+
+ return (!req->zero && req->actual == req->length) ||
+ (req->actual % usb3_ep->ep.maxpacket) || (req->length == 0);
+}
+
+static int usb3_wait_pipe_status(struct renesas_usb3_ep *usb3_ep, u32 mask)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ u32 sta_reg = usb3_ep->num ? USB3_PN_STA : USB3_P0_STA;
+
+ return usb3_wait(usb3, sta_reg, mask, mask);
+}
+
+static void usb3_set_px_con_send(struct renesas_usb3_ep *usb3_ep, int bytes,
+ bool last)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ u32 con_reg = usb3_ep->num ? USB3_PN_CON : USB3_P0_CON;
+ u32 val = usb3_read(usb3, con_reg);
+
+ val |= PX_CON_SEND | PX_CON_BYTE_EN_BYTES(bytes);
+ val |= (usb3_ep->num && last) ? PN_CON_LAST : 0;
+ usb3_write(usb3, val, con_reg);
+}
+
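+/*
+ * Writes up to one maxpacket of data into the PIO FIFO. Returns 0 once the
+ * final (short or zero-length) chunk has been handed to the controller,
+ * -EAGAIN while more data remains, and -EBUSY if the pipe buffer never
+ * became ready.
+ */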
+static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req,
+ u32 fifo_reg)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ int i;
+ int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual,
+ usb3_ep->ep.maxpacket);
+ u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
+ u32 tmp = 0;
+ bool is_last = !len;
+
+ if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
+ return -EBUSY;
+
+ /* Update gadget driver parameter */
+ usb3_req->req.actual += len;
+
+ /* Write data to the register */
+ if (len >= 4) {
+ iowrite32_rep(usb3->reg + fifo_reg, buf, len / 4);
+ buf += (len / 4) * 4;
+ len %= 4; /* update len to use usb3_set_pX_con_send() */
+ }
+
+ if (len) {
+ for (i = 0; i < len; i++)
+ tmp |= buf[i] << (8 * i);
+ usb3_write(usb3, tmp, fifo_reg);
+ }
+
+ if (!is_last)
+ is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
+ /* Send the data */
+ usb3_set_px_con_send(usb3_ep, len, is_last);
+
+ return is_last ? 0 : -EAGAIN;
+}
+
+static u32 usb3_get_received_length(struct renesas_usb3_ep *usb3_ep)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ u32 lng_reg = usb3_ep->num ? USB3_PN_LNG : USB3_P0_LNG;
+
+ return usb3_read(usb3, lng_reg);
+}
+
+static int usb3_read_pipe(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req, u32 fifo_reg)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ int i;
+ int len = min_t(unsigned, usb3_req->req.length - usb3_req->req.actual,
+ usb3_get_received_length(usb3_ep));
+ u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
+ u32 tmp = 0;
+
+ if (!len)
+ return 0;
+
+ /* Update gadget driver parameter */
+ usb3_req->req.actual += len;
+
+ /* Read data from the register */
+ if (len >= 4) {
+ ioread32_rep(usb3->reg + fifo_reg, buf, len / 4);
+ buf += (len / 4) * 4;
+ len %= 4;
+ }
+
+ if (len) {
+ tmp = usb3_read(usb3, fifo_reg);
+ for (i = 0; i < len; i++)
+ buf[i] = (tmp >> (8 * i)) & 0xff;
+ }
+
+ return usb3_is_transfer_complete(usb3_ep, usb3_req) ? 0 : -EAGAIN;
+}
+
+static void usb3_set_status_stage(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+ if (usb3_ep->dir_in) {
+ usb3_set_p0_con_for_ctrl_read_status(usb3);
+ } else {
+ if (!usb3_req->req.length)
+ usb3_set_p0_con_for_no_data(usb3);
+ else
+ usb3_set_p0_con_for_ctrl_write_status(usb3);
+ }
+}
+
+static void usb3_p0_xfer(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ int ret;
+
+ if (usb3_ep->dir_in)
+ ret = usb3_write_pipe(usb3_ep, usb3_req, USB3_P0_WRITE);
+ else
+ ret = usb3_read_pipe(usb3_ep, usb3_req, USB3_P0_READ);
+
+ if (!ret)
+ usb3_set_status_stage(usb3_ep, usb3_req);
+}
+
+static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+ if (usb3_ep->started)
+ return;
+
+ usb3_ep->started = true;
+
+ if (usb3_ep->dir_in) {
+ usb3_set_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
+ usb3_set_p0_con_for_ctrl_read_data(usb3);
+ } else {
+ usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
+ if (usb3_req->req.length)
+ usb3_set_p0_con_for_ctrl_write_data(usb3);
+ }
+
+ usb3_p0_xfer(usb3_ep, usb3_req);
+}
+
+static void usb3_enable_dma_pipen(struct renesas_usb3 *usb3)
+{
+ usb3_set_bit(usb3, PN_CON_DATAIF_EN, USB3_PN_CON);
+}
+
+static void usb3_disable_dma_pipen(struct renesas_usb3 *usb3)
+{
+ usb3_clear_bit(usb3, PN_CON_DATAIF_EN, USB3_PN_CON);
+}
+
+static void usb3_enable_dma_irq(struct renesas_usb3 *usb3, int num)
+{
+ usb3_set_bit(usb3, DMA_INT(num), USB3_DMA_INT_ENA);
+}
+
+static void usb3_disable_dma_irq(struct renesas_usb3 *usb3, int num)
+{
+ usb3_clear_bit(usb3, DMA_INT(num), USB3_DMA_INT_ENA);
+}
+
+static u32 usb3_dma_mps_to_prd_word1(struct renesas_usb3_ep *usb3_ep)
+{
+ switch (usb3_ep->ep.maxpacket) {
+ case 8:
+ return USB3_PRD1_MPS_8;
+ case 16:
+ return USB3_PRD1_MPS_16;
+ case 32:
+ return USB3_PRD1_MPS_32;
+ case 64:
+ return USB3_PRD1_MPS_64;
+ case 512:
+ return USB3_PRD1_MPS_512;
+ case 1024:
+ return USB3_PRD1_MPS_1024;
+ default:
+ return USB3_PRD1_MPS_RESERVED;
+ }
+}
+
+static bool usb3_dma_get_setting_area(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ struct renesas_usb3_dma *dma;
+ int i;
+ bool ret = false;
+
+ if (usb3_req->req.length > USB3_DMA_MAX_XFER_SIZE_ALL_PRDS) {
+ dev_dbg(usb3_to_dev(usb3), "%s: the length is too big (%d)\n",
+ __func__, usb3_req->req.length);
+ return false;
+ }
+
+ /* The driver doesn't handle zero-length packets via the DMAC */
+ if (!usb3_req->req.length)
+ return false;
+
+ if (usb3_dma_mps_to_prd_word1(usb3_ep) == USB3_PRD1_MPS_RESERVED)
+ return false;
+
+ usb3_for_each_dma(usb3, dma, i) {
+ if (dma->used)
+ continue;
+
+ if (usb_gadget_map_request(&usb3->gadget, &usb3_req->req,
+ usb3_ep->dir_in) < 0)
+ break;
+
+ dma->used = true;
+ usb3_ep->dma = dma;
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+static void usb3_dma_put_setting_area(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ int i;
+ struct renesas_usb3_dma *dma;
+
+ usb3_for_each_dma(usb3, dma, i) {
+ if (usb3_ep->dma == dma) {
+ usb_gadget_unmap_request(&usb3->gadget, &usb3_req->req,
+ usb3_ep->dir_in);
+ dma->used = false;
+ usb3_ep->dma = NULL;
+ break;
+ }
+ }
+}
+
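+/*
+ * Builds the PRD chain for one request: each entry covers up to 32 KiB of
+ * the buffer; the final entry carries the E (end) and INT flags, plus LST
+ * on IN pipes so the controller marks the last packet.
+ */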
+static void usb3_dma_fill_prd(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3_prd *cur_prd = usb3_ep->dma->prd;
+ u32 remain = usb3_req->req.length;
+ u32 dma = usb3_req->req.dma;
+ u32 len;
+ int i = 0;
+
+ do {
+ len = min_t(u32, remain, USB3_DMA_MAX_XFER_SIZE) &
+ USB3_PRD1_SIZE_MASK;
+ cur_prd->word1 = usb3_dma_mps_to_prd_word1(usb3_ep) |
+ USB3_PRD1_B_INC | len;
+ cur_prd->bap = dma;
+ remain -= len;
+ dma += len;
+ if (!remain || (i + 1) >= USB3_DMA_NUM_PRD_ENTRIES)
+ break;
+
+ cur_prd++;
+ i++;
+ } while (1);
+
+ cur_prd->word1 |= USB3_PRD1_E | USB3_PRD1_INT;
+ if (usb3_ep->dir_in)
+ cur_prd->word1 |= USB3_PRD1_LST;
+}
+
+static void usb3_dma_kick_prd(struct renesas_usb3_ep *usb3_ep)
+{
+ struct renesas_usb3_dma *dma = usb3_ep->dma;
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ u32 dma_con = DMA_COM_PIPE_NO(usb3_ep->num) | DMA_CON_PRD_EN;
+
+ if (usb3_ep->dir_in)
+ dma_con |= DMA_CON_PIPE_DIR;
+
+ wmb(); /* prd entries should be in system memory here */
+
+ usb3_write(usb3, 1 << usb3_ep->num, USB3_DMA_INT_STA);
+ usb3_write(usb3, AXI_INT_PRDEN_CLR_STA(dma->num) |
+ AXI_INT_PRDERR_STA(dma->num), USB3_AXI_INT_STA);
+
+ usb3_write(usb3, dma->prd_dma, USB3_DMA_CH0_PRD_ADR(dma->num));
+ usb3_write(usb3, dma_con, USB3_DMA_CH0_CON(dma->num));
+ usb3_enable_dma_irq(usb3, usb3_ep->num);
+}
+
+static void usb3_dma_stop_prd(struct renesas_usb3_ep *usb3_ep)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ struct renesas_usb3_dma *dma = usb3_ep->dma;
+
+ usb3_disable_dma_irq(usb3, usb3_ep->num);
+ usb3_write(usb3, 0, USB3_DMA_CH0_CON(dma->num));
+}
+
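+/*
+ * Walks the PRD chain after completion: the low bits of word1 hold each
+ * entry's untransferred residue, so the request's actual count advances by
+ * len - remain per entry.
+ */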
+static int usb3_dma_update_status(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3_prd *cur_prd = usb3_ep->dma->prd;
+ struct usb_request *req = &usb3_req->req;
+ u32 remain, len;
+ int i = 0;
+ int status = 0;
+
+ rmb(); /* The controller updated prd entries */
+
+ do {
+ if (cur_prd->word1 & USB3_PRD1_D)
+ status = -EIO;
+ if (cur_prd->word1 & USB3_PRD1_E)
+ len = req->length % USB3_DMA_MAX_XFER_SIZE;
+ else
+ len = USB3_DMA_MAX_XFER_SIZE;
+ remain = cur_prd->word1 & USB3_PRD1_SIZE_MASK;
+ req->actual += len - remain;
+
+ if ((cur_prd->word1 & USB3_PRD1_E) ||
+ (i + 1) >= USB3_DMA_NUM_PRD_ENTRIES)
+ break;
+
+ cur_prd++;
+ i++;
+ } while (1);
+
+ return status;
+}
+
+static bool usb3_dma_try_start(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+ if (!use_dma)
+ return false;
+
+ if (usb3_dma_get_setting_area(usb3_ep, usb3_req)) {
+ usb3_pn_stop(usb3);
+ usb3_enable_dma_pipen(usb3);
+ usb3_dma_fill_prd(usb3_ep, usb3_req);
+ usb3_dma_kick_prd(usb3_ep);
+ usb3_pn_start(usb3);
+ return true;
+ }
+
+ return false;
+}
+
+static int usb3_dma_try_stop(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+ int status = 0;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (!usb3_ep->dma)
+ goto out;
+
+ if (!usb3_pn_change(usb3, usb3_ep->num))
+ usb3_disable_dma_pipen(usb3);
+ usb3_dma_stop_prd(usb3_ep);
+ status = usb3_dma_update_status(usb3_ep, usb3_req);
+ usb3_dma_put_setting_area(usb3_ep, usb3_req);
+
+out:
+ spin_unlock_irqrestore(&usb3->lock, flags);
+ return status;
+}
+
+static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
+ struct device *dev)
+{
+ int i;
+ struct renesas_usb3_dma *dma;
+
+ usb3_for_each_dma(usb3, dma, i) {
+ if (dma->prd) {
+ dma_free_coherent(dev, USB3_DMA_PRD_SIZE,
+ dma->prd, dma->prd_dma);
+ dma->prd = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int renesas_usb3_dma_alloc_prd(struct renesas_usb3 *usb3,
+ struct device *dev)
+{
+ int i;
+ struct renesas_usb3_dma *dma;
+
+ if (!use_dma)
+ return 0;
+
+ usb3_for_each_dma(usb3, dma, i) {
+ dma->prd = dma_alloc_coherent(dev, USB3_DMA_PRD_SIZE,
+ &dma->prd_dma, GFP_KERNEL);
+ if (!dma->prd) {
+ renesas_usb3_dma_free_prd(usb3, dev);
+ return -ENOMEM;
+ }
+ dma->num = i + 1;
+ }
+
+ return 0;
+}
+
+static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ struct renesas_usb3_request *usb3_req_first;
+ unsigned long flags;
+ int ret = -EAGAIN;
+ u32 enable_bits = 0;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (usb3_ep->halt || usb3_ep->started)
+ goto out;
+ usb3_req_first = __usb3_get_request(usb3_ep);
+ if (!usb3_req_first || usb3_req != usb3_req_first)
+ goto out;
+
+ if (usb3_pn_change(usb3, usb3_ep->num) < 0)
+ goto out;
+
+ usb3_ep->started = true;
+
+ if (usb3_dma_try_start(usb3_ep, usb3_req))
+ goto out;
+
+ usb3_pn_start(usb3);
+
+ if (usb3_ep->dir_in) {
+ ret = usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE);
+ enable_bits |= PN_INT_LSTTR;
+ }
+
+ if (ret < 0)
+ enable_bits |= PN_INT_BFRDY;
+
+ if (enable_bits) {
+ usb3_set_bit(usb3, enable_bits, USB3_PN_INT_ENA);
+ usb3_enable_pipe_irq(usb3, usb3_ep->num);
+ }
+out:
+ spin_unlock_irqrestore(&usb3->lock, flags);
+}
+
+static int renesas_usb3_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
+ struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req);
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ dev_dbg(usb3_to_dev(usb3), "ep_queue: ep%2d, %u\n", usb3_ep->num,
+ _req->length);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+ spin_lock_irqsave(&usb3->lock, flags);
+ list_add_tail(&usb3_req->queue, &usb3_ep->queue);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
+ if (!usb3_ep->num)
+ usb3_start_pipe0(usb3_ep, usb3_req);
+ else
+ usb3_start_pipen(usb3_ep, usb3_req);
+
+ return 0;
+}
+
+static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr)
+{
+ /* DEV_ADDR bit field is cleared by WarmReset, HotReset and BusReset */
+ usb3_set_bit(usb3, USB_COM_CON_DEV_ADDR(addr), USB3_USB_COM_CON);
+}
+
+static bool usb3_std_req_set_address(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (le16_to_cpu(ctrl->wValue) >= 128)
+ return true; /* stall */
+
+ usb3_set_device_address(usb3, le16_to_cpu(ctrl->wValue));
+ usb3_set_p0_con_for_no_data(usb3);
+
+ return false;
+}
+
+static void usb3_pipe0_internal_xfer(struct renesas_usb3 *usb3,
+ void *tx_data, size_t len,
+ void (*complete)(struct usb_ep *ep,
+ struct usb_request *req))
+{
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
+
+ if (tx_data)
+ memcpy(usb3->ep0_buf, tx_data,
+ min_t(size_t, len, USB3_EP0_BUF_SIZE));
+
+ usb3->ep0_req->buf = &usb3->ep0_buf;
+ usb3->ep0_req->length = len;
+ usb3->ep0_req->complete = complete;
+ renesas_usb3_ep_queue(&usb3_ep->ep, usb3->ep0_req, GFP_ATOMIC);
+}
+
+static void usb3_pipe0_get_status_completion(struct usb_ep *ep,
+ struct usb_request *req)
+{
+}
+
+static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool stall = false;
+ struct renesas_usb3_ep *usb3_ep;
+ int num;
+ u16 status = 0;
+ __le16 tx_data;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ if (usb3->gadget.is_selfpowered)
+ status |= 1 << USB_DEVICE_SELF_POWERED;
+ if (usb3->gadget.speed == USB_SPEED_SUPER)
+ status |= usb3_feature_get_un_enabled(usb3);
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ num = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+ usb3_ep = usb3_get_ep(usb3, num);
+ if (usb3_ep->halt)
+ status |= 1 << USB_ENDPOINT_HALT;
+ break;
+ default:
+ stall = true;
+ break;
+ }
+
+ if (!stall) {
+ tx_data = cpu_to_le16(status);
+ dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n",
+ usb_req_to_usb3_req(usb3->ep0_req));
+ usb3_pipe0_internal_xfer(usb3, &tx_data, sizeof(tx_data),
+ usb3_pipe0_get_status_completion);
+ }
+
+ return stall;
+}
+
+static bool usb3_std_req_feature_device(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl, bool set)
+{
+ bool stall = true;
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ switch (w_value) {
+ case USB_DEVICE_TEST_MODE:
+ if (!set)
+ break;
+ usb3->test_mode = le16_to_cpu(ctrl->wIndex) >> 8;
+ stall = false;
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ if (usb3->gadget.speed != USB_SPEED_SUPER)
+ break;
+ if (w_value == USB_DEVICE_U1_ENABLE)
+ usb3_feature_u1_enable(usb3, set);
+ if (w_value == USB_DEVICE_U2_ENABLE)
+ usb3_feature_u2_enable(usb3, set);
+ stall = false;
+ break;
+ default:
+ break;
+ }
+
+ return stall;
+}
+
+static int usb3_set_halt_p0(struct renesas_usb3_ep *usb3_ep, bool halt)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+ if (unlikely(usb3_ep->num))
+ return -EINVAL;
+
+ usb3_ep->halt = halt;
+ if (halt)
+ usb3_set_p0_con_stall(usb3);
+ else
+ usb3_set_p0_con_stop(usb3);
+
+ return 0;
+}
+
+static int usb3_set_halt_pn(struct renesas_usb3_ep *usb3_ep, bool halt,
+ bool is_clear_feature)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (!usb3_pn_change(usb3, usb3_ep->num)) {
+ usb3_ep->halt = halt;
+ if (halt) {
+ usb3_pn_stall(usb3);
+ } else if (!is_clear_feature || !usb3_ep->wedge) {
+ usb3_pn_con_clear(usb3);
+ usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON);
+ usb3_pn_stop(usb3);
+ }
+ }
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
+ return 0;
+}
+
+static int usb3_set_halt(struct renesas_usb3_ep *usb3_ep, bool halt,
+ bool is_clear_feature)
+{
+ int ret = 0;
+
+ if (halt && usb3_ep->started)
+ return -EAGAIN;
+
+ if (usb3_ep->num)
+ ret = usb3_set_halt_pn(usb3_ep, halt, is_clear_feature);
+ else
+ ret = usb3_set_halt_p0(usb3_ep, halt);
+
+ return ret;
+}
+
+static bool usb3_std_req_feature_endpoint(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl,
+ bool set)
+{
+ int num = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+ struct renesas_usb3_ep *usb3_ep;
+ struct renesas_usb3_request *usb3_req;
+
+ if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
+ return true; /* stall */
+
+ usb3_ep = usb3_get_ep(usb3, num);
+ usb3_set_halt(usb3_ep, set, true);
+
+ /* Restart the queue when handling ClearFeature */
+ if (!set) {
+ usb3_ep->started = false;
+ usb3_req = usb3_get_request(usb3_ep);
+ if (usb3_req)
+ usb3_start_pipen(usb3_ep, usb3_req);
+ }
+
+ return false;
+}
+
+static bool usb3_std_req_feature(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl, bool set)
+{
+ bool stall = false;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ stall = usb3_std_req_feature_device(usb3, ctrl, set);
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ stall = usb3_std_req_feature_endpoint(usb3, ctrl, set);
+ break;
+ default:
+ stall = true;
+ break;
+ }
+
+ if (!stall)
+ usb3_set_p0_con_for_no_data(usb3);
+
+ return stall;
+}
+
+static void usb3_pipe0_set_sel_completion(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ /* TODO */
+}
+
+static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl)
+{
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ if (w_length != 6)
+ return true; /* stall */
+
+ dev_dbg(usb3_to_dev(usb3), "set_sel: req = %p\n",
+ usb_req_to_usb3_req(usb3->ep0_req));
+ usb3_pipe0_internal_xfer(usb3, NULL, 6, usb3_pipe0_set_sel_completion);
+
+ return false;
+}
+
+static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (le16_to_cpu(ctrl->wValue) > 0)
+ usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
+ else
+ usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
+
+ return false;
+}
+
+/**
+ * usb3_handle_standard_request - handle some standard requests
+ * @usb3: the renesas_usb3 pointer
+ * @ctrl: a pointer to the setup data
+ *
+ * Returns true if this function handled a standard request
+ */
+static bool usb3_handle_standard_request(struct renesas_usb3 *usb3,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool ret = false;
+ bool stall = false;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ stall = usb3_std_req_set_address(usb3, ctrl);
+ ret = true;
+ break;
+ case USB_REQ_GET_STATUS:
+ stall = usb3_std_req_get_status(usb3, ctrl);
+ ret = true;
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ stall = usb3_std_req_feature(usb3, ctrl, false);
+ ret = true;
+ break;
+ case USB_REQ_SET_FEATURE:
+ stall = usb3_std_req_feature(usb3, ctrl, true);
+ ret = true;
+ break;
+ case USB_REQ_SET_SEL:
+ stall = usb3_std_req_set_sel(usb3, ctrl);
+ ret = true;
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ /* This hardware doesn't support Isochronous xfer */
+ stall = true;
+ ret = true;
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ usb3_std_req_set_configuration(usb3, ctrl);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (stall)
+ usb3_set_p0_con_stall(usb3);
+
+ return ret;
+}
+
+static int usb3_p0_con_clear_buffer(struct renesas_usb3 *usb3)
+{
+ usb3_set_bit(usb3, P0_CON_BCLR, USB3_P0_CON);
+
+ return usb3_wait(usb3, USB3_P0_CON, P0_CON_BCLR, 0);
+}
+
+static void usb3_irq_epc_pipe0_setup(struct renesas_usb3 *usb3)
+{
+ struct usb_ctrlrequest ctrl;
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
+
+ /* Call giveback function if previous transfer is not completed */
+ if (usb3_ep->started)
+ usb3_request_done(usb3_ep, usb3_get_request(usb3_ep),
+ -ECONNRESET);
+
+ usb3_p0_con_clear_buffer(usb3);
+ usb3_get_setup_data(usb3, &ctrl);
+ if (!usb3_handle_standard_request(usb3, &ctrl))
+ if (usb3->driver->setup(&usb3->gadget, &ctrl) < 0)
+ usb3_set_p0_con_stall(usb3);
+}
+
+static void usb3_irq_epc_pipe0_bfrdy(struct renesas_usb3 *usb3)
+{
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, 0);
+ struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+
+ if (!usb3_req)
+ return;
+
+ usb3_p0_xfer(usb3_ep, usb3_req);
+}
+
+static void usb3_irq_epc_pipe0(struct renesas_usb3 *usb3)
+{
+ u32 p0_int_sta = usb3_read(usb3, USB3_P0_INT_STA);
+
+ p0_int_sta &= usb3_read(usb3, USB3_P0_INT_ENA);
+ usb3_write(usb3, p0_int_sta, USB3_P0_INT_STA);
+ if (p0_int_sta & P0_INT_STSED)
+ usb3_irq_epc_pipe0_status_end(usb3);
+ if (p0_int_sta & P0_INT_SETUP)
+ usb3_irq_epc_pipe0_setup(usb3);
+ if (p0_int_sta & P0_INT_BFRDY)
+ usb3_irq_epc_pipe0_bfrdy(usb3);
+}
+
+static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
+ struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req,
+ int status)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (usb3_pn_change(usb3, usb3_ep->num))
+ usb3_pn_stop(usb3);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
+ usb3_disable_pipe_irq(usb3, usb3_ep->num);
+ usb3_request_done(usb3_ep, usb3_req, status);
+
+ /* get next usb3_req */
+ usb3_req = usb3_get_request(usb3_ep);
+ if (usb3_req)
+ usb3_start_pipen(usb3_ep, usb3_req);
+}
+
+static void usb3_irq_epc_pipen_lsttr(struct renesas_usb3 *usb3, int num)
+{
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
+ struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+
+ if (!usb3_req)
+ return;
+
+ if (usb3_ep->dir_in) {
+ dev_dbg(usb3_to_dev(usb3), "%s: len = %u, actual = %u\n",
+ __func__, usb3_req->req.length, usb3_req->req.actual);
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+ }
+}
+
+static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
+{
+ struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
+ struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+ bool done = false;
+
+ if (!usb3_req)
+ return;
+
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num))
+ goto out;
+
+ if (usb3_ep->dir_in) {
+ /* Do not stop the IN pipe here to detect LSTTR interrupt */
+ if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
+ usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
+ } else {
+ if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
+ done = true;
+ }
+
+out:
+ /* need to unlock because usb3_request_done_pipen() locks it */
+ spin_unlock(&usb3->lock);
+
+ if (done)
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+}
+
+static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
+{
+ u32 pn_int_sta;
+
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num) < 0) {
+ spin_unlock(&usb3->lock);
+ return;
+ }
+
+ pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
+ pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
+ usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+ spin_unlock(&usb3->lock);
+ if (pn_int_sta & PN_INT_LSTTR)
+ usb3_irq_epc_pipen_lsttr(usb3, num);
+ if (pn_int_sta & PN_INT_BFRDY)
+ usb3_irq_epc_pipen_bfrdy(usb3, num);
+}
+
+static void usb3_irq_epc_int_2(struct renesas_usb3 *usb3, u32 int_sta_2)
+{
+ int i;
+
+ for (i = 0; i < usb3->num_usb3_eps; i++) {
+ if (int_sta_2 & USB_INT_2_PIPE(i)) {
+ if (!i)
+ usb3_irq_epc_pipe0(usb3);
+ else
+ usb3_irq_epc_pipen(usb3, i);
+ }
+ }
+}
+
+static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
+{
+ usb3_check_id(usb3);
+}
+
+static void usb3_irq_otg_int(struct renesas_usb3 *usb3)
+{
+ u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA(usb3));
+
+ otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
+ if (otg_int_sta)
+ usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
+
+ if (otg_int_sta & USB_OTG_IDMON(usb3))
+ usb3_irq_idmon_change(usb3);
+}
+
+static void usb3_irq_epc(struct renesas_usb3 *usb3)
+{
+ u32 int_sta_1 = usb3_read(usb3, USB3_USB_INT_STA_1);
+ u32 int_sta_2 = usb3_read(usb3, USB3_USB_INT_STA_2);
+
+ int_sta_1 &= usb3_read(usb3, USB3_USB_INT_ENA_1);
+ if (int_sta_1) {
+ usb3_write(usb3, int_sta_1, USB3_USB_INT_STA_1);
+ usb3_irq_epc_int_1(usb3, int_sta_1);
+ }
+
+ int_sta_2 &= usb3_read(usb3, USB3_USB_INT_ENA_2);
+ if (int_sta_2)
+ usb3_irq_epc_int_2(usb3, int_sta_2);
+
+ if (!usb3->is_rzv2m)
+ usb3_irq_otg_int(usb3);
+}
+
+static void usb3_irq_dma_int(struct renesas_usb3 *usb3, u32 dma_sta)
+{
+ struct renesas_usb3_ep *usb3_ep;
+ struct renesas_usb3_request *usb3_req;
+ int i, status;
+
+ for (i = 0; i < usb3->num_usb3_eps; i++) {
+ if (!(dma_sta & DMA_INT(i)))
+ continue;
+
+ usb3_ep = usb3_get_ep(usb3, i);
+ if (!(usb3_read(usb3, USB3_AXI_INT_STA) &
+ AXI_INT_PRDEN_CLR_STA(usb3_ep->dma->num)))
+ continue;
+
+ usb3_req = usb3_get_request(usb3_ep);
+ status = usb3_dma_try_stop(usb3_ep, usb3_req);
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, status);
+ }
+}
+
+static void usb3_irq_dma(struct renesas_usb3 *usb3)
+{
+ u32 dma_sta = usb3_read(usb3, USB3_DMA_INT_STA);
+
+ dma_sta &= usb3_read(usb3, USB3_DMA_INT_ENA);
+ if (dma_sta) {
+ usb3_write(usb3, dma_sta, USB3_DMA_INT_STA);
+ usb3_irq_dma_int(usb3, dma_sta);
+ }
+}
+
+static irqreturn_t renesas_usb3_irq(int irq, void *_usb3)
+{
+ struct renesas_usb3 *usb3 = _usb3;
+ irqreturn_t ret = IRQ_NONE;
+ u32 axi_int_sta = usb3_read(usb3, USB3_AXI_INT_STA);
+
+ if (axi_int_sta & AXI_INT_DMAINT) {
+ usb3_irq_dma(usb3);
+ ret = IRQ_HANDLED;
+ }
+
+ if (axi_int_sta & AXI_INT_EPCINT) {
+ usb3_irq_epc(usb3);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t renesas_usb3_otg_irq(int irq, void *_usb3)
+{
+ struct renesas_usb3 *usb3 = _usb3;
+
+ usb3_irq_otg_int(usb3);
+
+ return IRQ_HANDLED;
+}
+
+static void usb3_write_pn_mod(struct renesas_usb3_ep *usb3_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ u32 val = 0;
+
+ val |= usb3_ep->dir_in ? PN_MOD_DIR : 0;
+ val |= PN_MOD_TYPE(usb_endpoint_type(desc));
+ val |= PN_MOD_EPNUM(usb_endpoint_num(desc));
+ usb3_write(usb3, val, USB3_PN_MOD);
+}
+
+static u32 usb3_calc_ramarea(int ram_size)
+{
+ WARN_ON(ram_size > SZ_16K);
+
+ if (ram_size <= SZ_1K)
+ return PN_RAMMAP_RAMAREA_1KB;
+ else if (ram_size <= SZ_2K)
+ return PN_RAMMAP_RAMAREA_2KB;
+ else if (ram_size <= SZ_4K)
+ return PN_RAMMAP_RAMAREA_4KB;
+ else if (ram_size <= SZ_8K)
+ return PN_RAMMAP_RAMAREA_8KB;
+ else
+ return PN_RAMMAP_RAMAREA_16KB;
+}
+
+static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ int i;
+ static const u32 max_packet_array[] = {8, 16, 32, 64, 512};
+ u32 mpkt = PN_RAMMAP_MPKT(1024);
+
+ for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
+ if (usb_endpoint_maxp(desc) <= max_packet_array[i]) {
+ mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
+ break;
+ }
+ }
+
+ return usb3_ep->rammap_val | mpkt;
+}
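+
+/*
+ * Sketch of the mapping above: a full-speed bulk endpoint with
+ * wMaxPacketSize 64 gets PN_RAMMAP_MPKT(64), while a SuperSpeed bulk
+ * endpoint with wMaxPacketSize 1024 keeps the PN_RAMMAP_MPKT(1024) default.
+ */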
+
+static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ usb3_ep->dir_in = usb_endpoint_dir_in(desc);
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (!usb3_pn_change(usb3, usb3_ep->num)) {
+ usb3_write_pn_mod(usb3_ep, desc);
+ usb3_write(usb3, usb3_calc_rammap_val(usb3_ep, desc),
+ USB3_PN_RAMMAP);
+ usb3_pn_con_clear(usb3);
+ usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON);
+ }
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
+ return 0;
+}
+
+static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ usb3_ep->halt = false;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (!usb3_pn_change(usb3, usb3_ep->num)) {
+ usb3_write(usb3, 0, USB3_PN_INT_ENA);
+ usb3_write(usb3, 0, USB3_PN_RAMMAP);
+ usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
+ }
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
+ return 0;
+}
+
+/*------- usb_ep_ops -----------------------------------------------------*/
+static int renesas_usb3_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
+
+ return usb3_enable_pipe_n(usb3_ep, desc);
+}
+
+static int renesas_usb3_ep_disable(struct usb_ep *_ep)
+{
+ struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
+ struct renesas_usb3_request *usb3_req;
+
+ do {
+ usb3_req = usb3_get_request(usb3_ep);
+ if (!usb3_req)
+ break;
+ usb3_dma_try_stop(usb3_ep, usb3_req);
+ usb3_request_done(usb3_ep, usb3_req, -ESHUTDOWN);
+ } while (1);
+
+ return usb3_disable_pipe_n(usb3_ep);
+}
+
+static struct usb_request *__renesas_usb3_ep_alloc_request(gfp_t gfp_flags)
+{
+ struct renesas_usb3_request *usb3_req;
+
+ usb3_req = kzalloc(sizeof(struct renesas_usb3_request), gfp_flags);
+ if (!usb3_req)
+ return NULL;
+
+ INIT_LIST_HEAD(&usb3_req->queue);
+
+ return &usb3_req->req;
+}
+
+static void __renesas_usb3_ep_free_request(struct usb_request *_req)
+{
+ struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req);
+
+ kfree(usb3_req);
+}
+
+static struct usb_request *renesas_usb3_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ return __renesas_usb3_ep_alloc_request(gfp_flags);
+}
+
+static void renesas_usb3_ep_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ __renesas_usb3_ep_free_request(_req);
+}
+
+static int renesas_usb3_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
+ struct renesas_usb3_request *usb3_req = usb_req_to_usb3_req(_req);
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+ dev_dbg(usb3_to_dev(usb3), "ep_dequeue: ep%2d, %u\n", usb3_ep->num,
+ _req->length);
+
+ usb3_dma_try_stop(usb3_ep, usb3_req);
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, -ECONNRESET);
+
+ return 0;
+}
+
+static int renesas_usb3_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ return usb3_set_halt(usb_ep_to_usb3_ep(_ep), !!value, false);
+}
+
+static int renesas_usb3_ep_set_wedge(struct usb_ep *_ep)
+{
+ struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
+
+ usb3_ep->wedge = true;
+ return usb3_set_halt(usb3_ep, true, false);
+}
+
+static void renesas_usb3_ep_fifo_flush(struct usb_ep *_ep)
+{
+ struct renesas_usb3_ep *usb3_ep = usb_ep_to_usb3_ep(_ep);
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ if (usb3_ep->num) {
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (!usb3_pn_change(usb3, usb3_ep->num)) {
+ usb3_pn_con_clear(usb3);
+ usb3_set_bit(usb3, PN_CON_EN, USB3_PN_CON);
+ }
+ spin_unlock_irqrestore(&usb3->lock, flags);
+ } else {
+ usb3_p0_con_clear_buffer(usb3);
+ }
+}
+
+static const struct usb_ep_ops renesas_usb3_ep_ops = {
+ .enable = renesas_usb3_ep_enable,
+ .disable = renesas_usb3_ep_disable,
+
+ .alloc_request = renesas_usb3_ep_alloc_request,
+ .free_request = renesas_usb3_ep_free_request,
+
+ .queue = renesas_usb3_ep_queue,
+ .dequeue = renesas_usb3_ep_dequeue,
+
+ .set_halt = renesas_usb3_ep_set_halt,
+ .set_wedge = renesas_usb3_ep_set_wedge,
+ .fifo_flush = renesas_usb3_ep_fifo_flush,
+};
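+
+/*
+ * These ops are not called directly: a function driver reaches them through
+ * the usb_ep API. A minimal usage sketch (assuming "ep" came from
+ * usb_ep_autoconfig() and "desc" is its endpoint descriptor):
+ *
+ *	ep->desc = desc;
+ *	usb_ep_enable(ep);                          -> renesas_usb3_ep_enable()
+ *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);          -> renesas_usb3_ep_queue()
+ */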
+
+/*------- usb_gadget_ops -------------------------------------------------*/
+static int renesas_usb3_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct renesas_usb3 *usb3;
+
+ if (!driver || driver->max_speed < USB_SPEED_FULL ||
+ !driver->setup)
+ return -EINVAL;
+
+ usb3 = gadget_to_renesas_usb3(gadget);
+
+ /* hook up the driver */
+ usb3->driver = driver;
+
+ if (usb3->phy)
+ phy_init(usb3->phy);
+
+ pm_runtime_get_sync(usb3_to_dev(usb3));
+
+ renesas_usb3_init_controller(usb3);
+
+ return 0;
+}
+
+static int renesas_usb3_stop(struct usb_gadget *gadget)
+{
+ struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
+
+ usb3->softconnect = false;
+ usb3->gadget.speed = USB_SPEED_UNKNOWN;
+ usb3->driver = NULL;
+ renesas_usb3_stop_controller(usb3);
+
+ if (usb3->phy)
+ phy_exit(usb3->phy);
+
+ pm_runtime_put(usb3_to_dev(usb3));
+
+ return 0;
+}
+
+static int renesas_usb3_get_frame(struct usb_gadget *_gadget)
+{
+ return -EOPNOTSUPP;
+}
+
+static int renesas_usb3_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
+
+ usb3->softconnect = !!is_on;
+
+ return 0;
+}
+
+static int renesas_usb3_set_selfpowered(struct usb_gadget *gadget, int is_self)
+{
+ gadget->is_selfpowered = !!is_self;
+
+ return 0;
+}
+
+static const struct usb_gadget_ops renesas_usb3_gadget_ops = {
+ .get_frame = renesas_usb3_get_frame,
+ .udc_start = renesas_usb3_start,
+ .udc_stop = renesas_usb3_stop,
+ .pullup = renesas_usb3_pullup,
+ .set_selfpowered = renesas_usb3_set_selfpowered,
+};
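+
+/*
+ * These ops are invoked by the UDC core: udc_start()/udc_stop() run when a
+ * gadget driver is bound to or unbound from this UDC (e.g. through configfs
+ * or usb_gadget_register_driver()), and pullup() records the softconnect
+ * state used by the connect handling in this driver.
+ */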
+
+static enum usb_role renesas_usb3_role_switch_get(struct usb_role_switch *sw)
+{
+ struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw);
+ enum usb_role cur_role;
+
+ pm_runtime_get_sync(usb3_to_dev(usb3));
+ cur_role = usb3_is_host(usb3) ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+ pm_runtime_put(usb3_to_dev(usb3));
+
+ return cur_role;
+}
+
+static void handle_ext_role_switch_states(struct device *dev,
+ enum usb_role role)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ struct device *host = usb3->host_dev;
+ enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw);
+
+ switch (role) {
+ case USB_ROLE_NONE:
+ usb3->connection_state = USB_ROLE_NONE;
+ if (cur_role == USB_ROLE_HOST)
+ device_release_driver(host);
+ if (usb3->driver)
+ usb3_disconnect(usb3);
+ usb3_vbus_out(usb3, false);
+ break;
+ case USB_ROLE_DEVICE:
+ if (usb3->connection_state == USB_ROLE_NONE) {
+ usb3->connection_state = USB_ROLE_DEVICE;
+ usb3_set_mode(usb3, false);
+ if (usb3->driver)
+ usb3_connect(usb3);
+ } else if (cur_role == USB_ROLE_HOST) {
+ device_release_driver(host);
+ usb3_set_mode(usb3, false);
+ if (usb3->driver)
+ usb3_connect(usb3);
+ }
+ usb3_vbus_out(usb3, false);
+ break;
+ case USB_ROLE_HOST:
+ if (usb3->connection_state == USB_ROLE_NONE) {
+ if (usb3->driver)
+ usb3_disconnect(usb3);
+
+ usb3->connection_state = USB_ROLE_HOST;
+ usb3_set_mode(usb3, true);
+ usb3_vbus_out(usb3, true);
+ if (device_attach(host) < 0)
+ dev_err(dev, "device_attach(host) failed\n");
+ } else if (cur_role == USB_ROLE_DEVICE) {
+ usb3_disconnect(usb3);
+ /* Must set the mode before device_attach of the host */
+ usb3_set_mode(usb3, true);
+ /* This device_attach() might sleep */
+ if (device_attach(host) < 0)
+ dev_err(dev, "device_attach(host) failed\n");
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_role_switch_states(struct device *dev,
+ enum usb_role role)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ struct device *host = usb3->host_dev;
+ enum usb_role cur_role = renesas_usb3_role_switch_get(usb3->role_sw);
+
+ if (cur_role == USB_ROLE_HOST && role == USB_ROLE_DEVICE) {
+ device_release_driver(host);
+ usb3_set_mode(usb3, false);
+ } else if (cur_role == USB_ROLE_DEVICE && role == USB_ROLE_HOST) {
+ /* Must set the mode before device_attach of the host */
+ usb3_set_mode(usb3, true);
+ /* This device_attach() might sleep */
+ if (device_attach(host) < 0)
+ dev_err(dev, "device_attach(host) failed\n");
+ }
+}
+
+static int renesas_usb3_role_switch_set(struct usb_role_switch *sw,
+ enum usb_role role)
+{
+ struct renesas_usb3 *usb3 = usb_role_switch_get_drvdata(sw);
+
+ pm_runtime_get_sync(usb3_to_dev(usb3));
+
+ if (usb3->role_sw_by_connector)
+ handle_ext_role_switch_states(usb3_to_dev(usb3), role);
+ else
+ handle_role_switch_states(usb3_to_dev(usb3), role);
+
+ pm_runtime_put(usb3_to_dev(usb3));
+
+ return 0;
+}
+
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ bool new_mode_is_host;
+
+ if (!usb3->driver)
+ return -ENODEV;
+
+ if (usb3->forced_b_device)
+ return -EBUSY;
+
+ if (sysfs_streq(buf, "host"))
+ new_mode_is_host = true;
+ else if (sysfs_streq(buf, "peripheral"))
+ new_mode_is_host = false;
+ else
+ return -EINVAL;
+
+ if (new_mode_is_host == usb3_is_host(usb3))
+ return -EINVAL;
+
+ usb3_mode_config(usb3, new_mode_is_host, usb3_is_a_device(usb3));
+
+ return count;
+}
+
+static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ if (!usb3->driver)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", usb3_is_host(usb3) ? "host" : "peripheral");
+}
+static DEVICE_ATTR_RW(role);
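+
+/*
+ * Userspace usage sketch for the "role" attribute above (the exact sysfs
+ * path depends on the platform device name):
+ *
+ *	echo host > /sys/devices/platform/.../role
+ *	cat /sys/devices/platform/.../role
+ */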
+
+static int renesas_usb3_b_device_show(struct seq_file *s, void *unused)
+{
+ struct renesas_usb3 *usb3 = s->private;
+
+ seq_printf(s, "%d\n", usb3->forced_b_device);
+
+ return 0;
+}
+
+static int renesas_usb3_b_device_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, renesas_usb3_b_device_show, inode->i_private);
+}
+
+static ssize_t renesas_usb3_b_device_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct renesas_usb3 *usb3 = s->private;
+ char buf[32];
+
+ if (!usb3->driver)
+ return -ENODEV;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ usb3->start_to_connect = false;
+ if (usb3->workaround_for_vbus && usb3->forced_b_device &&
+ !strncmp(buf, "2", 1))
+ usb3->start_to_connect = true;
+ else if (!strncmp(buf, "1", 1))
+ usb3->forced_b_device = true;
+ else
+ usb3->forced_b_device = false;
+
+ if (usb3->workaround_for_vbus)
+ usb3_disconnect(usb3);
+
+ /* Let this driver call usb3_connect() if needed */
+ usb3_check_id(usb3);
+
+ return count;
+}
+
+static const struct file_operations renesas_usb3_b_device_fops = {
+ .open = renesas_usb3_b_device_open,
+ .write = renesas_usb3_b_device_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
+ struct device *dev)
+{
+ usb3->dentry = debugfs_create_dir(dev_name(dev), usb_debug_root);
+
+ debugfs_create_file("b_device", 0644, usb3->dentry, usb3,
+ &renesas_usb3_b_device_fops);
+}
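+
+/*
+ * Debugfs usage sketch: with usb_debug_root the file is typically
+ * /sys/kernel/debug/usb/<dev>/b_device. Writing "1" forces B-device mode,
+ * "2" (only while already forced, on SoCs with the VBUS workaround)
+ * additionally starts a connection, and anything else clears the forced
+ * mode.
+ */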
+
+/*------- platform_driver ------------------------------------------------*/
+static int renesas_usb3_remove(struct platform_device *pdev)
+{
+ struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(usb3->dentry);
+ device_remove_file(&pdev->dev, &dev_attr_role);
+
+ cancel_work_sync(&usb3->role_work);
+ usb_role_switch_unregister(usb3->role_sw);
+
+ usb_del_gadget_udc(&usb3->gadget);
+ reset_control_assert(usb3->usbp_rstc);
+ reset_control_assert(usb3->drd_rstc);
+ renesas_usb3_dma_free_prd(usb3, &pdev->dev);
+
+ __renesas_usb3_ep_free_request(usb3->ep0_req);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
+ const struct renesas_usb3_priv *priv)
+{
+ struct renesas_usb3_ep *usb3_ep;
+ int i;
+
+ /* calculate num_usb3_eps from renesas_usb3_priv */
+ usb3->num_usb3_eps = priv->ramsize_per_ramif * priv->num_ramif * 2 /
+ priv->ramsize_per_pipe + 1;
+
+ if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES(usb3))
+ usb3->num_usb3_eps = USB3_MAX_NUM_PIPES(usb3);
+
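+ /*
+ * Example: with the renesas_usb3_priv_gen3 settings below (16 KiB per
+ * RAM interface, 4 interfaces, 4 KiB per pipe) this works out to
+ * 16K * 4 * 2 / 4K + 1 = 33 pipes, subject to the clamp above.
+ */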
+ usb3->usb3_ep = devm_kcalloc(dev,
+ usb3->num_usb3_eps, sizeof(*usb3_ep),
+ GFP_KERNEL);
+ if (!usb3->usb3_ep)
+ return -ENOMEM;
+
+ dev_dbg(dev, "%s: num_usb3_eps = %d\n", __func__, usb3->num_usb3_eps);
+ /*
+ * This driver prepares pipes as follows:
+ * - odd pipes = IN pipe
+ * - even pipes = OUT pipe (except pipe 0)
+ */
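+ /* e.g. num_usb3_eps = 5: ep0 (control), ep1 IN, ep2 OUT, ep3 IN, ep4 OUT */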
+ usb3_for_each_ep(usb3_ep, usb3, i) {
+ snprintf(usb3_ep->ep_name, sizeof(usb3_ep->ep_name), "ep%d", i);
+ usb3_ep->usb3 = usb3;
+ usb3_ep->num = i;
+ usb3_ep->ep.name = usb3_ep->ep_name;
+ usb3_ep->ep.ops = &renesas_usb3_ep_ops;
+ INIT_LIST_HEAD(&usb3_ep->queue);
+ INIT_LIST_HEAD(&usb3_ep->ep.ep_list);
+ if (!i) {
+ /* for control pipe */
+ usb3->gadget.ep0 = &usb3_ep->ep;
+ usb_ep_set_maxpacket_limit(&usb3_ep->ep,
+ USB3_EP0_SS_MAX_PACKET_SIZE);
+ usb3_ep->ep.caps.type_control = true;
+ usb3_ep->ep.caps.dir_in = true;
+ usb3_ep->ep.caps.dir_out = true;
+ continue;
+ }
+
+ /* for bulk or interrupt pipe */
+ usb_ep_set_maxpacket_limit(&usb3_ep->ep, ~0);
+ list_add_tail(&usb3_ep->ep.ep_list, &usb3->gadget.ep_list);
+ usb3_ep->ep.caps.type_bulk = true;
+ usb3_ep->ep.caps.type_int = true;
+ if (i & 1)
+ usb3_ep->ep.caps.dir_in = true;
+ else
+ usb3_ep->ep.caps.dir_out = true;
+ }
+
+ return 0;
+}
+
+static void renesas_usb3_init_ram(struct renesas_usb3 *usb3, struct device *dev,
+ const struct renesas_usb3_priv *priv)
+{
+ struct renesas_usb3_ep *usb3_ep;
+ int i;
+ u32 ramif[2], basead[2]; /* index 0 = for IN pipes */
+ u32 *cur_ramif, *cur_basead;
+ u32 val;
+
+ memset(ramif, 0, sizeof(ramif));
+ memset(basead, 0, sizeof(basead));
+
+ /*
+ * This driver prepares pipes as follows:
+ * - all pipes = the same size as "ramsize_per_pipe"
+ * Please refer to the "Method of Specifying RAM Mapping"
+ */
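+ /*
+ * Sketch with the gen3 settings (4 RAM interfaces, 4 KiB per pipe): the
+ * first four IN pipes land on ramif 0..3 with basead 0, the next four
+ * wrap back to ramif 0 with basead advanced by 4 KiB, and OUT pipes are
+ * laid out the same way independently.
+ */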
+ usb3_for_each_ep(usb3_ep, usb3, i) {
+ if (!i)
+ continue; /* out of scope if ep num = 0 */
+ if (usb3_ep->ep.caps.dir_in) {
+ cur_ramif = &ramif[0];
+ cur_basead = &basead[0];
+ } else {
+ cur_ramif = &ramif[1];
+ cur_basead = &basead[1];
+ }
+
+ if (*cur_basead > priv->ramsize_per_ramif)
+ continue; /* out of memory for IN or OUT pipe */
+
+ /* calculate rammap_val */
+ val = PN_RAMMAP_RAMIF(*cur_ramif);
+ val |= usb3_calc_ramarea(priv->ramsize_per_pipe);
+ val |= PN_RAMMAP_BASEAD(*cur_basead);
+ usb3_ep->rammap_val = val;
+
+ dev_dbg(dev, "ep%2d: val = %08x, ramif = %d, base = %x\n",
+ i, val, *cur_ramif, *cur_basead);
+
+ /* update current ramif */
+ if (*cur_ramif + 1 == priv->num_ramif) {
+ *cur_ramif = 0;
+ *cur_basead += priv->ramsize_per_pipe;
+ } else {
+ (*cur_ramif)++;
+ }
+ }
+}
+
+static const struct renesas_usb3_priv renesas_usb3_priv_r8a7795_es1 = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 2,
+ .ramsize_per_pipe = SZ_4K,
+ .workaround_for_vbus = true,
+};
+
+static const struct renesas_usb3_priv renesas_usb3_priv_gen3 = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 4,
+ .ramsize_per_pipe = SZ_4K,
+};
+
+static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 4,
+ .ramsize_per_pipe = SZ_4K,
+ .workaround_for_vbus = true,
+};
+
+static const struct renesas_usb3_priv renesas_usb3_priv_rzv2m = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 1,
+ .ramsize_per_pipe = SZ_4K,
+ .is_rzv2m = true,
+};
+
+static const struct of_device_id usb3_of_match[] = {
+ {
+ .compatible = "renesas,r8a774c0-usb3-peri",
+ .data = &renesas_usb3_priv_r8a77990,
+ }, {
+ .compatible = "renesas,r8a7795-usb3-peri",
+ .data = &renesas_usb3_priv_gen3,
+ }, {
+ .compatible = "renesas,r8a77990-usb3-peri",
+ .data = &renesas_usb3_priv_r8a77990,
+ }, {
+ .compatible = "renesas,rzv2m-usb3-peri",
+ .data = &renesas_usb3_priv_rzv2m,
+ }, {
+ .compatible = "renesas,rcar-gen3-usb3-peri",
+ .data = &renesas_usb3_priv_gen3,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, usb3_of_match);
+
+static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = &renesas_usb3_priv_r8a7795_es1,
+ },
+ { /* sentinel */ }
+};
+
+static const unsigned int renesas_usb3_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
+ .set = renesas_usb3_role_switch_set,
+ .get = renesas_usb3_role_switch_get,
+ .allow_userspace_control = true,
+};
+
+static int renesas_usb3_probe(struct platform_device *pdev)
+{
+ struct renesas_usb3 *usb3;
+ int irq, drd_irq, ret;
+ const struct renesas_usb3_priv *priv;
+ const struct soc_device_attribute *attr;
+
+ attr = soc_device_match(renesas_usb3_quirks_match);
+ if (attr)
+ priv = attr->data;
+ else
+ priv = of_device_get_match_data(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (priv->is_rzv2m) {
+ drd_irq = platform_get_irq_byname(pdev, "drd");
+ if (drd_irq < 0)
+ return drd_irq;
+ }
+
+ usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
+ if (!usb3)
+ return -ENOMEM;
+
+ usb3->is_rzv2m = priv->is_rzv2m;
+
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usb3->reg))
+ return PTR_ERR(usb3->reg);
+
+ platform_set_drvdata(pdev, usb3);
+ spin_lock_init(&usb3->lock);
+
+ usb3->gadget.ops = &renesas_usb3_gadget_ops;
+ usb3->gadget.name = udc_name;
+ usb3->gadget.max_speed = USB_SPEED_SUPER;
+ INIT_LIST_HEAD(&usb3->gadget.ep_list);
+ ret = renesas_usb3_init_ep(usb3, &pdev->dev, priv);
+ if (ret < 0)
+ return ret;
+ renesas_usb3_init_ram(usb3, &pdev->dev, priv);
+
+ ret = devm_request_irq(&pdev->dev, irq, renesas_usb3_irq, 0,
+ dev_name(&pdev->dev), usb3);
+ if (ret < 0)
+ return ret;
+
+ if (usb3->is_rzv2m) {
+ ret = devm_request_irq(&pdev->dev, drd_irq,
+ renesas_usb3_otg_irq, 0,
+ dev_name(&pdev->dev), usb3);
+ if (ret < 0)
+ return ret;
+ }
+
+ INIT_WORK(&usb3->extcon_work, renesas_usb3_extcon_work);
+ usb3->extcon = devm_extcon_dev_allocate(&pdev->dev, renesas_usb3_cable);
+ if (IS_ERR(usb3->extcon))
+ return PTR_ERR(usb3->extcon);
+
+ ret = devm_extcon_dev_register(&pdev->dev, usb3->extcon);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register extcon\n");
+ return ret;
+ }
+
+ /* for ep0 handling */
+ usb3->ep0_req = __renesas_usb3_ep_alloc_request(GFP_KERNEL);
+ if (!usb3->ep0_req)
+ return -ENOMEM;
+
+ ret = renesas_usb3_dma_alloc_prd(usb3, &pdev->dev);
+ if (ret < 0)
+ goto err_alloc_prd;
+
+ /*
+ * The phy is optional, so if this driver cannot get one, it simply
+ * does not handle a phy at all.
+ */
+ usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
+ if (IS_ERR(usb3->phy)) {
+ ret = PTR_ERR(usb3->phy);
+ goto err_add_udc;
+ }
+
+ usb3->drd_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
+ "drd_reset");
+ if (IS_ERR(usb3->drd_rstc)) {
+ ret = PTR_ERR(usb3->drd_rstc);
+ goto err_add_udc;
+ }
+
+ usb3->usbp_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
+ "aresetn_p");
+ if (IS_ERR(usb3->usbp_rstc)) {
+ ret = PTR_ERR(usb3->usbp_rstc);
+ goto err_add_udc;
+ }
+
+ reset_control_deassert(usb3->drd_rstc);
+ reset_control_deassert(usb3->usbp_rstc);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
+ if (ret < 0)
+ goto err_reset;
+
+ ret = device_create_file(&pdev->dev, &dev_attr_role);
+ if (ret < 0)
+ goto err_dev_create;
+
+ if (device_property_read_bool(&pdev->dev, "usb-role-switch")) {
+ usb3->role_sw_by_connector = true;
+ renesas_usb3_role_switch_desc.fwnode = dev_fwnode(&pdev->dev);
+ }
+
+ renesas_usb3_role_switch_desc.driver_data = usb3;
+
+ INIT_WORK(&usb3->role_work, renesas_usb3_role_work);
+ usb3->role_sw = usb_role_switch_register(&pdev->dev,
+ &renesas_usb3_role_switch_desc);
+ if (!IS_ERR(usb3->role_sw)) {
+ usb3->host_dev = usb_of_get_companion_dev(&pdev->dev);
+ if (!usb3->host_dev) {
+ /* If not found, this driver will not use a role sw */
+ usb_role_switch_unregister(usb3->role_sw);
+ usb3->role_sw = NULL;
+ }
+ } else {
+ usb3->role_sw = NULL;
+ }
+
+ usb3->workaround_for_vbus = priv->workaround_for_vbus;
+
+ renesas_usb3_debugfs_init(usb3, &pdev->dev);
+
+ dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
+
+ return 0;
+
+err_dev_create:
+ usb_del_gadget_udc(&usb3->gadget);
+
+err_reset:
+ reset_control_assert(usb3->usbp_rstc);
+ reset_control_assert(usb3->drd_rstc);
+
+err_add_udc:
+ renesas_usb3_dma_free_prd(usb3, &pdev->dev);
+
+err_alloc_prd:
+ __renesas_usb3_ep_free_request(usb3->ep0_req);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int renesas_usb3_suspend(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!usb3->driver)
+ return 0;
+
+ renesas_usb3_stop_controller(usb3);
+ if (usb3->phy)
+ phy_exit(usb3->phy);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int renesas_usb3_resume(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!usb3->driver)
+ return 0;
+
+ if (usb3->phy)
+ phy_init(usb3->phy);
+ pm_runtime_get_sync(dev);
+ renesas_usb3_init_controller(usb3);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(renesas_usb3_pm_ops, renesas_usb3_suspend,
+ renesas_usb3_resume);
+
+static struct platform_driver renesas_usb3_driver = {
+ .probe = renesas_usb3_probe,
+ .remove = renesas_usb3_remove,
+ .driver = {
+ .name = udc_name,
+ .pm = &renesas_usb3_pm_ops,
+ .of_match_table = of_match_ptr(usb3_of_match),
+ },
+};
+module_platform_driver(renesas_usb3_driver);
+
+MODULE_DESCRIPTION("Renesas USB3.0 Peripheral driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_ALIAS("platform:renesas_usb3");
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
new file mode 100644
index 000000000..4b7eb7701
--- /dev/null
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -0,0 +1,1319 @@
+// SPDX-License-Identifier: GPL-2.0
+/* linux/drivers/usb/gadget/s3c-hsudc.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S3C24XX USB 2.0 High-speed USB controller gadget driver
+ *
+ * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
+ * Each endpoint can be configured as either in or out endpoint. Endpoints
+ * can be configured for Bulk or Interrupt transfer mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/prefetch.h>
+#include <linux/platform_data/s3c-hsudc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+
+#define S3C_HSUDC_REG(x) (x)
+
+/* Non-Indexed Registers */
+#define S3C_IR S3C_HSUDC_REG(0x00) /* Index Register */
+#define S3C_EIR S3C_HSUDC_REG(0x04) /* EP Intr Status */
+#define S3C_EIR_EP0 (1<<0)
+#define S3C_EIER S3C_HSUDC_REG(0x08) /* EP Intr Enable */
+#define S3C_FAR S3C_HSUDC_REG(0x0c) /* Gadget Address */
+#define S3C_FNR S3C_HSUDC_REG(0x10) /* Frame Number */
+#define S3C_EDR S3C_HSUDC_REG(0x14) /* EP Direction */
+#define S3C_TR S3C_HSUDC_REG(0x18) /* Test Register */
+#define S3C_SSR S3C_HSUDC_REG(0x1c) /* System Status */
+#define S3C_SSR_DTZIEN_EN (0xff8f)
+#define S3C_SSR_ERR (0xff80)
+#define S3C_SSR_VBUSON (1 << 8)
+#define S3C_SSR_HSP (1 << 4)
+#define S3C_SSR_SDE (1 << 3)
+#define S3C_SSR_RESUME (1 << 2)
+#define S3C_SSR_SUSPEND (1 << 1)
+#define S3C_SSR_RESET (1 << 0)
+#define S3C_SCR S3C_HSUDC_REG(0x20) /* System Control */
+#define S3C_SCR_DTZIEN_EN (1 << 14)
+#define S3C_SCR_RRD_EN (1 << 5)
+#define S3C_SCR_SUS_EN (1 << 1)
+#define S3C_SCR_RST_EN (1 << 0)
+#define S3C_EP0SR S3C_HSUDC_REG(0x24) /* EP0 Status */
+#define S3C_EP0SR_EP0_LWO (1 << 6)
+#define S3C_EP0SR_STALL (1 << 4)
+#define S3C_EP0SR_TX_SUCCESS (1 << 1)
+#define S3C_EP0SR_RX_SUCCESS (1 << 0)
+#define S3C_EP0CR S3C_HSUDC_REG(0x28) /* EP0 Control */
+#define S3C_BR(_x) S3C_HSUDC_REG(0x60 + (_x * 4))
+
+/* Indexed Registers */
+#define S3C_ESR S3C_HSUDC_REG(0x2c) /* EPn Status */
+#define S3C_ESR_FLUSH (1 << 6)
+#define S3C_ESR_STALL (1 << 5)
+#define S3C_ESR_LWO (1 << 4)
+#define S3C_ESR_PSIF_ONE (1 << 2)
+#define S3C_ESR_PSIF_TWO (2 << 2)
+#define S3C_ESR_TX_SUCCESS (1 << 1)
+#define S3C_ESR_RX_SUCCESS (1 << 0)
+#define S3C_ECR S3C_HSUDC_REG(0x30) /* EPn Control */
+#define S3C_ECR_DUEN (1 << 7)
+#define S3C_ECR_FLUSH (1 << 6)
+#define S3C_ECR_STALL (1 << 1)
+#define S3C_ECR_IEMS (1 << 0)
+#define S3C_BRCR S3C_HSUDC_REG(0x34) /* Read Count */
+#define S3C_BWCR S3C_HSUDC_REG(0x38) /* Write Count */
+#define S3C_MPR S3C_HSUDC_REG(0x3c) /* Max Pkt Size */
+
+#define WAIT_FOR_SETUP (0)
+#define DATA_STATE_XMIT (1)
+#define DATA_STATE_RECV (2)
+
+static const char * const s3c_hsudc_supply_names[] = {
+ "vdda", /* analog phy supply, 3.3V */
+ "vddi", /* digital phy supply, 1.2V */
+ "vddosc", /* oscillator supply, 1.8V - 3.3V */
+};
+
+/**
+ * struct s3c_hsudc_ep - Endpoint representation used by driver.
+ * @ep: USB gadget layer representation of device endpoint.
+ * @name: Endpoint name (as required by ep autoconfiguration).
+ * @dev: Reference to the device controller to which this EP belongs.
+ * @queue: Transfer request queue for the endpoint.
+ * @stopped: Maintains state of endpoint, set if EP is halted.
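+ * @wedge: Set when the endpoint is wedged; a ClearFeature(HALT) from the
+ *	host then leaves the stall in place (see s3c_hsudc_handle_reqfeat()).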
+ * @bEndpointAddress: EP address (including direction bit).
+ * @fifo: Base address of EP FIFO.
+ */
+struct s3c_hsudc_ep {
+ struct usb_ep ep;
+ char name[20];
+ struct s3c_hsudc *dev;
+ struct list_head queue;
+ u8 stopped;
+ u8 wedge;
+ u8 bEndpointAddress;
+ void __iomem *fifo;
+};
+
+/**
+ * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request.
+ * @req: Reference to USB gadget transfer request.
+ * @queue: Used for inserting this request to the endpoint request queue.
+ */
+struct s3c_hsudc_req {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+/**
+ * struct s3c_hsudc - Driver's abstraction of the device controller.
+ * @gadget: Instance of usb_gadget which is referenced by gadget driver.
+ * @driver: Reference to currently active gadget driver.
+ * @dev: The device reference used by probe function.
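+ * @pd: Board-supplied platform data (endpoint count etc.).
+ * @transceiver: Optional external USB transceiver (phy).
+ * @supplies: Regulators powering the phy (see s3c_hsudc_supply_names).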
+ * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
+ * @regs: Remapped base address of controller's register space.
+ * @irq: IRQ number used by the controller.
+ * @uclk: Reference to the controller clock.
+ * @ep0state: Current state of EP0.
+ * @ep: List of endpoints supported by the controller.
+ */
+struct s3c_hsudc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct device *dev;
+ struct s3c24xx_hsudc_platdata *pd;
+ struct usb_phy *transceiver;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
+ spinlock_t lock;
+ void __iomem *regs;
+ int irq;
+ struct clk *uclk;
+ int ep0state;
+ struct s3c_hsudc_ep ep[];
+};
+
+#define ep_maxpacket(_ep) ((_ep)->ep.maxpacket)
+#define ep_is_in(_ep) ((_ep)->bEndpointAddress & USB_DIR_IN)
+#define ep_index(_ep) ((_ep)->bEndpointAddress & \
+ USB_ENDPOINT_NUMBER_MASK)
+
+static const char driver_name[] = "s3c-udc";
+static const char ep0name[] = "ep0-control";
+
+static inline struct s3c_hsudc_req *our_req(struct usb_request *req)
+{
+ return container_of(req, struct s3c_hsudc_req, req);
+}
+
+static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct s3c_hsudc_ep, ep);
+}
+
+static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct s3c_hsudc, gadget);
+}
+
+static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr)
+{
+ ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ writel(ep_addr, hsudc->regs + S3C_IR);
+}
+
+static inline void __orr32(void __iomem *ptr, u32 val)
+{
+ writel(readl(ptr) | val, ptr);
+}
+
+/**
+ * s3c_hsudc_complete_request - Complete a transfer request.
+ * @hsep: Endpoint to which the request belongs.
+ * @hsreq: Transfer request to be completed.
+ * @status: Transfer completion status for the transfer request.
+ */
+static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep,
+ struct s3c_hsudc_req *hsreq, int status)
+{
+ unsigned int stopped = hsep->stopped;
+ struct s3c_hsudc *hsudc = hsep->dev;
+
+ list_del_init(&hsreq->queue);
+ hsreq->req.status = status;
+
+ if (!ep_index(hsep)) {
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ hsep->bEndpointAddress &= ~USB_DIR_IN;
+ }
+
+ hsep->stopped = 1;
+ spin_unlock(&hsudc->lock);
+ usb_gadget_giveback_request(&hsep->ep, &hsreq->req);
+ spin_lock(&hsudc->lock);
+ hsep->stopped = stopped;
+}
+
+/**
+ * s3c_hsudc_nuke_ep - Terminate all requests queued for a endpoint.
+ * @hsep: Endpoint for which queued requests have to be terminated.
+ * @status: Transfer completion status for the transfer request.
+ */
+static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
+{
+ struct s3c_hsudc_req *hsreq;
+
+ while (!list_empty(&hsep->queue)) {
+ hsreq = list_entry(hsep->queue.next,
+ struct s3c_hsudc_req, queue);
+ s3c_hsudc_complete_request(hsep, hsreq, status);
+ }
+}
+
+/**
+ * s3c_hsudc_stop_activity - Stop activity on all endpoints.
+ * @hsudc: Device controller for which EP activity is to be stopped.
+ *
+ * All endpoints are stopped and any transfer requests still pending on
+ * them are terminated.
+ */
+static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
+{
+ struct s3c_hsudc_ep *hsep;
+ int epnum;
+
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
+
+ for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) {
+ hsep = &hsudc->ep[epnum];
+ hsep->stopped = 1;
+ s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
+ }
+}
+
+/**
+ * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo.
+ * @hsudc: Device controller from which setup packet is to be read.
+ * @buf: The buffer into which the setup packet is read.
+ *
+ * The setup packet received in the EP0 fifo is read and stored into a
+ * given buffer address.
+ */
+static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf)
+{
+ int count;
+
+ count = readl(hsudc->regs + S3C_BRCR);
+ while (count--)
+ *buf++ = (u16)readl(hsudc->regs + S3C_BR(0));
+
+ writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR);
+}
+
+/**
+ * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
+ * @hsep: Endpoint to which the data is to be written.
+ * @hsreq: Transfer request from which the next chunk of data is written.
+ *
+ * Write the next chunk of data from a transfer request to the endpoint FIFO.
+ * If the transfer request completes, 1 is returned, otherwise 0 is returned.
+ */
+static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep,
+ struct s3c_hsudc_req *hsreq)
+{
+ u16 *buf;
+ u32 max = ep_maxpacket(hsep);
+ u32 count, length;
+ bool is_last;
+ void __iomem *fifo = hsep->fifo;
+
+ buf = hsreq->req.buf + hsreq->req.actual;
+ prefetch(buf);
+
+ length = hsreq->req.length - hsreq->req.actual;
+ length = min(length, max);
+ hsreq->req.actual += length;
+
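+ /*
+ * The byte count is programmed into BWCR first; the FIFO is then fed
+ * 16 bits per write, so e.g. a 512-byte packet takes 256 writes.
+ */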
+ writel(length, hsep->dev->regs + S3C_BWCR);
+ for (count = 0; count < length; count += 2)
+ writel(*buf++, fifo);
+
+ if (count != max) {
+ is_last = true;
+ } else {
+ if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
+ is_last = false;
+ else
+ is_last = true;
+ }
+
+ if (is_last) {
+ s3c_hsudc_complete_request(hsep, hsreq, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
+ * @hsep: Endpoint from which the data is to be read.
+ * @hsreq: Transfer request to which the next chunk of data read is written.
+ *
+ * Read the next chunk of data from the endpoint FIFO and write it to the
+ * transfer request buffer. If the transfer request completes, 1 is returned,
+ * otherwise 0 is returned.
+ */
+static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep,
+ struct s3c_hsudc_req *hsreq)
+{
+ struct s3c_hsudc *hsudc = hsep->dev;
+ u32 csr, offset;
+ u16 *buf, word;
+ u32 buflen, rcnt, rlen;
+ void __iomem *fifo = hsep->fifo;
+ u32 is_short = 0;
+
+ offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
+ csr = readl(hsudc->regs + offset);
+ if (!(csr & S3C_ESR_RX_SUCCESS))
+ return -EINVAL;
+
+ buf = hsreq->req.buf + hsreq->req.actual;
+ prefetchw(buf);
+ buflen = hsreq->req.length - hsreq->req.actual;
+
+ rcnt = readl(hsudc->regs + S3C_BRCR);
+ rlen = (csr & S3C_ESR_LWO) ? (rcnt * 2 - 1) : (rcnt * 2);
+
+ hsreq->req.actual += min(rlen, buflen);
+ is_short = (rlen < hsep->ep.maxpacket);
+
+ while (rcnt-- != 0) {
+ word = (u16)readl(fifo);
+ if (buflen) {
+ *buf++ = word;
+ buflen--;
+ } else {
+ hsreq->req.status = -EOVERFLOW;
+ }
+ }
+
+ writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset);
+
+ if (is_short || hsreq->req.actual == hsreq->req.length) {
+ s3c_hsudc_complete_request(hsep, hsreq, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * s3c_hsudc_epin_intr - Handle in-endpoint interrupt.
+ * @hsudc: Device controller for which the interrupt is to be handled.
+ * @ep_idx: Endpoint number on which an interrupt is pending.
+ *
+ * Handles interrupts for an in-endpoint: stall and data-transmit-complete.
+ */
+static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
+{
+ struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
+ struct s3c_hsudc_req *hsreq;
+ u32 csr;
+
+ csr = readl(hsudc->regs + S3C_ESR);
+ if (csr & S3C_ESR_STALL) {
+ writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
+ return;
+ }
+
+ if (csr & S3C_ESR_TX_SUCCESS) {
+ writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR);
+ if (list_empty(&hsep->queue))
+ return;
+
+ hsreq = list_entry(hsep->queue.next,
+ struct s3c_hsudc_req, queue);
+ if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) &&
+ (csr & S3C_ESR_PSIF_TWO))
+ s3c_hsudc_write_fifo(hsep, hsreq);
+ }
+}
+
+/**
+ * s3c_hsudc_epout_intr - Handle out-endpoint interrupt.
+ * @hsudc: Device controller for which the interrupt is to be handled.
+ * @ep_idx: Endpoint number on which an interrupt is pending.
+ *
+ * Handles interrupts for an out-endpoint: stall, flush and data-ready.
+ */
+static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
+{
+ struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
+ struct s3c_hsudc_req *hsreq;
+ u32 csr;
+
+ csr = readl(hsudc->regs + S3C_ESR);
+ if (csr & S3C_ESR_STALL) {
+ writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
+ return;
+ }
+
+ if (csr & S3C_ESR_FLUSH) {
+ __orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH);
+ return;
+ }
+
+ if (csr & S3C_ESR_RX_SUCCESS) {
+ if (list_empty(&hsep->queue))
+ return;
+
+ hsreq = list_entry(hsep->queue.next,
+ struct s3c_hsudc_req, queue);
+ if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) &&
+ (csr & S3C_ESR_PSIF_TWO))
+ s3c_hsudc_read_fifo(hsep, hsreq);
+ }
+}
+
+/**
+ * s3c_hsudc_set_halt - Set or clear an endpoint halt.
+ * @_ep: Endpoint on which halt has to be set or cleared.
+ * @value: 1 to set halt on the endpoint, 0 to clear it.
+ *
+ * Set or clear endpoint halt. If halt is set, the endpoint is stopped.
+ * If halt is cleared on an in-endpoint, any pending transfer requests
+ * are restarted.
+ */
+static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value)
+{
+ struct s3c_hsudc_ep *hsep = our_ep(_ep);
+ struct s3c_hsudc *hsudc = hsep->dev;
+ struct s3c_hsudc_req *hsreq;
+ unsigned long irqflags;
+ u32 ecr;
+ u32 offset;
+
+ if (value && ep_is_in(hsep) && !list_empty(&hsep->queue))
+ return -EAGAIN;
+
+ spin_lock_irqsave(&hsudc->lock, irqflags);
+ set_index(hsudc, ep_index(hsep));
+ offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR;
+ ecr = readl(hsudc->regs + offset);
+
+ if (value) {
+ ecr |= S3C_ECR_STALL;
+ if (ep_index(hsep))
+ ecr |= S3C_ECR_FLUSH;
+ hsep->stopped = 1;
+ } else {
+ ecr &= ~S3C_ECR_STALL;
+ hsep->stopped = hsep->wedge = 0;
+ }
+ writel(ecr, hsudc->regs + offset);
+
+ if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) {
+ hsreq = list_entry(hsep->queue.next,
+ struct s3c_hsudc_req, queue);
+ if (hsreq)
+ s3c_hsudc_write_fifo(hsep, hsreq);
+ }
+
+ spin_unlock_irqrestore(&hsudc->lock, irqflags);
+ return 0;
+}
+
+/**
+ * s3c_hsudc_set_wedge - Set the halt feature, ignoring clear requests.
+ * @_ep: Endpoint on which wedge has to be set.
+ *
+ * Sets the halt feature; subsequent ClearFeature(HALT) requests from the
+ * host are ignored until the endpoint is re-enabled.
+ */
+static int s3c_hsudc_set_wedge(struct usb_ep *_ep)
+{
+ struct s3c_hsudc_ep *hsep = our_ep(_ep);
+
+ if (!hsep)
+ return -EINVAL;
+
+ hsep->wedge = 1;
+ return usb_ep_set_halt(_ep);
+}
+
+/**
+ * s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests.
+ * @hsudc: Device controller on which the set/clear feature needs to be handled.
+ * @ctrl: Control request as received on the endpoint 0.
+ *
+ * Handle set feature or clear feature control requests on the control endpoint.
+ */
+static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct s3c_hsudc_ep *hsep;
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
+
+ if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
+ hsep = &hsudc->ep[ep_num];
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_ENDPOINT_HALT:
+ if (set || !hsep->wedge)
+ s3c_hsudc_set_halt(&hsep->ep, set);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * s3c_hsudc_process_req_status - Handle get status control request.
+ * @hsudc: Device controller on which the get status request has to be handled.
+ * @ctrl: Control request as received on the endpoint 0.
+ *
+ * Handle get status control request received on control endpoint.
+ */
+static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0];
+ struct s3c_hsudc_req hsreq;
+ struct s3c_hsudc_ep *hsep;
+ __le16 reply;
+ u8 epnum;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ reply = cpu_to_le16(0);
+ break;
+
+ case USB_RECIP_INTERFACE:
+ reply = cpu_to_le16(0);
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+ hsep = &hsudc->ep[epnum];
+ reply = cpu_to_le16(hsep->stopped ? 1 : 0);
+ break;
+ }
+
+ INIT_LIST_HEAD(&hsreq.queue);
+ hsreq.req.length = 2;
+ hsreq.req.buf = &reply;
+ hsreq.req.actual = 0;
+ hsreq.req.complete = NULL;
+ s3c_hsudc_write_fifo(hsep0, &hsreq);
+}
+
+/**
+ * s3c_hsudc_process_setup - Process control request received on endpoint 0.
+ * @hsudc: Device controller on which control request has been received.
+ *
+ * Read the control request received on endpoint 0, decode it and handle
+ * the request.
+ */
+static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
+{
+ struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
+ struct usb_ctrlrequest ctrl = {0};
+ int ret;
+
+ s3c_hsudc_nuke_ep(hsep, -EPROTO);
+ s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl);
+
+ if (ctrl.bRequestType & USB_DIR_IN) {
+ hsep->bEndpointAddress |= USB_DIR_IN;
+ hsudc->ep0state = DATA_STATE_XMIT;
+ } else {
+ hsep->bEndpointAddress &= ~USB_DIR_IN;
+ hsudc->ep0state = DATA_STATE_RECV;
+ }
+
+ switch (ctrl.bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
+ break;
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ return;
+
+ case USB_REQ_GET_STATUS:
+ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+ break;
+ s3c_hsudc_process_req_status(hsudc, &ctrl);
+ return;
+
+ case USB_REQ_SET_FEATURE:
+ case USB_REQ_CLEAR_FEATURE:
+ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+ break;
+ s3c_hsudc_handle_reqfeat(hsudc, &ctrl);
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ return;
+ }
+
+ if (hsudc->driver) {
+ spin_unlock(&hsudc->lock);
+ ret = hsudc->driver->setup(&hsudc->gadget, &ctrl);
+ spin_lock(&hsudc->lock);
+
+ if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) {
+ hsep->bEndpointAddress &= ~USB_DIR_IN;
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ }
+
+ if (ret < 0) {
+ dev_err(hsudc->dev, "setup failed, returned %d\n",
+ ret);
+ s3c_hsudc_set_halt(&hsep->ep, 1);
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ hsep->bEndpointAddress &= ~USB_DIR_IN;
+ }
+ }
+}
+
+/**
+ * s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
+ * @hsudc: Device controller on which endpoint 0 interrupt has occurred.
+ *
+ * Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
+ * when a stall handshake is sent to host or data is sent/received on
+ * endpoint 0.
+ */
+static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc)
+{
+ struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
+ struct s3c_hsudc_req *hsreq;
+ u32 csr = readl(hsudc->regs + S3C_EP0SR);
+ u32 ecr;
+
+ if (csr & S3C_EP0SR_STALL) {
+ ecr = readl(hsudc->regs + S3C_EP0CR);
+ ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH);
+ writel(ecr, hsudc->regs + S3C_EP0CR);
+
+ writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR);
+ hsep->stopped = 0;
+
+ s3c_hsudc_nuke_ep(hsep, -ECONNABORTED);
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ hsep->bEndpointAddress &= ~USB_DIR_IN;
+ return;
+ }
+
+ if (csr & S3C_EP0SR_TX_SUCCESS) {
+ writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR);
+ if (ep_is_in(hsep)) {
+ if (list_empty(&hsep->queue))
+ return;
+
+ hsreq = list_entry(hsep->queue.next,
+ struct s3c_hsudc_req, queue);
+ s3c_hsudc_write_fifo(hsep, hsreq);
+ }
+ }
+
+ if (csr & S3C_EP0SR_RX_SUCCESS) {
+ if (hsudc->ep0state == WAIT_FOR_SETUP)
+ s3c_hsudc_process_setup(hsudc);
+ else {
+ if (!ep_is_in(hsep)) {
+ if (list_empty(&hsep->queue))
+ return;
+ hsreq = list_entry(hsep->queue.next,
+ struct s3c_hsudc_req, queue);
+ s3c_hsudc_read_fifo(hsep, hsreq);
+ }
+ }
+ }
+}
+
+/**
+ * s3c_hsudc_ep_enable - Enable an endpoint.
+ * @_ep: The endpoint to be enabled.
+ * @desc: Endpoint descriptor.
+ *
+ * Enables an endpoint when called from the gadget driver. Any endpoint
+ * stall is cleared, the transfer type is configured and the endpoint
+ * interrupt is enabled.
+ */
+static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct s3c_hsudc_ep *hsep;
+ struct s3c_hsudc *hsudc;
+ unsigned long flags;
+ u32 ecr = 0;
+
+ hsep = our_ep(_ep);
+ if (!_ep || !desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT
+ || hsep->bEndpointAddress != desc->bEndpointAddress
+ || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
+ return -EINVAL;
+
+ if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
+ && usb_endpoint_maxp(desc) != ep_maxpacket(hsep))
+ || !desc->wMaxPacketSize)
+ return -ERANGE;
+
+ hsudc = hsep->dev;
+ if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&hsudc->lock, flags);
+
+ set_index(hsudc, hsep->bEndpointAddress);
+ ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN);
+ writel(ecr, hsudc->regs + S3C_ECR);
+
+ hsep->stopped = hsep->wedge = 0;
+ hsep->ep.desc = desc;
+ hsep->ep.maxpacket = usb_endpoint_maxp(desc);
+
+ s3c_hsudc_set_halt(_ep, 0);
+ __set_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
+
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+ return 0;
+}
+
+/**
+ * s3c_hsudc_ep_disable - Disable an endpoint.
+ * @_ep: The endpoint to be disabled.
+ *
+ * Disables an endpoint when called from the gadget driver.
+ */
+static int s3c_hsudc_ep_disable(struct usb_ep *_ep)
+{
+ struct s3c_hsudc_ep *hsep = our_ep(_ep);
+ struct s3c_hsudc *hsudc = hsep->dev;
+ unsigned long flags;
+
+ if (!_ep || !hsep->ep.desc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hsudc->lock, flags);
+
+ set_index(hsudc, hsep->bEndpointAddress);
+ __clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
+
+ s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
+
+ hsep->ep.desc = NULL;
+ hsep->stopped = 1;
+
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+ return 0;
+}
+
+/**
+ * s3c_hsudc_alloc_request - Allocate a new request.
+ * @_ep: Endpoint for which request is allocated (not used).
+ * @gfp_flags: Flags used for the allocation.
+ *
+ * Allocates a single transfer request structure when called from gadget driver.
+ */
+static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct s3c_hsudc_req *hsreq;
+
+ hsreq = kzalloc(sizeof(*hsreq), gfp_flags);
+ if (!hsreq)
+ return NULL;
+
+ INIT_LIST_HEAD(&hsreq->queue);
+ return &hsreq->req;
+}
+
+/**
+ * s3c_hsudc_free_request - Deallocate a request.
+ * @ep: Endpoint for which request is deallocated (not used).
+ * @_req: Request to be deallocated.
+ *
+ * Deallocates a transfer request structure when called from the gadget driver.
+ */
+static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req)
+{
+ struct s3c_hsudc_req *hsreq;
+
+ hsreq = our_req(_req);
+ WARN_ON(!list_empty(&hsreq->queue));
+ kfree(hsreq);
+}
+
+/**
+ * s3c_hsudc_queue - Queue a transfer request for the endpoint.
+ * @_ep: Endpoint for which the request is queued.
+ * @_req: Request to be queued.
+ * @gfp_flags: Not used.
+ *
+ * Start or enqueue a request for an endpoint when called from the gadget driver.
+ */
+static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct s3c_hsudc_req *hsreq;
+ struct s3c_hsudc_ep *hsep;
+ struct s3c_hsudc *hsudc;
+ unsigned long flags;
+ u32 offset;
+ u32 csr;
+
+ hsreq = our_req(_req);
+ if ((!_req || !_req->complete || !_req->buf ||
+ !list_empty(&hsreq->queue)))
+ return -EINVAL;
+
+ hsep = our_ep(_ep);
+ hsudc = hsep->dev;
+ if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&hsudc->lock, flags);
+ set_index(hsudc, hsep->bEndpointAddress);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ if (!ep_index(hsep) && _req->length == 0) {
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ s3c_hsudc_complete_request(hsep, hsreq, 0);
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+ return 0;
+ }
+
+ if (list_empty(&hsep->queue) && !hsep->stopped) {
+ offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
+ if (ep_is_in(hsep)) {
+ csr = readl(hsudc->regs + offset);
+ if (!(csr & S3C_ESR_TX_SUCCESS) &&
+ (s3c_hsudc_write_fifo(hsep, hsreq) == 1))
+ hsreq = NULL;
+ } else {
+ csr = readl(hsudc->regs + offset);
+ if ((csr & S3C_ESR_RX_SUCCESS)
+ && (s3c_hsudc_read_fifo(hsep, hsreq) == 1))
+ hsreq = NULL;
+ }
+ }
+
+ if (hsreq)
+ list_add_tail(&hsreq->queue, &hsep->queue);
+
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+ return 0;
+}
+
+/**
+ * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint.
+ * @_ep: Endpoint from which the request is dequeued.
+ * @_req: Request to be dequeued.
+ *
+ * Dequeue a request from an endpoint when called from the gadget driver.
+ */
+static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct s3c_hsudc_ep *hsep = our_ep(_ep);
+ struct s3c_hsudc *hsudc = hsep->dev;
+ struct s3c_hsudc_req *hsreq = NULL, *iter;
+ unsigned long flags;
+
+ if (!_ep || hsep->ep.name == ep0name)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hsudc->lock, flags);
+
+ list_for_each_entry(iter, &hsep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ hsreq = iter;
+ break;
+ }
+ if (!hsreq) {
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+ return -EINVAL;
+ }
+
+ set_index(hsudc, hsep->bEndpointAddress);
+ s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET);
+
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+ return 0;
+}
+
+static const struct usb_ep_ops s3c_hsudc_ep_ops = {
+ .enable = s3c_hsudc_ep_enable,
+ .disable = s3c_hsudc_ep_disable,
+ .alloc_request = s3c_hsudc_alloc_request,
+ .free_request = s3c_hsudc_free_request,
+ .queue = s3c_hsudc_queue,
+ .dequeue = s3c_hsudc_dequeue,
+ .set_halt = s3c_hsudc_set_halt,
+ .set_wedge = s3c_hsudc_set_wedge,
+};
+
+/**
+ * s3c_hsudc_initep - Initialize an endpoint to its default state.
+ * @hsudc: Reference to the device controller.
+ * @hsep: Endpoint to be initialized.
+ * @epnum: Address to be assigned to the endpoint.
+ *
+ * Initialize an endpoint with a default configuration.
+ */
+static void s3c_hsudc_initep(struct s3c_hsudc *hsudc,
+ struct s3c_hsudc_ep *hsep, int epnum)
+{
+ char *dir;
+
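+ /* Odd-numbered endpoints are IN, even-numbered endpoints are OUT. */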
+ if ((epnum % 2) == 0) {
+ dir = "out";
+ } else {
+ dir = "in";
+ hsep->bEndpointAddress = USB_DIR_IN;
+ }
+
+ hsep->bEndpointAddress |= epnum;
+ if (epnum)
+ snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir);
+ else
+ snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name);
+
+ INIT_LIST_HEAD(&hsep->queue);
+ INIT_LIST_HEAD(&hsep->ep.ep_list);
+ if (epnum)
+ list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list);
+
+ hsep->dev = hsudc;
+ hsep->ep.name = hsep->name;
+ usb_ep_set_maxpacket_limit(&hsep->ep, epnum ? 512 : 64);
+ hsep->ep.ops = &s3c_hsudc_ep_ops;
+ hsep->fifo = hsudc->regs + S3C_BR(epnum);
+ hsep->ep.desc = NULL;
+ hsep->stopped = 0;
+ hsep->wedge = 0;
+
+ if (epnum == 0) {
+ hsep->ep.caps.type_control = true;
+ hsep->ep.caps.dir_in = true;
+ hsep->ep.caps.dir_out = true;
+ } else {
+ hsep->ep.caps.type_iso = true;
+ hsep->ep.caps.type_bulk = true;
+ hsep->ep.caps.type_int = true;
+ }
+
+ if (epnum & 1)
+ hsep->ep.caps.dir_in = true;
+ else
+ hsep->ep.caps.dir_out = true;
+
+ set_index(hsudc, epnum);
+ writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR);
+}
+
+/**
+ * s3c_hsudc_setup_ep - Configure all endpoints to default state.
+ * @hsudc: Reference to device controller.
+ *
+ * Configures all endpoints to their default state.
+ */
+static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc)
+{
+ int epnum;
+
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ INIT_LIST_HEAD(&hsudc->gadget.ep_list);
+ for (epnum = 0; epnum < hsudc->pd->epnum; epnum++)
+ s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum);
+}
+
+/**
+ * s3c_hsudc_reconfig - Reconfigure the device controller to default state.
+ * @hsudc: Reference to device controller.
+ *
+ * Reconfigures the device controller registers to a default state.
+ */
+static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc)
+{
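+ /*
+ * 0xAA sets bits 1, 3, 5 and 7 of the endpoint direction register,
+ * i.e. the odd-numbered endpoints are IN, matching the layout set
+ * up by s3c_hsudc_initep().
+ */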
+ writel(0xAA, hsudc->regs + S3C_EDR);
+ writel(1, hsudc->regs + S3C_EIER);
+ writel(0, hsudc->regs + S3C_TR);
+ writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN |
+ S3C_SCR_RST_EN, hsudc->regs + S3C_SCR);
+ writel(0, hsudc->regs + S3C_EP0CR);
+
+ s3c_hsudc_setup_ep(hsudc);
+}
+
+/**
+ * s3c_hsudc_irq - Interrupt handler for device controller.
+ * @irq: Not used.
+ * @_dev: Reference to the device controller.
+ *
+ * Interrupt handler for the device controller. It services both the
+ * controller event interrupts and the per-endpoint data interrupts.
+ */
+static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
+{
+ struct s3c_hsudc *hsudc = _dev;
+ struct s3c_hsudc_ep *hsep;
+ u32 ep_intr;
+ u32 sys_status;
+ u32 ep_idx;
+
+ spin_lock(&hsudc->lock);
+
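+ /*
+ * Latch the system status and the per-endpoint interrupt status
+ * (the lower 10 bits of EIR).
+ */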
+ sys_status = readl(hsudc->regs + S3C_SSR);
+ ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF;
+
+ if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) {
+ spin_unlock(&hsudc->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (sys_status) {
+ if (sys_status & S3C_SSR_VBUSON)
+ writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR);
+
+ if (sys_status & S3C_SSR_ERR)
+ writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR);
+
+ if (sys_status & S3C_SSR_SDE) {
+ writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR);
+ hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ?
+ USB_SPEED_HIGH : USB_SPEED_FULL;
+ }
+
+ if (sys_status & S3C_SSR_SUSPEND) {
+ writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR);
+ if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
+ && hsudc->driver && hsudc->driver->suspend)
+ hsudc->driver->suspend(&hsudc->gadget);
+ }
+
+ if (sys_status & S3C_SSR_RESUME) {
+ writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR);
+ if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
+ && hsudc->driver && hsudc->driver->resume)
+ hsudc->driver->resume(&hsudc->gadget);
+ }
+
+ if (sys_status & S3C_SSR_RESET) {
+ writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR);
+ for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) {
+ hsep = &hsudc->ep[ep_idx];
+ hsep->stopped = 1;
+ s3c_hsudc_nuke_ep(hsep, -ECONNRESET);
+ }
+ s3c_hsudc_reconfig(hsudc);
+ hsudc->ep0state = WAIT_FOR_SETUP;
+ }
+ }
+
+ if (ep_intr & S3C_EIR_EP0) {
+ writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR);
+ set_index(hsudc, 0);
+ s3c_hsudc_handle_ep0_intr(hsudc);
+ }
+
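+ /* Bit 0 (ep0) was handled above; scan the remaining endpoint bits. */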
+ ep_intr >>= 1;
+ ep_idx = 1;
+ while (ep_intr) {
+ if (ep_intr & 1) {
+ hsep = &hsudc->ep[ep_idx];
+ set_index(hsudc, ep_idx);
+ writel(1 << ep_idx, hsudc->regs + S3C_EIR);
+ if (ep_is_in(hsep))
+ s3c_hsudc_epin_intr(hsudc, ep_idx);
+ else
+ s3c_hsudc_epout_intr(hsudc, ep_idx);
+ }
+ ep_intr >>= 1;
+ ep_idx++;
+ }
+
+ spin_unlock(&hsudc->lock);
+ return IRQ_HANDLED;
+}
+
+static int s3c_hsudc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
+ int ret;
+
+ if (!driver
+ || driver->max_speed < USB_SPEED_FULL
+ || !driver->setup)
+ return -EINVAL;
+
+ if (!hsudc)
+ return -ENODEV;
+
+ if (hsudc->driver)
+ return -EBUSY;
+
+ hsudc->driver = driver;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret);
+ goto err_supplies;
+ }
+
+ /* connect to bus through transceiver */
+ if (!IS_ERR_OR_NULL(hsudc->transceiver)) {
+ ret = otg_set_peripheral(hsudc->transceiver->otg,
+ &hsudc->gadget);
+ if (ret) {
+ dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
+ hsudc->gadget.name);
+ goto err_otg;
+ }
+ }
+
+ enable_irq(hsudc->irq);
+ s3c_hsudc_reconfig(hsudc);
+
+ pm_runtime_get_sync(hsudc->dev);
+
+ if (hsudc->pd->phy_init)
+ hsudc->pd->phy_init();
+ if (hsudc->pd->gpio_init)
+ hsudc->pd->gpio_init();
+
+ return 0;
+err_otg:
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
+ hsudc->driver = NULL;
+ return ret;
+}
+
+static int s3c_hsudc_stop(struct usb_gadget *gadget)
+{
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
+ unsigned long flags;
+
+ if (!hsudc)
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsudc->lock, flags);
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
+ if (hsudc->pd->phy_uninit)
+ hsudc->pd->phy_uninit();
+
+ pm_runtime_put(hsudc->dev);
+
+ if (hsudc->pd->gpio_uninit)
+ hsudc->pd->gpio_uninit();
+ s3c_hsudc_stop_activity(hsudc);
+ spin_unlock_irqrestore(&hsudc->lock, flags);
+
+ if (!IS_ERR_OR_NULL(hsudc->transceiver))
+ (void) otg_set_peripheral(hsudc->transceiver->otg, NULL);
+
+ disable_irq(hsudc->irq);
+
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+ hsudc->driver = NULL;
+
+ return 0;
+}
+
+static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc)
+{
+ return readl(hsudc->regs + S3C_FNR) & 0x3FF;
+}
+
+static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
+{
+ return s3c_hsudc_read_frameno(to_hsudc(gadget));
+}
+
+static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
+
+ if (!hsudc)
+ return -ENODEV;
+
+ if (!IS_ERR_OR_NULL(hsudc->transceiver))
+ return usb_phy_set_power(hsudc->transceiver, mA);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
+ .get_frame = s3c_hsudc_gadget_getframe,
+ .udc_start = s3c_hsudc_start,
+ .udc_stop = s3c_hsudc_stop,
+ .vbus_draw = s3c_hsudc_vbus_draw,
+};
+
+static int s3c_hsudc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s3c_hsudc *hsudc;
+ struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
+ int ret, i;
+
+ /* this controller is configured entirely through platform data */
+ if (!pd)
+ return -EINVAL;
+
+ hsudc = devm_kzalloc(&pdev->dev, struct_size(hsudc, ep, pd->epnum),
+ GFP_KERNEL);
+ if (!hsudc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hsudc);
+ hsudc->dev = dev;
+ hsudc->pd = pd;
+
+ hsudc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+
+ for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
+ hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ goto err_supplies;
+ }
+
+ hsudc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hsudc->regs)) {
+ ret = PTR_ERR(hsudc->regs);
+ goto err_res;
+ }
+
+ spin_lock_init(&hsudc->lock);
+
+ hsudc->gadget.max_speed = USB_SPEED_HIGH;
+ hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
+ hsudc->gadget.name = dev_name(dev);
+ hsudc->gadget.ep0 = &hsudc->ep[0].ep;
+ hsudc->gadget.is_otg = 0;
+ hsudc->gadget.is_a_peripheral = 0;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
+
+ s3c_hsudc_setup_ep(hsudc);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_res;
+ hsudc->irq = ret;
+
+ ret = devm_request_irq(&pdev->dev, hsudc->irq, s3c_hsudc_irq, 0,
+ driver_name, hsudc);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_res;
+ }
+
+ hsudc->uclk = devm_clk_get(&pdev->dev, "usb-device");
+ if (IS_ERR(hsudc->uclk)) {
+ dev_err(dev, "failed to find usb-device clock source\n");
+ ret = PTR_ERR(hsudc->uclk);
+ goto err_res;
+ }
+ clk_enable(hsudc->uclk);
+
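+ /*
+ * Keep the controller interrupt masked until a gadget driver binds;
+ * s3c_hsudc_start() re-enables it with enable_irq().
+ */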
+ local_irq_disable();
+
+ disable_irq(hsudc->irq);
+ local_irq_enable();
+
+ ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
+ if (ret)
+ goto err_add_udc;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+err_add_udc:
+ clk_disable(hsudc->uclk);
+err_res:
+ if (!IS_ERR_OR_NULL(hsudc->transceiver))
+ usb_put_phy(hsudc->transceiver);
+
+err_supplies:
+ return ret;
+}
+
+static struct platform_driver s3c_hsudc_driver = {
+ .driver = {
+ .name = "s3c-hsudc",
+ },
+ .probe = s3c_hsudc_probe,
+};
+
+module_platform_driver(s3c_hsudc_driver);
+
+MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
+MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsudc");
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
new file mode 100644
index 000000000..8c57b191e
--- /dev/null
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -0,0 +1,1980 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * linux/drivers/usb/gadget/s3c2410_udc.c
+ *
+ * Samsung S3C24xx series on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
+ */
+
+#define pr_fmt(fmt) "s3c2410_udc: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/prefetch.h>
+#include <linux/io.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#include <linux/platform_data/usb-s3c2410_udc.h>
+
+#include "s3c2410_udc.h"
+#include "s3c2410_udc_regs.h"
+
+#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
+#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
+ "Arnaud Patard <arnaud.patard@rtp-net.org>"
+
+static const char gadget_name[] = "s3c2410_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+static struct s3c2410_udc *the_controller;
+static struct clk *udc_clock;
+static struct clk *usb_bus_clock;
+static void __iomem *base_addr;
+static int irq_usbd;
+static struct dentry *s3c2410_udc_debugfs_root;
+
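+/* The UDC registers are byte-wide, so go through readb()/writeb(). */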
+static inline u32 udc_read(u32 reg)
+{
+ return readb(base_addr + reg);
+}
+
+static inline void udc_write(u32 value, u32 reg)
+{
+ writeb(value, base_addr + reg);
+}
+
+static inline void udc_writeb(void __iomem *base, u32 value, u32 reg)
+{
+ writeb(value, base + reg);
+}
+
+static struct s3c2410_udc_mach_info *udc_info;
+
+/*************************** DEBUG FUNCTION ***************************/
+#define DEBUG_NORMAL 1
+#define DEBUG_VERBOSE 2
+
+#ifdef CONFIG_USB_S3C2410_DEBUG
+#define USB_S3C2410_DEBUG_LEVEL 0
+
+static u32 s3c2410_ticks;
+
+__printf(2, 3)
+static void dprintk(int level, const char *fmt, ...)
+{
+ static long prevticks;
+ static int invocation;
+ struct va_format vaf;
+ va_list args;
+
+ if (level > USB_S3C2410_DEBUG_LEVEL)
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (s3c2410_ticks != prevticks) {
+ prevticks = s3c2410_ticks;
+ invocation = 0;
+ }
+
+ pr_debug("%1lu.%02d USB: %pV", prevticks, invocation++, &vaf);
+
+ va_end(args);
+}
+#else
+__printf(2, 3)
+static void dprintk(int level, const char *fmt, ...)
+{
+}
+#endif
+
+static int s3c2410_udc_debugfs_show(struct seq_file *m, void *p)
+{
+ u32 addr_reg, pwr_reg, ep_int_reg, usb_int_reg;
+ u32 ep_int_en_reg, usb_int_en_reg, ep0_csr;
+ u32 ep1_i_csr1, ep1_i_csr2, ep1_o_csr1, ep1_o_csr2;
+ u32 ep2_i_csr1, ep2_i_csr2, ep2_o_csr1, ep2_o_csr2;
+
+ addr_reg = udc_read(S3C2410_UDC_FUNC_ADDR_REG);
+ pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
+ ep_int_reg = udc_read(S3C2410_UDC_EP_INT_REG);
+ usb_int_reg = udc_read(S3C2410_UDC_USB_INT_REG);
+ ep_int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+ usb_int_en_reg = udc_read(S3C2410_UDC_USB_INT_EN_REG);
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ ep0_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ udc_write(1, S3C2410_UDC_INDEX_REG);
+ ep1_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ ep1_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
+ ep1_o_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ ep1_o_csr2 = udc_read(S3C2410_UDC_OUT_CSR2_REG);
+ udc_write(2, S3C2410_UDC_INDEX_REG);
+ ep2_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ ep2_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
+ ep2_o_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ ep2_o_csr2 = udc_read(S3C2410_UDC_OUT_CSR2_REG);
+
+ seq_printf(m, "FUNC_ADDR_REG : 0x%04X\n"
+ "PWR_REG : 0x%04X\n"
+ "EP_INT_REG : 0x%04X\n"
+ "USB_INT_REG : 0x%04X\n"
+ "EP_INT_EN_REG : 0x%04X\n"
+ "USB_INT_EN_REG : 0x%04X\n"
+ "EP0_CSR : 0x%04X\n"
+ "EP1_I_CSR1 : 0x%04X\n"
+ "EP1_I_CSR2 : 0x%04X\n"
+ "EP1_O_CSR1 : 0x%04X\n"
+ "EP1_O_CSR2 : 0x%04X\n"
+ "EP2_I_CSR1 : 0x%04X\n"
+ "EP2_I_CSR2 : 0x%04X\n"
+ "EP2_O_CSR1 : 0x%04X\n"
+ "EP2_O_CSR2 : 0x%04X\n",
+ addr_reg, pwr_reg, ep_int_reg, usb_int_reg,
+ ep_int_en_reg, usb_int_en_reg, ep0_csr,
+ ep1_i_csr1, ep1_i_csr2, ep1_o_csr1, ep1_o_csr2,
+ ep2_i_csr1, ep2_i_csr2, ep2_o_csr1, ep2_o_csr2
+ );
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s3c2410_udc_debugfs);
+
+/* ep0 control/status register helpers */
+
+static inline void s3c2410_udc_clear_ep0_opr(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_SOPKTRDY,
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_clear_ep0_sst(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, 0x00, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_clear_ep0_se(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_SSE, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_ipr(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_IPKRDY, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
+{
+ udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+
+ udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY
+ | S3C2410_UDC_EP0_CSR_DE),
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, (S3C2410_UDC_EP0_CSR_IPKRDY
+ | S3C2410_UDC_EP0_CSR_DE),
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+/*------------------------- I/O ----------------------------------*/
+
+/*
+ * s3c2410_udc_done
+ */
+static void s3c2410_udc_done(struct s3c2410_ep *ep,
+ struct s3c2410_request *req, int status)
+{
+ unsigned halted = ep->halted;
+
+ list_del_init(&req->queue);
+
+ if (likely(req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ ep->halted = 1;
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ ep->halted = halted;
+}
+
+static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
+ struct s3c2410_ep *ep, int status)
+{
+ while (!list_empty(&ep->queue)) {
+ struct s3c2410_request *req;
+ req = list_entry(ep->queue.next, struct s3c2410_request,
+ queue);
+ s3c2410_udc_done(ep, req, status);
+ }
+}
+
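+/* The OUT FIFO byte count is split across two 8-bit count registers. */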
+static inline int s3c2410_udc_fifo_count_out(void)
+{
+ int tmp;
+
+ tmp = udc_read(S3C2410_UDC_OUT_FIFO_CNT2_REG) << 8;
+ tmp |= udc_read(S3C2410_UDC_OUT_FIFO_CNT1_REG);
+ return tmp;
+}
+
+/*
+ * s3c2410_udc_write_packet
+ */
+static inline int s3c2410_udc_write_packet(int fifo,
+ struct s3c2410_request *req,
+ unsigned max)
+{
+ unsigned len = min(req->req.length - req->req.actual, max);
+ u8 *buf = req->req.buf + req->req.actual;
+
+ prefetch(buf);
+
+ dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__,
+ req->req.actual, req->req.length, len, req->req.actual + len);
+
+ req->req.actual += len;
+
+ udelay(5);
+ writesb(base_addr + fifo, buf, len);
+ return len;
+}
+
+/*
+ * s3c2410_udc_write_fifo
+ *
+ * return: 0 = still running, 1 = completed, negative = errno
+ */
+static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep,
+ struct s3c2410_request *req)
+{
+ unsigned count;
+ int is_last;
+ u32 idx;
+ int fifo_reg;
+ u32 ep_csr;
+
+ idx = ep->bEndpointAddress & 0x7F;
+ switch (idx) {
+ default:
+ idx = 0;
+ fallthrough;
+ case 0:
+ fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
+ break;
+ case 1:
+ fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
+ break;
+ case 2:
+ fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
+ break;
+ case 3:
+ fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
+ break;
+ case 4:
+ fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
+ break;
+ }
+
+ count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket);
+
+ /* last packet is often short (sometimes a zlp) */
+ if (count != ep->ep.maxpacket)
+ is_last = 1;
+ else if (req->req.length != req->req.actual || req->req.zero)
+ is_last = 0;
+ else
+ is_last = 2;
+
+ /* Only ep0 debug messages are interesting */
+ if (idx == 0)
+ dprintk(DEBUG_NORMAL,
+ "Written ep%d %d.%d of %d b [last %d,z %d]\n",
+ idx, count, req->req.actual, req->req.length,
+ is_last, req->req.zero);
+
+ if (is_last) {
+ /*
+ * The order is important. It prevents sending two packets
+ * at the same time.
+ */
+
+ if (idx == 0) {
+ /* Reset signal => no need to say 'data sent' */
+ if (!(udc_read(S3C2410_UDC_USB_INT_REG)
+ & S3C2410_UDC_USBINT_RESET))
+ s3c2410_udc_set_ep0_de_in(base_addr);
+ ep->dev->ep0state = EP0_IDLE;
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
+ S3C2410_UDC_IN_CSR1_REG);
+ }
+
+ s3c2410_udc_done(ep, req, 0);
+ is_last = 1;
+ } else {
+ if (idx == 0) {
+ /* Reset signal => no need to say 'data sent' */
+ if (!(udc_read(S3C2410_UDC_USB_INT_REG)
+ & S3C2410_UDC_USBINT_RESET))
+ s3c2410_udc_set_ep0_ipr(base_addr);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
+ S3C2410_UDC_IN_CSR1_REG);
+ }
+ }
+
+ return is_last;
+}
+
+static inline int s3c2410_udc_read_packet(int fifo, u8 *buf,
+ struct s3c2410_request *req, unsigned avail)
+{
+ unsigned len;
+
+ len = min(req->req.length - req->req.actual, avail);
+ req->req.actual += len;
+
+ readsb(base_addr + fifo, buf, len);
+ return len;
+}
+
+/*
+ * return: 0 = still running, 1 = queue empty, negative = errno
+ */
+static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep,
+ struct s3c2410_request *req)
+{
+ u8 *buf;
+ u32 ep_csr;
+ unsigned bufferspace;
+ int is_last = 1;
+ unsigned avail;
+ int fifo_count = 0;
+ u32 idx;
+ int fifo_reg;
+
+ idx = ep->bEndpointAddress & 0x7F;
+
+ switch (idx) {
+ default:
+ idx = 0;
+ fallthrough;
+ case 0:
+ fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
+ break;
+ case 1:
+ fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
+ break;
+ case 2:
+ fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
+ break;
+ case 3:
+ fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
+ break;
+ case 4:
+ fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
+ break;
+ }
+
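+ /* A zero-length request has nothing to read; report it done. */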
+ if (!req->req.length)
+ return 1;
+
+ buf = req->req.buf + req->req.actual;
+ bufferspace = req->req.length - req->req.actual;
+ if (!bufferspace) {
+ dprintk(DEBUG_NORMAL, "%s: buffer full!\n", __func__);
+ return -1;
+ }
+
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+
+ fifo_count = s3c2410_udc_fifo_count_out();
+ dprintk(DEBUG_NORMAL, "%s fifo count : %d\n", __func__, fifo_count);
+
+ if (fifo_count > ep->ep.maxpacket)
+ avail = ep->ep.maxpacket;
+ else
+ avail = fifo_count;
+
+ fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail);
+
+ /*
+ * Checking this with ep0 is not accurate as we have already
+ * read a control request.
+ */
+ if (idx != 0 && fifo_count < ep->ep.maxpacket) {
+ is_last = 1;
+ /* overflowed this request? flush extra data */
+ if (fifo_count != avail)
+ req->req.status = -EOVERFLOW;
+ } else {
+ is_last = (req->req.length <= req->req.actual) ? 1 : 0;
+ }
+
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ fifo_count = s3c2410_udc_fifo_count_out();
+
+ /* Only ep0 debug messages are interesting */
+ if (idx == 0)
+ dprintk(DEBUG_VERBOSE, "%s fifo count : %d [last %d]\n",
+ __func__, fifo_count, is_last);
+
+ if (is_last) {
+ if (idx == 0) {
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ ep->dev->ep0state = EP0_IDLE;
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
+ S3C2410_UDC_OUT_CSR1_REG);
+ }
+
+ s3c2410_udc_done(ep, req, 0);
+ } else {
+ if (idx == 0) {
+ s3c2410_udc_clear_ep0_opr(base_addr);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
+ S3C2410_UDC_OUT_CSR1_REG);
+ }
+ }
+
+ return is_last;
+}
+
+static int s3c2410_udc_read_fifo_crq(struct usb_ctrlrequest *crq)
+{
+ unsigned char *outbuf = (unsigned char *)crq;
+ int bytes_read = 0;
+
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+
+ bytes_read = s3c2410_udc_fifo_count_out();
+
+ dprintk(DEBUG_NORMAL, "%s: fifo_count=%d\n", __func__, bytes_read);
+
+ if (bytes_read > sizeof(struct usb_ctrlrequest))
+ bytes_read = sizeof(struct usb_ctrlrequest);
+
+ readsb(base_addr + S3C2410_UDC_EP0_FIFO_REG, outbuf, bytes_read);
+
+ dprintk(DEBUG_VERBOSE, "%s: len=%d %02x:%02x {%x,%x,%x}\n", __func__,
+ bytes_read, crq->bRequest, crq->bRequestType,
+ crq->wValue, crq->wIndex, crq->wLength);
+
+ return bytes_read;
+}
+
+static int s3c2410_udc_get_status(struct s3c2410_udc *dev,
+ struct usb_ctrlrequest *crq)
+{
+ u16 status = 0;
+ u8 ep_num = crq->wIndex & 0x7F;
+ u8 is_in = crq->wIndex & USB_DIR_IN;
+
+ switch (crq->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_DEVICE:
+ status = dev->devstatus;
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ if (ep_num > 4 || crq->wLength > 2)
+ return 1;
+
+ if (ep_num == 0) {
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ status = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ status = status & S3C2410_UDC_EP0_CSR_SENDSTL;
+ } else {
+ udc_write(ep_num, S3C2410_UDC_INDEX_REG);
+ if (is_in) {
+ status = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ status = status & S3C2410_UDC_ICSR1_SENDSTL;
+ } else {
+ status = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ status = status & S3C2410_UDC_OCSR1_SENDSTL;
+ }
+ }
+
+ status = status ? 1 : 0;
+ break;
+
+ default:
+ return 1;
+ }
+
+ /* Seems to be needed to get it working. ouch :( */
+ udelay(5);
+ udc_write(status & 0xFF, S3C2410_UDC_EP0_FIFO_REG);
+ udc_write(status >> 8, S3C2410_UDC_EP0_FIFO_REG);
+ s3c2410_udc_set_ep0_de_in(base_addr);
+
+ return 0;
+}
+/*------------------------- usb state machine -------------------------------*/
+static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value);
+
+static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
+ struct s3c2410_ep *ep,
+ struct usb_ctrlrequest *crq,
+ u32 ep0csr)
+{
+ int len, ret, tmp;
+
+ /* start control request? */
+ if (!(ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY))
+ return;
+
+ s3c2410_udc_nuke(dev, ep, -EPROTO);
+
+ len = s3c2410_udc_read_fifo_crq(crq);
+ if (len != sizeof(*crq)) {
+ dprintk(DEBUG_NORMAL, "setup begin: fifo READ ERROR"
+ " wanted %d bytes got %d. Stalling out...\n",
+ sizeof(*crq), len);
+ s3c2410_udc_set_ep0_ss(base_addr);
+ return;
+ }
+
+ dprintk(DEBUG_NORMAL, "bRequest = %d bRequestType %d wLength = %d\n",
+ crq->bRequest, crq->bRequestType, crq->wLength);
+
+ /* cope with automagic for some standard requests. */
+ dev->req_std = (crq->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD;
+ dev->req_config = 0;
+ dev->req_pending = 1;
+
+ switch (crq->bRequest) {
+ case USB_REQ_SET_CONFIGURATION:
+ dprintk(DEBUG_NORMAL, "USB_REQ_SET_CONFIGURATION ...\n");
+
+ if (crq->bRequestType == USB_RECIP_DEVICE) {
+ dev->req_config = 1;
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ }
+ break;
+
+ case USB_REQ_SET_INTERFACE:
+ dprintk(DEBUG_NORMAL, "USB_REQ_SET_INTERFACE ...\n");
+
+ if (crq->bRequestType == USB_RECIP_INTERFACE) {
+ dev->req_config = 1;
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ }
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ dprintk(DEBUG_NORMAL, "USB_REQ_SET_ADDRESS ...\n");
+
+ if (crq->bRequestType == USB_RECIP_DEVICE) {
+ tmp = crq->wValue & 0x7F;
+ dev->address = tmp;
+ udc_write((tmp | S3C2410_UDC_FUNCADDR_UPDATE),
+ S3C2410_UDC_FUNC_ADDR_REG);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ return;
+ }
+ break;
+
+ case USB_REQ_GET_STATUS:
+ dprintk(DEBUG_NORMAL, "USB_REQ_GET_STATUS ...\n");
+ s3c2410_udc_clear_ep0_opr(base_addr);
+
+ if (dev->req_std) {
+ if (!s3c2410_udc_get_status(dev, crq))
+ return;
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ s3c2410_udc_clear_ep0_opr(base_addr);
+
+ if (crq->bRequestType != USB_RECIP_ENDPOINT)
+ break;
+
+ if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
+ break;
+
+ s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ return;
+
+ case USB_REQ_SET_FEATURE:
+ s3c2410_udc_clear_ep0_opr(base_addr);
+
+ if (crq->bRequestType != USB_RECIP_ENDPOINT)
+ break;
+
+ if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
+ break;
+
+ s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ return;
+
+ default:
+ s3c2410_udc_clear_ep0_opr(base_addr);
+ break;
+ }
+
+ if (crq->bRequestType & USB_DIR_IN)
+ dev->ep0state = EP0_IN_DATA_PHASE;
+ else
+ dev->ep0state = EP0_OUT_DATA_PHASE;
+
+ if (!dev->driver)
+ return;
+
+ /* deliver the request to the gadget driver */
+ ret = dev->driver->setup(&dev->gadget, crq);
+ if (ret < 0) {
+ if (dev->req_config) {
+ dprintk(DEBUG_NORMAL, "config change %02x fail %d?\n",
+ crq->bRequest, ret);
+ return;
+ }
+
+ if (ret == -EOPNOTSUPP)
+ dprintk(DEBUG_NORMAL, "Operation not supported\n");
+ else
+ dprintk(DEBUG_NORMAL,
+ "dev->driver->setup failed. (%d)\n", ret);
+
+ udelay(5);
+ s3c2410_udc_set_ep0_ss(base_addr);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ dev->ep0state = EP0_IDLE;
+ /* deferred i/o == no response yet */
+ } else if (dev->req_pending) {
+ dprintk(DEBUG_VERBOSE, "dev->req_pending... what now?\n");
+ dev->req_pending = 0;
+ }
+
+ dprintk(DEBUG_VERBOSE, "ep0state %s\n", ep0states[dev->ep0state]);
+}
+
+static void s3c2410_udc_handle_ep0(struct s3c2410_udc *dev)
+{
+ u32 ep0csr;
+ struct s3c2410_ep *ep = &dev->ep[0];
+ struct s3c2410_request *req;
+ struct usb_ctrlrequest crq;
+
+ if (list_empty(&ep->queue))
+ req = NULL;
+ else
+ req = list_entry(ep->queue.next, struct s3c2410_request, queue);
+
+ /*
+ * We make the assumption that S3C2410_UDC_IN_CSR1_REG is equal to
+ * S3C2410_UDC_EP0_CSR_REG when the index is zero.
+ */
+
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+
+ dprintk(DEBUG_NORMAL, "ep0csr %x ep0state %s\n",
+ ep0csr, ep0states[dev->ep0state]);
+
+ /* clear stall status */
+ if (ep0csr & S3C2410_UDC_EP0_CSR_SENTSTL) {
+ s3c2410_udc_nuke(dev, ep, -EPIPE);
+ dprintk(DEBUG_NORMAL, "... clear SENT_STALL ...\n");
+ s3c2410_udc_clear_ep0_sst(base_addr);
+ dev->ep0state = EP0_IDLE;
+ return;
+ }
+
+ /* clear setup end */
+ if (ep0csr & S3C2410_UDC_EP0_CSR_SE) {
+ dprintk(DEBUG_NORMAL, "... serviced SETUP_END ...\n");
+ s3c2410_udc_nuke(dev, ep, 0);
+ s3c2410_udc_clear_ep0_se(base_addr);
+ dev->ep0state = EP0_IDLE;
+ }
+
+ switch (dev->ep0state) {
+ case EP0_IDLE:
+ s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr);
+ break;
+
+ case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
+ dprintk(DEBUG_NORMAL, "EP0_IN_DATA_PHASE ... what now?\n");
+ if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req)
+ s3c2410_udc_write_fifo(ep, req);
+ break;
+
+ case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
+ dprintk(DEBUG_NORMAL, "EP0_OUT_DATA_PHASE ... what now?\n");
+ if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req)
+ s3c2410_udc_read_fifo(ep, req);
+ break;
+
+ case EP0_END_XFER:
+ dprintk(DEBUG_NORMAL, "EP0_END_XFER ... what now?\n");
+ dev->ep0state = EP0_IDLE;
+ break;
+
+ case EP0_STALL:
+ dprintk(DEBUG_NORMAL, "EP0_STALL ... what now?\n");
+ dev->ep0state = EP0_IDLE;
+ break;
+ }
+}
+
+/*
+ * handle_ep - Manage I/O endpoints
+ */
+
+static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
+{
+ struct s3c2410_request *req;
+ int is_in = ep->bEndpointAddress & USB_DIR_IN;
+ u32 ep_csr1;
+ u32 idx;
+
+ if (likely(!list_empty(&ep->queue)))
+ req = list_entry(ep->queue.next,
+ struct s3c2410_request, queue);
+ else
+ req = NULL;
+
+ idx = ep->bEndpointAddress & 0x7F;
+
+ if (is_in) {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n",
+ idx, ep_csr1, req ? 1 : 0);
+
+ if (ep_csr1 & S3C2410_UDC_ICSR1_SENTSTL) {
+ dprintk(DEBUG_VERBOSE, "st\n");
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr1 & ~S3C2410_UDC_ICSR1_SENTSTL,
+ S3C2410_UDC_IN_CSR1_REG);
+ return;
+ }
+
+ if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req)
+ s3c2410_udc_write_fifo(ep, req);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1);
+
+ if (ep_csr1 & S3C2410_UDC_OCSR1_SENTSTL) {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr1 & ~S3C2410_UDC_OCSR1_SENTSTL,
+ S3C2410_UDC_OUT_CSR1_REG);
+ return;
+ }
+
+ if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req)
+ s3c2410_udc_read_fifo(ep, req);
+ }
+}
+
+/*
+ * s3c2410_udc_irq - interrupt handler
+ */
+static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
+{
+ struct s3c2410_udc *dev = _dev;
+ int usb_status;
+ int usbd_status;
+ int pwr_reg;
+ int ep0csr;
+ int i;
+ u32 idx, idx2;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* Driver connected ? */
+ if (!dev->driver) {
+ /* Clear interrupts */
+ udc_write(udc_read(S3C2410_UDC_USB_INT_REG),
+ S3C2410_UDC_USB_INT_REG);
+ udc_write(udc_read(S3C2410_UDC_EP_INT_REG),
+ S3C2410_UDC_EP_INT_REG);
+ }
+
+ /* Save index */
+ idx = udc_read(S3C2410_UDC_INDEX_REG);
+
+ /* Read status registers */
+ usb_status = udc_read(S3C2410_UDC_USB_INT_REG);
+ usbd_status = udc_read(S3C2410_UDC_EP_INT_REG);
+ pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
+
+ udc_writeb(base_addr, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+
+ dprintk(DEBUG_NORMAL, "usbs=%02x, usbds=%02x, pwr=%02x ep0csr=%02x\n",
+ usb_status, usbd_status, pwr_reg, ep0csr);
+
+ /*
+ * Now handle the interrupts. There are two types:
+ * - Reset, Resume, Suspend events -> usb_int_reg
+ * - EP data events -> ep_int_reg
+ */
+
+ /* RESET */
+ if (usb_status & S3C2410_UDC_USBINT_RESET) {
+ /*
+ * Two kinds of reset:
+ * - reset start -> pwr reg = 8
+ * - reset end -> pwr reg = 0
+ */
+ dprintk(DEBUG_NORMAL, "USB reset csr %x pwr %x\n",
+ ep0csr, pwr_reg);
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ udc_write(0x00, S3C2410_UDC_INDEX_REG);
+ udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3,
+ S3C2410_UDC_MAXP_REG);
+ dev->address = 0;
+
+ dev->ep0state = EP0_IDLE;
+ dev->gadget.speed = USB_SPEED_FULL;
+
+ /* clear interrupt */
+ udc_write(S3C2410_UDC_USBINT_RESET,
+ S3C2410_UDC_USB_INT_REG);
+
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* RESUME */
+ if (usb_status & S3C2410_UDC_USBINT_RESUME) {
+ dprintk(DEBUG_NORMAL, "USB resume\n");
+
+ /* clear interrupt */
+ udc_write(S3C2410_UDC_USBINT_RESUME,
+ S3C2410_UDC_USB_INT_REG);
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ }
+
+ /* SUSPEND */
+ if (usb_status & S3C2410_UDC_USBINT_SUSPEND) {
+ dprintk(DEBUG_NORMAL, "USB suspend\n");
+
+ /* clear interrupt */
+ udc_write(S3C2410_UDC_USBINT_SUSPEND,
+ S3C2410_UDC_USB_INT_REG);
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+
+ dev->ep0state = EP0_IDLE;
+ }
+
+ /* EP */
+ /* control traffic */
+ /* checking ep0csr != 0 is not a good idea as clearing in_pkt_ready
+ * generates an interrupt
+ */
+ if (usbd_status & S3C2410_UDC_INT_EP0) {
+ dprintk(DEBUG_VERBOSE, "USB ep0 irq\n");
+ /* Clear the interrupt bit by setting it to 1 */
+ udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_REG);
+ s3c2410_udc_handle_ep0(dev);
+ }
+
+ /* endpoint data transfers */
+ for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+ u32 tmp = 1 << i;
+ if (usbd_status & tmp) {
+ dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i);
+
+ /* Clear the interrupt bit by setting it to 1 */
+ udc_write(tmp, S3C2410_UDC_EP_INT_REG);
+ s3c2410_udc_handle_ep(&dev->ep[i]);
+ }
+ }
+
+ /* what else causes this interrupt? a receive! who is it? */
+ if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) {
+ for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+ idx2 = udc_read(S3C2410_UDC_INDEX_REG);
+ udc_write(i, S3C2410_UDC_INDEX_REG);
+
+ if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1)
+ s3c2410_udc_handle_ep(&dev->ep[i]);
+
+ /* restore index */
+ udc_write(idx2, S3C2410_UDC_INDEX_REG);
+ }
+ }
+
+ dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", irq_usbd);
+
+ /* Restore old index */
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+/*------------------------- s3c2410_ep_ops ----------------------------------*/
+
+static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct s3c2410_ep, ep);
+}
+
+static inline struct s3c2410_udc *to_s3c2410_udc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct s3c2410_udc, gadget);
+}
+
+static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req)
+{
+ return container_of(req, struct s3c2410_request, req);
+}
+
+/*
+ * s3c2410_udc_ep_enable
+ */
+static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct s3c2410_udc *dev;
+ struct s3c2410_ep *ep;
+ u32 max, tmp;
+ unsigned long flags;
+ u32 csr1, csr2;
+ u32 int_en_reg;
+
+ ep = to_s3c2410_ep(_ep);
+
+ if (!_ep || !desc
+ || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ max = usb_endpoint_maxp(desc);
+
+ local_irq_save(flags);
+ _ep->maxpacket = max;
+ ep->ep.desc = desc;
+ ep->halted = 0;
+ ep->bEndpointAddress = desc->bEndpointAddress;
+
+ /* set max packet */
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(max >> 3, S3C2410_UDC_MAXP_REG);
+
+ /* set type, direction, address; reset fifo counters */
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ csr1 = S3C2410_UDC_ICSR1_FFLUSH|S3C2410_UDC_ICSR1_CLRDT;
+ csr2 = S3C2410_UDC_ICSR2_MODEIN|S3C2410_UDC_ICSR2_DMAIEN;
+
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
+ } else {
+ /* don't flush the IN fifo or it will cause an endpoint interrupt */
+ csr1 = S3C2410_UDC_ICSR1_CLRDT;
+ csr2 = S3C2410_UDC_ICSR2_DMAIEN;
+
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
+
+ csr1 = S3C2410_UDC_OCSR1_FFLUSH | S3C2410_UDC_OCSR1_CLRDT;
+ csr2 = S3C2410_UDC_OCSR2_DMAIEN;
+
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr1, S3C2410_UDC_OUT_CSR1_REG);
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr2, S3C2410_UDC_OUT_CSR2_REG);
+ }
+
+ /* enable irqs */
+ int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+ udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG);
+
+ /* print some debug message */
+ tmp = desc->bEndpointAddress;
+ dprintk(DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n",
+ _ep->name, ep->num, tmp,
+ desc->bEndpointAddress & USB_DIR_IN ? "in" : "out", max);
+
+ local_irq_restore(flags);
+ s3c2410_udc_set_halt(_ep, 0);
+
+ return 0;
+}
+
+/*
+ * s3c2410_udc_ep_disable
+ */
+static int s3c2410_udc_ep_disable(struct usb_ep *_ep)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ unsigned long flags;
+ u32 int_en_reg;
+
+ if (!_ep || !ep->ep.desc) {
+ dprintk(DEBUG_NORMAL, "%s not enabled\n",
+ _ep ? ep->ep.name : NULL);
+ return -EINVAL;
+ }
+
+ local_irq_save(flags);
+
+ dprintk(DEBUG_NORMAL, "ep_disable: %s\n", _ep->name);
+
+ ep->ep.desc = NULL;
+ ep->halted = 1;
+
+ s3c2410_udc_nuke(ep->dev, ep, -ESHUTDOWN);
+
+ /* disable irqs */
+ int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+ udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG);
+
+ local_irq_restore(flags);
+
+ dprintk(DEBUG_NORMAL, "%s disabled\n", _ep->name);
+
+ return 0;
+}
+
+/*
+ * s3c2410_udc_alloc_request
+ */
+static struct usb_request *
+s3c2410_udc_alloc_request(struct usb_ep *_ep, gfp_t mem_flags)
+{
+ struct s3c2410_request *req;
+
+ dprintk(DEBUG_VERBOSE, "%s(%p,%d)\n", __func__, _ep, mem_flags);
+
+ if (!_ep)
+ return NULL;
+
+ req = kzalloc(sizeof(struct s3c2410_request), mem_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+/*
+ * s3c2410_udc_free_request
+ */
+static void
+s3c2410_udc_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ struct s3c2410_request *req = to_s3c2410_req(_req);
+
+ dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
+
+ if (!ep || !_req || (!ep->ep.desc && _ep->name != ep0name))
+ return;
+
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+/*
+ * s3c2410_udc_queue
+ */
+static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct s3c2410_request *req = to_s3c2410_req(_req);
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ struct s3c2410_udc *dev;
+ u32 ep_csr = 0;
+ int fifo_count = 0;
+ unsigned long flags;
+
+ if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
+ dprintk(DEBUG_NORMAL, "%s: invalid args\n", __func__);
+ return -EINVAL;
+ }
+
+ dev = ep->dev;
+ if (unlikely(!dev->driver
+ || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+ return -ESHUTDOWN;
+ }
+
+ local_irq_save(flags);
+
+ if (unlikely(!_req || !_req->complete
+ || !_req->buf || !list_empty(&req->queue))) {
+ if (!_req)
+ dprintk(DEBUG_NORMAL, "%s: 1 X X X\n", __func__);
+ else {
+ dprintk(DEBUG_NORMAL, "%s: 0 %01d %01d %01d\n",
+ __func__, !_req->complete, !_req->buf,
+ !list_empty(&req->queue));
+ }
+
+ local_irq_restore(flags);
+ return -EINVAL;
+ }
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n",
+ __func__, ep->bEndpointAddress, _req->length);
+
+ if (ep->bEndpointAddress) {
+ udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG);
+
+ ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
+ ? S3C2410_UDC_IN_CSR1_REG
+ : S3C2410_UDC_OUT_CSR1_REG);
+ fifo_count = s3c2410_udc_fifo_count_out();
+ } else {
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ fifo_count = s3c2410_udc_fifo_count_out();
+ }
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->halted) {
+ if (ep->bEndpointAddress == 0 /* ep0 */) {
+ switch (dev->ep0state) {
+ case EP0_IN_DATA_PHASE:
+ if (!(ep_csr&S3C2410_UDC_EP0_CSR_IPKRDY)
+ && s3c2410_udc_write_fifo(ep,
+ req)) {
+ dev->ep0state = EP0_IDLE;
+ req = NULL;
+ }
+ break;
+
+ case EP0_OUT_DATA_PHASE:
+ if (!_req->length
+ || ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
+ && s3c2410_udc_read_fifo(ep, req))) {
+ dev->ep0state = EP0_IDLE;
+ req = NULL;
+ }
+ break;
+
+ default:
+ local_irq_restore(flags);
+ return -EL2HLT;
+ }
+ } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+ && !(ep_csr & S3C2410_UDC_ICSR1_PKTRDY)
+ && s3c2410_udc_write_fifo(ep, req)) {
+ req = NULL;
+ } else if ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
+ && fifo_count
+ && s3c2410_udc_read_fifo(ep, req)) {
+ req = NULL;
+ }
+ }
+
+ /* pio or dma irq handler advances the queue. */
+ if (likely(req))
+ list_add_tail(&req->queue, &ep->queue);
+
+ local_irq_restore(flags);
+
+ dprintk(DEBUG_VERBOSE, "%s ok\n", __func__);
+ return 0;
+}
+
+/*
+ * s3c2410_udc_dequeue
+ */
+static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ int retval = -EINVAL;
+ unsigned long flags;
+ struct s3c2410_request *req = NULL, *iter;
+
+ dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
+
+ if (!the_controller->driver)
+ return -ESHUTDOWN;
+
+ if (!_ep || !_req)
+ return retval;
+
+ local_irq_save(flags);
+
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->req != _req)
+ continue;
+ list_del_init(&iter->queue);
+ _req->status = -ECONNRESET;
+ req = iter;
+ retval = 0;
+ break;
+ }
+
+ if (retval == 0) {
+ dprintk(DEBUG_VERBOSE,
+ "dequeued req %p from %s, len %d buf %p\n",
+ req, _ep->name, _req->length, _req->buf);
+
+ s3c2410_udc_done(ep, req, -ECONNRESET);
+ }
+
+ local_irq_restore(flags);
+ return retval;
+}
+
+/*
+ * s3c2410_udc_set_halt
+ */
+static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ u32 ep_csr = 0;
+ unsigned long flags;
+ u32 idx;
+
+ if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
+ dprintk(DEBUG_NORMAL, "%s: inval 2\n", __func__);
+ return -EINVAL;
+ }
+
+ local_irq_save(flags);
+
+ idx = ep->bEndpointAddress & 0x7F;
+
+ if (idx == 0) {
+ s3c2410_udc_set_ep0_ss(base_addr);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
+ ? S3C2410_UDC_IN_CSR1_REG
+ : S3C2410_UDC_OUT_CSR1_REG);
+
+ if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
+ if (value) {
+ udc_write(ep_csr | S3C2410_UDC_ICSR1_SENDSTL,
+ S3C2410_UDC_IN_CSR1_REG);
+ } else {
+ ep_csr &= ~S3C2410_UDC_ICSR1_SENDSTL;
+ udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
+ ep_csr |= S3C2410_UDC_ICSR1_CLRDT;
+ udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
+ }
+ } else {
+ if (value) {
+ udc_write(ep_csr | S3C2410_UDC_OCSR1_SENDSTL,
+ S3C2410_UDC_OUT_CSR1_REG);
+ } else {
+ ep_csr &= ~S3C2410_UDC_OCSR1_SENDSTL;
+ udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
+ ep_csr |= S3C2410_UDC_OCSR1_CLRDT;
+ udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
+ }
+ }
+ }
+
+ ep->halted = value ? 1 : 0;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static const struct usb_ep_ops s3c2410_ep_ops = {
+ .enable = s3c2410_udc_ep_enable,
+ .disable = s3c2410_udc_ep_disable,
+
+ .alloc_request = s3c2410_udc_alloc_request,
+ .free_request = s3c2410_udc_free_request,
+
+ .queue = s3c2410_udc_queue,
+ .dequeue = s3c2410_udc_dequeue,
+
+ .set_halt = s3c2410_udc_set_halt,
+};
+
+/*------------------------- usb_gadget_ops ----------------------------------*/
+
+/*
+ * s3c2410_udc_get_frame
+ */
+static int s3c2410_udc_get_frame(struct usb_gadget *_gadget)
+{
+ int tmp;
+
+ dprintk(DEBUG_VERBOSE, "%s()\n", __func__);
+
+ tmp = udc_read(S3C2410_UDC_FRAME_NUM2_REG) << 8;
+ tmp |= udc_read(S3C2410_UDC_FRAME_NUM1_REG);
+ return tmp;
+}
+
+/*
+ * s3c2410_udc_wakeup
+ */
+static int s3c2410_udc_wakeup(struct usb_gadget *_gadget)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+ return 0;
+}
+
+/*
+ * s3c2410_udc_set_selfpowered
+ */
+static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
+{
+ struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ gadget->is_selfpowered = (value != 0);
+ if (value)
+ udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+ else
+ udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ return 0;
+}
+
+static void s3c2410_udc_disable(struct s3c2410_udc *dev);
+static void s3c2410_udc_enable(struct s3c2410_udc *dev);
+
+static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ if (!udc_info || (!udc_info->udc_command && !udc->pullup_gpiod))
+ return -EOPNOTSUPP;
+
+ if (is_on) {
+ s3c2410_udc_enable(udc);
+ } else {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+ udc->driver && udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+ s3c2410_udc_disable(udc);
+ }
+
+ return 0;
+}
+
+static int s3c2410_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ udc->vbus = (is_active != 0);
+ s3c2410_udc_set_pullup(udc, is_active);
+ return 0;
+}
+
+static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ s3c2410_udc_set_pullup(udc, is_on);
+ return 0;
+}
+
+static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
+{
+ struct s3c2410_udc *dev = _dev;
+ unsigned int value;
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ value = gpiod_get_value(dev->vbus_gpiod);
+
+ if (value != dev->vbus)
+ s3c2410_udc_vbus_session(&dev->gadget, value);
+
+ return IRQ_HANDLED;
+}
+
+static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ if (udc_info && udc_info->vbus_draw) {
+ udc_info->vbus_draw(ma);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int s3c2410_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int s3c2410_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops s3c2410_ops = {
+ .get_frame = s3c2410_udc_get_frame,
+ .wakeup = s3c2410_udc_wakeup,
+ .set_selfpowered = s3c2410_udc_set_selfpowered,
+ .pullup = s3c2410_udc_pullup,
+ .vbus_session = s3c2410_udc_vbus_session,
+ .vbus_draw = s3c2410_vbus_draw,
+ .udc_start = s3c2410_udc_start,
+ .udc_stop = s3c2410_udc_stop,
+};
+
+static void s3c2410_udc_command(struct s3c2410_udc *udc,
+ enum s3c2410_udc_cmd_e cmd)
+{
+ if (!udc_info)
+ return;
+
+ if (udc_info->udc_command) {
+ udc_info->udc_command(cmd);
+ } else if (udc->pullup_gpiod) {
+ int value;
+
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE:
+ value = 1;
+ break;
+ case S3C2410_UDC_P_DISABLE:
+ value = 0;
+ break;
+ default:
+ return;
+ }
+
+ gpiod_set_value(udc->pullup_gpiod, value);
+ }
+}
+
+/*------------------------- gadget driver handling---------------------------*/
+/*
+ * s3c2410_udc_disable
+ */
+static void s3c2410_udc_disable(struct s3c2410_udc *dev)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ /* Disable all interrupts */
+ udc_write(0x00, S3C2410_UDC_USB_INT_EN_REG);
+ udc_write(0x00, S3C2410_UDC_EP_INT_EN_REG);
+
+ /* Clear the interrupt registers */
+ udc_write(S3C2410_UDC_USBINT_RESET
+ | S3C2410_UDC_USBINT_RESUME
+ | S3C2410_UDC_USBINT_SUSPEND,
+ S3C2410_UDC_USB_INT_REG);
+
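+ /* interrupts are acknowledged by writing 1s; 0x1F covers all five endpoints */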
+ udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
+
+ /* Good bye, cruel world */
+ s3c2410_udc_command(dev, S3C2410_UDC_P_DISABLE);
+
+ /* Set speed to unknown */
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+/*
+ * s3c2410_udc_reinit
+ */
+static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
+{
+ u32 i;
+
+ /* device/ep0 records init */
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+ dev->ep0state = EP0_IDLE;
+
+ for (i = 0; i < S3C2410_ENDPOINTS; i++) {
+ struct s3c2410_ep *ep = &dev->ep[i];
+
+ if (i != 0)
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+ ep->dev = dev;
+ ep->ep.desc = NULL;
+ ep->halted = 0;
+ INIT_LIST_HEAD(&ep->queue);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
+ }
+}
+
+/*
+ * s3c2410_udc_enable
+ */
+static void s3c2410_udc_enable(struct s3c2410_udc *dev)
+{
+ int i;
+
+ dprintk(DEBUG_NORMAL, "s3c2410_udc_enable called\n");
+
+ dev->gadget.speed = USB_SPEED_FULL;
+
+ /* Set MAXP for all endpoints; the MAXP register counts in 8-byte units */
+ for (i = 0; i < S3C2410_ENDPOINTS; i++) {
+ udc_write(i, S3C2410_UDC_INDEX_REG);
+ udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3,
+ S3C2410_UDC_MAXP_REG);
+ }
+
+ /* Set default power state */
+ udc_write(DEFAULT_POWER_STATE, S3C2410_UDC_PWR_REG);
+
+ /* Enable reset and suspend interrupts */
+ udc_write(S3C2410_UDC_USBINT_RESET | S3C2410_UDC_USBINT_SUSPEND,
+ S3C2410_UDC_USB_INT_EN_REG);
+
+ /* Enable ep0 interrupt */
+ udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
+
+ /* time to say "hello, world" */
+ s3c2410_udc_command(dev, S3C2410_UDC_P_ENABLE);
+}
+
+static int s3c2410_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct s3c2410_udc *udc = to_s3c2410(g);
+
+ dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
+
+ /* Hook the driver */
+ udc->driver = driver;
+
+ /* Enable udc */
+ s3c2410_udc_enable(udc);
+
+ return 0;
+}
+
+static int s3c2410_udc_stop(struct usb_gadget *g)
+{
+ struct s3c2410_udc *udc = to_s3c2410(g);
+
+ udc->driver = NULL;
+
+ /* Disable udc */
+ s3c2410_udc_disable(udc);
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+static struct s3c2410_udc memory = {
+ .gadget = {
+ .ops = &s3c2410_ops,
+ .ep0 = &memory.ep[0].ep,
+ .name = gadget_name,
+ .dev = {
+ .init_name = "gadget",
+ },
+ },
+
+ /* control endpoint */
+ .ep[0] = {
+ .num = 0,
+ .ep = {
+ .name = ep0name,
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP0_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .dev = &memory,
+ },
+
+ /* first group of endpoints */
+ .ep[1] = {
+ .num = 1,
+ .ep = {
+ .name = "ep1-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 1,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .ep[2] = {
+ .num = 2,
+ .ep = {
+ .name = "ep2-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 2,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .ep[3] = {
+ .num = 3,
+ .ep = {
+ .name = "ep3-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 3,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .ep[4] = {
+ .num = 4,
+ .ep = {
+ .name = "ep4-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 4,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ }
+
+};
+
+/*
+ * probe - binds to the platform device
+ */
+static int s3c2410_udc_probe(struct platform_device *pdev)
+{
+ struct s3c2410_udc *udc = &memory;
+ struct device *dev = &pdev->dev;
+ int retval;
+ int irq;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ usb_bus_clock = clk_get(NULL, "usb-bus-gadget");
+ if (IS_ERR(usb_bus_clock)) {
+ dev_err(dev, "failed to get usb bus clock source\n");
+ return PTR_ERR(usb_bus_clock);
+ }
+
+ clk_prepare_enable(usb_bus_clock);
+
+ udc_clock = clk_get(NULL, "usb-device");
+ if (IS_ERR(udc_clock)) {
+ dev_err(dev, "failed to get udc clock source\n");
+ retval = PTR_ERR(udc_clock);
+ goto err_usb_bus_clk;
+ }
+
+ clk_prepare_enable(udc_clock);
+
+ mdelay(10);
+
+ dev_dbg(dev, "got and enabled clocks\n");
+
+ if (strncmp(pdev->name, "s3c2440", 7) == 0) {
+ dev_info(dev, "S3C2440: increasing FIFO to 128 bytes\n");
+ memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE;
+ memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE;
+ memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE;
+ memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE;
+ }
+
+ spin_lock_init(&udc->lock);
+ udc_info = dev_get_platdata(&pdev->dev);
+
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base_addr)) {
+ retval = PTR_ERR(base_addr);
+ goto err_udc_clk;
+ }
+
+ the_controller = udc;
+ platform_set_drvdata(pdev, udc);
+
+ s3c2410_udc_disable(udc);
+ s3c2410_udc_reinit(udc);
+
+ irq_usbd = platform_get_irq(pdev, 0);
+ if (irq_usbd < 0) {
+ retval = irq_usbd;
+ goto err_udc_clk;
+ }
+
+ /* irq setup after old hardware state is cleaned up */
+ retval = request_irq(irq_usbd, s3c2410_udc_irq,
+ 0, gadget_name, udc);
+
+ if (retval != 0) {
+ dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
+ retval = -EBUSY;
+ goto err_udc_clk;
+ }
+
+ dev_dbg(dev, "got irq %i\n", irq_usbd);
+
+ udc->vbus_gpiod = gpiod_get_optional(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(udc->vbus_gpiod)) {
+ retval = PTR_ERR(udc->vbus_gpiod);
+ goto err_int;
+ }
+ if (udc->vbus_gpiod) {
+ gpiod_set_consumer_name(udc->vbus_gpiod, "udc vbus");
+
+ irq = gpiod_to_irq(udc->vbus_gpiod);
+ if (irq < 0) {
+ dev_err(dev, "no irq for gpio vbus pin\n");
+ retval = irq;
+ goto err_gpio_claim;
+ }
+
+ retval = request_irq(irq, s3c2410_udc_vbus_irq,
+ IRQF_TRIGGER_RISING
+ | IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ gadget_name, udc);
+
+ if (retval != 0) {
+ dev_err(dev, "can't get vbus irq %d, err %d\n",
+ irq, retval);
+ retval = -EBUSY;
+ goto err_gpio_claim;
+ }
+
+ dev_dbg(dev, "got irq %i\n", irq);
+ } else {
+ udc->vbus = 1;
+ }
+
+ udc->pullup_gpiod = gpiod_get_optional(dev, "pullup", GPIOD_OUT_LOW);
+ if (IS_ERR(udc->pullup_gpiod)) {
+ retval = PTR_ERR(udc->pullup_gpiod);
+ goto err_vbus_irq;
+ }
+ gpiod_set_consumer_name(udc->pullup_gpiod, "udc pullup");
+
+ retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (retval)
+ goto err_add_udc;
+
+ debugfs_create_file("registers", S_IRUGO, s3c2410_udc_debugfs_root, udc,
+ &s3c2410_udc_debugfs_fops);
+
+ dev_dbg(dev, "probe ok\n");
+
+ return 0;
+
+err_add_udc:
+err_vbus_irq:
+ if (udc->vbus_gpiod)
+ free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
+err_gpio_claim:
+err_int:
+ free_irq(irq_usbd, udc);
+err_udc_clk:
+ clk_disable_unprepare(udc_clock);
+ clk_put(udc_clock);
+ udc_clock = NULL;
+err_usb_bus_clk:
+ clk_disable_unprepare(usb_bus_clock);
+ clk_put(usb_bus_clock);
+ usb_bus_clock = NULL;
+
+ return retval;
+}
+
+/*
+ * s3c2410_udc_remove
+ */
+static int s3c2410_udc_remove(struct platform_device *pdev)
+{
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s()\n", __func__);
+
+ if (udc->driver)
+ return -EBUSY;
+
+ usb_del_gadget_udc(&udc->gadget);
+ debugfs_remove(debugfs_lookup("registers", s3c2410_udc_debugfs_root));
+
+ if (udc->vbus_gpiod)
+ free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
+
+ free_irq(irq_usbd, udc);
+
+ if (!IS_ERR_OR_NULL(udc_clock)) {
+ clk_disable_unprepare(udc_clock);
+ clk_put(udc_clock);
+ udc_clock = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(usb_bus_clock)) {
+ clk_disable_unprepare(usb_bus_clock);
+ clk_put(usb_bus_clock);
+ usb_bus_clock = NULL;
+ }
+
+ dev_dbg(&pdev->dev, "%s: remove ok\n", __func__);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ s3c2410_udc_command(udc, S3C2410_UDC_P_DISABLE);
+
+ return 0;
+}
+
+static int s3c2410_udc_resume(struct platform_device *pdev)
+{
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ s3c2410_udc_command(udc, S3C2410_UDC_P_ENABLE);
+
+ return 0;
+}
+#else
+#define s3c2410_udc_suspend NULL
+#define s3c2410_udc_resume NULL
+#endif
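As an aside, the same hooks could be expressed through dev_pm_ops rather than the legacy platform .suspend/.resume callbacks; a hedged sketch of the equivalent shape (hypothetical names, not part of this patch):

	static int s3c2410_udc_suspend_pm(struct device *dev)
	{
		struct s3c2410_udc *udc = dev_get_drvdata(dev);

		s3c2410_udc_command(udc, S3C2410_UDC_P_DISABLE);
		return 0;
	}
	/* resume mirrors this with S3C2410_UDC_P_ENABLE; then:
	 * static SIMPLE_DEV_PM_OPS(s3c2410_udc_pm, s3c2410_udc_suspend_pm,
	 *                          s3c2410_udc_resume_pm);
	 * and .driver.pm = &s3c2410_udc_pm in the platform_driver.
	 */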
+
+static const struct platform_device_id s3c_udc_ids[] = {
+ { "s3c2410-usbgadget", },
+ { "s3c2440-usbgadget", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
+
+static struct platform_driver udc_driver_24x0 = {
+ .driver = {
+ .name = "s3c24x0-usbgadget",
+ },
+ .probe = s3c2410_udc_probe,
+ .remove = s3c2410_udc_remove,
+ .suspend = s3c2410_udc_suspend,
+ .resume = s3c2410_udc_resume,
+ .id_table = s3c_udc_ids,
+};
+
+static int __init udc_init(void)
+{
+ int retval;
+
+ dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
+
+ s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name,
+ usb_debug_root);
+
+ retval = platform_driver_register(&udc_driver_24x0);
+ if (retval)
+ goto err;
+
+ return 0;
+
+err:
+ debugfs_remove(s3c2410_udc_debugfs_root);
+ return retval;
+}
+
+static void __exit udc_exit(void)
+{
+ platform_driver_unregister(&udc_driver_24x0);
+ debugfs_remove_recursive(s3c2410_udc_debugfs_root);
+}
+
+module_init(udc_init);
+module_exit(udc_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
new file mode 100644
index 000000000..cdbf202e5
--- /dev/null
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * linux/drivers/usb/gadget/s3c2410_udc.h
+ * Samsung on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
+ */
+
+#ifndef _S3C2410_UDC_H
+#define _S3C2410_UDC_H
+
+struct s3c2410_ep {
+ struct list_head queue;
+ unsigned long last_io; /* jiffies timestamp */
+ struct usb_gadget *gadget;
+ struct s3c2410_udc *dev;
+ struct usb_ep ep;
+ u8 num;
+
+ unsigned short fifo_size;
+ u8 bEndpointAddress;
+ u8 bmAttributes;
+
+ unsigned halted : 1;
+ unsigned already_seen : 1;
+ unsigned setup_stage : 1;
+};
+
+
+/*
+ * Warning: ep0 has a fifo of only 16 bytes, so don't try to set 32 or 64;
+ * also, testusb 14 fails with 16 but is fine with 8.
+ */
+#define EP0_FIFO_SIZE 8
+#define EP_FIFO_SIZE 64
+#define DEFAULT_POWER_STATE 0x00
+
+#define S3C2440_EP_FIFO_SIZE 128
+
+static const char ep0name[] = "ep0";
+
+static const char *const ep_name[] = {
+ ep0name, /* everyone has ep0 */
+ /* s3c2410 four bidirectional bulk endpoints */
+ "ep1-bulk", "ep2-bulk", "ep3-bulk", "ep4-bulk",
+};
+
+#define S3C2410_ENDPOINTS ARRAY_SIZE(ep_name)
+
+struct s3c2410_request {
+ struct list_head queue; /* ep's requests */
+ struct usb_request req;
+};
+
+enum ep0_state {
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE,
+ EP0_OUT_DATA_PHASE,
+ EP0_END_XFER,
+ EP0_STALL,
+};
+
+static const char *ep0states[] = {
+ "EP0_IDLE",
+ "EP0_IN_DATA_PHASE",
+ "EP0_OUT_DATA_PHASE",
+ "EP0_END_XFER",
+ "EP0_STALL",
+};
+
+struct s3c2410_udc {
+ spinlock_t lock;
+
+ struct s3c2410_ep ep[S3C2410_ENDPOINTS];
+ int address;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct s3c2410_request fifo_req;
+ u8 fifo_buf[EP_FIFO_SIZE];
+ u16 devstatus;
+
+ u32 port_status;
+ int ep0state;
+
+ struct gpio_desc *vbus_gpiod;
+ struct gpio_desc *pullup_gpiod;
+
+ unsigned got_irq : 1;
+
+ unsigned req_std : 1;
+ unsigned req_config : 1;
+ unsigned req_pending : 1;
+ u8 vbus;
+ int irq;
+};
+#define to_s3c2410(g) (container_of((g), struct s3c2410_udc, gadget))
+
+#endif
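The to_s3c2410() macro above is the standard container_of() downcast from the embedded struct usb_gadget back to the driver state; a gadget-ops callback would use it roughly as below (my_pullup is a hypothetical name, not code from this patch):

	static int my_pullup(struct usb_gadget *gadget, int is_on)
	{
		struct s3c2410_udc *udc = to_s3c2410(gadget);

		/* program udc->pullup_gpiod or the PWR register here */
		return 0;
	}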
diff --git a/drivers/usb/gadget/udc/s3c2410_udc_regs.h b/drivers/usb/gadget/udc/s3c2410_udc_regs.h
new file mode 100644
index 000000000..d8d2eeaca
--- /dev/null
+++ b/drivers/usb/gadget/udc/s3c2410_udc_regs.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2004 Herbert Poetzl <herbert@13thfloor.at>
+ */
+
+#ifndef __ASM_ARCH_REGS_UDC_H
+#define __ASM_ARCH_REGS_UDC_H
+
+#define S3C2410_USBDREG(x) (x)
+
+#define S3C2410_UDC_FUNC_ADDR_REG S3C2410_USBDREG(0x0140)
+#define S3C2410_UDC_PWR_REG S3C2410_USBDREG(0x0144)
+#define S3C2410_UDC_EP_INT_REG S3C2410_USBDREG(0x0148)
+
+#define S3C2410_UDC_USB_INT_REG S3C2410_USBDREG(0x0158)
+#define S3C2410_UDC_EP_INT_EN_REG S3C2410_USBDREG(0x015c)
+
+#define S3C2410_UDC_USB_INT_EN_REG S3C2410_USBDREG(0x016c)
+
+#define S3C2410_UDC_FRAME_NUM1_REG S3C2410_USBDREG(0x0170)
+#define S3C2410_UDC_FRAME_NUM2_REG S3C2410_USBDREG(0x0174)
+
+#define S3C2410_UDC_EP0_FIFO_REG S3C2410_USBDREG(0x01c0)
+#define S3C2410_UDC_EP1_FIFO_REG S3C2410_USBDREG(0x01c4)
+#define S3C2410_UDC_EP2_FIFO_REG S3C2410_USBDREG(0x01c8)
+#define S3C2410_UDC_EP3_FIFO_REG S3C2410_USBDREG(0x01cc)
+#define S3C2410_UDC_EP4_FIFO_REG S3C2410_USBDREG(0x01d0)
+
+#define S3C2410_UDC_EP1_DMA_CON S3C2410_USBDREG(0x0200)
+#define S3C2410_UDC_EP1_DMA_UNIT S3C2410_USBDREG(0x0204)
+#define S3C2410_UDC_EP1_DMA_FIFO S3C2410_USBDREG(0x0208)
+#define S3C2410_UDC_EP1_DMA_TTC_L S3C2410_USBDREG(0x020c)
+#define S3C2410_UDC_EP1_DMA_TTC_M S3C2410_USBDREG(0x0210)
+#define S3C2410_UDC_EP1_DMA_TTC_H S3C2410_USBDREG(0x0214)
+
+#define S3C2410_UDC_EP2_DMA_CON S3C2410_USBDREG(0x0218)
+#define S3C2410_UDC_EP2_DMA_UNIT S3C2410_USBDREG(0x021c)
+#define S3C2410_UDC_EP2_DMA_FIFO S3C2410_USBDREG(0x0220)
+#define S3C2410_UDC_EP2_DMA_TTC_L S3C2410_USBDREG(0x0224)
+#define S3C2410_UDC_EP2_DMA_TTC_M S3C2410_USBDREG(0x0228)
+#define S3C2410_UDC_EP2_DMA_TTC_H S3C2410_USBDREG(0x022c)
+
+#define S3C2410_UDC_EP3_DMA_CON S3C2410_USBDREG(0x0240)
+#define S3C2410_UDC_EP3_DMA_UNIT S3C2410_USBDREG(0x0244)
+#define S3C2410_UDC_EP3_DMA_FIFO S3C2410_USBDREG(0x0248)
+#define S3C2410_UDC_EP3_DMA_TTC_L S3C2410_USBDREG(0x024c)
+#define S3C2410_UDC_EP3_DMA_TTC_M S3C2410_USBDREG(0x0250)
+#define S3C2410_UDC_EP3_DMA_TTC_H S3C2410_USBDREG(0x0254)
+
+#define S3C2410_UDC_EP4_DMA_CON S3C2410_USBDREG(0x0258)
+#define S3C2410_UDC_EP4_DMA_UNIT S3C2410_USBDREG(0x025c)
+#define S3C2410_UDC_EP4_DMA_FIFO S3C2410_USBDREG(0x0260)
+#define S3C2410_UDC_EP4_DMA_TTC_L S3C2410_USBDREG(0x0264)
+#define S3C2410_UDC_EP4_DMA_TTC_M S3C2410_USBDREG(0x0268)
+#define S3C2410_UDC_EP4_DMA_TTC_H S3C2410_USBDREG(0x026c)
+
+#define S3C2410_UDC_INDEX_REG S3C2410_USBDREG(0x0178)
+
+/* indexed registers */
+
+#define S3C2410_UDC_MAXP_REG S3C2410_USBDREG(0x0180)
+
+#define S3C2410_UDC_EP0_CSR_REG S3C2410_USBDREG(0x0184)
+
+#define S3C2410_UDC_IN_CSR1_REG S3C2410_USBDREG(0x0184)
+#define S3C2410_UDC_IN_CSR2_REG S3C2410_USBDREG(0x0188)
+
+#define S3C2410_UDC_OUT_CSR1_REG S3C2410_USBDREG(0x0190)
+#define S3C2410_UDC_OUT_CSR2_REG S3C2410_USBDREG(0x0194)
+#define S3C2410_UDC_OUT_FIFO_CNT1_REG S3C2410_USBDREG(0x0198)
+#define S3C2410_UDC_OUT_FIFO_CNT2_REG S3C2410_USBDREG(0x019c)
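The registers above are banked per endpoint: software first writes an endpoint number to S3C2410_UDC_INDEX_REG and then accesses that endpoint's MAXP/CSR/FIFO-count registers. A minimal sketch of the access pattern, assuming readl/writel-style accessors over the ioremapped base (the driver itself may use narrower accesses):

	static u32 udc_read_ep_reg(void __iomem *base, u32 ep_index, u32 reg)
	{
		writel(ep_index, base + S3C2410_UDC_INDEX_REG);	/* select the ep */
		return readl(base + reg);	/* e.g. S3C2410_UDC_IN_CSR1_REG */
	}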
+
+#define S3C2410_UDC_FUNCADDR_UPDATE (1 << 7)
+
+#define S3C2410_UDC_PWR_ISOUP (1 << 7) /* R/W */
+#define S3C2410_UDC_PWR_RESET (1 << 3) /* R */
+#define S3C2410_UDC_PWR_RESUME (1 << 2) /* R/W */
+#define S3C2410_UDC_PWR_SUSPEND (1 << 1) /* R */
+#define S3C2410_UDC_PWR_ENSUSPEND (1 << 0) /* R/W */
+
+#define S3C2410_UDC_PWR_DEFAULT (0x00)
+
+#define S3C2410_UDC_INT_EP4 (1 << 4) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP3 (1 << 3) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP2 (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP1 (1 << 1) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP0 (1 << 0) /* R/W (clear only) */
+
+#define S3C2410_UDC_USBINT_RESET (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_USBINT_RESUME (1 << 1) /* R/W (clear only) */
+#define S3C2410_UDC_USBINT_SUSPEND (1 << 0) /* R/W (clear only) */
+
+#define S3C2410_UDC_INTE_EP4 (1 << 4) /* R/W */
+#define S3C2410_UDC_INTE_EP3 (1 << 3) /* R/W */
+#define S3C2410_UDC_INTE_EP2 (1 << 2) /* R/W */
+#define S3C2410_UDC_INTE_EP1 (1 << 1) /* R/W */
+#define S3C2410_UDC_INTE_EP0 (1 << 0) /* R/W */
+
+#define S3C2410_UDC_USBINTE_RESET (1 << 2) /* R/W */
+#define S3C2410_UDC_USBINTE_SUSPEND (1 << 0) /* R/W */
+
+#define S3C2410_UDC_INDEX_EP0 (0x00)
+#define S3C2410_UDC_INDEX_EP1 (0x01)
+#define S3C2410_UDC_INDEX_EP2 (0x02)
+#define S3C2410_UDC_INDEX_EP3 (0x03)
+#define S3C2410_UDC_INDEX_EP4 (0x04)
+
+#define S3C2410_UDC_ICSR1_CLRDT (1 << 6) /* R/W */
+#define S3C2410_UDC_ICSR1_SENTSTL (1 << 5) /* R/W (clear only) */
+#define S3C2410_UDC_ICSR1_SENDSTL (1 << 4) /* R/W */
+#define S3C2410_UDC_ICSR1_FFLUSH (1 << 3) /* W (set only) */
+#define S3C2410_UDC_ICSR1_UNDRUN (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_ICSR1_PKTRDY (1 << 0) /* R/W (set only) */
+
+#define S3C2410_UDC_ICSR2_AUTOSET (1 << 7) /* R/W */
+#define S3C2410_UDC_ICSR2_ISO (1 << 6) /* R/W */
+#define S3C2410_UDC_ICSR2_MODEIN (1 << 5) /* R/W */
+#define S3C2410_UDC_ICSR2_DMAIEN (1 << 4) /* R/W */
+
+#define S3C2410_UDC_OCSR1_CLRDT (1 << 7) /* R/W */
+#define S3C2410_UDC_OCSR1_SENTSTL (1 << 6) /* R/W (clear only) */
+#define S3C2410_UDC_OCSR1_SENDSTL (1 << 5) /* R/W */
+#define S3C2410_UDC_OCSR1_FFLUSH (1 << 4) /* R/W */
+#define S3C2410_UDC_OCSR1_DERROR (1 << 3) /* R */
+#define S3C2410_UDC_OCSR1_OVRRUN (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_OCSR1_PKTRDY (1 << 0) /* R/W (clear only) */
+
+#define S3C2410_UDC_OCSR2_AUTOCLR (1 << 7) /* R/W */
+#define S3C2410_UDC_OCSR2_ISO (1 << 6) /* R/W */
+#define S3C2410_UDC_OCSR2_DMAIEN (1 << 5) /* R/W */
+
+#define S3C2410_UDC_EP0_CSR_OPKRDY (1 << 0)
+#define S3C2410_UDC_EP0_CSR_IPKRDY (1 << 1)
+#define S3C2410_UDC_EP0_CSR_SENTSTL (1 << 2)
+#define S3C2410_UDC_EP0_CSR_DE (1 << 3)
+#define S3C2410_UDC_EP0_CSR_SE (1 << 4)
+#define S3C2410_UDC_EP0_CSR_SENDSTL (1 << 5)
+#define S3C2410_UDC_EP0_CSR_SOPKTRDY (1 << 6)
+#define S3C2410_UDC_EP0_CSR_SSE (1 << 7)
+
+#define S3C2410_UDC_MAXP_8 (1 << 0)
+#define S3C2410_UDC_MAXP_16 (1 << 1)
+#define S3C2410_UDC_MAXP_32 (1 << 2)
+#define S3C2410_UDC_MAXP_64 (1 << 3)
+
+#endif
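The "(clear only)" annotations matter for the interrupt registers: the status bits in EP_INT_REG and USB_INT_REG are write-one-to-clear, so a handler acks a source by writing its bit back. A sketch under the same assumed 32-bit accessors as above:

	u32 sts = readl(base + S3C2410_UDC_EP_INT_REG);
	if (sts & S3C2410_UDC_INT_EP1)
		writel(S3C2410_UDC_INT_EP1, base + S3C2410_UDC_EP_INT_REG);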
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
new file mode 100644
index 000000000..2fc5d4d27
--- /dev/null
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -0,0 +1,3192 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2007 AMD (https://www.amd.com)
+ * Author: Thomas Dahlmann
+ */
+
+/*
+ * This file implements the core of the driver for UDCs based on the Synopsys
+ * device controller IP (distinct from the HS OTG IP), which is either
+ * connected through a PCI bus or integrated into an SoC platform.
+ */
+
+/* Driver strings */
+#define UDC_MOD_DESCRIPTION "Synopsys USB Device Controller"
+#define UDC_DRIVER_VERSION_STRING "01.00.0206"
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/dmapool.h>
+#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include "amd5536udc.h"
+
+static void udc_setup_endpoints(struct udc *dev);
+static void udc_soft_reset(struct udc *dev);
+static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
+static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
+
+/* description */
+static const char mod_desc[] = UDC_MOD_DESCRIPTION;
+static const char name[] = "udc";
+
+/* structure to hold endpoint function pointers */
+static const struct usb_ep_ops udc_ep_ops;
+
+/* received setup data */
+static union udc_setup_data setup_data;
+
+/* pointer to device object */
+static struct udc *udc;
+
+/* irq spin lock for soft reset */
+static DEFINE_SPINLOCK(udc_irq_spinlock);
+/* stall spin lock */
+static DEFINE_SPINLOCK(udc_stall_spinlock);
+
+/*
+ * slave mode: pending bytes in rx fifo after nyet,
+ * used if EPIN irq came but no req was available
+ */
+static unsigned int udc_rxfifo_pending;
+
+/* count soft resets after suspend to avoid loop */
+static int soft_reset_occured;
+static int soft_reset_after_usbreset_occured;
+
+/* timer */
+static struct timer_list udc_timer;
+static int stop_timer;
+
+/* set_rde -- Is used to control enabling of RX DMA. Problem is
+ * that UDC has only one bit (RDE) to enable/disable RX DMA for
+ * all OUT endpoints. So we have to handle race conditions like
+ * when OUT data reaches the fifo but no request was queued yet.
+ * This cannot be solved by letting the RX DMA disabled until a
+ * request gets queued because there may be other OUT packets
+ * in the FIFO (important for not blocking control traffic).
+ * The value of set_rde controls the corresponding timer.
+ *
+ * set_rde -1 == not used, means it is allowed to be set to 0 or 1
+ * set_rde 0 == do not touch RDE, do not start the RDE timer
+ * set_rde 1 == timer function will look whether FIFO has data
+ * set_rde 2 == set by timer function to enable RX DMA on next call
+ */
+static int set_rde = -1;
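The state machine described above can be summarized as a tiny model (illustrative only, mirroring udc_timer_function() further down; not driver code):

	/* one timer tick of the RDE protocol; rxfifo_empty is read from DEVSTS */
	static int rde_timer_step(int rde_state, bool rxfifo_empty)
	{
		if (rde_state > 1)
			return -1;		/* state 2: enable RDE now, back to idle */
		if (rxfifo_empty)
			return rde_state;	/* keep polling until data shows up */
		return rde_state + 1;		/* data present: enable RDE on next expiry */
	}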
+
+static DECLARE_COMPLETION(on_exit);
+static struct timer_list udc_pollstall_timer;
+static int stop_pollstall_timer;
+static DECLARE_COMPLETION(on_pollstall_exit);
+
+/* endpoint names used for print */
+static const char ep0_string[] = "ep0in";
+static const struct {
+ const char *name;
+ const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ }
+
+ EP_INFO(ep0_string,
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep1in-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep2in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep3in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep4in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep5in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep6in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep7in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep8in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep9in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep10in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep11in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep12in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep13in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep14in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep15in-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+ EP_INFO("ep0out",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep1out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep2out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep3out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep4out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep5out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep6out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep7out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep8out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep9out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep10out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep11out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep12out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep13out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep14out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+ EP_INFO("ep15out-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
+};
+
+/* buffer fill mode */
+static int use_dma_bufferfill_mode;
+/* tx buffer size for high speed */
+static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
+
+/*---------------------------------------------------------------------------*/
+/* Prints UDC device registers and endpoint irq registers */
+static void print_regs(struct udc *dev)
+{
+ DBG(dev, "------- Device registers -------\n");
+ DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
+ DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
+ DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
+ DBG(dev, "\n");
+ DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
+ DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
+ DBG(dev, "\n");
+ DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
+ DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
+ DBG(dev, "\n");
+ DBG(dev, "USE DMA = %d\n", use_dma);
+ if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
+ DBG(dev, "DMA mode = PPBNDU (packet per buffer "
+ "WITHOUT desc. update)\n");
+ dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
+ } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
+ DBG(dev, "DMA mode = PPBDU (packet per buffer "
+ "WITH desc. update)\n");
+ dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
+ }
+ if (use_dma && use_dma_bufferfill_mode) {
+ DBG(dev, "DMA mode = BF (buffer fill mode)\n");
+ dev_info(dev->dev, "DMA mode (%s)\n", "BF");
+ }
+ if (!use_dma)
+ dev_info(dev->dev, "FIFO mode\n");
+ DBG(dev, "-------------------------------------------------------\n");
+}
+
+/* Masks unused interrupts */
+int udc_mask_unused_interrupts(struct udc *dev)
+{
+ u32 tmp;
+
+ /* mask all dev interrupts */
+ tmp = AMD_BIT(UDC_DEVINT_SVC) |
+ AMD_BIT(UDC_DEVINT_ENUM) |
+ AMD_BIT(UDC_DEVINT_US) |
+ AMD_BIT(UDC_DEVINT_UR) |
+ AMD_BIT(UDC_DEVINT_ES) |
+ AMD_BIT(UDC_DEVINT_SI) |
+ AMD_BIT(UDC_DEVINT_SOF)|
+ AMD_BIT(UDC_DEVINT_SC);
+ writel(tmp, &dev->regs->irqmsk);
+
+ /* mask all ep interrupts */
+ writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);
+
+/* Enables endpoint 0 interrupts */
+static int udc_enable_ep0_interrupts(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "udc_enable_ep0_interrupts()\n");
+
+ /* read irq mask */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ /* enable ep0 irq's */
+ tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
+ & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
+ writel(tmp, &dev->regs->ep_irqmsk);
+
+ return 0;
+}
+
+/* Enables device interrupts for SET_INTF and SET_CONFIG */
+int udc_enable_dev_setup_interrupts(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "enable device interrupts for setup data\n");
+
+ /* read irq mask */
+ tmp = readl(&dev->regs->irqmsk);
+
+ /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
+ tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
+ & AMD_UNMASK_BIT(UDC_DEVINT_SC)
+ & AMD_UNMASK_BIT(UDC_DEVINT_UR)
+ & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
+ & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
+ writel(tmp, &dev->regs->irqmsk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);
+
+/* Calculates fifo start of endpoint based on preceding endpoints */
+static int udc_set_txfifo_addr(struct udc_ep *ep)
+{
+ struct udc *dev;
+ u32 tmp;
+ int i;
+
+ if (!ep || !(ep->in))
+ return -EINVAL;
+
+ dev = ep->dev;
+ ep->txfifo = dev->txfifo;
+
+ /* traverse ep's */
+ for (i = 0; i < ep->num; i++) {
+ if (dev->ep[i].regs) {
+ /* read fifo size */
+ tmp = readl(&dev->ep[i].regs->bufin_framenum);
+ tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
+ ep->txfifo += tmp;
+ }
+ }
+ return 0;
+}
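A worked example of the accumulation above, assuming the double-buffering multiplier of 2 that udc_ep_enable() programs into UDC_EPIN_BUFF_SIZE (values illustrative):

	/* each preceding IN ep at maxpacket 512, double buffered: */
	per_ep_dwords = 512 * 2 / 4;	/* 256 dwords programmed per ep */
	/* an IN ep preceded by two such eps starts 512 dwords into fifo RAM */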
+
+/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
+static u32 cnak_pending;
+
+static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
+{
+ if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
+ DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
+ cnak_pending |= 1 << (num);
+ ep->naking = 1;
+ } else
+ cnak_pending = cnak_pending & (~(1 << (num)));
+}
+
+
+/* Enables endpoint, is called by gadget driver */
+static int
+udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
+{
+ struct udc_ep *ep;
+ struct udc *dev;
+ u32 tmp;
+ unsigned long iflags;
+ u8 udc_csr_epix;
+ unsigned maxpacket;
+
+ if (!usbep
+ || usbep->name == ep0_string
+ || !desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ dev = ep->dev;
+
+ DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&dev->lock, iflags);
+ ep->ep.desc = desc;
+
+ ep->halted = 0;
+
+ /* set traffic type */
+ tmp = readl(&dev->ep[ep->num].regs->ctl);
+ tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
+ writel(tmp, &dev->ep[ep->num].regs->ctl);
+
+ /* set max packet size */
+ maxpacket = usb_endpoint_maxp(desc);
+ tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
+ tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
+ ep->ep.maxpacket = maxpacket;
+ writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
+
+ /* IN ep */
+ if (ep->in) {
+
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num;
+
+ /* set buffer size (tx fifo entries) */
+ tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
+ /* double buffering: fifo size = 2 x max packet size */
+ tmp = AMD_ADDBITS(
+ tmp,
+ maxpacket * UDC_EPIN_BUFF_SIZE_MULT
+ / UDC_DWORD_BYTES,
+ UDC_EPIN_BUFF_SIZE);
+ writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
+
+ /* calc. tx fifo base addr */
+ udc_set_txfifo_addr(ep);
+
+ /* flush fifo */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_F);
+ writel(tmp, &ep->regs->ctl);
+
+ /* OUT ep */
+ } else {
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+
+ /* set max packet size UDC CSR */
+ tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
+ tmp = AMD_ADDBITS(tmp, maxpacket,
+ UDC_CSR_NE_MAX_PKT);
+ writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
+
+ if (use_dma && !ep->in) {
+ /* alloc and init BNA dummy request */
+ ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
+ ep->bna_occurred = 0;
+ }
+
+ if (ep->num != UDC_EP0OUT_IX)
+ dev->data_ep_enabled = 1;
+ }
+
+ /* set ep values */
+ tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ /* max packet */
+ tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
+ /* ep number */
+ tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
+ /* ep direction */
+ tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
+ /* ep type */
+ tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
+ /* ep config */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
+ /* ep interface */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
+ /* ep alt */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
+ /* write reg */
+ writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+ /* enable ep irq */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp &= AMD_UNMASK_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
+
+ /*
+ * clear NAK by writing CNAK
+ * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
+ */
+ if (!use_dma || ep->in) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+ tmp = desc->bEndpointAddress;
+ DBG(dev, "%s enabled\n", usbep->name);
+
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ return 0;
+}
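From the function-driver side this entry point is reached via the generic ep API; the typical enable sequence looks roughly like this (sketch using the composite-framework helper, not code from this file):

	ret = config_ep_by_speed(gadget, f, ep);	/* pick descriptor, set ep->desc */
	if (!ret)
		ret = usb_ep_enable(ep);		/* dispatches to udc_ep_enable() */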
+
+/* Resets endpoint */
+static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
+{
+ u32 tmp;
+
+ VDBG(ep->dev, "ep-%d reset\n", ep->num);
+ ep->ep.desc = NULL;
+ ep->ep.ops = &udc_ep_ops;
+ INIT_LIST_HEAD(&ep->queue);
+
+ usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
+ /* set NAK */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_SNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 1;
+
+ /* disable interrupt */
+ tmp = readl(&regs->ep_irqmsk);
+ tmp |= AMD_BIT(ep->num);
+ writel(tmp, &regs->ep_irqmsk);
+
+ if (ep->in) {
+ /* unset P and IN bit of potential former DMA */
+ tmp = readl(&ep->regs->ctl);
+ tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
+ writel(tmp, &ep->regs->ctl);
+
+ tmp = readl(&ep->regs->sts);
+ tmp |= AMD_BIT(UDC_EPSTS_IN);
+ writel(tmp, &ep->regs->sts);
+
+ /* flush the fifo */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_F);
+ writel(tmp, &ep->regs->ctl);
+
+ }
+ /* reset desc pointer */
+ writel(0, &ep->regs->desptr);
+}
+
+/* Disables endpoint, is called by gadget driver */
+static int udc_ep_disable(struct usb_ep *usbep)
+{
+ struct udc_ep *ep = NULL;
+ unsigned long iflags;
+
+ if (!usbep)
+ return -EINVAL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (usbep->name == ep0_string || !ep->ep.desc)
+ return -EINVAL;
+
+ DBG(ep->dev, "Disable ep-%d\n", ep->num);
+
+ spin_lock_irqsave(&ep->dev->lock, iflags);
+ udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
+ empty_req_queue(ep);
+ ep_init(ep->dev->regs, ep);
+ spin_unlock_irqrestore(&ep->dev->lock, iflags);
+
+ return 0;
+}
+
+/* Allocates request packet, called by gadget driver */
+static struct usb_request *
+udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
+{
+ struct udc_request *req;
+ struct udc_data_dma *dma_desc;
+ struct udc_ep *ep;
+
+ if (!usbep)
+ return NULL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+
+ VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
+ req = kzalloc(sizeof(struct udc_request), gfp);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_DONT_USE;
+ INIT_LIST_HEAD(&req->queue);
+
+ if (ep->dma) {
+ /* ep0 in requests are allocated from data pool here */
+ dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
+ &req->td_phys);
+ if (!dma_desc) {
+ kfree(req);
+ return NULL;
+ }
+
+ VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
+ "td_phys = %lx\n",
+ req, dma_desc,
+ (unsigned long)req->td_phys);
+ /* prevent from using desc. - set HOST BUSY */
+ dma_desc->status = AMD_ADDBITS(dma_desc->status,
+ UDC_DMA_STP_STS_BS_HOST_BUSY,
+ UDC_DMA_STP_STS_BS);
+ dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
+ req->td_data = dma_desc;
+ req->td_data_last = NULL;
+ req->chain_len = 1;
+ }
+
+ return &req->req;
+}
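For context, a gadget function driver exercises these ops through the generic ep API; a minimal request lifecycle sketch (buf, len and my_complete are placeholders):

	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (req) {
		req->buf = buf;
		req->length = len;
		req->complete = my_complete;	/* invoked from complete_req() below */
		if (usb_ep_queue(ep, req, GFP_ATOMIC))
			usb_ep_free_request(ep, req);
	}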
+
+/* frees pci pool descriptors of a DMA chain */
+static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
+{
+ struct udc_data_dma *td = req->td_data;
+ unsigned int i;
+
+ dma_addr_t addr_next = 0x00;
+ dma_addr_t addr = (dma_addr_t)td->next;
+
+ DBG(dev, "free chain req = %p\n", req);
+
+ /* do not free first desc., will be done by free for request */
+ for (i = 1; i < req->chain_len; i++) {
+ td = phys_to_virt(addr);
+ addr_next = (dma_addr_t)td->next;
+ dma_pool_free(dev->data_requests, td, addr);
+ addr = addr_next;
+ }
+}
+
+/* Frees request packet, called by gadget driver */
+static void
+udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
+{
+ struct udc_ep *ep;
+ struct udc_request *req;
+
+ if (!usbep || !usbreq)
+ return;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ req = container_of(usbreq, struct udc_request, req);
+ VDBG(ep->dev, "free_req req=%p\n", req);
+ BUG_ON(!list_empty(&req->queue));
+ if (req->td_data) {
+ VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
+
+ /* free dma chain if created */
+ if (req->chain_len > 1)
+ udc_free_dma_chain(ep->dev, req);
+
+ dma_pool_free(ep->dev->data_requests, req->td_data,
+ req->td_phys);
+ }
+ kfree(req);
+}
+
+/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
+static void udc_init_bna_dummy(struct udc_request *req)
+{
+ if (req) {
+ /* set last bit */
+ req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+ /* set next pointer to itself */
+ req->td_data->next = req->td_phys;
+ /* set HOST BUSY */
+ req->td_data->status
+ = AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_STP_STS_BS_DMA_DONE,
+ UDC_DMA_STP_STS_BS);
+#ifdef UDC_VERBOSE
+ pr_debug("bna desc = %p, sts = %08x\n",
+ req->td_data, req->td_data->status);
+#endif
+ }
+}
+
+/* Allocate BNA dummy descriptor */
+static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
+{
+ struct udc_request *req = NULL;
+ struct usb_request *_req = NULL;
+
+ /* alloc the dummy request */
+ _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
+ if (_req) {
+ req = container_of(_req, struct udc_request, req);
+ ep->bna_dummy_req = req;
+ udc_init_bna_dummy(req);
+ }
+ return req;
+}
+
+/* Write data to TX fifo for IN packets */
+static void
+udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
+{
+ u8 *req_buf;
+ u32 *buf;
+ int i, j;
+ unsigned bytes = 0;
+ unsigned remaining = 0;
+
+ if (!req || !ep)
+ return;
+
+ req_buf = req->buf + req->actual;
+ prefetch(req_buf);
+ remaining = req->length - req->actual;
+
+ buf = (u32 *) req_buf;
+
+ bytes = ep->ep.maxpacket;
+ if (bytes > remaining)
+ bytes = remaining;
+
+ /* dwords first */
+ for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
+ writel(*(buf + i), ep->txfifo);
+
+ /* remaining bytes must be written by byte access */
+ for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
+ writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
+ ep->txfifo);
+ }
+
+ /* dummy write confirm */
+ writel(0, &ep->regs->confirm);
+}
+
+/* Read dwords from RX fifo for OUT transfers */
+static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
+{
+ int i;
+
+ VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
+
+ for (i = 0; i < dwords; i++)
+ *(buf + i) = readl(dev->rxfifo);
+ return 0;
+}
+
+/* Read bytes from RX fifo for OUT transfers */
+static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
+{
+ int i, j;
+ u32 tmp;
+
+ VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
+
+ /* dwords first */
+ for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
+ *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
+
+ /* remaining bytes must be read by byte access */
+ if (bytes % UDC_DWORD_BYTES) {
+ tmp = readl(dev->rxfifo);
+ for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
+ *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
+ tmp = tmp >> UDC_BITS_PER_BYTE;
+ }
+ }
+
+ return 0;
+}
+
+/* Read data from RX fifo for OUT transfers */
+static int
+udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
+{
+ u8 *buf;
+ unsigned buf_space;
+ unsigned bytes = 0;
+ unsigned finished = 0;
+
+ /* received number bytes */
+ bytes = readl(&ep->regs->sts);
+ bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
+
+ buf_space = req->req.length - req->req.actual;
+ buf = req->req.buf + req->req.actual;
+ if (bytes > buf_space) {
+ if ((buf_space % ep->ep.maxpacket) != 0) {
+ DBG(ep->dev,
+ "%s: rx %d bytes, rx-buf space = %d bytesn\n",
+ ep->ep.name, bytes, buf_space);
+ req->req.status = -EOVERFLOW;
+ }
+ bytes = buf_space;
+ }
+ req->req.actual += bytes;
+
+ /* last packet ? */
+ if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
+ || ((req->req.actual == req->req.length) && !req->req.zero))
+ finished = 1;
+
+ /* read rx fifo bytes */
+ VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
+ udc_rxfifo_read_bytes(ep->dev, buf, bytes);
+
+ return finished;
+}
+
+/* Creates or re-inits a DMA chain */
+static int udc_create_dma_chain(
+ struct udc_ep *ep,
+ struct udc_request *req,
+ unsigned long buf_len, gfp_t gfp_flags
+)
+{
+ unsigned long bytes = req->req.length;
+ unsigned int i;
+ dma_addr_t dma_addr;
+ struct udc_data_dma *td = NULL;
+ struct udc_data_dma *last = NULL;
+ unsigned long txbytes;
+ unsigned create_new_chain = 0;
+ unsigned len;
+
+ VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
+ bytes, buf_len);
+ dma_addr = DMA_DONT_USE;
+
+ /* unset L bit in first desc for OUT */
+ if (!ep->in)
+ req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
+
+ /* alloc only new desc's if not already available */
+ len = req->req.length / ep->ep.maxpacket;
+ if (req->req.length % ep->ep.maxpacket)
+ len++;
+
+ if (len > req->chain_len) {
+ /* shorter chain already allocated before */
+ if (req->chain_len > 1)
+ udc_free_dma_chain(ep->dev, req);
+ req->chain_len = len;
+ create_new_chain = 1;
+ }
+
+ td = req->td_data;
+ /* gen. required number of descriptors and buffers */
+ for (i = buf_len; i < bytes; i += buf_len) {
+ /* create or determine next desc. */
+ if (create_new_chain) {
+ td = dma_pool_alloc(ep->dev->data_requests,
+ gfp_flags, &dma_addr);
+ if (!td)
+ return -ENOMEM;
+
+ td->status = 0;
+ } else if (i == buf_len) {
+ /* first td */
+ td = (struct udc_data_dma *)phys_to_virt(
+ req->td_data->next);
+ td->status = 0;
+ } else {
+ td = (struct udc_data_dma *)phys_to_virt(last->next);
+ td->status = 0;
+ }
+
+ if (td)
+ td->bufptr = req->req.dma + i; /* assign buffer */
+ else
+ break;
+
+ /* short packet ? */
+ if ((bytes - i) >= buf_len) {
+ txbytes = buf_len;
+ } else {
+ /* short packet */
+ txbytes = bytes - i;
+ }
+
+ /* link td and assign tx bytes */
+ if (i == buf_len) {
+ if (create_new_chain)
+ req->td_data->next = dma_addr;
+ /*
+ * else
+ * req->td_data->next = virt_to_phys(td);
+ */
+ /* write tx bytes */
+ if (ep->in) {
+ /* first desc */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ ep->ep.maxpacket,
+ UDC_DMA_IN_STS_TXBYTES);
+ /* second desc */
+ td->status = AMD_ADDBITS(td->status,
+ txbytes,
+ UDC_DMA_IN_STS_TXBYTES);
+ }
+ } else {
+ if (create_new_chain)
+ last->next = dma_addr;
+ /*
+ * else
+ * last->next = virt_to_phys(td);
+ */
+ if (ep->in) {
+ /* write tx bytes */
+ td->status = AMD_ADDBITS(td->status,
+ txbytes,
+ UDC_DMA_IN_STS_TXBYTES);
+ }
+ }
+ last = td;
+ }
+ /* set last bit */
+ if (td) {
+ td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+ /* last desc. points to itself */
+ req->td_data_last = td;
+ }
+
+ return 0;
+}
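The descriptor count computed above is a plain ceiling division; the usual kernel idiom for the same thing (equivalent sketch):

	len = DIV_ROUND_UP(req->req.length, ep->ep.maxpacket);
	/* e.g. a 1536-byte request at maxpacket 512 needs 3 descriptors */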
+
+/* create/re-init a DMA descriptor or a DMA descriptor chain */
+static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
+{
+ int retval = 0;
+ u32 tmp;
+
+ VDBG(ep->dev, "prep_dma\n");
+ VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
+ ep->num, req->td_data);
+
+ /* set buffer pointer */
+ req->td_data->bufptr = req->req.dma;
+
+ /* set last bit */
+ req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+
+ /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
+ if (use_dma_ppb) {
+
+ retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
+ if (retval != 0) {
+ if (retval == -ENOMEM)
+ DBG(ep->dev, "Out of DMA memory\n");
+ return retval;
+ }
+ if (ep->in) {
+ if (req->req.length == ep->ep.maxpacket) {
+ /* write tx bytes */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ ep->ep.maxpacket,
+ UDC_DMA_IN_STS_TXBYTES);
+
+ }
+ }
+
+ }
+
+ if (ep->in) {
+ VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
+ "maxpacket=%d ep%d\n",
+ use_dma_ppb, req->req.length,
+ ep->ep.maxpacket, ep->num);
+ /*
+ * if bytes < max packet then tx bytes must
+ * be written in packet per buffer mode
+ */
+ if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
+ || ep->num == UDC_EP0OUT_IX
+ || ep->num == UDC_EP0IN_IX) {
+ /* write tx bytes */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ req->req.length,
+ UDC_DMA_IN_STS_TXBYTES);
+ /* reset frame num */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ 0,
+ UDC_DMA_IN_STS_FRAMENUM);
+ }
+ /* set HOST BUSY */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_STP_STS_BS_HOST_BUSY,
+ UDC_DMA_STP_STS_BS);
+ } else {
+ VDBG(ep->dev, "OUT set host ready\n");
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_STP_STS_BS_HOST_READY,
+ UDC_DMA_STP_STS_BS);
+
+ /* clear NAK by writing CNAK */
+ if (ep->naking) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+
+ }
+
+ return retval;
+}
+
+/* Completes request packet ... caller MUST hold lock */
+static void
+complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
+__releases(ep->dev->lock)
+__acquires(ep->dev->lock)
+{
+ struct udc *dev;
+ unsigned halted;
+
+ VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
+
+ dev = ep->dev;
+ /* unmap DMA */
+ if (ep->dma)
+ usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
+
+ halted = ep->halted;
+ ep->halted = 1;
+
+ /* set new status if pending */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = sts;
+
+ /* remove from ep queue */
+ list_del_init(&req->queue);
+
+ VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
+ &req->req, req->req.length, ep->ep.name, sts);
+
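+ /* the completion callback may requeue or dequeue, so call it unlocked */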
+ spin_unlock(&dev->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->halted = halted;
+}
+
+/* Iterates to the end of a DMA chain and returns last descriptor */
+static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
+{
+ struct udc_data_dma *td;
+
+ td = req->td_data;
+ while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
+ td = phys_to_virt(td->next);
+
+ return td;
+}
+
+/* Iterates to the end of a DMA chain and counts bytes received */
+static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
+{
+ struct udc_data_dma *td;
+ u32 count;
+
+ td = req->td_data;
+ /* received number bytes */
+ count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
+
+ while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
+ td = phys_to_virt(td->next);
+ /* received number bytes */
+ if (td) {
+ count += AMD_GETBITS(td->status,
+ UDC_DMA_OUT_STS_RXBYTES);
+ }
+ }
+
+ return count;
+}
+
+/* Enabling RX DMA */
+static void udc_set_rde(struct udc *dev)
+{
+ u32 tmp;
+
+ VDBG(dev, "udc_set_rde()\n");
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* set RDE */
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_RDE);
+ writel(tmp, &dev->regs->ctl);
+}
+
+/* Queues a request packet, called by gadget driver */
+static int
+udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
+{
+ int retval = 0;
+ u8 open_rxfifo = 0;
+ unsigned long iflags;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ struct udc *dev;
+ u32 tmp;
+
+ /* check the inputs */
+ req = container_of(usbreq, struct udc_request, req);
+
+ if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
+ || !list_empty(&req->queue))
+ return -EINVAL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
+ return -EINVAL;
+
+ VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
+ dev = ep->dev;
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* map dma (usually done before) */
+ if (ep->dma) {
+ VDBG(dev, "DMA map req %p\n", req);
+ retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
+ if (retval)
+ return retval;
+ }
+
+ VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
+ usbep->name, usbreq, usbreq->length,
+ req->td_data, usbreq->buf);
+
+ spin_lock_irqsave(&dev->lock, iflags);
+ usbreq->actual = 0;
+ usbreq->status = -EINPROGRESS;
+ req->dma_done = 0;
+
+ /* on empty queue just do first transfer */
+ if (list_empty(&ep->queue)) {
+ /* zlp */
+ if (usbreq->length == 0) {
+ /* IN zlp's are handled by hardware */
+ complete_req(ep, req, 0);
+ VDBG(dev, "%s: zlp\n", ep->ep.name);
+ /*
+ * if set_config or set_intf is waiting for ack by zlp
+ * then set CSR_DONE
+ */
+ if (dev->set_cfg_not_acked) {
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
+ writel(tmp, &dev->regs->ctl);
+ dev->set_cfg_not_acked = 0;
+ }
+ /* setup command is ACK'ed now by zlp */
+ if (dev->waiting_zlp_ack_ep0in) {
+ /* clear NAK by writing CNAK in EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
+ UDC_EP0IN_IX);
+ dev->waiting_zlp_ack_ep0in = 0;
+ }
+ goto finished;
+ }
+ if (ep->dma) {
+ retval = prep_dma(ep, req, GFP_ATOMIC);
+ if (retval != 0)
+ goto finished;
+ /* write desc pointer to enable DMA */
+ if (ep->in) {
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_IN_STS_BS_HOST_READY,
+ UDC_DMA_IN_STS_BS);
+ }
+
+ /* disable rx dma while the descriptor is updated */
+ if (!ep->in) {
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* clear RDE */
+ tmp = readl(&dev->regs->ctl);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
+ writel(tmp, &dev->regs->ctl);
+ open_rxfifo = 1;
+
+ /*
+ * if BNA occurred then let BNA dummy desc.
+ * point to current desc.
+ */
+ if (ep->bna_occurred) {
+ VDBG(dev, "copy to BNA dummy desc.\n");
+ memcpy(ep->bna_dummy_req->td_data,
+ req->td_data,
+ sizeof(struct udc_data_dma));
+ }
+ }
+ /* write desc pointer */
+ writel(req->td_phys, &ep->regs->desptr);
+
+ /* clear NAK by writing CNAK */
+ if (ep->naking) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+
+ if (ep->in) {
+ /* enable ep irq */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp &= AMD_UNMASK_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
+ }
+ } else if (ep->in) {
+ /* enable ep irq */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp &= AMD_UNMASK_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
+ }
+
+ } else if (ep->dma) {
+
+ /*
+ * prep_dma is not used for OUT ep's here; it is not possible
+ * in the PPB modes because of how the DMA chain is created
+ */
+ if (ep->in) {
+ retval = prep_dma(ep, req, GFP_ATOMIC);
+ if (retval != 0)
+ goto finished;
+ }
+ }
+ VDBG(dev, "list_add\n");
+ /* add request to ep queue */
+ if (req) {
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ /* open rxfifo if out data queued */
+ if (open_rxfifo) {
+ /* enable DMA */
+ req->dma_going = 1;
+ udc_set_rde(dev);
+ if (ep->num != UDC_EP0OUT_IX)
+ dev->data_ep_queued = 1;
+ }
+ /* stop OUT naking */
+ if (!ep->in) {
+ if (!use_dma && udc_rxfifo_pending) {
+ DBG(dev, "udc_queue(): pending bytes in "
+ "rxfifo after nyet\n");
+ /*
+ * read pending bytes after nyet:
+ * referring to isr
+ */
+ if (udc_rxfifo_read(ep, req)) {
+ /* finish */
+ complete_req(ep, req, 0);
+ }
+ udc_rxfifo_pending = 0;
+
+ }
+ }
+ }
+
+finished:
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ return retval;
+}
+
+/* Empty request queue of an endpoint; caller holds spinlock */
+void empty_req_queue(struct udc_ep *ep)
+{
+ struct udc_request *req;
+
+ ep->halted = 1;
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct udc_request,
+ queue);
+ complete_req(ep, req, -ESHUTDOWN);
+ }
+}
+EXPORT_SYMBOL_GPL(empty_req_queue);
+
+/* Dequeues a request packet, called by gadget driver */
+static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
+{
+ struct udc_ep *ep;
+ struct udc_request *req;
+ unsigned halted;
+ unsigned long iflags;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
+ && ep->num != UDC_EP0OUT_IX)))
+ return -EINVAL;
+
+ req = container_of(usbreq, struct udc_request, req);
+
+ spin_lock_irqsave(&ep->dev->lock, iflags);
+ halted = ep->halted;
+ ep->halted = 1;
+ /* request in processing or next one */
+ if (ep->queue.next == &req->queue) {
+ if (ep->dma && req->dma_going) {
+ if (ep->in)
+ ep->cancel_transfer = 1;
+ else {
+ u32 tmp;
+ u32 dma_sts;
+ /* stop potential receive DMA */
+ tmp = readl(&udc->regs->ctl);
+ writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
+ &udc->regs->ctl);
+ /*
+ * Cancel transfer later in ISR
+ * if descriptor was touched.
+ */
+ dma_sts = AMD_GETBITS(req->td_data->status,
+ UDC_DMA_OUT_STS_BS);
+ if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
+ ep->cancel_transfer = 1;
+ else {
+ udc_init_bna_dummy(ep->req);
+ writel(ep->bna_dummy_req->td_phys,
+ &ep->regs->desptr);
+ }
+ writel(tmp, &udc->regs->ctl);
+ }
+ }
+ }
+ complete_req(ep, req, -ECONNRESET);
+ ep->halted = halted;
+
+ spin_unlock_irqrestore(&ep->dev->lock, iflags);
+ return 0;
+}
+
+/* Halt or clear halt of endpoint */
+static int
+udc_set_halt(struct usb_ep *usbep, int halt)
+{
+ struct udc_ep *ep;
+ u32 tmp;
+ unsigned long iflags;
+ int retval = 0;
+
+ if (!usbep)
+ return -EINVAL;
+
+ pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
+ return -EINVAL;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&udc_stall_spinlock, iflags);
+ /* halt or clear halt */
+ if (halt) {
+ if (ep->num == 0)
+ ep->dev->stall_ep0in = 1;
+ else {
+ /*
+ * set STALL
+ * rxfifo empty is not taken into account
+ */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ ep->halted = 1;
+
+ /* setup poll timer */
+ if (!timer_pending(&udc_pollstall_timer)) {
+ udc_pollstall_timer.expires = jiffies +
+ HZ * UDC_POLLSTALL_TIMER_USECONDS
+ / (1000 * 1000);
+ if (!stop_pollstall_timer) {
+ DBG(ep->dev, "start polltimer\n");
+ add_timer(&udc_pollstall_timer);
+ }
+ }
+ }
+ } else {
+ /* ep is halted by set_halt() before */
+ if (ep->halted) {
+ tmp = readl(&ep->regs->ctl);
+ /* clear stall bit */
+ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+ /* clear NAK by writing CNAK */
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->halted = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+ }
+ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+ return retval;
+}
+
+/* gadget interface */
+static const struct usb_ep_ops udc_ep_ops = {
+ .enable = udc_ep_enable,
+ .disable = udc_ep_disable,
+
+ .alloc_request = udc_alloc_request,
+ .free_request = udc_free_request,
+
+ .queue = udc_queue,
+ .dequeue = udc_dequeue,
+
+ .set_halt = udc_set_halt,
+ /* fifo ops not implemented */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Get frame counter (not implemented) */
+static int udc_get_frame(struct usb_gadget *gadget)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Initiates a remote wakeup */
+static int udc_remote_wakeup(struct udc *dev)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ DBG(dev, "UDC initiates remote wakeup\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_RES);
+ writel(tmp, &dev->regs->ctl);
+ tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
+ writel(tmp, &dev->regs->ctl);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+/* Remote wakeup gadget interface */
+static int udc_wakeup(struct usb_gadget *gadget)
+{
+ struct udc *dev;
+
+ if (!gadget)
+ return -EINVAL;
+ dev = container_of(gadget, struct udc, gadget);
+ udc_remote_wakeup(dev);
+
+ return 0;
+}
+
+static int amd5536_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int amd5536_udc_stop(struct usb_gadget *g);
+
+static const struct usb_gadget_ops udc_ops = {
+ .wakeup = udc_wakeup,
+ .get_frame = udc_get_frame,
+ .udc_start = amd5536_udc_start,
+ .udc_stop = amd5536_udc_stop,
+};
+
+/* Sets up endpoint parameters and adds endpoints to the gadget's linked list */
+static void make_ep_lists(struct udc *dev)
+{
+ /* make gadget ep lists */
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
+ &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
+ &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
+ &dev->gadget.ep_list);
+
+ /* fifo config */
+ dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
+ dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
+}
+
+/* Inits UDC context */
+void udc_basic_init(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "udc_basic_init()\n");
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* stop poll stall timer */
+ if (timer_pending(&udc_pollstall_timer))
+ mod_timer(&udc_pollstall_timer, jiffies - 1);
+ /* disable DMA */
+ tmp = readl(&dev->regs->ctl);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
+ writel(tmp, &dev->regs->ctl);
+
+ /* enable dynamic CSR programming */
+ tmp = readl(&dev->regs->cfg);
+ tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
+ /* set self powered */
+ tmp |= AMD_BIT(UDC_DEVCFG_SP);
+ /* set remote wakeupable */
+ tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
+ writel(tmp, &dev->regs->cfg);
+
+ make_ep_lists(dev);
+
+ dev->data_ep_enabled = 0;
+ dev->data_ep_queued = 0;
+}
+EXPORT_SYMBOL_GPL(udc_basic_init);
+
+/* init registers at driver load time */
+static int startup_registers(struct udc *dev)
+{
+ u32 tmp;
+
+ /* init controller by soft reset */
+ udc_soft_reset(dev);
+
+ /* mask not needed interrupts */
+ udc_mask_unused_interrupts(dev);
+
+ /* put into initial config */
+ udc_basic_init(dev);
+ /* link up all endpoints */
+ udc_setup_endpoints(dev);
+
+ /* program speed */
+ tmp = readl(&dev->regs->cfg);
+ if (use_fullspeed)
+ tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
+ else
+ tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
+ writel(tmp, &dev->regs->cfg);
+
+ return 0;
+}
+
+/* Sets initial endpoint parameters */
+static void udc_setup_endpoints(struct udc *dev)
+{
+ struct udc_ep *ep;
+ u32 tmp;
+ u32 reg;
+
+ DBG(dev, "udc_setup_endpoints()\n");
+
+ /* read enum speed */
+ tmp = readl(&dev->regs->sts);
+ tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
+ if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
+ dev->gadget.speed = USB_SPEED_HIGH;
+ else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
+ dev->gadget.speed = USB_SPEED_FULL;
+
+ /* set basic ep parameters */
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
+ ep = &dev->ep[tmp];
+ ep->dev = dev;
+ ep->ep.name = ep_info[tmp].name;
+ ep->ep.caps = ep_info[tmp].caps;
+ ep->num = tmp;
+ /* txfifo size is calculated at enable time */
+ ep->txfifo = dev->txfifo;
+
+ /* fifo size */
+ if (tmp < UDC_EPIN_NUM) {
+ ep->fifo_depth = UDC_TXFIFO_SIZE;
+ ep->in = 1;
+ } else {
+ ep->fifo_depth = UDC_RXFIFO_SIZE;
+ ep->in = 0;
+
+ }
+ ep->regs = &dev->ep_regs[tmp];
+ /*
+ * ep will be reset only if ep was not enabled before to avoid
+ * disabling ep interrupts when ENUM interrupt occurs but ep is
+ * not enabled by gadget driver
+ */
+ if (!ep->ep.desc)
+ ep_init(dev->regs, ep);
+
+ if (use_dma) {
+ /*
+ * ep->dma is not really used, just to indicate that
+ * DMA is active: remove this
+ * dma regs = dev control regs
+ */
+ ep->dma = &dev->regs->ctl;
+
+ /* nak OUT endpoints until enable - not for ep0 */
+ if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
+ && tmp > UDC_EPIN_NUM) {
+ /* set NAK */
+ reg = readl(&dev->ep[tmp].regs->ctl);
+ reg |= AMD_BIT(UDC_EPCTL_SNAK);
+ writel(reg, &dev->ep[tmp].regs->ctl);
+ dev->ep[tmp].naking = 1;
+
+ }
+ }
+ }
+ /* EP0 max packet */
+ if (dev->gadget.speed == USB_SPEED_FULL) {
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
+ UDC_FS_EP0IN_MAX_PKT_SIZE);
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
+ UDC_FS_EP0OUT_MAX_PKT_SIZE);
+ } else if (dev->gadget.speed == USB_SPEED_HIGH) {
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
+ UDC_EP0IN_MAX_PKT_SIZE);
+ usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
+ UDC_EP0OUT_MAX_PKT_SIZE);
+ }
+
+ /*
+ * with suspend bug workaround, ep0 params for gadget driver
+ * are set at gadget driver bind() call
+ */
+ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
+ dev->ep[UDC_EP0IN_IX].halted = 0;
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+
+ /* init cfg/alt/int */
+ dev->cur_config = 0;
+ dev->cur_intf = 0;
+ dev->cur_alt = 0;
+}
+
+/* Bringup after a Connect event: initial setup so ep0 events can be handled */
+static void usb_connect(struct udc *dev)
+{
+ /* Return if already connected */
+ if (dev->connected)
+ return;
+
+ dev_info(dev->dev, "USB Connect\n");
+
+ dev->connected = 1;
+
+ /* put into initial config */
+ udc_basic_init(dev);
+
+ /* enable device setup interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+}
+
+/*
+ * Calls the gadget driver with a disconnect event, resets the UDC and
+ * does the initial bringup so it is ready for ep0 events again
+ */
+static void usb_disconnect(struct udc *dev)
+{
+ u32 tmp;
+
+ /* Return if already disconnected */
+ if (!dev->connected)
+ return;
+
+ dev_info(dev->dev, "USB Disconnect\n");
+
+ dev->connected = 0;
+
+ /* mask interrupts */
+ udc_mask_unused_interrupts(dev);
+
+ if (dev->driver) {
+ spin_unlock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+
+ /* empty queues */
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
+ empty_req_queue(&dev->ep[tmp]);
+ }
+
+ /* disable ep0 */
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+ if (!soft_reset_occured) {
+ /* init controller by soft reset */
+ udc_soft_reset(dev);
+ soft_reset_occured++;
+ }
+
+ /* re-enable dev interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+ /* back to full speed ? */
+ if (use_fullspeed) {
+ tmp = readl(&dev->regs->cfg);
+ tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
+ writel(tmp, &dev->regs->cfg);
+ }
+}
+
+/* Reset the UDC core */
+static void udc_soft_reset(struct udc *dev)
+{
+ unsigned long flags;
+
+ DBG(dev, "Soft reset\n");
+ /*
+ * ack any possibly pending interrupts first, because their
+ * status is lost after soft reset;
+ * ep interrupt status first
+ */
+ writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
+ /* device int. status reset */
+ writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
+
+ /* Don't do this for Broadcom UDC since this is a reserved
+ * bit.
+ */
+ if (dev->chiprev != UDC_BCM_REV) {
+ spin_lock_irqsave(&udc_irq_spinlock, flags);
+ writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+ readl(&dev->regs->cfg);
+ spin_unlock_irqrestore(&udc_irq_spinlock, flags);
+ }
+}
+
+/* RDE timer callback to set RDE bit */
+static void udc_timer_function(struct timer_list *unused)
+{
+ u32 tmp;
+
+ spin_lock_irq(&udc_irq_spinlock);
+
+ if (set_rde > 0) {
+ /*
+ * conditionally open the fifo: only if it was found
+ * filled on the last timer call
+ */
+ if (set_rde > 1) {
+ /* set RDE to receive setup data */
+ tmp = readl(&udc->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_RDE);
+ writel(tmp, &udc->regs->ctl);
+ set_rde = -1;
+ } else if (readl(&udc->regs->sts)
+ & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
+ /*
+ * if fifo empty setup polling, do not just
+ * open the fifo
+ */
+ udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
+ if (!stop_timer)
+ add_timer(&udc_timer);
+ } else {
+ /*
+ * fifo contains data now: set up the timer so that the
+ * fifo is opened when it expires, to be able to receive
+ * setup packets. When data packets get queued by the
+ * gadget layer, the timer is forced to expire with
+ * set_rde = 0 (RDE is set in udc_queue())
+ */
+ set_rde++;
+ /* debug: lhadmot_timer_start = 221070 */
+ udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
+ if (!stop_timer)
+ add_timer(&udc_timer);
+ }
+
+ } else
+ set_rde = -1; /* RDE was set by udc_queue() */
+ spin_unlock_irq(&udc_irq_spinlock);
+ if (stop_timer)
+ complete(&on_exit);
+}
+
+/* Handle halt state, used in stall poll timer */
+static void udc_handle_halt_state(struct udc_ep *ep)
+{
+ u32 tmp;
+ /* keep stall set for as long as the ep is halted */
+ if (ep->halted == 1) {
+ tmp = readl(&ep->regs->ctl);
+ /* STALL cleared ? */
+ if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
+ /*
+ * FIXME: MSC spec requires that stall remains
+ * even on receiving CLEAR_FEATURE HALT. So
+ * we would set STALL again here to be compliant.
+ * But with current mass storage drivers this does
+ * not work (would produce endless host retries).
+ * So we clear halt on CLEAR_FEATURE.
+ *
+ DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);*/
+
+ /* clear NAK by writing CNAK */
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->halted = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+ }
+}
+
+/* Stall timer callback to poll S bit and set it again after */
+static void udc_pollstall_timer_function(struct timer_list *unused)
+{
+ struct udc_ep *ep;
+ int halted = 0;
+
+ spin_lock_irq(&udc_stall_spinlock);
+ /*
+ * only one IN and one OUT endpoint are handled;
+ * IN poll stall
+ */
+ ep = &udc->ep[UDC_EPIN_IX];
+ udc_handle_halt_state(ep);
+ if (ep->halted)
+ halted = 1;
+ /* OUT poll stall */
+ ep = &udc->ep[UDC_EPOUT_IX];
+ udc_handle_halt_state(ep);
+ if (ep->halted)
+ halted = 1;
+
+ /* setup timer again when still halted */
+ if (!stop_pollstall_timer && halted) {
+ udc_pollstall_timer.expires = jiffies +
+ HZ * UDC_POLLSTALL_TIMER_USECONDS
+ / (1000 * 1000);
+ add_timer(&udc_pollstall_timer);
+ }
+ spin_unlock_irq(&udc_stall_spinlock);
+
+ if (stop_pollstall_timer)
+ complete(&on_pollstall_exit);
+}
+
+/* Inits endpoint 0 so that SETUP packets are processed */
+static void activate_control_endpoints(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "activate_control_endpoints\n");
+
+ /* flush fifo */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_F);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+
+ /* set ep0 directions */
+ dev->ep[UDC_EP0IN_IX].in = 1;
+ dev->ep[UDC_EP0OUT_IX].in = 0;
+
+ /* set buffer size (tx fifo entries) of EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
+ UDC_EPIN_BUFF_SIZE);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
+ UDC_EPIN_BUFF_SIZE);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
+
+ /* set max packet size of EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
+
+ /* set max packet size of EP0_OUT */
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
+
+ /* set max packet size of EP0 in UDC CSR */
+ tmp = readl(&dev->csr->ne[0]);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
+ UDC_CSR_NE_MAX_PKT);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
+ UDC_CSR_NE_MAX_PKT);
+ writel(tmp, &dev->csr->ne[0]);
+
+ if (use_dma) {
+ dev->ep[UDC_EP0OUT_IX].td->status |=
+ AMD_BIT(UDC_DMA_OUT_STS_L);
+ /* write dma desc address */
+ writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
+ &dev->ep[UDC_EP0OUT_IX].regs->subptr);
+ writel(dev->ep[UDC_EP0OUT_IX].td_phys,
+ &dev->ep[UDC_EP0OUT_IX].regs->desptr);
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* stop pollstall timer */
+ if (timer_pending(&udc_pollstall_timer))
+ mod_timer(&udc_pollstall_timer, jiffies - 1);
+ /* enable DMA */
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_MODE)
+ | AMD_BIT(UDC_DEVCTL_RDE)
+ | AMD_BIT(UDC_DEVCTL_TDE);
+ if (use_dma_bufferfill_mode)
+ tmp |= AMD_BIT(UDC_DEVCTL_BF);
+ else if (use_dma_ppb_du)
+ tmp |= AMD_BIT(UDC_DEVCTL_DU);
+ writel(tmp, &dev->regs->ctl);
+ }
+
+ /* clear NAK by writing CNAK for EP0IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
+
+ /* clear NAK by writing CNAK for EP0OUT */
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ dev->ep[UDC_EP0OUT_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
+}
+
+/* Make endpoint 0 ready for control traffic */
+static int setup_ep0(struct udc *dev)
+{
+ activate_control_endpoints(dev);
+ /* enable ep0 interrupts */
+ udc_enable_ep0_interrupts(dev);
+ /* enable device setup interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+
+ return 0;
+}
+
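+/*
+ * A reminder on the register helpers used throughout (defined in
+ * amd5536udc.h): AMD_BIT() builds the single-bit mask for a named bit,
+ * AMD_GETBITS() extracts a named bit field from a register value and
+ * AMD_ADDBITS() inserts a value into such a field.
+ */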
+/* Called by gadget driver to register itself */
+static int amd5536_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct udc *dev = to_amd5536_udc(g);
+ u32 tmp;
+
+ dev->driver = driver;
+
+ /* Some gadget drivers use both ep0 directions.
+ * NOTE: to gadget driver, ep0 is just one endpoint...
+ */
+ dev->ep[UDC_EP0OUT_IX].ep.driver_data =
+ dev->ep[UDC_EP0IN_IX].ep.driver_data;
+
+ /* get ready for ep0 traffic */
+ setup_ep0(dev);
+
+	/* clear SD (soft disconnect) to appear on the bus */
+ tmp = readl(&dev->regs->ctl);
+ tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
+ writel(tmp, &dev->regs->ctl);
+
+ usb_connect(dev);
+
+ return 0;
+}
+
+/* shutdown requests and disconnect from gadget */
+static void
+shutdown(struct udc *dev, struct usb_gadget_driver *driver)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ int tmp;
+
+ /* empty queues and init hardware */
+ udc_basic_init(dev);
+
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
+ empty_req_queue(&dev->ep[tmp]);
+
+ udc_setup_endpoints(dev);
+}
+
+/* Called by gadget driver to unregister itself */
+static int amd5536_udc_stop(struct usb_gadget *g)
+{
+ struct udc *dev = to_amd5536_udc(g);
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ udc_mask_unused_interrupts(dev);
+ shutdown(dev, NULL);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev->driver = NULL;
+
+	/* set SD (soft disconnect) */
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_SD);
+ writel(tmp, &dev->regs->ctl);
+
+ return 0;
+}
+
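+/*
+ * cnak_pending is a bitmap of endpoints whose CNAK write had to be
+ * deferred because the RX fifo still held data; the helper below drains
+ * that bitmap once the callers observe UDC_DEVSTS_RXFIFO_EMPTY.
+ */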
+/* Clear pending NAK bits */
+static void udc_process_cnak_queue(struct udc *dev)
+{
+ u32 tmp;
+ u32 reg;
+
+ /* check epin's */
+ DBG(dev, "CNAK pending queue processing\n");
+ for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
+ if (cnak_pending & (1 << tmp)) {
+ DBG(dev, "CNAK pending for ep%d\n", tmp);
+ /* clear NAK by writing CNAK */
+ reg = readl(&dev->ep[tmp].regs->ctl);
+ reg |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(reg, &dev->ep[tmp].regs->ctl);
+ dev->ep[tmp].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
+ }
+ }
+ /* ... and ep0out */
+ if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
+ DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
+ /* clear NAK by writing CNAK */
+ reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ reg |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ dev->ep[UDC_EP0OUT_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
+ dev->ep[UDC_EP0OUT_IX].num);
+ }
+}
+
+/* Enable RX DMA after a setup packet */
+static void udc_ep0_set_rde(struct udc *dev)
+{
+ if (use_dma) {
+		/*
+		 * only enable RX DMA when no data endpoint is enabled
+		 * or data is already queued
+		 */
+ if (!dev->data_ep_enabled || dev->data_ep_queued) {
+ udc_set_rde(dev);
+ } else {
+			/*
+			 * set up a timer for enabling RDE (so RX fifo DMA
+			 * is not enabled too early for data endpoints)
+			 */
+ if (set_rde != 0 && !timer_pending(&udc_timer)) {
+ udc_timer.expires =
+ jiffies + HZ/UDC_RDE_TIMER_DIV;
+ set_rde = 1;
+ if (!stop_timer)
+ add_timer(&udc_timer);
+ }
+ }
+ }
+}
+
+
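+/*
+ * In the endpoint status handling below, BNA ("buffer not available")
+ * means DMA found no HOST_READY descriptor to work on, and HE flags a
+ * hardware (AHB bus) error during a transfer. This is a reading of the
+ * bit names, not authoritative documentation.
+ */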
+/* Interrupt handler for data OUT traffic */
+static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ unsigned int count;
+ struct udc_data_dma *td = NULL;
+ unsigned dma_done;
+
+ VDBG(dev, "ep%d irq\n", ep_ix);
+ ep = &dev->ep[ep_ix];
+
+ tmp = readl(&ep->regs->sts);
+ if (use_dma) {
+ /* BNA event ? */
+ if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
+ DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
+ ep->num, readl(&ep->regs->desptr));
+ /* clear BNA */
+ writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
+ if (!ep->cancel_transfer)
+ ep->bna_occurred = 1;
+ else
+ ep->cancel_transfer = 0;
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+ }
+ /* HE event ? */
+ if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
+ dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
+
+ /* clear HE */
+ writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+
+ if (!list_empty(&ep->queue)) {
+
+ /* next request */
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ } else {
+ req = NULL;
+ udc_rxfifo_pending = 1;
+ }
+ VDBG(dev, "req = %p\n", req);
+ /* fifo mode */
+ if (!use_dma) {
+
+ /* read fifo */
+ if (req && udc_rxfifo_read(ep, req)) {
+ ret_val = IRQ_HANDLED;
+
+ /* finish */
+ complete_req(ep, req, 0);
+ /* next request */
+ if (!list_empty(&ep->queue) && !ep->halted) {
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ } else
+ req = NULL;
+ }
+
+ /* DMA */
+ } else if (!ep->cancel_transfer && req) {
+ ret_val = IRQ_HANDLED;
+
+ /* check for DMA done */
+ if (!use_dma_ppb) {
+ dma_done = AMD_GETBITS(req->td_data->status,
+ UDC_DMA_OUT_STS_BS);
+ /* packet per buffer mode - rx bytes */
+ } else {
+ /*
+ * if BNA occurred then recover desc. from
+ * BNA dummy desc.
+ */
+ if (ep->bna_occurred) {
+ VDBG(dev, "Recover desc. from BNA dummy\n");
+ memcpy(req->td_data, ep->bna_dummy_req->td_data,
+ sizeof(struct udc_data_dma));
+ ep->bna_occurred = 0;
+ udc_init_bna_dummy(ep->req);
+ }
+ td = udc_get_last_dma_desc(req);
+ dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
+ }
+ if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
+ /* buffer fill mode - rx bytes */
+ if (!use_dma_ppb) {
+				/* number of received bytes */
+ count = AMD_GETBITS(req->td_data->status,
+ UDC_DMA_OUT_STS_RXBYTES);
+ VDBG(dev, "rx bytes=%u\n", count);
+ /* packet per buffer mode - rx bytes */
+ } else {
+ VDBG(dev, "req->td_data=%p\n", req->td_data);
+ VDBG(dev, "last desc = %p\n", td);
+				/* number of received bytes */
+ if (use_dma_ppb_du) {
+ /* every desc. counts bytes */
+ count = udc_get_ppbdu_rxbytes(req);
+ } else {
+ /* last desc. counts bytes */
+ count = AMD_GETBITS(td->status,
+ UDC_DMA_OUT_STS_RXBYTES);
+ if (!count && req->req.length
+ == UDC_DMA_MAXPACKET) {
+ /*
+ * on 64k packets the RXBYTES
+ * field is zero
+ */
+ count = UDC_DMA_MAXPACKET;
+ }
+ }
+ VDBG(dev, "last desc rx bytes=%u\n", count);
+ }
+
+ tmp = req->req.length - req->req.actual;
+ if (count > tmp) {
+ if ((tmp % ep->ep.maxpacket) != 0) {
+ DBG(dev, "%s: rx %db, space=%db\n",
+ ep->ep.name, count, tmp);
+ req->req.status = -EOVERFLOW;
+ }
+ count = tmp;
+ }
+ req->req.actual += count;
+ req->dma_going = 0;
+ /* complete request */
+ complete_req(ep, req, 0);
+
+ /* next request */
+ if (!list_empty(&ep->queue) && !ep->halted) {
+ req = list_entry(ep->queue.next,
+ struct udc_request,
+ queue);
+				/*
+				 * DMA may already have been started by
+				 * udc_queue() called from the gadget
+				 * driver's completion routine. This
+				 * happens when the queue holds only one
+				 * request.
+				 */
+ if (req->dma_going == 0) {
+ /* next dma */
+ if (prep_dma(ep, req, GFP_ATOMIC) != 0)
+ goto finished;
+ /* write desc pointer */
+ writel(req->td_phys,
+ &ep->regs->desptr);
+ req->dma_going = 1;
+ /* enable DMA */
+ udc_set_rde(dev);
+ }
+ } else {
+ /*
+ * implant BNA dummy descriptor to allow
+ * RXFIFO opening by RDE
+ */
+ if (ep->bna_dummy_req) {
+ /* write desc pointer */
+ writel(ep->bna_dummy_req->td_phys,
+ &ep->regs->desptr);
+ ep->bna_occurred = 0;
+ }
+
+				/*
+				 * schedule timer for setting RDE if queue
+				 * remains empty, to allow ep0 packets to
+				 * pass through
+				 */
+ if (set_rde != 0
+ && !timer_pending(&udc_timer)) {
+ udc_timer.expires =
+ jiffies
+ + HZ*UDC_RDE_TIMER_SECONDS;
+ set_rde = 1;
+ if (!stop_timer)
+ add_timer(&udc_timer);
+ }
+ if (ep->num != UDC_EP0OUT_IX)
+ dev->data_ep_queued = 0;
+ }
+
+ } else {
+			/*
+			 * RX DMA must be re-enabled for each desc. in PPBDU
+			 * mode and must be enabled in PPBNDU mode in case
+			 * of BNA
+			 */
+ udc_set_rde(dev);
+ }
+
+ } else if (ep->cancel_transfer) {
+ ret_val = IRQ_HANDLED;
+ ep->cancel_transfer = 0;
+ }
+
+ /* check pending CNAKS */
+ if (cnak_pending) {
+		/* process CNAKs only when the RX fifo is empty */
+ if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+ udc_process_cnak_queue(dev);
+ }
+
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
+finished:
+ return ret_val;
+}
+
+/* Interrupt handler for data IN traffic */
+static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ u32 epsts;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ struct udc_data_dma *td;
+ unsigned len;
+
+ ep = &dev->ep[ep_ix];
+
+ epsts = readl(&ep->regs->sts);
+ if (use_dma) {
+ /* BNA ? */
+ if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
+ dev_err(dev->dev,
+ "BNA ep%din occurred - DESPTR = %08lx\n",
+ ep->num,
+ (unsigned long) readl(&ep->regs->desptr));
+
+ /* clear BNA */
+ writel(epsts, &ep->regs->sts);
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+ }
+ /* HE event ? */
+ if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
+ dev_err(dev->dev,
+			"HE ep%din occurred - DESPTR = %08lx\n",
+ ep->num, (unsigned long) readl(&ep->regs->desptr));
+
+ /* clear HE */
+ writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+
+ /* DMA completion */
+ if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
+		VDBG(dev, "TDC set - completion\n");
+ ret_val = IRQ_HANDLED;
+ if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ /*
+ * length bytes transferred
+ * check dma done of last desc. in PPBDU mode
+ */
+ if (use_dma_ppb_du) {
+ td = udc_get_last_dma_desc(req);
+ if (td)
+ req->req.actual = req->req.length;
+ } else {
+ /* assume all bytes transferred */
+ req->req.actual = req->req.length;
+ }
+
+ if (req->req.actual == req->req.length) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ req->dma_going = 0;
+ /* further request available ? */
+ if (list_empty(&ep->queue)) {
+ /* disable interrupt */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp |= AMD_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
+ }
+ }
+ }
+ ep->cancel_transfer = 0;
+
+ }
+	/*
+	 * status reg has IN bit set and TDC not set: if TDC was handled,
+	 * IN must not be handled as well (UDC defect?)
+	 */
+ if ((epsts & AMD_BIT(UDC_EPSTS_IN))
+ && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
+ ret_val = IRQ_HANDLED;
+ if (!list_empty(&ep->queue)) {
+ /* next request */
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ /* FIFO mode */
+ if (!use_dma) {
+ /* write fifo */
+ udc_txfifo_write(ep, &req->req);
+ len = req->req.length - req->req.actual;
+ if (len > ep->ep.maxpacket)
+ len = ep->ep.maxpacket;
+ req->req.actual += len;
+ if (req->req.actual == req->req.length
+ || (len != ep->ep.maxpacket)) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ }
+ /* DMA */
+ } else if (req && !req->dma_going) {
+ VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
+ req, req->td_data);
+ if (req->td_data) {
+
+ req->dma_going = 1;
+
+ /*
+ * unset L bit of first desc.
+ * for chain
+ */
+ if (use_dma_ppb && req->req.length >
+ ep->ep.maxpacket) {
+ req->td_data->status &=
+ AMD_CLEAR_BIT(
+ UDC_DMA_IN_STS_L);
+ }
+
+ /* write desc pointer */
+ writel(req->td_phys, &ep->regs->desptr);
+
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(
+ req->td_data->status,
+ UDC_DMA_IN_STS_BS_HOST_READY,
+ UDC_DMA_IN_STS_BS);
+
+ /* set poll demand bit */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_P);
+ writel(tmp, &ep->regs->ctl);
+ }
+ }
+
+ } else if (!use_dma && ep->in) {
+		/* disable interrupt */
+		tmp = readl(&dev->regs->ep_irqmsk);
+		tmp |= AMD_BIT(ep->num);
+		writel(tmp, &dev->regs->ep_irqmsk);
+ }
+ }
+ /* clear status bits */
+ writel(epsts, &ep->regs->sts);
+
+finished:
+ return ret_val;
+
+}
+
+/* Interrupt handler for Control OUT traffic */
+static irqreturn_t udc_control_out_isr(struct udc *dev)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ int setup_supported;
+ u32 count;
+ int set = 0;
+ struct udc_ep *ep;
+ struct udc_ep *ep_tmp;
+
+ ep = &dev->ep[UDC_EP0OUT_IX];
+
+ /* clear irq */
+ writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
+
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
+ /* check BNA and clear if set */
+ if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
+ VDBG(dev, "ep0: BNA set\n");
+ writel(AMD_BIT(UDC_EPSTS_BNA),
+ &dev->ep[UDC_EP0OUT_IX].regs->sts);
+ ep->bna_occurred = 1;
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+
+	/* type of data: SETUP packet or zero-length DATA */
+ tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
+ VDBG(dev, "data_typ = %x\n", tmp);
+
+ /* setup data */
+ if (tmp == UDC_EPSTS_OUT_SETUP) {
+ ret_val = IRQ_HANDLED;
+
+ ep->dev->stall_ep0in = 0;
+ dev->waiting_zlp_ack_ep0in = 0;
+
+ /* set NAK for EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_SNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 1;
+ /* get setup data */
+ if (use_dma) {
+
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR,
+ &dev->ep[UDC_EP0OUT_IX].regs->sts);
+
+ setup_data.data[0] =
+ dev->ep[UDC_EP0OUT_IX].td_stp->data12;
+ setup_data.data[1] =
+ dev->ep[UDC_EP0OUT_IX].td_stp->data34;
+ /* set HOST READY */
+ dev->ep[UDC_EP0OUT_IX].td_stp->status =
+ UDC_DMA_STP_STS_BS_HOST_READY;
+ } else {
+ /* read fifo */
+ udc_rxfifo_read_dwords(dev, setup_data.data, 2);
+ }
+
+ /* determine direction of control data */
+ if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
+ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
+ /* enable RDE */
+ udc_ep0_set_rde(dev);
+ set = 0;
+ } else {
+ dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
+ /*
+ * implant BNA dummy descriptor to allow RXFIFO opening
+ * by RDE
+ */
+ if (ep->bna_dummy_req) {
+ /* write desc pointer */
+ writel(ep->bna_dummy_req->td_phys,
+ &dev->ep[UDC_EP0OUT_IX].regs->desptr);
+ ep->bna_occurred = 0;
+ }
+
+ set = 1;
+ dev->ep[UDC_EP0OUT_IX].naking = 1;
+			/*
+			 * set up a timer for enabling RDE (so RX fifo DMA
+			 * is not enabled too early for the data stage)
+			 */
+ set_rde = 1;
+ if (!timer_pending(&udc_timer)) {
+ udc_timer.expires = jiffies +
+ HZ/UDC_RDE_TIMER_DIV;
+ if (!stop_timer)
+ add_timer(&udc_timer);
+ }
+ }
+
+ /*
+ * mass storage reset must be processed here because
+ * next packet may be a CLEAR_FEATURE HALT which would not
+ * clear the stall bit when no STALL handshake was received
+ * before (autostall can cause this)
+ */
+ if (setup_data.data[0] == UDC_MSCRES_DWORD0
+ && setup_data.data[1] == UDC_MSCRES_DWORD1) {
+ DBG(dev, "MSC Reset\n");
+ /*
+ * clear stall bits
+ * only one IN and OUT endpoints are handled
+ */
+ ep_tmp = &udc->ep[UDC_EPIN_IX];
+ udc_set_halt(&ep_tmp->ep, 0);
+ ep_tmp = &udc->ep[UDC_EPOUT_IX];
+ udc_set_halt(&ep_tmp->ep, 0);
+ }
+
+ /* call gadget with setup data received */
+ spin_unlock(&dev->lock);
+ setup_supported = dev->driver->setup(&dev->gadget,
+ &setup_data.request);
+ spin_lock(&dev->lock);
+
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ /* ep0 in returns data (not zlp) on IN phase */
+ if (setup_supported >= 0 && setup_supported <
+ UDC_EP0IN_MAXPACKET) {
+ /* clear NAK by writing CNAK in EP0_IN */
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
+
+ /* if unsupported request then stall */
+ } else if (setup_supported < 0) {
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ } else
+ dev->waiting_zlp_ack_ep0in = 1;
+
+
+ /* clear NAK by writing CNAK in EP0_OUT */
+ if (!set) {
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ dev->ep[UDC_EP0OUT_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
+ }
+
+ if (!use_dma) {
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR,
+ &dev->ep[UDC_EP0OUT_IX].regs->sts);
+ }
+
+	/* zero-byte data packet */
+ } else if (tmp == UDC_EPSTS_OUT_DATA) {
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
+
+		/* get setup data: zero-length packet only */
+ if (use_dma) {
+ /* no req if 0 packet, just reactivate */
+ if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
+ VDBG(dev, "ZLP\n");
+
+ /* set HOST READY */
+ dev->ep[UDC_EP0OUT_IX].td->status =
+ AMD_ADDBITS(
+ dev->ep[UDC_EP0OUT_IX].td->status,
+ UDC_DMA_OUT_STS_BS_HOST_READY,
+ UDC_DMA_OUT_STS_BS);
+ /* enable RDE */
+ udc_ep0_set_rde(dev);
+ ret_val = IRQ_HANDLED;
+
+ } else {
+ /* control write */
+ ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
+ /* re-program desc. pointer for possible ZLPs */
+ writel(dev->ep[UDC_EP0OUT_IX].td_phys,
+ &dev->ep[UDC_EP0OUT_IX].regs->desptr);
+ /* enable RDE */
+ udc_ep0_set_rde(dev);
+ }
+ } else {
+
+			/* number of received bytes */
+ count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
+ count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
+			/* OUT data in fifo mode does not work, force count to 0 */
+ count = 0;
+
+ /* 0 packet or real data ? */
+ if (count != 0) {
+ ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
+ } else {
+ /* dummy read confirm */
+ readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
+ ret_val = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* check pending CNAKS */
+ if (cnak_pending) {
+		/* process CNAKs only when the RX fifo is empty */
+ if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+ udc_process_cnak_queue(dev);
+ }
+
+finished:
+ return ret_val;
+}
+
+/* Interrupt handler for Control IN traffic */
+static irqreturn_t udc_control_in_isr(struct udc *dev)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ unsigned len;
+
+ ep = &dev->ep[UDC_EP0IN_IX];
+
+ /* clear irq */
+ writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
+
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
+ /* DMA completion */
+ if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
+ VDBG(dev, "isr: TDC clear\n");
+ ret_val = IRQ_HANDLED;
+
+ /* clear TDC bit */
+ writel(AMD_BIT(UDC_EPSTS_TDC),
+ &dev->ep[UDC_EP0IN_IX].regs->sts);
+
+ /* status reg has IN bit set ? */
+ } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
+ ret_val = IRQ_HANDLED;
+
+ if (ep->dma) {
+ /* clear IN bit */
+ writel(AMD_BIT(UDC_EPSTS_IN),
+ &dev->ep[UDC_EP0IN_IX].regs->sts);
+ }
+ if (dev->stall_ep0in) {
+ DBG(dev, "stall ep0in\n");
+ /* halt ep0in */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ } else {
+ if (!list_empty(&ep->queue)) {
+ /* next request */
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+
+ if (ep->dma) {
+ /* write desc pointer */
+ writel(req->td_phys, &ep->regs->desptr);
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(
+ req->td_data->status,
+ UDC_DMA_STP_STS_BS_HOST_READY,
+ UDC_DMA_STP_STS_BS);
+
+ /* set poll demand bit */
+				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+				tmp |= AMD_BIT(UDC_EPCTL_P);
+				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+
+ /* all bytes will be transferred */
+ req->req.actual = req->req.length;
+
+ /* complete req */
+ complete_req(ep, req, 0);
+
+ } else {
+ /* write fifo */
+ udc_txfifo_write(ep, &req->req);
+
+				/* length bytes transferred */
+ len = req->req.length - req->req.actual;
+ if (len > ep->ep.maxpacket)
+ len = ep->ep.maxpacket;
+
+ req->req.actual += len;
+ if (req->req.actual == req->req.length
+ || (len != ep->ep.maxpacket)) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ }
+ }
+
+ }
+ }
+ ep->halted = 0;
+ dev->stall_ep0in = 0;
+ if (!ep->dma) {
+ /* clear IN bit */
+ writel(AMD_BIT(UDC_EPSTS_IN),
+ &dev->ep[UDC_EP0IN_IX].regs->sts);
+ }
+ }
+
+ return ret_val;
+}
+
+
+/* Interrupt handler for global device events */
+static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ u32 cfg;
+ struct udc_ep *ep;
+ u16 i;
+ u8 udc_csr_epix;
+
+ /* SET_CONFIG irq ? */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
+ ret_val = IRQ_HANDLED;
+
+ /* read config value */
+ tmp = readl(&dev->regs->sts);
+ cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
+ DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
+ dev->cur_config = cfg;
+ dev->set_cfg_not_acked = 1;
+
+ /* make usb request for gadget driver */
+		memset(&setup_data, 0, sizeof(union udc_setup_data));
+ setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
+ setup_data.request.wValue = cpu_to_le16(dev->cur_config);
+
+		/* program the NE registers */
+ for (i = 0; i < UDC_EP_NUM; i++) {
+ ep = &dev->ep[i];
+ if (ep->in) {
+
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num;
+
+
+ /* OUT ep */
+ } else {
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+ }
+
+ tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ /* ep cfg */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
+ UDC_CSR_NE_CFG);
+ /* write reg */
+ writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+ /* clear stall bits */
+ ep->halted = 0;
+ tmp = readl(&ep->regs->ctl);
+ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ }
+ /* call gadget zero with setup data received */
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
+ spin_lock(&dev->lock);
+
+ } /* SET_INTERFACE ? */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
+ ret_val = IRQ_HANDLED;
+
+ dev->set_cfg_not_acked = 1;
+ /* read interface and alt setting values */
+ tmp = readl(&dev->regs->sts);
+ dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
+ dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
+
+ /* make usb request for gadget driver */
+		memset(&setup_data, 0, sizeof(union udc_setup_data));
+ setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
+ setup_data.request.bRequestType = USB_RECIP_INTERFACE;
+ setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
+ setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
+
+ DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
+ dev->cur_alt, dev->cur_intf);
+
+		/* program the NE registers */
+ for (i = 0; i < UDC_EP_NUM; i++) {
+ ep = &dev->ep[i];
+ if (ep->in) {
+
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num;
+
+
+ /* OUT ep */
+ } else {
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+ }
+
+ /* UDC CSR reg */
+ /* set ep values */
+ tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ /* ep interface */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
+ UDC_CSR_NE_INTF);
+ /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
+ /* ep alt */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
+ UDC_CSR_NE_ALT);
+ /* write reg */
+ writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+ /* clear stall bits */
+ ep->halted = 0;
+ tmp = readl(&ep->regs->ctl);
+ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ }
+
+ /* call gadget zero with setup data received */
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
+ spin_lock(&dev->lock);
+
+ } /* USB reset */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
+ DBG(dev, "USB Reset interrupt\n");
+ ret_val = IRQ_HANDLED;
+
+ /* allow soft reset when suspend occurs */
+ soft_reset_occured = 0;
+
+ dev->waiting_zlp_ack_ep0in = 0;
+ dev->set_cfg_not_acked = 0;
+
+ /* mask not needed interrupts */
+ udc_mask_unused_interrupts(dev);
+
+ /* call gadget to resume and reset configs etc. */
+ spin_unlock(&dev->lock);
+ if (dev->sys_suspended && dev->driver->resume) {
+ dev->driver->resume(&dev->gadget);
+ dev->sys_suspended = 0;
+ }
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ spin_lock(&dev->lock);
+
+ /* disable ep0 to empty req queue */
+ empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+ /* soft reset when rxfifo not empty */
+ tmp = readl(&dev->regs->sts);
+ if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+ && !soft_reset_after_usbreset_occured) {
+ udc_soft_reset(dev);
+ soft_reset_after_usbreset_occured++;
+ }
+
+ /*
+ * DMA reset to kill potential old DMA hw hang,
+ * POLL bit is already reset by ep_init() through
+ * disconnect()
+ */
+ DBG(dev, "DMA machine reset\n");
+ tmp = readl(&dev->regs->cfg);
+ writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
+ writel(tmp, &dev->regs->cfg);
+
+ /* put into initial config */
+ udc_basic_init(dev);
+
+ /* enable device setup interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+
+ /* enable suspend interrupt */
+ tmp = readl(&dev->regs->irqmsk);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
+ writel(tmp, &dev->regs->irqmsk);
+
+ } /* USB suspend */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
+ DBG(dev, "USB Suspend interrupt\n");
+ ret_val = IRQ_HANDLED;
+ if (dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+ dev->sys_suspended = 1;
+ dev->driver->suspend(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ } /* new speed ? */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
+ DBG(dev, "ENUM interrupt\n");
+ ret_val = IRQ_HANDLED;
+ soft_reset_after_usbreset_occured = 0;
+
+ /* disable ep0 to empty req queue */
+ empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+ /* link up all endpoints */
+ udc_setup_endpoints(dev);
+ dev_info(dev->dev, "Connect: %s\n",
+ usb_speed_string(dev->gadget.speed));
+
+ /* init ep 0 */
+ activate_control_endpoints(dev);
+
+ /* enable ep0 interrupts */
+ udc_enable_ep0_interrupts(dev);
+ }
+ /* session valid change interrupt */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
+ DBG(dev, "USB SVC interrupt\n");
+ ret_val = IRQ_HANDLED;
+
+ /* check that session is not valid to detect disconnect */
+ tmp = readl(&dev->regs->sts);
+ if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
+ /* disable suspend interrupt */
+ tmp = readl(&dev->regs->irqmsk);
+ tmp |= AMD_BIT(UDC_DEVINT_US);
+ writel(tmp, &dev->regs->irqmsk);
+ DBG(dev, "USB Disconnect (session valid low)\n");
+ /* cleanup on disconnect */
+ usb_disconnect(udc);
+ }
+
+ }
+
+ return ret_val;
+}
+
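+/*
+ * Note on the dispatch below: in ep_irqsts the IN endpoints occupy the
+ * low bits and the OUT endpoints the bits above UDC_EPIN_NUM, which is
+ * why indices above UDC_EPIN_NUM are routed to the OUT handler.
+ */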
+/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
+irqreturn_t udc_irq(int irq, void *pdev)
+{
+ struct udc *dev = pdev;
+ u32 reg;
+ u16 i;
+ u32 ep_irq;
+ irqreturn_t ret_val = IRQ_NONE;
+
+ spin_lock(&dev->lock);
+
+ /* check for ep irq */
+ reg = readl(&dev->regs->ep_irqsts);
+ if (reg) {
+ if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
+ ret_val |= udc_control_out_isr(dev);
+ if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
+ ret_val |= udc_control_in_isr(dev);
+
+ /*
+ * data endpoint
+ * iterate ep's
+ */
+ for (i = 1; i < UDC_EP_NUM; i++) {
+ ep_irq = 1 << i;
+ if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
+ continue;
+
+ /* clear irq status */
+ writel(ep_irq, &dev->regs->ep_irqsts);
+
+ /* irq for out ep ? */
+ if (i > UDC_EPIN_NUM)
+ ret_val |= udc_data_out_isr(dev, i);
+ else
+ ret_val |= udc_data_in_isr(dev, i);
+ }
+
+ }
+
+
+ /* check for dev irq */
+ reg = readl(&dev->regs->irqsts);
+ if (reg) {
+ /* clear irq */
+ writel(reg, &dev->regs->irqsts);
+ ret_val |= udc_dev_isr(dev, reg);
+ }
+
+
+ spin_unlock(&dev->lock);
+ return ret_val;
+}
+EXPORT_SYMBOL_GPL(udc_irq);
+
+/* Tears down device */
+void gadget_release(struct device *pdev)
+{
+ struct amd5536udc *dev = dev_get_drvdata(pdev);
+ kfree(dev);
+}
+EXPORT_SYMBOL_GPL(gadget_release);
+
+/* Cleanup on device remove */
+void udc_remove(struct udc *dev)
+{
+ /* remove timer */
+ stop_timer++;
+ if (timer_pending(&udc_timer))
+ wait_for_completion(&on_exit);
+ del_timer_sync(&udc_timer);
+ /* remove pollstall timer */
+ stop_pollstall_timer++;
+ if (timer_pending(&udc_pollstall_timer))
+ wait_for_completion(&on_pollstall_exit);
+ del_timer_sync(&udc_pollstall_timer);
+ udc = NULL;
+}
+EXPORT_SYMBOL_GPL(udc_remove);
+
+/* free all the dma pools */
+void free_dma_pools(struct udc *dev)
+{
+ dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
+ dev->ep[UDC_EP0OUT_IX].td_phys);
+ dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
+ dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+ dma_pool_destroy(dev->stp_requests);
+ dma_pool_destroy(dev->data_requests);
+}
+EXPORT_SYMBOL_GPL(free_dma_pools);
+
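+/*
+ * The driver's DMA mode flags use_dma_ppb (packet-per-buffer, with
+ * use_dma_ppb_du as its descriptor-update variant) and
+ * use_dma_bufferfill_mode are mutually exclusive; the check below makes
+ * buffer-fill mode the fallback whenever packet-per-buffer is disabled.
+ */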
+/* create dma pools on init */
+int init_dma_pools(struct udc *dev)
+{
+ struct udc_stp_dma *td_stp;
+ struct udc_data_dma *td_data;
+ int retval;
+
+	/* ensure a consistent DMA mode setting */
+ if (use_dma_ppb) {
+ use_dma_bufferfill_mode = 0;
+ } else {
+ use_dma_ppb_du = 0;
+ use_dma_bufferfill_mode = 1;
+ }
+
+ /* DMA setup */
+ dev->data_requests = dma_pool_create("data_requests", dev->dev,
+ sizeof(struct udc_data_dma), 0, 0);
+ if (!dev->data_requests) {
+ DBG(dev, "can't get request data pool\n");
+ return -ENOMEM;
+ }
+
+ /* EP0 in dma regs = dev control regs */
+ dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
+
+ /* dma desc for setup data */
+ dev->stp_requests = dma_pool_create("setup requests", dev->dev,
+ sizeof(struct udc_stp_dma), 0, 0);
+ if (!dev->stp_requests) {
+ DBG(dev, "can't get stp request pool\n");
+ retval = -ENOMEM;
+ goto err_create_dma_pool;
+ }
+ /* setup */
+ td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+ &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+ if (!td_stp) {
+ retval = -ENOMEM;
+ goto err_alloc_dma;
+ }
+ dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
+
+	/* dma desc. for (zero-length) data packets */
+ td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+ &dev->ep[UDC_EP0OUT_IX].td_phys);
+ if (!td_data) {
+ retval = -ENOMEM;
+ goto err_alloc_phys;
+ }
+ dev->ep[UDC_EP0OUT_IX].td = td_data;
+ return 0;
+
+err_alloc_phys:
+ dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
+ dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+err_alloc_dma:
+ dma_pool_destroy(dev->stp_requests);
+ dev->stp_requests = NULL;
+err_create_dma_pool:
+ dma_pool_destroy(dev->data_requests);
+ dev->data_requests = NULL;
+ return retval;
+}
+EXPORT_SYMBOL_GPL(init_dma_pools);
+
+/* general probe */
+int udc_probe(struct udc *dev)
+{
+ char tmp[128];
+ u32 reg;
+ int retval;
+
+ /* device struct setup */
+ dev->gadget.ops = &udc_ops;
+
+ dev_set_name(&dev->gadget.dev, "gadget");
+ dev->gadget.name = name;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
+
+ /* init registers, interrupts, ... */
+ startup_registers(dev);
+
+ dev_info(dev->dev, "%s\n", mod_desc);
+
+ snprintf(tmp, sizeof(tmp), "%d", dev->irq);
+
+	/* Print this device info for AMD chips only */
+ if (dev->chiprev == UDC_HSA0_REV ||
+ dev->chiprev == UDC_HSB1_REV) {
+		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
+ tmp, dev->phys_addr, dev->chiprev,
+ (dev->chiprev == UDC_HSA0_REV) ?
+ "A0" : "B1");
+ strcpy(tmp, UDC_DRIVER_VERSION_STRING);
+ if (dev->chiprev == UDC_HSA0_REV) {
+ dev_err(dev->dev, "chip revision is A0; too old\n");
+ retval = -ENODEV;
+ goto finished;
+ }
+		dev_info(dev->dev,
+			 "driver version: %s (for Geode5536 B1)\n", tmp);
+ }
+
+ udc = dev;
+
+ retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
+ gadget_release);
+ if (retval)
+ goto finished;
+
+ /* timer init */
+ timer_setup(&udc_timer, udc_timer_function, 0);
+ timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);
+
+	/* set SD (soft disconnect) until a gadget driver is bound */
+ reg = readl(&dev->regs->ctl);
+ reg |= AMD_BIT(UDC_DEVCTL_SD);
+ writel(reg, &dev->regs->ctl);
+
+ /* print dev register info */
+ print_regs(dev);
+
+ return 0;
+
+finished:
+ return retval;
+}
+EXPORT_SYMBOL_GPL(udc_probe);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Thomas Dahlmann");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
new file mode 100644
index 000000000..8bbb89c80
--- /dev/null
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * snps_udc_plat.c - Synopsys UDC Platform Driver
+ *
+ * Copyright (C) 2016 Broadcom
+ */
+
+#include <linux/extcon.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include "amd5536udc.h"
+
+/* description */
+#define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver"
+
+static void start_udc(struct udc *udc)
+{
+ if (udc->driver) {
+ dev_info(udc->dev, "Connecting...\n");
+ udc_enable_dev_setup_interrupts(udc);
+ udc_basic_init(udc);
+ udc->connected = 1;
+ }
+}
+
+static void stop_udc(struct udc *udc)
+{
+ int tmp;
+ u32 reg;
+
+ spin_lock(&udc->lock);
+
+	/* Flush the receive fifo */
+ reg = readl(&udc->regs->ctl);
+ reg |= AMD_BIT(UDC_DEVCTL_SRX_FLUSH);
+ writel(reg, &udc->regs->ctl);
+
+ reg = readl(&udc->regs->ctl);
+ reg &= ~(AMD_BIT(UDC_DEVCTL_SRX_FLUSH));
+ writel(reg, &udc->regs->ctl);
+ dev_dbg(udc->dev, "ep rx queue flushed\n");
+
+	/* Mask interrupts. This matters all the more when the
+	 * UDC is connected to a DRD phy.
+	 */
+ udc_mask_unused_interrupts(udc);
+
+ /* Disconnect gadget driver */
+ if (udc->driver) {
+ spin_unlock(&udc->lock);
+ udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+
+ /* empty queues */
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
+ empty_req_queue(&udc->ep[tmp]);
+ }
+ udc->connected = 0;
+
+ spin_unlock(&udc->lock);
+ dev_info(udc->dev, "Device disconnected\n");
+}
+
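+/*
+ * DRD role handling: the extcon notifier below records the new cable
+ * state in udc->conn_type and defers the actual start/stop of the
+ * controller to this worker, so the transition happens outside the
+ * notifier call chain.
+ */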
+static void udc_drd_work(struct work_struct *work)
+{
+ struct udc *udc;
+
+ udc = container_of(to_delayed_work(work),
+ struct udc, drd_work);
+
+ if (udc->conn_type) {
+ dev_dbg(udc->dev, "idle -> device\n");
+ start_udc(udc);
+ } else {
+ dev_dbg(udc->dev, "device -> idle\n");
+ stop_udc(udc);
+ }
+}
+
+static int usbd_connect_notify(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct udc *udc = container_of(self, struct udc, nb);
+
+ dev_dbg(udc->dev, "%s: event: %lu\n", __func__, event);
+
+ udc->conn_type = event;
+
+ schedule_delayed_work(&udc->drd_work, 0);
+
+ return NOTIFY_OK;
+}
+
+static int udc_plat_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct udc *udc;
+ int ret;
+
+ udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ spin_lock_init(&udc->lock);
+ udc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ udc->virt_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(udc->virt_addr))
+ return PTR_ERR(udc->virt_addr);
+
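+	/*
+	 * All register blocks of the core live in this single memory
+	 * resource, at the fixed offsets defined in amd5536udc.h.
+	 */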
+ /* udc csr registers base */
+ udc->csr = udc->virt_addr + UDC_CSR_ADDR;
+
+ /* dev registers base */
+ udc->regs = udc->virt_addr + UDC_DEVCFG_ADDR;
+
+ /* ep registers base */
+ udc->ep_regs = udc->virt_addr + UDC_EPREGS_ADDR;
+
+ /* fifo's base */
+ udc->rxfifo = (u32 __iomem *)(udc->virt_addr + UDC_RXFIFO_ADDR);
+ udc->txfifo = (u32 __iomem *)(udc->virt_addr + UDC_TXFIFO_ADDR);
+
+ udc->phys_addr = (unsigned long)res->start;
+
+ udc->irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (udc->irq <= 0) {
+ dev_err(dev, "Can't parse and map interrupt\n");
+ return -EINVAL;
+ }
+
+ udc->udc_phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
+ if (IS_ERR(udc->udc_phy)) {
+ dev_err(dev, "Failed to obtain phy from device tree\n");
+ return PTR_ERR(udc->udc_phy);
+ }
+
+ ret = phy_init(udc->udc_phy);
+ if (ret) {
+ dev_err(dev, "UDC phy init failed");
+ return ret;
+ }
+
+ ret = phy_power_on(udc->udc_phy);
+ if (ret) {
+ dev_err(dev, "UDC phy power on failed");
+ phy_exit(udc->udc_phy);
+ return ret;
+ }
+
+ /* Register for extcon if supported */
+ if (of_get_property(dev->of_node, "extcon", NULL)) {
+ udc->edev = extcon_get_edev_by_phandle(dev, 0);
+ if (IS_ERR(udc->edev)) {
+ if (PTR_ERR(udc->edev) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "Invalid or missing extcon\n");
+ ret = PTR_ERR(udc->edev);
+ goto exit_phy;
+ }
+
+ udc->nb.notifier_call = usbd_connect_notify;
+ ret = extcon_register_notifier(udc->edev, EXTCON_USB,
+ &udc->nb);
+ if (ret < 0) {
+ dev_err(dev, "Can't register extcon device\n");
+ goto exit_phy;
+ }
+
+ ret = extcon_get_state(udc->edev, EXTCON_USB);
+ if (ret < 0) {
+ dev_err(dev, "Can't get cable state\n");
+ goto exit_extcon;
+ } else if (ret) {
+ udc->conn_type = ret;
+ }
+ INIT_DELAYED_WORK(&udc->drd_work, udc_drd_work);
+ }
+
+ /* init dma pools */
+ if (use_dma) {
+ ret = init_dma_pools(udc);
+ if (ret != 0)
+ goto exit_extcon;
+ }
+
+ ret = devm_request_irq(dev, udc->irq, udc_irq, IRQF_SHARED,
+ "snps-udc", udc);
+ if (ret < 0) {
+ dev_err(dev, "Request irq %d failed for UDC\n", udc->irq);
+ goto exit_dma;
+ }
+
+ platform_set_drvdata(pdev, udc);
+ udc->chiprev = UDC_BCM_REV;
+
+ if (udc_probe(udc)) {
+ ret = -ENODEV;
+ goto exit_dma;
+ }
+ dev_info(dev, "Synopsys UDC platform driver probe successful\n");
+
+ return 0;
+
+exit_dma:
+ if (use_dma)
+ free_dma_pools(udc);
+exit_extcon:
+ if (udc->edev)
+ extcon_unregister_notifier(udc->edev, EXTCON_USB, &udc->nb);
+exit_phy:
+ if (udc->udc_phy) {
+ phy_power_off(udc->udc_phy);
+ phy_exit(udc->udc_phy);
+ }
+ return ret;
+}
+
+static int udc_plat_remove(struct platform_device *pdev)
+{
+ struct udc *dev;
+
+ dev = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&dev->gadget);
+	/* the gadget driver must already be unregistered at this point */
+ if (WARN_ON(dev->driver))
+ return 0;
+
+ /* dma pool cleanup */
+ free_dma_pools(dev);
+
+ udc_remove(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ phy_power_off(dev->udc_phy);
+ phy_exit(dev->udc_phy);
+ extcon_unregister_notifier(dev->edev, EXTCON_USB, &dev->nb);
+
+ dev_info(&pdev->dev, "Synopsys UDC platform driver removed\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int udc_plat_suspend(struct device *dev)
+{
+ struct udc *udc;
+
+ udc = dev_get_drvdata(dev);
+ stop_udc(udc);
+
+ if (extcon_get_state(udc->edev, EXTCON_USB) > 0) {
+ dev_dbg(udc->dev, "device -> idle\n");
+ stop_udc(udc);
+ }
+ phy_power_off(udc->udc_phy);
+ phy_exit(udc->udc_phy);
+
+ return 0;
+}
+
+static int udc_plat_resume(struct device *dev)
+{
+ struct udc *udc;
+ int ret;
+
+ udc = dev_get_drvdata(dev);
+
+ ret = phy_init(udc->udc_phy);
+ if (ret) {
+ dev_err(udc->dev, "UDC phy init failure");
+ return ret;
+ }
+
+ ret = phy_power_on(udc->udc_phy);
+ if (ret) {
+ dev_err(udc->dev, "UDC phy power on failure");
+ phy_exit(udc->udc_phy);
+ return ret;
+ }
+
+ if (extcon_get_state(udc->edev, EXTCON_USB) > 0) {
+ dev_dbg(udc->dev, "idle -> device\n");
+ start_udc(udc);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops udc_plat_pm_ops = {
+ .suspend = udc_plat_suspend,
+ .resume = udc_plat_resume,
+};
+#endif
+
+#if defined(CONFIG_OF)
+static const struct of_device_id of_udc_match[] = {
+ { .compatible = "brcm,ns2-udc", },
+ { .compatible = "brcm,cygnus-udc", },
+ { .compatible = "brcm,iproc-udc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_udc_match);
+#endif
+
+static struct platform_driver udc_plat_driver = {
+ .probe = udc_plat_probe,
+ .remove = udc_plat_remove,
+ .driver = {
+ .name = "snps-udc-plat",
+ .of_match_table = of_match_ptr(of_udc_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &udc_plat_pm_ops,
+#endif
+ },
+};
+module_platform_driver(udc_plat_driver);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Broadcom");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
new file mode 100644
index 000000000..a8cadc45c
--- /dev/null
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -0,0 +1,4048 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra XUSB device mode controller
+ *
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+#include <linux/usb/phy.h>
+#include <linux/workqueue.h>
+
+/* XUSB_DEV registers */
+#define DB 0x004
+#define DB_TARGET_MASK GENMASK(15, 8)
+#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
+#define DB_STREAMID_MASK GENMASK(31, 16)
+#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
+#define ERSTSZ 0x008
+#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
+#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
+#define ERSTXBALO(x) (0x010 + 8 * (x))
+#define ERSTXBAHI(x) (0x014 + 8 * (x))
+#define ERDPLO 0x020
+#define ERDPLO_EHB BIT(3)
+#define ERDPHI 0x024
+#define EREPLO 0x028
+#define EREPLO_ECS BIT(0)
+#define EREPLO_SEGI BIT(1)
+#define EREPHI 0x02c
+#define CTRL 0x030
+#define CTRL_RUN BIT(0)
+#define CTRL_LSE BIT(1)
+#define CTRL_IE BIT(4)
+#define CTRL_SMI_EVT BIT(5)
+#define CTRL_SMI_DSE BIT(6)
+#define CTRL_EWE BIT(7)
+#define CTRL_DEVADDR_MASK GENMASK(30, 24)
+#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
+#define CTRL_ENABLE BIT(31)
+#define ST 0x034
+#define ST_RC BIT(0)
+#define ST_IP BIT(4)
+#define RT_IMOD 0x038
+#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
+#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
+#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
+#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
+#define PORTSC 0x03c
+#define PORTSC_CCS BIT(0)
+#define PORTSC_PED BIT(1)
+#define PORTSC_PR BIT(4)
+#define PORTSC_PLS_SHIFT 5
+#define PORTSC_PLS_MASK GENMASK(8, 5)
+#define PORTSC_PLS_U0 0x0
+#define PORTSC_PLS_U2 0x2
+#define PORTSC_PLS_U3 0x3
+#define PORTSC_PLS_DISABLED 0x4
+#define PORTSC_PLS_RXDETECT 0x5
+#define PORTSC_PLS_INACTIVE 0x6
+#define PORTSC_PLS_RESUME 0xf
+#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
+#define PORTSC_PS_SHIFT 10
+#define PORTSC_PS_MASK GENMASK(13, 10)
+#define PORTSC_PS_UNDEFINED 0x0
+#define PORTSC_PS_FS 0x1
+#define PORTSC_PS_LS 0x2
+#define PORTSC_PS_HS 0x3
+#define PORTSC_PS_SS 0x4
+#define PORTSC_LWS BIT(16)
+#define PORTSC_CSC BIT(17)
+#define PORTSC_WRC BIT(19)
+#define PORTSC_PRC BIT(21)
+#define PORTSC_PLC BIT(22)
+#define PORTSC_CEC BIT(23)
+#define PORTSC_WPR BIT(30)
+#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
+ PORTSC_PLC | PORTSC_CEC)
+#define ECPLO 0x040
+#define ECPHI 0x044
+#define MFINDEX 0x048
+#define MFINDEX_FRAME_SHIFT 3
+#define MFINDEX_FRAME_MASK GENMASK(13, 3)
+#define PORTPM 0x04c
+#define PORTPM_L1S_MASK GENMASK(1, 0)
+#define PORTPM_L1S_DROP 0x0
+#define PORTPM_L1S_ACCEPT 0x1
+#define PORTPM_L1S_NYET 0x2
+#define PORTPM_L1S_STALL 0x3
+#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
+#define PORTPM_RWE BIT(3)
+#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
+#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
+#define PORTPM_FLA BIT(24)
+#define PORTPM_VBA BIT(25)
+#define PORTPM_WOC BIT(26)
+#define PORTPM_WOD BIT(27)
+#define PORTPM_U1E BIT(28)
+#define PORTPM_U2E BIT(29)
+#define PORTPM_FRWE BIT(30)
+#define PORTPM_PNG_CYA BIT(31)
+#define EP_HALT 0x050
+#define EP_PAUSE 0x054
+#define EP_RELOAD 0x058
+#define EP_STCHG 0x05c
+#define DEVNOTIF_LO 0x064
+#define DEVNOTIF_LO_TRIG BIT(0)
+#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
+#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
+#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
+#define DEVNOTIF_HI 0x068
+#define PORTHALT 0x06c
+#define PORTHALT_HALT_LTSSM BIT(0)
+#define PORTHALT_HALT_REJECT BIT(1)
+#define PORTHALT_STCHG_REQ BIT(20)
+#define PORTHALT_STCHG_INTR_EN BIT(24)
+#define PORT_TM 0x070
+#define EP_THREAD_ACTIVE 0x074
+#define EP_STOPPED 0x078
+#define HSFSPI_COUNT0 0x100
+#define HSFSPI_COUNT13 0x134
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
+ HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
+#define BLCG 0x840
+#define SSPX_CORE_CNT0 0x610
+#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
+#define SSPX_CORE_CNT30 0x688
+#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
+ SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
+#define SSPX_CORE_CNT32 0x690
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
+ SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_CNT56 0x6fc
+#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
+ SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
+#define SSPX_CORE_CNT57 0x700
+#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
+ SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
+#define SSPX_CORE_CNT65 0x720
+#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
+#define SSPX_CORE_CNT66 0x724
+#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
+#define SSPX_CORE_CNT67 0x728
+#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
+#define SSPX_CORE_CNT72 0x73c
+#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
+ SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
+#define SSPX_CORE_PADCTL4 0x750
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
+ SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
+#define BLCG_DFPCI BIT(0)
+#define BLCG_UFPCI BIT(1)
+#define BLCG_FE BIT(2)
+#define BLCG_COREPLL_PWRDN BIT(8)
+#define BLCG_IOPLL_0_PWRDN BIT(9)
+#define BLCG_IOPLL_1_PWRDN BIT(10)
+#define BLCG_IOPLL_2_PWRDN BIT(11)
+#define BLCG_ALL 0x1ff
+#define CFG_DEV_SSPI_XFER 0x858
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
+ CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
+#define CFG_DEV_FE 0x85c
+#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
+#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
+#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
+#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
+#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
+
+/* FPCI registers */
+#define XUSB_DEV_CFG_1 0x004
+#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
+#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
+#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
+#define XUSB_DEV_CFG_4 0x010
+#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
+#define XUSB_DEV_CFG_5 0x014
+
+/* IPFS registers */
+#define XUSB_DEV_CONFIGURATION_0 0x180
+#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
+#define XUSB_DEV_INTR_MASK_0 0x188
+#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
+
+struct tegra_xudc_ep_context {
+ __le32 info0;
+ __le32 info1;
+ __le32 deq_lo;
+ __le32 deq_hi;
+ __le32 tx_info;
+ __le32 rsvd[11];
+};
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+
+#define EP_TYPE_INVALID 0
+#define EP_TYPE_ISOCH_OUT 1
+#define EP_TYPE_BULK_OUT 2
+#define EP_TYPE_INTERRUPT_OUT 3
+#define EP_TYPE_CONTROL 4
+#define EP_TYPE_ISCOH_IN 5
+#define EP_TYPE_BULK_IN 6
+#define EP_TYPE_INTERRUPT_IN 7
+
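+/*
+ * BUILD_EP_CONTEXT_RW(name, member, shift, mask) expands to a pair of
+ * accessors, ep_ctx_read_<name>() and ep_ctx_write_<name>(), which
+ * read/update the (mask << shift) field of the little-endian context
+ * word 'member'; e.g. the 'state' invocation below covers bits [2:0]
+ * of info0.
+ */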
+#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
+static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
+{ \
+ return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
+} \
+static inline void \
+ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ ctx->member = cpu_to_le32(tmp); \
+}
+
+BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
+BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
+BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
+BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
+BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
+BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
+BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
+BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
+BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
+BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
+BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
+BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
+BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
+BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
+BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
+BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
+BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
+BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
+BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
+BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
+BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
+BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
+
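+/*
+ * The dequeue pointer is 16-byte aligned, so only bits [31:4] of its low
+ * word are kept in the context; the helpers below apply the 4-bit shift.
+ */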
+static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
+{
+ return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
+ (ep_ctx_read_deq_lo(ctx) << 4);
+}
+
+static inline void
+ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
+{
+ ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
+ ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
+}
+
+struct tegra_xudc_trb {
+ __le32 data_lo;
+ __le32 data_hi;
+ __le32 status;
+ __le32 control;
+};
+
+#define TRB_TYPE_RSVD 0
+#define TRB_TYPE_NORMAL 1
+#define TRB_TYPE_SETUP_STAGE 2
+#define TRB_TYPE_DATA_STAGE 3
+#define TRB_TYPE_STATUS_STAGE 4
+#define TRB_TYPE_ISOCH 5
+#define TRB_TYPE_LINK 6
+#define TRB_TYPE_TRANSFER_EVENT 32
+#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
+#define TRB_TYPE_STREAM 48
+#define TRB_TYPE_SETUP_PACKET_EVENT 63
+
+#define TRB_CMPL_CODE_INVALID 0
+#define TRB_CMPL_CODE_SUCCESS 1
+#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
+#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
+#define TRB_CMPL_CODE_USB_TRANS_ERR 4
+#define TRB_CMPL_CODE_TRB_ERR 5
+#define TRB_CMPL_CODE_STALL 6
+#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
+#define TRB_CMPL_CODE_SHORT_PACKET 13
+#define TRB_CMPL_CODE_RING_UNDERRUN 14
+#define TRB_CMPL_CODE_RING_OVERRUN 15
+#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
+#define TRB_CMPL_CODE_STOPPED 26
+#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
+#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
+#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
+#define TRB_CMPL_CODE_HOST_REJECTED 221
+#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
+#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
+
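+/*
+ * Same pattern as BUILD_EP_CONTEXT_RW above, but for TRB words: each
+ * invocation generates trb_read_<name>()/trb_write_<name>() for a
+ * (mask << shift) field of the given little-endian TRB member.
+ */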
+#define BUILD_TRB_RW(name, member, shift, mask) \
+static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
+{ \
+ return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
+} \
+static inline void \
+trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ trb->member = cpu_to_le32(tmp); \
+}
+
+BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
+BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
+BUILD_TRB_RW(seq_num, status, 0, 0xffff)
+BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
+BUILD_TRB_RW(td_size, status, 17, 0x1f)
+BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
+BUILD_TRB_RW(cycle, control, 0, 0x1)
+BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
+BUILD_TRB_RW(isp, control, 2, 0x1)
+BUILD_TRB_RW(chain, control, 4, 0x1)
+BUILD_TRB_RW(ioc, control, 5, 0x1)
+BUILD_TRB_RW(type, control, 10, 0x3f)
+BUILD_TRB_RW(stream_id, control, 16, 0xffff)
+BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
+BUILD_TRB_RW(tlbpc, control, 16, 0xf)
+BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
+BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
+BUILD_TRB_RW(sia, control, 31, 0x1)
+
+static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
+{
+ return ((u64)trb_read_data_hi(trb) << 32) |
+ trb_read_data_lo(trb);
+}
+
+static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
+{
+ trb_write_data_lo(trb, lower_32_bits(addr));
+ trb_write_data_hi(trb, upper_32_bits(addr));
+}
+
+struct tegra_xudc_request {
+ struct usb_request usb_req;
+
+ size_t buf_queued;
+ unsigned int trbs_queued;
+ unsigned int trbs_needed;
+ bool need_zlp;
+
+ struct tegra_xudc_trb *first_trb;
+ struct tegra_xudc_trb *last_trb;
+
+ struct list_head list;
+};
+
+struct tegra_xudc_ep {
+ struct tegra_xudc *xudc;
+ struct usb_ep usb_ep;
+ unsigned int index;
+ char name[8];
+
+ struct tegra_xudc_ep_context *context;
+
+#define XUDC_TRANSFER_RING_SIZE 64
+ struct tegra_xudc_trb *transfer_ring;
+ dma_addr_t transfer_ring_phys;
+
+ unsigned int enq_ptr;
+ unsigned int deq_ptr;
+ bool pcs;
+ bool ring_full;
+ bool stream_rejected;
+
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+};
+
+struct tegra_xudc_sel_timing {
+ __u8 u1sel;
+ __u8 u1pel;
+ __le16 u2sel;
+ __le16 u2pel;
+};
+
+enum tegra_xudc_setup_state {
+ WAIT_FOR_SETUP,
+ DATA_STAGE_XFER,
+ DATA_STAGE_RECV,
+ STATUS_STAGE_XFER,
+ STATUS_STAGE_RECV,
+};
+
+struct tegra_xudc_setup_packet {
+ struct usb_ctrlrequest ctrl_req;
+ unsigned int seq_num;
+};
+
+struct tegra_xudc_save_regs {
+ u32 ctrl;
+ u32 portpm;
+};
+
+struct tegra_xudc {
+ struct device *dev;
+ const struct tegra_xudc_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+
+ spinlock_t lock;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+#define XUDC_NR_EVENT_RINGS 2
+#define XUDC_EVENT_RING_SIZE 4096
+ struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
+ dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
+ unsigned int event_ring_index;
+ unsigned int event_ring_deq_ptr;
+ bool ccs;
+
+#define XUDC_NR_EPS 32
+ struct tegra_xudc_ep ep[XUDC_NR_EPS];
+ struct tegra_xudc_ep_context *ep_context;
+ dma_addr_t ep_context_phys;
+
+ struct device *genpd_dev_device;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_device;
+ struct device_link *genpd_dl_ss;
+
+ struct dma_pool *transfer_ring_pool;
+
+ bool queued_setup_packet;
+ struct tegra_xudc_setup_packet setup_packet;
+ enum tegra_xudc_setup_state setup_state;
+ u16 setup_seq_num;
+
+ u16 dev_addr;
+ u16 isoch_delay;
+ struct tegra_xudc_sel_timing sel_timing;
+ u8 test_mode_pattern;
+ u16 status_buf;
+ struct tegra_xudc_request *ep0_req;
+
+ bool pullup;
+
+ unsigned int nr_enabled_eps;
+ unsigned int nr_isoch_eps;
+
+ unsigned int device_state;
+ unsigned int resume_state;
+
+ int irq;
+
+ void __iomem *base;
+ resource_size_t phys_base;
+ void __iomem *ipfs;
+ void __iomem *fpci;
+
+ struct regulator_bulk_data *supplies;
+
+ struct clk_bulk_data *clks;
+
+ bool device_mode;
+ struct work_struct usb_role_sw_work;
+
+ struct phy **usb3_phy;
+ struct phy *curr_usb3_phy;
+ struct phy **utmi_phy;
+ struct phy *curr_utmi_phy;
+
+ struct tegra_xudc_save_regs saved_regs;
+ bool suspended;
+ bool powergated;
+
+ struct usb_phy **usbphy;
+ struct usb_phy *curr_usbphy;
+ struct notifier_block vbus_nb;
+
+ struct completion disconnect_complete;
+
+ bool selfpowered;
+
+#define TOGGLE_VBUS_WAIT_MS 100
+ struct delayed_work plc_reset_work;
+ bool wait_csc;
+
+ struct delayed_work port_reset_war_work;
+ bool wait_for_sec_prc;
+};
+
+#define XUDC_TRB_MAX_BUFFER_SIZE 65536
+#define XUDC_MAX_ISOCH_EPS 4
+#define XUDC_INTERRUPT_MODERATION_US 0
+
+static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+struct tegra_xudc_soc {
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ const char * const *clock_names;
+ unsigned int num_clks;
+ unsigned int num_phys;
+ bool u1_enable;
+ bool u2_enable;
+ bool lpm_enable;
+ bool invalid_seq_num;
+ bool pls_quirk;
+ bool port_reset_quirk;
+ bool port_speed_quirk;
+ bool has_ipfs;
+};
+
+static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->fpci + offset);
+}
+
+static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->fpci + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->ipfs + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->ipfs + offset);
+}
+
+static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->base + offset);
+}
+
+static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->base + offset);
+}
+
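+/* Poll a controller register until (value & mask) == val; 100 us timeout. */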
+static inline int xudc_readl_poll(struct tegra_xudc *xudc,
+ unsigned int offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ return readl_poll_timeout_atomic(xudc->base + offset, regval,
+ (regval & mask) == val, 1, 100);
+}
+
+static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct tegra_xudc, gadget);
+}
+
+static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct tegra_xudc_ep, usb_ep);
+}
+
+static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
+{
+ return container_of(req, struct tegra_xudc_request, usb_req);
+}
+
+static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(xudc->dev,
+ "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
+ type, trb, trb->data_lo, trb->data_hi, trb->status,
+ trb->control);
+}
+
+static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ /* Limit port speed to Gen 1. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT56);
+ val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
+ xudc_writel(xudc, val, SSPX_CORE_CNT56);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT57);
+ val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
+ xudc_writel(xudc, val, SSPX_CORE_CNT57);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT65);
+ val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT65);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT66);
+ val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT66);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT67);
+ val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT67);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT72);
+ val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
+ val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
+ xudc_writel(xudc, val, SSPX_CORE_CNT72);
+}
+
+static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ /* Restore port speed to Gen 2. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT56);
+ val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
+ xudc_writel(xudc, val, SSPX_CORE_CNT56);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT57);
+ val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
+ xudc_writel(xudc, val, SSPX_CORE_CNT57);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT65);
+ val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
+ xudc_writel(xudc, val, SSPX_CORE_CNT65);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT66);
+ val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
+ xudc_writel(xudc, val, SSPX_CORE_CNT66);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT67);
+ val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT67);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT72);
+ val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
+ val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
+ xudc_writel(xudc, val, SSPX_CORE_CNT72);
+}
+
+static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
+{
+ int err;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ tegra_phy_xusb_utmi_pad_power_on(xudc->curr_utmi_phy);
+
+ err = phy_power_on(xudc->curr_utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "UTMI power on failed: %d\n", err);
+
+ err = phy_power_on(xudc->curr_usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);
+
+ dev_dbg(xudc->dev, "device mode on\n");
+
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
+ USB_ROLE_DEVICE);
+}
+
+static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
+{
+ bool connected = false;
+ u32 pls, val;
+ int err;
+
+ dev_dbg(xudc->dev, "device mode off\n");
+
+ connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
+
+ reinit_completion(&xudc->disconnect_complete);
+
+ if (xudc->soc->port_speed_quirk)
+ tegra_xudc_restore_port_speed(xudc);
+
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
+
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ /* Direct link to U0 if disconnected in RESUME or U2. */
+ if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
+ (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
+ val = xudc_readl(xudc, PORTPM);
+ val |= PORTPM_FRWE;
+ xudc_writel(xudc, val, PORTPM);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ /* Wait for disconnect event. */
+ if (connected)
+ wait_for_completion(&xudc->disconnect_complete);
+
+ /* Make sure interrupt handler has completed before powergating. */
+ synchronize_irq(xudc->irq);
+
+ tegra_phy_xusb_utmi_pad_power_down(xudc->curr_utmi_phy);
+
+ err = phy_power_off(xudc->curr_utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);
+
+ err = phy_power_off(xudc->curr_usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);
+
+ pm_runtime_put(xudc->dev);
+}
+
+static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
+{
+ struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
+ usb_role_sw_work);
+
+ if (xudc->device_mode)
+ tegra_xudc_device_mode_on(xudc);
+ else
+ tegra_xudc_device_mode_off(xudc);
+}
+
+static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
+ struct usb_phy *usbphy)
+{
+ unsigned int i;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
+ return i;
+ }
+
+ dev_info(xudc->dev, "phy index could not be found for shared USB PHY");
+ return -1;
+}
+
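+/*
+ * VBUS notifier: update device_mode based on the last PHY event, select
+ * the PHYs that signalled the event and defer the actual mode switch to
+ * the role-switch work item.
+ */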
+static int tegra_xudc_vbus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
+ vbus_nb);
+ struct usb_phy *usbphy = (struct usb_phy *)data;
+ int phy_index;
+
+ dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
+
+ if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
+ (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
+ dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
+ xudc->device_mode);
+ return NOTIFY_OK;
+ }
+
+ xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);
+
+ phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
+ dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
+ phy_index);
+
+ if (!xudc->suspended && phy_index != -1) {
+ xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
+ xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
+ xudc->curr_usbphy = usbphy;
+ schedule_work(&xudc->usb_role_sw_work);
+ }
+
+ return NOTIFY_OK;
+}
+
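+/*
+ * Workaround: if the port link state is stuck in Inactive, cycle the
+ * UTMI PHY mode (effectively toggling VBUS) to force a reconnect.
+ */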
+static void tegra_xudc_plc_reset_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
+ plc_reset_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->wait_csc) {
+ u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ if (pls == PORTSC_PLS_INACTIVE) {
+ dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
+ USB_ROLE_NONE);
+ phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
+ USB_ROLE_DEVICE);
+
+ xudc->wait_csc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
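+/*
+ * Workaround for a port reset (PRC) that does not complete: if the port
+ * is still disabled when this work runs, have the padctl driver toggle
+ * VBUS via tegra_phy_xusb_utmi_port_reset().
+ */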
+static void tegra_xudc_port_reset_war_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc =
+ container_of(dwork, struct tegra_xudc, port_reset_war_work);
+ unsigned long flags;
+ u32 pls;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->device_mode && xudc->wait_for_sec_prc) {
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+ dev_dbg(xudc->dev, "pls = %x\n", pls);
+
+ if (pls == PORTSC_PLS_DISABLED) {
+ dev_dbg(xudc->dev, "toggle vbus\n");
+ /* PRC doesn't complete in 100ms, toggle the vbus */
+ ret = tegra_phy_xusb_utmi_port_reset(
+ xudc->curr_utmi_phy);
+ if (ret == 1)
+ xudc->wait_for_sec_prc = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ unsigned int index;
+
+ index = trb - ep->transfer_ring;
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return 0;
+
+ return (ep->transfer_ring_phys + index * sizeof(*trb));
+}
+
+static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
+ dma_addr_t addr)
+{
+ struct tegra_xudc_trb *trb;
+ unsigned int index;
+
+ index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return NULL;
+
+ trb = &ep->transfer_ring[index];
+
+ return trb;
+}
+
+static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_writel(xudc, BIT(ep), EP_RELOAD);
+ xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
+}
+
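+/*
+ * EP_PAUSE and EP_HALT updates use the same handshake: write the new
+ * bitmap, poll EP_STCHG until the controller acknowledges the state
+ * change, then clear the state-change bit by writing it back.
+ */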
+static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+
+ xudc_writel(xudc, 0, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!val)
+ return;
+ xudc_writel(xudc, 0, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
+ xudc_writel(xudc, BIT(ep), EP_STOPPED);
+}
+
+static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
+}
+
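+/*
+ * Complete a request and hand it back to the gadget driver. The xudc
+ * lock is dropped across the giveback callback.
+ */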
+static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req, int status)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
+ req, ep->index, status);
+
+ if (likely(req->usb_req.status == -EINPROGRESS))
+ req->usb_req.status = status;
+
+ list_del_init(&req->list);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ spin_unlock(&xudc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&xudc->lock);
+}
+
+static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
+{
+ struct tegra_xudc_request *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ tegra_xudc_req_done(ep, req, status);
+ }
+}
+
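+/*
+ * Number of TRBs that can still be enqueued on the ring. One slot is
+ * kept free to distinguish a full ring from an empty one, and the last
+ * slot is reserved for the link TRB.
+ */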
+static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
+{
+ if (ep->ring_full)
+ return 0;
+
+ if (ep->deq_ptr > ep->enq_ptr)
+ return ep->deq_ptr - ep->enq_ptr - 1;
+
+ return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
+}
+
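+/*
+ * Fill in one transfer TRB for the given request: td_size is the number
+ * of TRBs remaining in the TD, the chain bit is set on all but the last
+ * TRB of the TD, and the TRB type depends on the endpoint and, for
+ * control endpoints, the current setup state.
+ */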
+static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb,
+ bool ioc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ dma_addr_t buf_addr;
+ size_t len;
+
+ len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
+ req->buf_queued);
+ if (len > 0)
+ buf_addr = req->usb_req.dma + req->buf_queued;
+ else
+ buf_addr = 0;
+
+ trb_write_data_ptr(trb, buf_addr);
+
+ trb_write_transfer_len(trb, len);
+ trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
+
+ if (req->trbs_queued == req->trbs_needed - 1 ||
+ (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
+ trb_write_chain(trb, 0);
+ else
+ trb_write_chain(trb, 1);
+
+ trb_write_ioc(trb, ioc);
+
+ if (usb_endpoint_dir_out(ep->desc) ||
+ (usb_endpoint_xfer_control(ep->desc) &&
+ (xudc->setup_state == DATA_STAGE_RECV)))
+ trb_write_isp(trb, 1);
+ else
+ trb_write_isp(trb, 0);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == DATA_STAGE_RECV)
+ trb_write_type(trb, TRB_TYPE_DATA_STAGE);
+ else
+ trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
+
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == STATUS_STAGE_XFER)
+ trb_write_data_stage_dir(trb, 1);
+ else
+ trb_write_data_stage_dir(trb, 0);
+ } else if (usb_endpoint_xfer_isoc(ep->desc)) {
+ trb_write_type(trb, TRB_TYPE_ISOCH);
+ trb_write_sia(trb, 1);
+ trb_write_frame_id(trb, 0);
+ trb_write_tlbpc(trb, 0);
+ } else if (usb_ss_max_streams(ep->comp_desc)) {
+ trb_write_type(trb, TRB_TYPE_STREAM);
+ trb_write_stream_id(trb, req->usb_req.stream_id);
+ } else {
+ trb_write_type(trb, TRB_TYPE_NORMAL);
+ trb_write_stream_id(trb, 0);
+ }
+
+ trb_write_cycle(trb, ep->pcs);
+
+ req->trbs_queued++;
+ req->buf_queued += len;
+
+ dump_trb(xudc, "TRANSFER", trb);
+}
+
+static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ unsigned int i, count, available;
+ bool wait_td = false;
+
+ available = ep_available_trbs(ep);
+ count = req->trbs_needed - req->trbs_queued;
+ if (available < count) {
+ count = available;
+ ep->ring_full = true;
+ }
+
+ /*
+ * To generate a zero-length packet on the USB bus, SW needs to
+ * schedule a standalone zero-length TD. Due to HW behavior, the
+ * TDs must be scheduled in different ways for different endpoint
+ * types.
+ *
+ * For control endpoints:
+ * - Data stage TD (IOC = 1, CH = 0)
+ * - Ring doorbell and wait for the transfer event
+ * - Data stage TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ *
+ * For bulk and interrupt endpoints:
+ * - Normal transfer TD (IOC = 0, CH = 0)
+ * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ */
+
+ if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
+ wait_td = true;
+
+ if (!req->first_trb)
+ req->first_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ for (i = 0; i < count; i++) {
+ struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
+ bool ioc = false;
+
+ if ((i == count - 1) || (wait_td && i == count - 2))
+ ioc = true;
+
+ tegra_xudc_queue_one_trb(ep, req, trb, ioc);
+ req->last_trb = trb;
+
+ ep->enq_ptr++;
+ if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
+ trb = &ep->transfer_ring[ep->enq_ptr];
+ trb_write_cycle(trb, ep->pcs);
+ ep->pcs = !ep->pcs;
+ ep->enq_ptr = 0;
+ }
+
+ if (ioc)
+ break;
+ }
+
+ return count;
+}
+
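+/*
+ * Ring the doorbell for an endpoint. For control endpoints, the stream
+ * ID field of the doorbell carries the setup packet sequence number.
+ */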
+static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ u32 val;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ val = DB_TARGET(ep->index);
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ val |= DB_STREAMID(xudc->setup_seq_num);
+ } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
+ struct tegra_xudc_request *req;
+
+ /* Don't ring doorbell if the stream has been rejected. */
+ if (ep->stream_rejected)
+ return;
+
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ val |= DB_STREAMID(req->usb_req.stream_id);
+ }
+
+ dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
+ xudc_writel(xudc, val, DB);
+}
+
+static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc_request *req;
+ bool trbs_queued = false;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (ep->ring_full)
+ break;
+
+ if (tegra_xudc_queue_trbs(ep, req) > 0)
+ trbs_queued = true;
+ }
+
+ if (trbs_queued)
+ tegra_xudc_ep_ring_doorbell(ep);
+}
+
+static int
+__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ int err;
+
+ if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "control EP has pending transfers\n");
+ return -EINVAL;
+ }
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to map request: %d\n", err);
+ return err;
+ }
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ req->need_zlp = false;
+ req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
+ XUDC_TRB_MAX_BUFFER_SIZE);
+ if (req->usb_req.length == 0)
+ req->trbs_needed++;
+
+ if (!usb_endpoint_xfer_isoc(ep->desc) &&
+ req->usb_req.zero && req->usb_req.length &&
+ ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
+ req->trbs_needed++;
+ req->need_zlp = true;
+ }
+
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ list_add_tail(&req->list, &ep->queue);
+
+ tegra_xudc_ep_kick_queue(ep);
+
+ return 0;
+}
+
+static int
+tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
+ gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_queue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc_trb *trb = req->first_trb;
+ bool pcs_enq = trb_read_cycle(trb);
+ bool pcs;
+
+ /*
+ * Clear out all the TRBs that are part of or follow the cancelled
+ * request, and restore the cycle bit of each cleared TRB to its
+ * un-enqueued state.
+ */
+ while (trb != &ep->transfer_ring[ep->enq_ptr]) {
+ pcs = trb_read_cycle(trb);
+ memset(trb, 0, sizeof(*trb));
+ trb_write_cycle(trb, !pcs);
+ trb++;
+
+ if (trb_read_type(trb) == TRB_TYPE_LINK)
+ trb = ep->transfer_ring;
+ }
+
+ /* Requests will be re-queued at the start of the cancelled request. */
+ ep->enq_ptr = req->first_trb - ep->transfer_ring;
+ /*
+ * Retrieve the correct cycle bit state from the first trb of
+ * the cancelled request.
+ */
+ ep->pcs = pcs_enq;
+ ep->ring_full = false;
+ list_for_each_entry_continue(req, &ep->queue, list) {
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ }
+}
+
+/*
+ * Determine if the given TRB is in the range [first trb, last trb] for the
+ * given request.
+ */
+static bool trb_in_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
+ req->first_trb, req->last_trb, trb);
+
+ if (trb >= req->first_trb && (trb <= req->last_trb ||
+ req->last_trb < req->first_trb))
+ return true;
+
+ if (trb < req->first_trb && trb <= req->last_trb &&
+ req->last_trb < req->first_trb)
+ return true;
+
+ return false;
+}
+
+/*
+ * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
+ * for the given endpoint and request.
+ */
+static bool trb_before_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
+ __func__, req->first_trb, req->last_trb, enq_trb, trb);
+
+ if (trb < req->first_trb && (enq_trb <= trb ||
+ req->first_trb < enq_trb))
+ return true;
+
+ if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
+ return true;
+
+ return false;
+}
+
+static int
+__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ struct tegra_xudc_request *r = NULL, *iter;
+ struct tegra_xudc_trb *deq_trb;
+ bool busy, kick_queue = false;
+ int ret = 0;
+
+ /* Make sure the request is actually queued to this endpoint. */
+ list_for_each_entry(iter, &ep->queue, list) {
+ if (iter != req)
+ continue;
+ r = iter;
+ break;
+ }
+
+ if (!r)
+ return -EINVAL;
+
+ /* Request hasn't been queued in the transfer ring yet. */
+ if (!req->trbs_queued) {
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ return 0;
+ }
+
+ /* Halt DMA for this endpoint. */
+ if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
+ ep_pause(xudc, ep->index);
+ ep_wait_for_inactive(xudc, ep->index);
+ }
+
+ deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
+ /* Is the hardware processing the TRB at the dequeue pointer? */
+ busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));
+
+ if (trb_in_request(ep, req, deq_trb) && busy) {
+ /*
+ * Request has been partially completed or it hasn't
+ * started processing yet.
+ */
+ dma_addr_t deq_ptr;
+
+ squeeze_transfer_ring(ep, req);
+
+ req->usb_req.actual = ep_ctx_read_edtla(ep->context);
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+
+ /* EDTLA is > 0: request has been partially completed */
+ if (req->usb_req.actual > 0) {
+ /*
+ * Abort the pending transfer and update the dequeue
+ * pointer
+ */
+ ep_ctx_write_edtla(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_data_offset(ep->context, 0);
+
+ deq_ptr = trb_virt_to_phys(ep,
+ &ep->transfer_ring[ep->enq_ptr]);
+
+ if (dma_mapping_error(xudc->dev, deq_ptr)) {
+ ret = -EINVAL;
+ } else {
+ ep_ctx_write_deq_ptr(ep->context, deq_ptr);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+ ep_reload(xudc, ep->index);
+ }
+ }
+ } else if (trb_before_request(ep, req, deq_trb) && busy) {
+ /* Request hasn't started processing yet. */
+ squeeze_transfer_ring(ep, req);
+
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+ } else {
+ /*
+ * Request has completed, but we haven't processed the
+ * completion event yet.
+ */
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ ret = -EINVAL;
+ }
+
+ /* Resume the endpoint. */
+ ep_unpause(xudc, ep->index);
+
+ if (kick_queue)
+ tegra_xudc_ep_kick_queue(ep);
+
+ return ret;
+}
+
+static int
+tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_dequeue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (!ep->desc)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ dev_err(xudc->dev, "can't halt isochronous EP\n");
+ return -ENOTSUPP;
+ }
+
+ if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
+ dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
+ halt ? "halted" : "not halted");
+ return 0;
+ }
+
+ if (halt) {
+ ep_halt(xudc, ep->index);
+ } else {
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_rsvd(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_splitxstate(ep->context, 0);
+ ep_ctx_write_seq_num(ep->context, 0);
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ tegra_xudc_ep_ring_doorbell(ep);
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ if (value && ep->desc && usb_endpoint_dir_in(ep->desc) &&
+ !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "can't halt EP with requests pending\n");
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_set_halt(ep, value);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
+{
+ const struct usb_endpoint_descriptor *desc = ep->desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+ struct tegra_xudc *xudc = ep->xudc;
+ u16 maxpacket, maxburst = 0, esit = 0;
+ u32 val;
+
+ maxpacket = usb_endpoint_maxp(desc);
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (!usb_endpoint_xfer_control(desc))
+ maxburst = comp_desc->bMaxBurst;
+
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
+ esit = le16_to_cpu(comp_desc->wBytesPerInterval);
+ } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc))) {
+ if (xudc->gadget.speed == USB_SPEED_HIGH) {
+ maxburst = usb_endpoint_maxp_mult(desc) - 1;
+ if (maxburst == 0x3) {
+ dev_warn(xudc->dev,
+ "invalid endpoint maxburst\n");
+ maxburst = 0x2;
+ }
+ }
+ esit = maxpacket * (maxburst + 1);
+ }
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_interval(ep->context, desc->bInterval);
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (usb_endpoint_xfer_isoc(desc)) {
+ ep_ctx_write_mult(ep->context,
+ comp_desc->bmAttributes & 0x3);
+ }
+
+ if (usb_endpoint_xfer_bulk(desc)) {
+ ep_ctx_write_max_pstreams(ep->context,
+ comp_desc->bmAttributes &
+ 0x1f);
+ ep_ctx_write_lsa(ep->context, 1);
+ }
+ }
+
+ if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
+ val = usb_endpoint_type(desc);
+ else
+ val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
+
+ ep_ctx_write_type(ep->context, val);
+ ep_ctx_write_cerr(ep->context, 0x3);
+ ep_ctx_write_max_packet_size(ep->context, maxpacket);
+ ep_ctx_write_max_burst_size(ep->context, maxburst);
+
+ ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+
+ /* Select a reasonable average TRB length based on endpoint type. */
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ val = 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ val = 1024;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_ISOC:
+ default:
+ val = 3072;
+ break;
+ }
+
+ ep_ctx_write_avg_trb_len(ep->context, val);
+ ep_ctx_write_max_esit_payload(ep->context, esit);
+
+ ep_ctx_write_cerrcnt(ep->context, 0x3);
+}
+
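+/*
+ * Set up the link TRB in the last ring slot so that the ring wraps back
+ * to its base address, toggling the cycle state on each pass.
+ */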
+static void setup_link_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ trb_write_data_ptr(trb, ep->transfer_ring_phys);
+ trb_write_type(trb, TRB_TYPE_LINK);
+ trb_write_toggle_cycle(trb, 1);
+}
+
+static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_err(xudc->dev, "endpoint %u already disabled\n",
+ ep->index);
+ return -EINVAL;
+ }
+
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
+
+ xudc->nr_enabled_eps--;
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ xudc->nr_isoch_eps--;
+
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+ if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
+ xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
+
+ /*
+ * If this is the last endpoint disabled in a de-configure request,
+ * switch back to address state.
+ */
+ if ((xudc->device_state == USB_STATE_CONFIGURED) &&
+ (xudc->nr_enabled_eps == 1)) {
+ u32 val;
+
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ dev_info(xudc->dev, "ep %u disabled\n", ep->index);
+
+ return 0;
+}
+
+static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_disable(ep);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ unsigned int i;
+ u32 val;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER &&
+ !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
+ return -EINVAL;
+
+ /* Disable the EP if it is not disabled */
+ if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
+ __tegra_xudc_ep_disable(ep);
+
+ ep->desc = desc;
+ ep->comp_desc = ep->usb_ep.comp_desc;
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ if (xudc->nr_isoch_eps >= XUDC_MAX_ISOCH_EPS) {
+ dev_err(xudc->dev, "too many isochronous endpoints\n");
+ return -EBUSY;
+ }
+ xudc->nr_isoch_eps++;
+ }
+
+ memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
+ sizeof(*ep->transfer_ring));
+ setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
+
+ ep->enq_ptr = 0;
+ ep->deq_ptr = 0;
+ ep->pcs = true;
+ ep->ring_full = false;
+ xudc->nr_enabled_eps++;
+
+ tegra_xudc_ep_context_setup(ep);
+
+ /*
+ * No need to reload and un-halt EP0. This will be done automatically
+ * once a valid SETUP packet is received.
+ */
+ if (usb_endpoint_xfer_control(desc))
+ goto out;
+
+ /*
+ * Transition to configured state once the first non-control
+ * endpoint is enabled.
+ */
+ if (xudc->device_state == USB_STATE_ADDRESS) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+
+ xudc->device_state = USB_STATE_CONFIGURED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ /*
+ * Pause all bulk endpoints when enabling an isoch endpoint
+ * to ensure the isoch endpoint is allocated enough bandwidth.
+ */
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_pause(xudc, i);
+ }
+ }
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_unpause(xudc, i);
+ }
+ }
+
+out:
+ dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
+ usb_ep_type_string(usb_endpoint_type(ep->desc)),
+ usb_endpoint_dir_in(ep->desc) ? "in" : "out");
+
+ return 0;
+}
+
+static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_enable(ep, desc);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *
+tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+
+ req = kzalloc(sizeof(*req), gfp);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->list);
+
+ return &req->usb_req;
+}
+
+static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
+ struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req = to_xudc_req(usb_req);
+
+ kfree(req);
+}
+
+static const struct usb_ep_ops tegra_xudc_ep_ops = {
+ .enable = tegra_xudc_ep_enable,
+ .disable = tegra_xudc_ep_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
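+/*
+ * EP0 is enabled and disabled internally by the driver, so refuse any
+ * attempt by a gadget driver to do it.
+ */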
+static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EBUSY;
+}
+
+static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
+{
+ return -EBUSY;
+}
+
+static const struct usb_ep_ops tegra_xudc_ep0_ops = {
+ .enable = tegra_xudc_ep0_enable,
+ .disable = tegra_xudc_ep0_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
+ MFINDEX_FRAME_SHIFT;
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ ep_unpause_all(xudc);
+
+ /* Direct link to U0. */
+ val = xudc_readl(xudc, PORTSC);
+ if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ if (xudc->device_state == USB_STATE_SUSPENDED) {
+ xudc->device_state = xudc->resume_state;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ xudc->resume_state = 0;
+ }
+
+ /*
+ * Doorbells may be dropped if they are sent too soon (< ~200ns)
+ * after unpausing the endpoint. Wait for 500ns just to be safe.
+ */
+ ndelay(500);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
+}
+
+static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+ val = xudc_readl(xudc, PORTPM);
+ dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
+ val, gadget->speed);
+
+ if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
+ (val & PORTPM_RWE)) ||
+ ((xudc->gadget.speed == USB_SPEED_SUPER) &&
+ (val & PORTPM_FRWE))) {
+ tegra_xudc_resume_device_state(xudc);
+
+ /* Send Device Notification packet. */
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
+ | DEVNOTIF_LO_TRIG;
+ xudc_writel(xudc, 0, DEVNOTIF_HI);
+ xudc_writel(xudc, val, DEVNOTIF_LO);
+ }
+ }
+
+unlock:
+ dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (is_on != xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ if (is_on)
+ val |= CTRL_ENABLE;
+ else
+ val &= ~CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->pullup = is_on;
+ dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+ int ret;
+ unsigned int i;
+
+ if (!driver)
+ return -EINVAL;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
+ if (ret < 0)
+ goto unlock;
+
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_IE | CTRL_LSE;
+ xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, PORTHALT);
+ val |= PORTHALT_STCHG_INTR_EN;
+ xudc_writel(xudc, val, PORTHALT);
+
+ if (xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ for (i = 0; i < xudc->soc->num_phys; i++)
+ if (xudc->usbphy[i])
+ otg_set_peripheral(xudc->usbphy[i]->otg, gadget);
+
+ xudc->driver = driver;
+unlock:
+ dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+ unsigned int i;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ for (i = 0; i < xudc->soc->num_phys; i++)
+ if (xudc->usbphy[i])
+ otg_set_peripheral(xudc->usbphy[i]->otg, NULL);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_IE | CTRL_ENABLE);
+ xudc_writel(xudc, val, CTRL);
+
+ __tegra_xudc_ep_disable(&xudc->ep[0]);
+
+ xudc->driver = NULL;
+ dev_dbg(xudc->dev, "Gadget stopped");
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
+ unsigned int m_a)
+{
+ int ret = 0;
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);
+
+ if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
+ ret = usb_phy_set_power(xudc->curr_usbphy, m_a);
+
+ return ret;
+}
+
+static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
+ xudc->selfpowered = !!is_on;
+
+ return 0;
+}
+
+static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
+ .get_frame = tegra_xudc_gadget_get_frame,
+ .wakeup = tegra_xudc_gadget_wakeup,
+ .pullup = tegra_xudc_gadget_pullup,
+ .udc_start = tegra_xudc_gadget_start,
+ .udc_stop = tegra_xudc_gadget_stop,
+ .vbus_draw = tegra_xudc_gadget_vbus_draw,
+ .set_selfpowered = tegra_xudc_set_selfpowered,
+};
+
+static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
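+/*
+ * Queue a zero-length status stage on EP0, reusing the driver's
+ * internal EP0 request.
+ */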
+static int
+tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = NULL;
+ xudc->ep0_req->usb_req.dma = 0;
+ xudc->ep0_req->usb_req.length = 0;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static int
+tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = buf;
+ xudc->ep0_req->usb_req.length = len;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
+{
+ switch (xudc->setup_state) {
+ case DATA_STAGE_XFER:
+ xudc->setup_state = STATUS_STAGE_RECV;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ case DATA_STAGE_RECV:
+ xudc->setup_state = STATUS_STAGE_XFER;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ default:
+ xudc->setup_state = WAIT_FOR_SETUP;
+ break;
+ }
+}
+
+static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&xudc->lock);
+ ret = xudc->driver->setup(&xudc->gadget, ctrl);
+ spin_lock(&xudc->lock);
+
+ return ret;
+}
+
+static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if (xudc->test_mode_pattern) {
+ xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
+ xudc->test_mode_pattern = 0;
+ }
+}
+
+static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ u32 feature = le16_to_cpu(ctrl->wValue);
+ u32 index = le16_to_cpu(ctrl->wIndex);
+ u32 val, ep;
+ int ret;
+
+ if (le16_to_cpu(ctrl->wLength) != 0)
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (feature) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
+ (xudc->device_state == USB_STATE_DEFAULT))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if (set)
+ val |= PORTPM_RWE;
+ else
+ val &= ~PORTPM_RWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ if ((xudc->device_state != USB_STATE_CONFIGURED) ||
+ (xudc->gadget.speed != USB_SPEED_SUPER))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if ((feature == USB_DEVICE_U1_ENABLE) &&
+ xudc->soc->u1_enable) {
+ if (set)
+ val |= PORTPM_U1E;
+ else
+ val &= ~PORTPM_U1E;
+ }
+
+ if ((feature == USB_DEVICE_U2_ENABLE) &&
+ xudc->soc->u2_enable) {
+ if (set)
+ val |= PORTPM_U2E;
+ else
+ val &= ~PORTPM_U2E;
+ }
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (xudc->gadget.speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ if (!set)
+ return -EINVAL;
+
+ xudc->test_mode_pattern = index >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->device_state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ switch (feature) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (set) {
+ val = xudc_readl(xudc, PORTPM);
+
+ if (index & USB_INTRF_FUNC_SUSPEND_RW)
+ val |= PORTPM_FRWE;
+ else
+ val &= ~PORTPM_FRWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ return tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) ||
+ ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (index != 0)))
+ return -EINVAL;
+
+ ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
+}
+
+static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep_context *ep_ctx;
+ u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
+ u16 status = 0;
+
+ if (!(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 2))
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ val = xudc_readl(xudc, PORTPM);
+
+ if (xudc->selfpowered)
+ status |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (val & PORTPM_RWE))
+ status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (val & PORTPM_U1E)
+ status |= BIT(USB_DEV_STAT_U1_ENABLED);
+ if (val & PORTPM_U2E)
+ status |= BIT(USB_DEV_STAT_U2_ENABLED);
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ status |= USB_INTRF_STAT_FUNC_RW_CAP;
+ val = xudc_readl(xudc, PORTPM);
+ if (val & PORTPM_FRWE)
+ status |= USB_INTRF_STAT_FUNC_RW;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+ ep_ctx = &xudc->ep_context[ep];
+
+ if ((xudc->device_state != USB_STATE_CONFIGURED) &&
+ ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
+ return -EINVAL;
+
+ if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
+ return -EINVAL;
+
+ if (xudc_readl(xudc, EP_HALT) & BIT(ep))
+ status |= BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xudc->status_buf = cpu_to_le16(status);
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
+ sizeof(xudc->status_buf),
+ no_op_complete);
+}
+
+static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* No action is needed for the received SEL values. */
+}
+
+static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 6))
+ return -EINVAL;
+
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
+ sizeof(xudc->sel_timing),
+ set_sel_complete);
+}
+
+static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* No action is needed for the isoch delay value. */
+}
+
+static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ u32 delay = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ xudc->isoch_delay = delay;
+
+ return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
+}
+
+static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) &&
+ (xudc->dev_addr != 0)) {
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (xudc->dev_addr == 0)) {
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+}
+
+static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u32 val, addr = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ dev_dbg(xudc->dev, "set address: %u\n", addr);
+
+ xudc->dev_addr = addr;
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_DEVADDR_MASK);
+ val |= CTRL_DEVADDR(addr);
+ xudc_writel(xudc, val, CTRL);
+
+ ep_ctx_write_devaddr(ep0->context, addr);
+
+ return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
+}
+
+static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
+ ret = tegra_xudc_ep0_get_status(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = tegra_xudc_ep0_set_address(xudc, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
+ ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
+ ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
+ ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ /*
+ * In theory we need to clear the RUN bit before the status
+ * stage of the deconfigure request is sent, but that seems to
+ * cause problems. Instead, clear RUN once all endpoints have
+ * been disabled.
+ */
+ fallthrough;
+ default:
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl,
+ u16 seq_num)
+{
+ int ret;
+
+ xudc->setup_seq_num = seq_num;
+
+ /* Ensure EP0 is unhalted. */
+ ep_unhalt(xudc, 0);
+
+ /*
+ * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
+ * are invalid. Halt EP0 until we get a valid packet.
+ */
+ if (xudc->soc->invalid_seq_num &&
+ (seq_num == 0xfffe || seq_num == 0xffff)) {
+ dev_warn(xudc->dev, "invalid sequence number detected\n");
+ ep_halt(xudc, 0);
+ return;
+ }
+
+ if (ctrl->wLength)
+ xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
+ DATA_STAGE_XFER : DATA_STAGE_RECV;
+ else
+ xudc->setup_state = STATUS_STAGE_XFER;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
+ else
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+
+ if (ret < 0) {
+ dev_warn(xudc->dev, "setup request failed: %d\n", ret);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ ep_halt(xudc, 0);
+ }
+}
+
+static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
+ u16 seq_num = trb_read_seq_num(event);
+
+ if (xudc->setup_state != WAIT_FOR_SETUP) {
+ /*
+ * The controller is in the process of handling another
+ * setup request. Queue subsequent requests and handle
+ * the last one once the controller reports a sequence
+ * number error.
+ */
+ memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
+ xudc->setup_packet.seq_num = seq_num;
+ xudc->queued_setup_packet = true;
+ } else {
+ tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
+ }
+}
+
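+/*
+ * Find the request that owns the given TRB. Requests are queued in
+ * order, so give up at the first request with no TRBs in the ring.
+ */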
+static struct tegra_xudc_request *
+trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_request *req;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (!req->trbs_queued)
+ break;
+
+ if (trb_in_request(ep, req, trb))
+ return req;
+ }
+
+ return NULL;
+}
+
+static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
+ struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *event)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_trb *trb;
+ bool short_packet;
+
+ short_packet = (trb_read_cmpl_code(event) ==
+ TRB_CMPL_CODE_SHORT_PACKET);
+
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ req = trb_to_request(ep, trb);
+
+ /*
+ * TDs are complete on short packet or when the completed TRB is the
+ * last TRB in the TD (the CHAIN bit is unset).
+ */
+ if (req && (short_packet || (!trb_read_chain(trb) &&
+ (req->trbs_needed == req->trbs_queued)))) {
+ struct tegra_xudc_trb *last = req->last_trb;
+ unsigned int residual;
+
+ residual = trb_read_transfer_len(event);
+ req->usb_req.actual = req->usb_req.length - residual;
+
+ dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
+ req->usb_req.actual, req->usb_req.length);
+
+ tegra_xudc_req_done(ep, req, 0);
+
+ if (ep->desc && usb_endpoint_xfer_control(ep->desc))
+ tegra_xudc_ep0_req_done(xudc);
+
+ /*
+ * Advance the dequeue pointer past the end of the current TD
+ * on short packet completion.
+ */
+ if (short_packet) {
+ ep->deq_ptr = (last - ep->transfer_ring) + 1;
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ }
+ } else if (!req) {
+ dev_warn(xudc->dev, "transfer event on dequeued request\n");
+ }
+
+ if (ep->desc)
+ tegra_xudc_ep_kick_queue(ep);
+}
+
+static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ unsigned int ep_index = trb_read_endpoint_id(event);
+ struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
+ struct tegra_xudc_trb *trb;
+ u16 comp_code;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
+ ep_index);
+ return;
+ }
+
+ /* Update transfer ring dequeue pointer. */
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ comp_code = trb_read_cmpl_code(event);
+ if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
+ ep->deq_ptr = (trb - ep->transfer_ring) + 1;
+
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ ep->ring_full = false;
+ }
+
+ switch (comp_code) {
+ case TRB_CMPL_CODE_SUCCESS:
+ case TRB_CMPL_CODE_SHORT_PACKET:
+ tegra_xudc_handle_transfer_completion(xudc, ep, event);
+ break;
+ case TRB_CMPL_CODE_HOST_REJECTED:
+ dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
+
+ ep->stream_rejected = true;
+ break;
+ case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
+ dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
+
+ if (ep->stream_rejected) {
+ ep->stream_rejected = false;
+ /*
+ * An EP is stopped when a stream is rejected. Wait
+ * for the EP to report that it is stopped and then
+ * un-stop it.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ }
+ tegra_xudc_ep_ring_doorbell(ep);
+ break;
+ case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
+ /*
+ * Wait for the EP to be stopped so the controller stops
+ * processing doorbells.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ ep->enq_ptr = ep->deq_ptr;
+ tegra_xudc_ep_nuke(ep, -EIO);
+ fallthrough;
+ case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
+ case TRB_CMPL_CODE_CTRL_DIR_ERR:
+ case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
+ case TRB_CMPL_CODE_RING_UNDERRUN:
+ case TRB_CMPL_CODE_RING_OVERRUN:
+ case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
+ case TRB_CMPL_CODE_USB_TRANS_ERR:
+ case TRB_CMPL_CODE_TRB_ERR:
+ dev_err(xudc->dev, "completion error %#x on EP %u\n",
+ comp_code, ep_index);
+
+ ep_halt(xudc, ep_index);
+ break;
+ case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
+ dev_info(xudc->dev, "sequence number error\n");
+
+ /*
+ * Kill any queued control request and skip to the last
+ * setup packet we received.
+ */
+ tegra_xudc_ep_nuke(ep, -EINVAL);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ if (!xudc->queued_setup_packet)
+ break;
+
+ tegra_xudc_handle_ep0_setup_packet(xudc,
+ &xudc->setup_packet.ctrl_req,
+ xudc->setup_packet.seq_num);
+ xudc->queued_setup_packet = false;
+ break;
+ case TRB_CMPL_CODE_STOPPED:
+ dev_dbg(xudc->dev, "stop completion code on EP %u\n",
+ ep_index);
+
+ /* Disconnected. */
+ tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
+ break;
+ default:
+ dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
+ comp_code, ep_index);
+ break;
+ }
+}
+
+static void tegra_xudc_reset(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ dma_addr_t deq_ptr;
+ unsigned int i;
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ep_unpause_all(xudc);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
+
+ /*
+ * Reset sequence number and dequeue pointer to flush the transfer
+ * ring.
+ */
+ ep0->deq_ptr = ep0->enq_ptr;
+ ep0->ring_full = false;
+
+ xudc->setup_seq_num = 0;
+ xudc->queued_setup_packet = false;
+
+ ep_ctx_write_rsvd(ep0->context, 0);
+ ep_ctx_write_partial_td(ep0->context, 0);
+ ep_ctx_write_splitxstate(ep0->context, 0);
+ ep_ctx_write_seq_num(ep0->context, 0);
+
+ deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
+
+ if (!dma_mapping_error(xudc->dev, deq_ptr)) {
+ ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
+ ep_ctx_write_dcs(ep0->context, ep0->pcs);
+ }
+
+ ep_unhalt_all(xudc);
+ ep_reload(xudc, 0);
+ ep_unpause(xudc, 0);
+}
+
+static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u16 maxpacket;
+ u32 val;
+
+ val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
+ switch (val) {
+ case PORTSC_PS_LS:
+ xudc->gadget.speed = USB_SPEED_LOW;
+ break;
+ case PORTSC_PS_FS:
+ xudc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSC_PS_HS:
+ xudc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case PORTSC_PS_SS:
+ xudc->gadget.speed = USB_SPEED_SUPER;
+ break;
+ default:
+ xudc->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ maxpacket = 512;
+ else
+ maxpacket = 64;
+
+ ep_ctx_write_max_packet_size(ep0->context, maxpacket);
+ tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
+ usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
+
+ if (!xudc->soc->u1_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U1TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (!xudc->soc->u2_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U2TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (xudc->gadget.speed <= USB_SPEED_HIGH) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_L1S_MASK);
+ if (xudc->soc->lpm_enable)
+ val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
+ else
+ val |= PORTPM_L1S(PORTPM_L1S_NYET);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
+}
+
+static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver && xudc->driver->disconnect) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->disconnect(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+
+ xudc->device_state = USB_STATE_NOTATTACHED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ complete(&xudc->disconnect_complete);
+}
+
+static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver) {
+ spin_unlock(&xudc->lock);
+ usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
+ spin_lock(&xudc->lock);
+ }
+
+ tegra_xudc_port_connect(xudc);
+}
+
+static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port suspend\n");
+
+ xudc->resume_state = xudc->device_state;
+ xudc->device_state = USB_STATE_SUSPENDED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ if (xudc->driver->suspend) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->suspend(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port resume\n");
+
+ tegra_xudc_resume_device_state(xudc);
+
+ if (xudc->driver->resume) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->resume(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~PORTSC_CHANGE_MASK;
+ val |= flag;
+ xudc_writel(xudc, val, PORTSC);
+}
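The masking in clear_port_change() matters because the PORTSC change bits are write-1-to-clear: writing back a raw readback would acknowledge every pending change at once, not just the one being handled. A sketch of the hazard:

	u32 val = xudc_readl(xudc, PORTSC);

	/* WRONG: every change bit that reads back as 1 would be cleared. */
	/* xudc_writel(xudc, val, PORTSC); */

	/* RIGHT: drop all change bits, set only the one to acknowledge. */
	val &= ~PORTSC_CHANGE_MASK;
	val |= PORTSC_PRC;
	xudc_writel(xudc, val, PORTSC);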
+
+static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ u32 portsc, porthalt;
+
+ porthalt = xudc_readl(xudc, PORTHALT);
+ if ((porthalt & PORTHALT_STCHG_REQ) &&
+ (porthalt & PORTHALT_HALT_LTSSM)) {
+ dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
+ porthalt &= ~PORTHALT_HALT_LTSSM;
+ xudc_writel(xudc, porthalt, PORTHALT);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+#define TOGGLE_VBUS_WAIT_MS 100
+ if (xudc->soc->port_reset_quirk) {
+ schedule_delayed_work(&xudc->port_reset_war_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_for_sec_prc = 1;
+ }
+ }
+
+ if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+ tegra_xudc_port_reset(xudc);
+ cancel_delayed_work(&xudc->port_reset_war_work);
+ xudc->wait_for_sec_prc = 0;
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_WRC) {
+ dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
+ if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
+ tegra_xudc_port_reset(xudc);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_CSC) {
+ dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CSC);
+
+ if (portsc & PORTSC_CCS)
+ tegra_xudc_port_connect(xudc);
+ else
+ tegra_xudc_port_disconnect(xudc);
+
+ if (xudc->wait_csc) {
+ cancel_delayed_work(&xudc->plc_reset_work);
+ xudc->wait_csc = false;
+ }
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_PLC) {
+ u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
+
+ dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PLC);
+ switch (pls) {
+ case PORTSC_PLS_U3:
+ tegra_xudc_port_suspend(xudc);
+ break;
+ case PORTSC_PLS_U0:
+ if (xudc->gadget.speed < USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_RESUME:
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_INACTIVE:
+ schedule_delayed_work(&xudc->plc_reset_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_csc = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (portsc & PORTSC_CEC) {
+ dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CEC);
+ }
+
+ dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
+}
+
+static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
+ (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
+ __tegra_xudc_handle_port_status(xudc);
+}
+
+static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ u32 type = trb_read_type(event);
+
+ dump_trb(xudc, "EVENT", event);
+
+ switch (type) {
+ case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
+ tegra_xudc_handle_port_status(xudc);
+ break;
+ case TRB_TYPE_TRANSFER_EVENT:
+ tegra_xudc_handle_transfer_event(xudc, event);
+ break;
+ case TRB_TYPE_SETUP_PACKET_EVENT:
+ tegra_xudc_handle_ep0_event(xudc, event);
+ break;
+ default:
+ dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
+ break;
+ }
+}
+
+static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_trb *event;
+ dma_addr_t erdp;
+
+ while (true) {
+ event = xudc->event_ring[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr;
+
+ if (trb_read_cycle(event) != xudc->ccs)
+ break;
+
+ tegra_xudc_handle_event(xudc, event);
+
+ xudc->event_ring_deq_ptr++;
+ if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
+ xudc->event_ring_deq_ptr = 0;
+ xudc->event_ring_index++;
+ }
+
+ if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
+ xudc->event_ring_index = 0;
+ xudc->ccs = !xudc->ccs;
+ }
+ }
+
+ erdp = xudc->event_ring_phys[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr * sizeof(*event);
+
+ xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
+ xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
+}
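The loop implements the familiar consumer-cycle-state protocol: an event TRB is valid only while its cycle bit matches the driver's ccs, and ccs flips each time the consumer wraps the last ring segment, so stale TRBs from the previous lap fail the comparison. The generic shape, as a simplified sketch (seg/deq/ccs and the helpers are illustrative names):

	for (;;) {
		struct trb *evt = &ring[seg][deq];

		if (trb_cycle(evt) != ccs)
			break;		/* producer has not written here yet */

		handle(evt);

		if (++deq == RING_SIZE) {
			deq = 0;
			if (++seg == NR_SEGS) {
				seg = 0;
				ccs = !ccs;	/* invalidate the old lap */
			}
		}
	}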
+
+static irqreturn_t tegra_xudc_irq(int irq, void *data)
+{
+ struct tegra_xudc *xudc = data;
+ unsigned long flags;
+ u32 val;
+
+ val = xudc_readl(xudc, ST);
+ if (!(val & ST_IP))
+ return IRQ_NONE;
+ xudc_writel(xudc, ST_IP, ST);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ tegra_xudc_process_event_ring(xudc);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ ep->xudc = xudc;
+ ep->index = index;
+ ep->context = &xudc->ep_context[index];
+ INIT_LIST_HEAD(&ep->queue);
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return 0;
+
+ ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
+ GFP_KERNEL,
+ &ep->transfer_ring_phys);
+ if (!ep->transfer_ring)
+ return -ENOMEM;
+
+ if (index) {
+ snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
+ (index % 2 == 0) ? "out" : "in");
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 16;
+ ep->usb_ep.ops = &tegra_xudc_ep_ops;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
+ if (index & 1)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+ list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
+ } else {
+ strscpy(ep->name, "ep0", 3);
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
+ ep->usb_ep.ops = &tegra_xudc_ep0_ops;
+ ep->usb_ep.caps.type_control = true;
+ ep->usb_ep.caps.dir_in = true;
+ ep->usb_ep.caps.dir_out = true;
+ }
+
+ return 0;
+}
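Endpoint contexts come in pairs: even indices are OUT endpoints, odd indices are IN, and index / 2 is the USB endpoint number, which is what the snprintf() above encodes. Spelled out as a hypothetical helper:

	/* Mapping used above: even index = OUT, odd = IN, number = index / 2. */
	static void ep_index_to_name(unsigned int index, char *buf, size_t len)
	{
		if (index == 0)
			snprintf(buf, len, "ep0");	/* index 1 is unused */
		else
			snprintf(buf, len, "ep%u%s", index / 2,
				 (index % 2) ? "in" : "out");
	}

	/* e.g. index 2 -> "ep1out", index 3 -> "ep1in", index 8 -> "ep4out" */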
+
+static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return;
+
+ dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
+ ep->transfer_ring_phys);
+}
+
+static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
+{
+ struct usb_request *req;
+ unsigned int i;
+ int err;
+
+ xudc->ep_context =
+ dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
+ sizeof(*xudc->ep_context),
+ &xudc->ep_context_phys, GFP_KERNEL);
+ if (!xudc->ep_context)
+ return -ENOMEM;
+
+ xudc->transfer_ring_pool =
+ dmam_pool_create(dev_name(xudc->dev), xudc->dev,
+ XUDC_TRANSFER_RING_SIZE *
+ sizeof(struct tegra_xudc_trb),
+ sizeof(struct tegra_xudc_trb), 0);
+ if (!xudc->transfer_ring_pool) {
+ err = -ENOMEM;
+ goto free_ep_context;
+ }
+
+ INIT_LIST_HEAD(&xudc->gadget.ep_list);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ err = tegra_xudc_alloc_ep(xudc, i);
+ if (err < 0)
+ goto free_eps;
+ }
+
+ req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ goto free_eps;
+ }
+ xudc->ep0_req = to_xudc_req(req);
+
+ return 0;
+
+free_eps:
+ for (; i > 0; i--)
+ tegra_xudc_free_ep(xudc, i - 1);
+free_ep_context:
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+ return err;
+}
+
+static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
+{
+ xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
+ xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
+}
+
+static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
+ &xudc->ep0_req->usb_req);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_free_ep(xudc, i);
+
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+}
+
+static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ xudc->event_ring[i] =
+ dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ &xudc->event_ring_phys[i],
+ GFP_KERNEL);
+ if (!xudc->event_ring[i])
+ goto free_dma;
+ }
+
+ return 0;
+
+free_dma:
+ for (; i > 0; i--) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i - 1]),
+ xudc->event_ring[i - 1],
+ xudc->event_ring_phys[i - 1]);
+ }
+ return -ENOMEM;
+}
+
+static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]));
+
+ val = xudc_readl(xudc, ERSTSZ);
+ val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
+ val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
+ xudc_writel(xudc, val, ERSTSZ);
+
+ xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBALO(i));
+ xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBAHI(i));
+ }
+
+ val = lower_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPLO);
+ val |= EREPLO_ECS;
+ xudc_writel(xudc, val, EREPLO);
+
+ val = upper_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPHI);
+ xudc_writel(xudc, val, EREPHI);
+
+ xudc->ccs = true;
+ xudc->event_ring_index = 0;
+ xudc->event_ring_deq_ptr = 0;
+}
+
+static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ xudc->event_ring[i],
+ xudc->event_ring_phys[i]);
+ }
+}
+
+static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ if (xudc->soc->has_ipfs) {
+ val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
+ val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
+ ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
+ usleep_range(10, 15);
+ }
+
+ /* Enable bus master */
+ val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
+ XUSB_DEV_CFG_1_BUS_MASTER_EN;
+ fpci_writel(xudc, val, XUSB_DEV_CFG_1);
+
+ /* Program BAR0 space */
+ val = fpci_readl(xudc, XUSB_DEV_CFG_4);
+ val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+ val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+
+ fpci_writel(xudc, val, XUSB_DEV_CFG_4);
+ fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
+
+ usleep_range(100, 200);
+
+ if (xudc->soc->has_ipfs) {
+ /* Enable interrupt assertion */
+ val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
+ val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
+ ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
+ }
+}
+
+static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+{
+ u32 val, imod;
+
+ if (xudc->soc->has_ipfs) {
+ val = xudc_readl(xudc, BLCG);
+ val |= BLCG_ALL;
+ val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
+ BLCG_COREPLL_PWRDN);
+ val |= BLCG_IOPLL_0_PWRDN;
+ val |= BLCG_IOPLL_1_PWRDN;
+ val |= BLCG_IOPLL_2_PWRDN;
+
+ xudc_writel(xudc, val, BLCG);
+ }
+
+ if (xudc->soc->port_speed_quirk)
+ tegra_xudc_limit_port_speed(xudc);
+
+ /* Set a reasonable U3 exit timer value. */
+ val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
+ val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
+ val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
+ xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
+
+ /* Default ping LFPS tBurst is too large. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT0);
+ val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
+ val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
+ xudc_writel(xudc, val, SSPX_CORE_CNT0);
+
+ /* Default tPortConfiguration timeout is too small. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT30);
+ val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
+ val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
+ xudc_writel(xudc, val, SSPX_CORE_CNT30);
+
+ if (xudc->soc->lpm_enable) {
+ /* Set L1 resume duration to 95 us. */
+ val = xudc_readl(xudc, HSFSPI_COUNT13);
+ val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
+ val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
+ xudc_writel(xudc, val, HSFSPI_COUNT13);
+ }
+
+ /*
+ * Compliance suite appears to be violating polling LFPS tBurst max
+ * of 1.4us. Send 1.45us instead.
+ */
+ val = xudc_readl(xudc, SSPX_CORE_CNT32);
+ val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
+ val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT32);
+
+ /* Direct HS/FS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Direct SS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_SS_PI);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Restore port instance. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /*
+ * Enable INFINITE_SS_RETRY to prevent device from entering
+ * Disabled.Error when attached to buggy SuperSpeed hubs.
+ */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val |= CFG_DEV_FE_INFINITE_SS_RETRY;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /* Set interrupt moderation. */
+ imod = XUDC_INTERRUPT_MODERATION_US * 4;
+ val = xudc_readl(xudc, RT_IMOD);
+ val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
+ val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
+ xudc_writel(xudc, val, RT_IMOD);
+
+ /* increase SSPI transaction timeout from 32us to 512us */
+ val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
+ val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
+ val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
+ xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
+}
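Nearly every tuning write above follows the same read-modify-write shape. A hypothetical helper capturing the pattern (not part of the driver) makes the structure explicit:

	static void xudc_rmw(struct tegra_xudc *xudc, u32 reg, u32 mask, u32 bits)
	{
		u32 val = xudc_readl(xudc, reg);

		val &= ~mask;
		val |= bits & mask;
		xudc_writel(xudc, val, reg);
	}

	/* The SSPX_CORE_CNT0 update, for example, would collapse to: */
	/* xudc_rmw(xudc, SSPX_CORE_CNT0, SSPX_CORE_CNT0_PING_TBURST_MASK,
		    SSPX_CORE_CNT0_PING_TBURST(0xa)); */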
+
+static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+{
+ int err = 0, usb3;
+ unsigned int i;
+
+ xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->utmi_phy), GFP_KERNEL);
+ if (!xudc->utmi_phy)
+ return -ENOMEM;
+
+ xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->usb3_phy), GFP_KERNEL);
+ if (!xudc->usb3_phy)
+ return -ENOMEM;
+
+ xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->usbphy), GFP_KERNEL);
+ if (!xudc->usbphy)
+ return -ENOMEM;
+
+ xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ char phy_name[] = "usb.-.";
+
+ /* Get USB2 phy */
+ snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
+ xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->utmi_phy[i])) {
+ err = PTR_ERR(xudc->utmi_phy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get usb2-%d PHY\n", i);
+ goto clean_up;
+ } else if (xudc->utmi_phy[i]) {
+ /* Get usb-phy, if utmi phy is available */
+ xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
+ xudc->utmi_phy[i]->dev.of_node,
+ &xudc->vbus_nb);
+ if (IS_ERR(xudc->usbphy[i])) {
+ err = PTR_ERR(xudc->usbphy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get usbphy-%d\n", i);
+ goto clean_up;
+ }
+ } else if (!xudc->utmi_phy[i]) {
+ /* if utmi phy is not available, ignore USB3 phy get */
+ continue;
+ }
+
+ /* Get USB3 phy */
+ usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+ if (usb3 < 0)
+ continue;
+
+ snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
+ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->usb3_phy[i])) {
+ err = PTR_ERR(xudc->usb3_phy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get usb3-%d PHY\n", usb3);
+ goto clean_up;
+ } else if (xudc->usb3_phy[i])
+ dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
+ }
+
+ return err;
+
+clean_up:
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ xudc->usb3_phy[i] = NULL;
+ xudc->utmi_phy[i] = NULL;
+ xudc->usbphy[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ phy_exit(xudc->usb3_phy[i]);
+ phy_exit(xudc->utmi_phy[i]);
+ }
+}
+
+static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+{
+ int err;
+ unsigned int i;
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ err = phy_init(xudc->utmi_phy[i]);
+ if (err < 0) {
+ dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
+ goto exit_phy;
+ }
+
+ err = phy_init(xudc->usb3_phy[i]);
+ if (err < 0) {
+ dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
+ goto exit_phy;
+ }
+ }
+ return 0;
+
+exit_phy:
+ tegra_xudc_phy_exit(xudc);
+ return err;
+}
+
+static const char * const tegra210_xudc_supply_names[] = {
+ "hvdd-usb",
+ "avddio-usb",
+};
+
+static const char * const tegra210_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "hs_src",
+ "fs_src",
+};
+
+static const char * const tegra186_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "fs_src",
+};
+
+static struct tegra_xudc_soc tegra210_xudc_soc_data = {
+ .supply_names = tegra210_xudc_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
+ .clock_names = tegra210_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = false,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = true,
+ .pls_quirk = true,
+ .port_reset_quirk = true,
+ .port_speed_quirk = false,
+ .has_ipfs = true,
+};
+
+static struct tegra_xudc_soc tegra186_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .port_speed_quirk = false,
+ .has_ipfs = false,
+};
+
+static struct tegra_xudc_soc tegra194_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = true,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .port_speed_quirk = true,
+ .has_ipfs = false,
+};
+
+static const struct of_device_id tegra_xudc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-xudc",
+ .data = &tegra210_xudc_soc_data
+ },
+ {
+ .compatible = "nvidia,tegra186-xudc",
+ .data = &tegra186_xudc_soc_data
+ },
+ {
+ .compatible = "nvidia,tegra194-xudc",
+ .data = &tegra194_xudc_soc_data
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
+
+static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
+{
+ if (xudc->genpd_dl_ss)
+ device_link_del(xudc->genpd_dl_ss);
+ if (xudc->genpd_dl_device)
+ device_link_del(xudc->genpd_dl_device);
+ if (xudc->genpd_dev_ss)
+ dev_pm_domain_detach(xudc->genpd_dev_ss, true);
+ if (xudc->genpd_dev_device)
+ dev_pm_domain_detach(xudc->genpd_dev_device, true);
+}
+
+static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
+{
+ struct device *dev = xudc->dev;
+ int err;
+
+ xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
+ if (IS_ERR(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device);
+ dev_err(dev, "failed to get device power domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
+ if (IS_ERR(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss);
+ dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_device) {
+ dev_err(dev, "failed to add USB device link\n");
+ return -ENODEV;
+ }
+
+ xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_ss) {
+ dev_err(dev, "failed to add SuperSpeed device link\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_probe(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
+ if (!xudc)
+ return -ENOMEM;
+
+ xudc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xudc);
+
+ xudc->soc = of_device_get_match_data(&pdev->dev);
+ if (!xudc->soc)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ xudc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->base))
+ return PTR_ERR(xudc->base);
+ xudc->phys_base = res->start;
+
+ xudc->fpci = devm_platform_ioremap_resource_byname(pdev, "fpci");
+ if (IS_ERR(xudc->fpci))
+ return PTR_ERR(xudc->fpci);
+
+ if (xudc->soc->has_ipfs) {
+ xudc->ipfs = devm_platform_ioremap_resource_byname(pdev, "ipfs");
+ if (IS_ERR(xudc->ipfs))
+ return PTR_ERR(xudc->ipfs);
+ }
+
+ xudc->irq = platform_get_irq(pdev, 0);
+ if (xudc->irq < 0)
+ return xudc->irq;
+
+ err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
+ dev_name(&pdev->dev), xudc);
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
+ err);
+ return err;
+ }
+
+ xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks),
+ GFP_KERNEL);
+ if (!xudc->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_clks; i++)
+ xudc->clks[i].id = xudc->soc->clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
+ if (err) {
+ dev_err_probe(xudc->dev, err, "failed to request clocks\n");
+ return err;
+ }
+
+ xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
+ sizeof(*xudc->supplies), GFP_KERNEL);
+ if (!xudc->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_supplies; i++)
+ xudc->supplies[i].supply = xudc->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err) {
+ dev_err_probe(xudc->dev, err, "failed to request regulators\n");
+ return err;
+ }
+
+ xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
+ if (IS_ERR(xudc->padctl))
+ return PTR_ERR(xudc->padctl);
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
+ goto put_padctl;
+ }
+
+ err = tegra_xudc_phy_get(xudc);
+ if (err)
+ goto disable_regulator;
+
+ err = tegra_xudc_powerdomain_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_phy_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_alloc_event_ring(xudc);
+ if (err)
+ goto disable_phy;
+
+ err = tegra_xudc_alloc_eps(xudc);
+ if (err)
+ goto free_event_ring;
+
+ spin_lock_init(&xudc->lock);
+
+ init_completion(&xudc->disconnect_complete);
+
+ INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
+
+ INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
+
+ INIT_DELAYED_WORK(&xudc->port_reset_war_work,
+ tegra_xudc_port_reset_war_work);
+
+ pm_runtime_enable(&pdev->dev);
+
+ xudc->gadget.ops = &tegra_xudc_gadget_ops;
+ xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
+ xudc->gadget.name = "tegra-xudc";
+ xudc->gadget.max_speed = USB_SPEED_SUPER;
+
+ err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
+ goto free_eps;
+ }
+
+ return 0;
+
+free_eps:
+ pm_runtime_disable(&pdev->dev);
+ tegra_xudc_free_eps(xudc);
+free_event_ring:
+ tegra_xudc_free_event_ring(xudc);
+disable_phy:
+ tegra_xudc_phy_exit(xudc);
+put_powerdomains:
+ tegra_xudc_powerdomain_remove(xudc);
+disable_regulator:
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+put_padctl:
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return err;
+}
+
+static int tegra_xudc_remove(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ cancel_delayed_work_sync(&xudc->plc_reset_work);
+ cancel_work_sync(&xudc->usb_role_sw_work);
+
+ usb_del_gadget_udc(&xudc->gadget);
+
+ tegra_xudc_free_eps(xudc);
+ tegra_xudc_free_event_ring(xudc);
+
+ tegra_xudc_powerdomain_remove(xudc);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ phy_power_off(xudc->utmi_phy[i]);
+ phy_power_off(xudc->usb3_phy[i]);
+ }
+
+ tegra_xudc_phy_exit(xudc);
+
+ pm_runtime_disable(xudc->dev);
+ pm_runtime_put(xudc->dev);
+
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+
+ dev_dbg(xudc->dev, "entering ELPG\n");
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ xudc->powergated = true;
+ xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
+ xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
+ xudc_writel(xudc, 0, CTRL);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ dev_dbg(xudc->dev, "entering ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+ int err;
+
+ dev_dbg(xudc->dev, "exiting ELPG\n");
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err < 0)
+ return err;
+
+ err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
+ if (err < 0)
+ return err;
+
+ tegra_xudc_fpci_ipfs_init(xudc);
+
+ tegra_xudc_device_params_init(xudc);
+
+ tegra_xudc_init_event_ring(xudc);
+
+ tegra_xudc_init_eps(xudc);
+
+ xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
+ xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->powergated = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ dev_dbg(xudc->dev, "exiting ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = true;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ flush_work(&xudc->usb_role_sw_work);
+
+ if (!pm_runtime_status_suspended(dev)) {
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
+ tegra_xudc_powergate(xudc);
+ }
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ err = tegra_xudc_unpowergate(xudc);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ schedule_work(&xudc->usb_role_sw_work);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_powergate(xudc);
+}
+
+static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_unpowergate(xudc);
+}
+
+static const struct dev_pm_ops tegra_xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
+ SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
+ tegra_xudc_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra_xudc_driver = {
+ .probe = tegra_xudc_probe,
+ .remove = tegra_xudc_remove,
+ .driver = {
+ .name = "tegra-xudc",
+ .pm = &tegra_xudc_pm_ops,
+ .of_match_table = tegra_xudc_of_match,
+ },
+};
+module_platform_driver(tegra_xudc_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
+MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
new file mode 100644
index 000000000..19e837de2
--- /dev/null
+++ b/drivers/usb/gadget/udc/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace.c - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
new file mode 100644
index 000000000..abdbcb1ba
--- /dev/null
+++ b/drivers/usb/gadget/udc/trace.h
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace.h - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gadget
+
+#if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __UDC_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include <linux/usb/gadget.h>
+
+DECLARE_EVENT_CLASS(udc_log_gadget,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret),
+ TP_STRUCT__entry(
+ __field(enum usb_device_speed, speed)
+ __field(enum usb_device_speed, max_speed)
+ __field(enum usb_device_state, state)
+ __field(unsigned, mA)
+ __field(unsigned, sg_supported)
+ __field(unsigned, is_otg)
+ __field(unsigned, is_a_peripheral)
+ __field(unsigned, b_hnp_enable)
+ __field(unsigned, a_hnp_support)
+ __field(unsigned, hnp_polling_support)
+ __field(unsigned, host_request_flag)
+ __field(unsigned, quirk_ep_out_aligned_size)
+ __field(unsigned, quirk_altset_not_supp)
+ __field(unsigned, quirk_stall_not_supp)
+ __field(unsigned, quirk_zlp_not_supp)
+ __field(unsigned, is_selfpowered)
+ __field(unsigned, deactivated)
+ __field(unsigned, connected)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->speed = g->speed;
+ __entry->max_speed = g->max_speed;
+ __entry->state = g->state;
+ __entry->mA = g->mA;
+ __entry->sg_supported = g->sg_supported;
+ __entry->is_otg = g->is_otg;
+ __entry->is_a_peripheral = g->is_a_peripheral;
+ __entry->b_hnp_enable = g->b_hnp_enable;
+ __entry->a_hnp_support = g->a_hnp_support;
+ __entry->hnp_polling_support = g->hnp_polling_support;
+ __entry->host_request_flag = g->host_request_flag;
+ __entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size;
+ __entry->quirk_altset_not_supp = g->quirk_altset_not_supp;
+ __entry->quirk_stall_not_supp = g->quirk_stall_not_supp;
+ __entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp;
+ __entry->is_selfpowered = g->is_selfpowered;
+ __entry->deactivated = g->deactivated;
+ __entry->connected = g->connected;
+ __entry->ret = ret;
+ ),
+ TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d",
+ __entry->speed, __entry->max_speed, __entry->state, __entry->mA,
+ __entry->sg_supported ? "sg:" : "",
+ __entry->is_otg ? "OTG:" : "",
+ __entry->is_a_peripheral ? "a_peripheral:" : "",
+ __entry->b_hnp_enable ? "b_hnp:" : "",
+ __entry->a_hnp_support ? "a_hnp:" : "",
+ __entry->hnp_polling_support ? "hnp_poll:" : "",
+ __entry->host_request_flag ? "hostreq:" : "",
+ __entry->quirk_ep_out_aligned_size ? "out_aligned:" : "",
+ __entry->quirk_altset_not_supp ? "no_altset:" : "",
+ __entry->quirk_stall_not_supp ? "no_stall:" : "",
+ __entry->quirk_zlp_not_supp ? "no_zlp" : "",
+ __entry->is_selfpowered ? "self-powered:" : "bus-powered:",
+ __entry->deactivated ? "deactivated:" : "activated:",
+ __entry->connected ? "connected" : "disconnected",
+ __entry->ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_connect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_activate,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
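Each DEFINE_EVENT() above generates a trace_<name>() call site for the UDC core to invoke, typically once the operation's return value is known. A sketch of the pattern (the real call sites live in the UDC core; this is illustrative only):

	int usb_gadget_wakeup(struct usb_gadget *gadget)
	{
		int ret = -EOPNOTSUPP;

		if (gadget->ops->wakeup)
			ret = gadget->ops->wakeup(gadget);

		trace_usb_gadget_wakeup(gadget, ret);	/* fires the event */

		return ret;
	}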
+
+DECLARE_EVENT_CLASS(udc_log_ep,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret),
+ TP_STRUCT__entry(
+ __string(name, ep->name)
+ __field(unsigned, maxpacket)
+ __field(unsigned, maxpacket_limit)
+ __field(unsigned, max_streams)
+ __field(unsigned, mult)
+ __field(unsigned, maxburst)
+ __field(u8, address)
+ __field(bool, claimed)
+ __field(bool, enabled)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __assign_str(name, ep->name);
+ __entry->maxpacket = ep->maxpacket;
+ __entry->maxpacket_limit = ep->maxpacket_limit;
+ __entry->max_streams = ep->max_streams;
+ __entry->mult = ep->mult;
+ __entry->maxburst = ep->maxburst;
+ __entry->address = ep->address;
+ __entry->claimed = ep->claimed;
+ __entry->enabled = ep->enabled;
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d",
+ __get_str(name), __entry->maxpacket, __entry->maxpacket_limit,
+ __entry->max_streams, __entry->mult, __entry->maxburst,
+ __entry->address, __entry->claimed ? "claimed:" : "released:",
+ __entry->enabled ? "enabled" : "disabled", ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_enable,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_disable,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_halt,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_req,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret),
+ TP_STRUCT__entry(
+ __string(name, ep->name)
+ __field(unsigned, length)
+ __field(unsigned, actual)
+ __field(unsigned, num_sgs)
+ __field(unsigned, num_mapped_sgs)
+ __field(unsigned, stream_id)
+ __field(unsigned, no_interrupt)
+ __field(unsigned, zero)
+ __field(unsigned, short_not_ok)
+ __field(int, status)
+ __field(int, ret)
+ __field(struct usb_request *, req)
+ ),
+ TP_fast_assign(
+ __assign_str(name, ep->name);
+ __entry->length = req->length;
+ __entry->actual = req->actual;
+ __entry->num_sgs = req->num_sgs;
+ __entry->num_mapped_sgs = req->num_mapped_sgs;
+ __entry->stream_id = req->stream_id;
+ __entry->no_interrupt = req->no_interrupt;
+ __entry->zero = req->zero;
+ __entry->short_not_ok = req->short_not_ok;
+ __entry->status = req->status;
+ __entry->ret = ret;
+ __entry->req = req;
+ ),
+ TP_printk("%s: req %p length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d",
+ __get_str(name), __entry->req, __entry->actual, __entry->length,
+ __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id,
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "i" : "I",
+ __entry->status, __entry->ret
+ )
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_alloc_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_free_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_queue,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_dequeue,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+#endif /* __UDC_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
new file mode 100644
index 000000000..4c7a4f770
--- /dev/null
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -0,0 +1,2271 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Xilinx USB peripheral controller driver
+ *
+ * Copyright (C) 2004 by Thomas Rathbone
+ * Copyright (C) 2005 by HP Labs
+ * Copyright (C) 2005 by David Brownell
+ * Copyright (C) 2010 - 2014 Xilinx, Inc.
+ *
+ * Some parts of this driver code is based on the driver for at91-series
+ * USB peripheral controller (at91_udc.c).
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/prefetch.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* Register offsets for the USB device.*/
+#define XUSB_EP0_CONFIG_OFFSET 0x0000 /* EP0 Config Reg Offset */
+#define XUSB_SETUP_PKT_ADDR_OFFSET 0x0080 /* Setup Packet Address */
+#define XUSB_ADDRESS_OFFSET 0x0100 /* Address Register */
+#define XUSB_CONTROL_OFFSET 0x0104 /* Control Register */
+#define XUSB_STATUS_OFFSET 0x0108 /* Status Register */
+#define XUSB_FRAMENUM_OFFSET 0x010C /* Frame Number Register */
+#define XUSB_IER_OFFSET 0x0110 /* Interrupt Enable Register */
+#define XUSB_BUFFREADY_OFFSET 0x0114 /* Buffer Ready Register */
+#define XUSB_TESTMODE_OFFSET 0x0118 /* Test Mode Register */
+#define XUSB_DMA_RESET_OFFSET 0x0200 /* DMA Soft Reset Register */
+#define XUSB_DMA_CONTROL_OFFSET 0x0204 /* DMA Control Register */
+#define XUSB_DMA_DSAR_ADDR_OFFSET 0x0208 /* DMA source Address Reg */
+#define XUSB_DMA_DDAR_ADDR_OFFSET 0x020C /* DMA destination Addr Reg */
+#define XUSB_DMA_LENGTH_OFFSET 0x0210 /* DMA Length Register */
+#define XUSB_DMA_STATUS_OFFSET 0x0214 /* DMA Status Register */
+
+/* Endpoint Configuration Space offsets */
+#define XUSB_EP_CFGSTATUS_OFFSET 0x00 /* Endpoint Config Status */
+#define XUSB_EP_BUF0COUNT_OFFSET 0x08 /* Buffer 0 Count */
+#define XUSB_EP_BUF1COUNT_OFFSET 0x0C /* Buffer 1 Count */
+
+#define XUSB_CONTROL_USB_READY_MASK 0x80000000 /* USB ready Mask */
+#define XUSB_CONTROL_USB_RMTWAKE_MASK 0x40000000 /* Remote wake up mask */
+
+/* Interrupt register related masks.*/
+#define XUSB_STATUS_GLOBAL_INTR_MASK 0x80000000 /* Global Intr Enable */
+#define XUSB_STATUS_DMADONE_MASK 0x04000000 /* DMA done Mask */
+#define XUSB_STATUS_DMAERR_MASK 0x02000000 /* DMA Error Mask */
+#define XUSB_STATUS_DMABUSY_MASK 0x80000000 /* DMA Busy Mask */
+#define XUSB_STATUS_RESUME_MASK 0x01000000 /* USB Resume Mask */
+#define XUSB_STATUS_RESET_MASK 0x00800000 /* USB Reset Mask */
+#define XUSB_STATUS_SUSPEND_MASK 0x00400000 /* USB Suspend Mask */
+#define XUSB_STATUS_DISCONNECT_MASK 0x00200000 /* USB Disconnect Mask */
+#define XUSB_STATUS_FIFO_BUFF_RDY_MASK 0x00100000 /* FIFO Buff Ready Mask */
+#define XUSB_STATUS_FIFO_BUFF_FREE_MASK 0x00080000 /* FIFO Buff Free Mask */
+#define XUSB_STATUS_SETUP_PACKET_MASK 0x00040000 /* Setup packet received */
+#define XUSB_STATUS_EP1_BUFF2_COMP_MASK 0x00000200 /* EP 1 Buff 2 Processed */
+#define XUSB_STATUS_EP1_BUFF1_COMP_MASK 0x00000002 /* EP 1 Buff 1 Processed */
+#define XUSB_STATUS_EP0_BUFF2_COMP_MASK 0x00000100 /* EP 0 Buff 2 Processed */
+#define XUSB_STATUS_EP0_BUFF1_COMP_MASK 0x00000001 /* EP 0 Buff 1 Processed */
+#define XUSB_STATUS_HIGH_SPEED_MASK 0x00010000 /* USB Speed Mask */
+/* Resume, Reset, Suspend and Disconnect Mask */
+#define XUSB_STATUS_INTR_EVENT_MASK 0x01E00000
+/* Buffers completion Mask */
+#define XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK 0x0000FEFF
+/* Mask for buffer 0 and buffer 1 completion for all Endpoints */
+#define XUSB_STATUS_INTR_BUFF_COMP_SHIFT_MASK 0x00000101
+#define XUSB_STATUS_EP_BUFF2_SHIFT 8 /* EP buffer offset */
+
+/* Endpoint Configuration Status Register */
+#define XUSB_EP_CFG_VALID_MASK 0x80000000 /* Endpoint Valid bit */
+#define XUSB_EP_CFG_STALL_MASK 0x40000000 /* Endpoint Stall bit */
+#define XUSB_EP_CFG_DATA_TOGGLE_MASK 0x08000000 /* Endpoint Data toggle */
+
+/* USB device specific global configuration constants.*/
+#define XUSB_MAX_ENDPOINTS 8 /* Maximum End Points */
+#define XUSB_EP_NUMBER_ZERO 0 /* End point Zero */
+/* DPRAM is the source address for DMA transfer */
+#define XUSB_DMA_READ_FROM_DPRAM 0x80000000
+#define XUSB_DMA_DMASR_BUSY 0x80000000 /* DMA busy */
+#define XUSB_DMA_DMASR_ERROR 0x40000000 /* DMA Error */
+/*
+ * When this bit is set, the DMA buffer ready bit is set by hardware upon
+ * DMA transfer completion.
+ */
+#define XUSB_DMA_BRR_CTRL 0x40000000 /* DMA bufready ctrl bit */
+/* Phase States */
+#define SETUP_PHASE 0x0000 /* Setup Phase */
+#define DATA_PHASE 0x0001 /* Data Phase */
+#define STATUS_PHASE 0x0002 /* Status Phase */
+
+#define EP0_MAX_PACKET 64 /* Endpoint 0 maximum packet length */
+#define STATUSBUFF_SIZE 2 /* Buffer size for GET_STATUS command */
+#define EPNAME_SIZE 4 /* Buffer size for endpoint name */
+
+/* container_of helper macros */
+#define to_udc(g) container_of((g), struct xusb_udc, gadget)
+#define to_xusb_ep(ep) container_of((ep), struct xusb_ep, ep_usb)
+#define to_xusb_req(req) container_of((req), struct xusb_req, usb_req)
+
+/**
+ * struct xusb_req - Xilinx USB device request structure
+ * @usb_req: Linux usb request structure
+ * @queue: usb device request queue
+ * @ep: pointer to the owning xusb_ep structure
+ */
+struct xusb_req {
+ struct usb_request usb_req;
+ struct list_head queue;
+ struct xusb_ep *ep;
+};
+
+/**
+ * struct xusb_ep - USB end point structure.
+ * @ep_usb: usb endpoint instance
+ * @queue: endpoint message queue
+ * @udc: xilinx usb peripheral driver instance pointer
+ * @desc: pointer to the usb endpoint descriptor
+ * @rambase: the endpoint buffer address
+ * @offset: the endpoint register offset value
+ * @name: name of the endpoint
+ * @epnumber: endpoint number
+ * @maxpacket: maximum packet size the endpoint can store
+ * @buffer0count: the size of the packet received in the first buffer
+ * @buffer1count: the size of the packet received in the second buffer
+ * @curbufnum: current buffer of endpoint that will be processed next
+ * @buffer0ready: the busy state of first buffer
+ * @buffer1ready: the busy state of second buffer
+ * @is_in: endpoint direction (IN or OUT)
+ * @is_iso: endpoint type (isochronous or non-isochronous)
+ */
+struct xusb_ep {
+ struct usb_ep ep_usb;
+ struct list_head queue;
+ struct xusb_udc *udc;
+ const struct usb_endpoint_descriptor *desc;
+ u32 rambase;
+ u32 offset;
+ char name[4];
+ u16 epnumber;
+ u16 maxpacket;
+ u16 buffer0count;
+ u16 buffer1count;
+ u8 curbufnum;
+ bool buffer0ready;
+ bool buffer1ready;
+ bool is_in;
+ bool is_iso;
+};
+
+/**
+ * struct xusb_udc - USB peripheral driver structure
+ * @gadget: USB gadget driver instance
+ * @ep: an array of endpoint structures
+ * @driver: pointer to the usb gadget driver instance
+ * @setup: usb_ctrlrequest structure for control requests
+ * @req: pointer to dummy request for get status command
+ * @dev: pointer to device structure in gadget
+ * @usb_state: device in suspended state or not
+ * @remote_wkp: remote wakeup enabled by host
+ * @setupseqtx: tx status
+ * @setupseqrx: rx status
+ * @addr: the usb device base address
+ * @lock: instance of spinlock
+ * @dma_enabled: flag indicating whether the dma is included in the system
+ * @clk: pointer to struct clk
+ * @read_fn: function pointer to read device registers
+ * @write_fn: function pointer to write to device registers
+ */
+struct xusb_udc {
+ struct usb_gadget gadget;
+ struct xusb_ep ep[8];
+ struct usb_gadget_driver *driver;
+ struct usb_ctrlrequest setup;
+ struct xusb_req *req;
+ struct device *dev;
+ u32 usb_state;
+ u32 remote_wkp;
+ u32 setupseqtx;
+ u32 setupseqrx;
+ void __iomem *addr;
+ spinlock_t lock;
+ bool dma_enabled;
+ struct clk *clk;
+
+ unsigned int (*read_fn)(void __iomem *);
+ void (*write_fn)(void __iomem *, u32, u32);
+};
+
+/* Endpoint buffer start addresses in the core */
+static u32 rambase[8] = { 0x22, 0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500,
+ 0x1600 };
+
+static const char driver_name[] = "xilinx-udc";
+static const char ep0name[] = "ep0";
+
+/* Control endpoint configuration. */
+static const struct usb_endpoint_descriptor config_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(EP0_MAX_PACKET),
+};
+
+/**
+ * xudc_write32 - little endian write to device registers
+ * @addr: base addr of device registers
+ * @offset: register offset
+ * @val: data to be written
+ */
+static void xudc_write32(void __iomem *addr, u32 offset, u32 val)
+{
+ iowrite32(val, addr + offset);
+}
+
+/**
+ * xudc_read32 - little endian read from device registers
+ * @addr: addr of device register
+ * Return: value at addr
+ */
+static unsigned int xudc_read32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+/**
+ * xudc_write32_be - big endian write to device registers
+ * @addr: base addr of device registers
+ * @offset: register offset
+ * @val: data to be written
+ */
+static void xudc_write32_be(void __iomem *addr, u32 offset, u32 val)
+{
+ iowrite32be(val, addr + offset);
+}
+
+/**
+ * xudc_read32_be - big endian read from device registers
+ * @addr: addr of device register
+ * Return: value at addr
+ */
+static unsigned int xudc_read32_be(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+/**
+ * xudc_wrstatus - Sets up the usb device status stages.
+ * @udc: pointer to the usb device controller structure.
+ */
+static void xudc_wrstatus(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO];
+ u32 epcfgreg;
+
+ epcfgreg = udc->read_fn(udc->addr + ep0->offset)|
+ XUSB_EP_CFG_DATA_TOGGLE_MASK;
+ udc->write_fn(udc->addr, ep0->offset, epcfgreg);
+ udc->write_fn(udc->addr, ep0->offset + XUSB_EP_BUF0COUNT_OFFSET, 0);
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+}
+
+/**
+ * xudc_epconfig - Configures the given endpoint.
+ * @ep: pointer to the usb device endpoint structure.
+ * @udc: pointer to the usb peripheral controller structure.
+ *
+ * This function configures a specific endpoint with the given configuration
+ * data.
+ */
+static void xudc_epconfig(struct xusb_ep *ep, struct xusb_udc *udc)
+{
+ u32 epcfgreg;
+
+ /*
+ * Configure the end point direction, type, Max Packet Size and the
+ * EP buffer location.
+ */
+ epcfgreg = ((ep->is_in << 29) | (ep->is_iso << 28) |
+ (ep->ep_usb.maxpacket << 15) | (ep->rambase));
+ udc->write_fn(udc->addr, ep->offset, epcfgreg);
+
+ /* Set the Buffer count and the Buffer ready bits.*/
+ udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF0COUNT_OFFSET,
+ ep->buffer0count);
+ udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF1COUNT_OFFSET,
+ ep->buffer1count);
+ if (ep->buffer0ready)
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
+ 1 << ep->epnumber);
+ if (ep->buffer1ready)
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
+ 1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT));
+}
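The configuration word written by xudc_epconfig() packs all endpoint attributes into fixed bit positions. Hypothetical macros inferred from the shifts above (not part of the driver) spell out the layout:

	#define XUSB_EP_CFG_DIR_IN	BIT(29)		/* 1 = IN endpoint */
	#define XUSB_EP_CFG_ISO		BIT(28)		/* 1 = isochronous */
	#define XUSB_EP_CFG_MAXPKT(x)	((x) << 15)	/* max packet size */
	#define XUSB_EP_CFG_BASE(x)	(x)		/* buffer base in DPRAM */

	/* e.g. a 512-byte bulk IN endpoint buffered at 0x1100: */
	/* epcfgreg = XUSB_EP_CFG_DIR_IN | XUSB_EP_CFG_MAXPKT(512) |
		      XUSB_EP_CFG_BASE(0x1100); */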
+
+/**
+ * xudc_start_dma - Starts DMA transfer.
+ * @ep: pointer to the usb device endpoint structure.
+ * @src: DMA source address.
+ * @dst: DMA destination address.
+ * @length: number of bytes to transfer.
+ *
+ * Return: 0 on success, error code on failure
+ *
+ * This function starts DMA transfer by writing to DMA source,
+ * destination and length registers.
+ */
+static int xudc_start_dma(struct xusb_ep *ep, dma_addr_t src,
+ dma_addr_t dst, u32 length)
+{
+ struct xusb_udc *udc = ep->udc;
+ int rc = 0;
+ u32 timeout = 500;
+ u32 reg;
+
+ /*
+ * Set the addresses in the DMA source and
+ * destination registers and then set the length
+ * into the DMA length register.
+ */
+ udc->write_fn(udc->addr, XUSB_DMA_DSAR_ADDR_OFFSET, src);
+ udc->write_fn(udc->addr, XUSB_DMA_DDAR_ADDR_OFFSET, dst);
+ udc->write_fn(udc->addr, XUSB_DMA_LENGTH_OFFSET, length);
+
+ /*
+ * Wait till DMA transaction is complete and
+ * check whether the DMA transaction was
+ * successful.
+ */
+ do {
+ reg = udc->read_fn(udc->addr + XUSB_DMA_STATUS_OFFSET);
+ if (!(reg & XUSB_DMA_DMASR_BUSY))
+ break;
+
+ /*
+ * We can't sleep here, because it's also called from
+ * interrupt context.
+ */
+ timeout--;
+ if (!timeout) {
+ dev_err(udc->dev, "DMA timeout\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ } while (1);
+
+ if ((udc->read_fn(udc->addr + XUSB_DMA_STATUS_OFFSET) &
+ XUSB_DMA_DMASR_ERROR) == XUSB_DMA_DMASR_ERROR){
+ dev_err(udc->dev, "DMA Error\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
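The open-coded busy-wait could also be written with the kernel's iopoll helper; a sketch assuming read_poll_timeout_atomic() from <linux/iopoll.h> is available (the original predates it and read_fn is an indirect accessor, so this is illustrative only):

	u32 reg;
	int rc;

	rc = read_poll_timeout_atomic(udc->read_fn, reg,
				      !(reg & XUSB_DMA_DMASR_BUSY),
				      1, 500, false,
				      udc->addr + XUSB_DMA_STATUS_OFFSET);
	if (rc)
		dev_err(udc->dev, "DMA timeout\n");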
+
+/**
+ * xudc_dma_send - Sends IN data using DMA.
+ * @ep: pointer to the usb device endpoint structure.
+ * @req: pointer to the usb request structure.
+ * @buffer: pointer to data to be sent.
+ * @length: number of bytes to send.
+ *
+ * Return: 0 on success, -EAGAIN if no buffer is free and error
+ * code on failure.
+ *
+ * This function sends data using DMA.
+ */
+static int xudc_dma_send(struct xusb_ep *ep, struct xusb_req *req,
+ u8 *buffer, u32 length)
+{
+ u32 *eprambase;
+ dma_addr_t src;
+ dma_addr_t dst;
+ struct xusb_udc *udc = ep->udc;
+
+ src = req->usb_req.dma + req->usb_req.actual;
+ if (req->usb_req.length)
+ dma_sync_single_for_device(udc->dev, src,
+ length, DMA_TO_DEVICE);
+ if (!ep->curbufnum && !ep->buffer0ready) {
+ /* Get the Buffer address and copy the transmit data.*/
+ eprambase = (u32 __force *)(udc->addr + ep->rambase);
+ dst = virt_to_phys(eprambase);
+ udc->write_fn(udc->addr, ep->offset +
+ XUSB_EP_BUF0COUNT_OFFSET, length);
+ udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
+ XUSB_DMA_BRR_CTRL | (1 << ep->epnumber));
+ ep->buffer0ready = 1;
+ ep->curbufnum = 1;
+ } else if (ep->curbufnum && !ep->buffer1ready) {
+ /* Get the Buffer address and copy the transmit data.*/
+ eprambase = (u32 __force *)(udc->addr + ep->rambase +
+ ep->ep_usb.maxpacket);
+ dst = virt_to_phys(eprambase);
+ udc->write_fn(udc->addr, ep->offset +
+ XUSB_EP_BUF1COUNT_OFFSET, length);
+ udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
+ XUSB_DMA_BRR_CTRL | (1 << (ep->epnumber +
+ XUSB_STATUS_EP_BUFF2_SHIFT)));
+ ep->buffer1ready = 1;
+ ep->curbufnum = 0;
+ } else {
+ /* None of the ping-pong buffers are ready currently */
+ return -EAGAIN;
+ }
+
+ return xudc_start_dma(ep, src, dst, length);
+}
+
+/**
+ * xudc_dma_receive - Receives OUT data using DMA.
+ * @ep: pointer to the usb device endpoint structure.
+ * @req: pointer to the usb request structure.
+ * @buffer: pointer to storage buffer of received data.
+ * @length: number of bytes to receive.
+ *
+ * Return: 0 on success, -EAGAIN if no buffer is free and error
+ * code on failure.
+ *
+ * This function receives data using DMA.
+ */
+static int xudc_dma_receive(struct xusb_ep *ep, struct xusb_req *req,
+ u8 *buffer, u32 length)
+{
+ u32 *eprambase;
+ dma_addr_t src;
+ dma_addr_t dst;
+ struct xusb_udc *udc = ep->udc;
+
+ dst = req->usb_req.dma + req->usb_req.actual;
+ if (!ep->curbufnum && !ep->buffer0ready) {
+ /* Get the buffer address for the data to be received */
+ eprambase = (u32 __force *)(udc->addr + ep->rambase);
+ src = virt_to_phys(eprambase);
+ udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
+ XUSB_DMA_BRR_CTRL | XUSB_DMA_READ_FROM_DPRAM |
+ (1 << ep->epnumber));
+ ep->buffer0ready = 1;
+ ep->curbufnum = 1;
+ } else if (ep->curbufnum && !ep->buffer1ready) {
+ /* Get the buffer address for the data to be received */
+ eprambase = (u32 __force *)(udc->addr +
+ ep->rambase + ep->ep_usb.maxpacket);
+ src = virt_to_phys(eprambase);
+ udc->write_fn(udc->addr, XUSB_DMA_CONTROL_OFFSET,
+ XUSB_DMA_BRR_CTRL | XUSB_DMA_READ_FROM_DPRAM |
+ (1 << (ep->epnumber +
+ XUSB_STATUS_EP_BUFF2_SHIFT)));
+ ep->buffer1ready = 1;
+ ep->curbufnum = 0;
+ } else {
+ /* None of the ping-pong buffers are ready currently */
+ return -EAGAIN;
+ }
+
+ return xudc_start_dma(ep, src, dst, length);
+}
+
+/**
+ * xudc_eptxrx - Transmits or receives data to or from an endpoint.
+ * @ep: pointer to the usb endpoint configuration structure.
+ * @req: pointer to the usb request structure.
+ * @bufferptr: pointer to buffer containing the data to be sent.
+ * @bufferlen: The number of data bytes to be sent.
+ *
+ * Return: 0 on success, -EAGAIN if no buffer is free.
+ *
+ * This function copies the transmit/receive data to/from the end point buffer
+ * and enables the buffer for transmission/reception.
+ */
+static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
+ u8 *bufferptr, u32 bufferlen)
+{
+ u32 *eprambase;
+ u32 bytestosend;
+ int rc = 0;
+ struct xusb_udc *udc = ep->udc;
+
+ bytestosend = bufferlen;
+ if (udc->dma_enabled) {
+ if (ep->is_in)
+ rc = xudc_dma_send(ep, req, bufferptr, bufferlen);
+ else
+ rc = xudc_dma_receive(ep, req, bufferptr, bufferlen);
+ return rc;
+ }
+ /* Put the transmit buffer into the correct ping-pong buffer.*/
+ if (!ep->curbufnum && !ep->buffer0ready) {
+ /* Get the Buffer address and copy the transmit data.*/
+ eprambase = (u32 __force *)(udc->addr + ep->rambase);
+ if (ep->is_in) {
+ memcpy_toio((void __iomem *)eprambase, bufferptr,
+ bytestosend);
+ udc->write_fn(udc->addr, ep->offset +
+ XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
+ } else {
+ memcpy_fromio(bufferptr, (void __iomem *)eprambase,
+ bytestosend);
+ }
+ /* Enable the buffer for transmission/reception. */
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
+ 1 << ep->epnumber);
+ ep->buffer0ready = 1;
+ ep->curbufnum = 1;
+ } else if (ep->curbufnum && !ep->buffer1ready) {
+ /* Get the Buffer address and copy the transmit data.*/
+ eprambase = (u32 __force *)(udc->addr + ep->rambase +
+ ep->ep_usb.maxpacket);
+ if (ep->is_in) {
+ memcpy_toio((void __iomem *)eprambase, bufferptr,
+ bytestosend);
+ udc->write_fn(udc->addr, ep->offset +
+ XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
+ } else {
+ memcpy_fromio(bufferptr, (void __iomem *)eprambase,
+ bytestosend);
+ }
+ /* Enable the buffer for transmission/reception. */
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
+ 1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT));
+ ep->buffer1ready = 1;
+ ep->curbufnum = 0;
+ } else {
+ /* None of the ping-pong buffers are ready currently */
+ return -EAGAIN;
+ }
+ return rc;
+}
+
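+/*
+ * The ping-pong buffer selection above follows the same pattern in all
+ * three paths (PIO, DMA send, DMA receive), summarized for reference:
+ *
+ *   curbufnum == 0 && !buffer0ready  ->  use buffer 0, mark it ready,
+ *                                        set curbufnum to 1
+ *   curbufnum == 1 && !buffer1ready  ->  use buffer 1, mark it ready,
+ *                                        set curbufnum to 0
+ *   otherwise                        ->  both buffers busy, -EAGAIN
+ */
+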
+/**
+ * xudc_done - Executes the endpoint data transfer completion tasks.
+ * @ep: pointer to the usb device endpoint structure.
+ * @req: pointer to the usb request structure.
+ * @status: Status of the data transfer.
+ *
+ * Removes the request from the endpoint queue and updates the data
+ * transfer completion status.
+ */
+static void xudc_done(struct xusb_ep *ep, struct xusb_req *req, int status)
+{
+ struct xusb_udc *udc = ep->udc;
+
+ list_del_init(&req->queue);
+
+ if (req->usb_req.status == -EINPROGRESS)
+ req->usb_req.status = status;
+ else
+ status = req->usb_req.status;
+
+ if (status && status != -ESHUTDOWN)
+ dev_dbg(udc->dev, "%s done %p, status %d\n",
+ ep->ep_usb.name, req, status);
+ /* Unmap the request if DMA is enabled */
+ if (udc->dma_enabled && ep->epnumber && req->usb_req.length)
+ usb_gadget_unmap_request(&udc->gadget, &req->usb_req,
+ ep->is_in);
+
+ if (req->usb_req.complete) {
+ spin_unlock(&udc->lock);
+ req->usb_req.complete(&ep->ep_usb, &req->usb_req);
+ spin_lock(&udc->lock);
+ }
+}
+
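+/*
+ * xudc_done() drops udc->lock around the completion callback, so gadget
+ * drivers may requeue from their callbacks without deadlocking. A minimal,
+ * hypothetical gadget-side callback (not part of this driver):
+ *
+ *   static void sample_complete(struct usb_ep *ep, struct usb_request *req)
+ *   {
+ *       if (req->status)
+ *           return;
+ *       // req->actual bytes of req->buf are valid here; resubmit
+ *       usb_ep_queue(ep, req, GFP_ATOMIC);
+ *   }
+ */
+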
+/**
+ * xudc_read_fifo - Reads the data from the given endpoint buffer.
+ * @ep: pointer to the usb device endpoint structure.
+ * @req: pointer to the usb request structure.
+ *
+ * Return: 0 if request is completed and -EAGAIN if not completed.
+ *
+ * Pulls OUT packet data from the endpoint buffer.
+ */
+static int xudc_read_fifo(struct xusb_ep *ep, struct xusb_req *req)
+{
+ u8 *buf;
+ u32 is_short, count, bufferspace;
+ u8 bufoffset;
+ u8 two_pkts = 0;
+ int ret;
+ int retval = -EAGAIN;
+ struct xusb_udc *udc = ep->udc;
+
+ if (ep->buffer0ready && ep->buffer1ready) {
+ dev_dbg(udc->dev, "Packet NOT ready!\n");
+ return retval;
+ }
+top:
+ if (ep->curbufnum)
+ bufoffset = XUSB_EP_BUF1COUNT_OFFSET;
+ else
+ bufoffset = XUSB_EP_BUF0COUNT_OFFSET;
+
+ count = udc->read_fn(udc->addr + ep->offset + bufoffset);
+
+ if (!ep->buffer0ready && !ep->buffer1ready)
+ two_pkts = 1;
+
+ buf = req->usb_req.buf + req->usb_req.actual;
+ prefetchw(buf);
+ bufferspace = req->usb_req.length - req->usb_req.actual;
+ is_short = count < ep->ep_usb.maxpacket;
+
+ if (unlikely(!bufferspace)) {
+ /*
+ * This happens when the driver's buffer is smaller than
+ * what the host sent: discard the extra data.
+ */
+ if (req->usb_req.status != -EOVERFLOW)
+ dev_dbg(udc->dev, "%s overflow %d\n",
+ ep->ep_usb.name, count);
+ req->usb_req.status = -EOVERFLOW;
+ xudc_done(ep, req, -EOVERFLOW);
+ return 0;
+ }
+
+ ret = xudc_eptxrx(ep, req, buf, count);
+ switch (ret) {
+ case 0:
+ req->usb_req.actual += min(count, bufferspace);
+ dev_dbg(udc->dev, "read %s, %d bytes%s req %p %d/%d\n",
+ ep->ep_usb.name, count, is_short ? "/S" : "", req,
+ req->usb_req.actual, req->usb_req.length);
+
+ /* Completion */
+ if ((req->usb_req.actual == req->usb_req.length) || is_short) {
+ if (udc->dma_enabled && req->usb_req.length)
+ dma_sync_single_for_cpu(udc->dev,
+ req->usb_req.dma,
+ req->usb_req.actual,
+ DMA_FROM_DEVICE);
+ xudc_done(ep, req, 0);
+ return 0;
+ }
+ if (two_pkts) {
+ two_pkts = 0;
+ goto top;
+ }
+ break;
+ case -EAGAIN:
+ dev_dbg(udc->dev, "receive busy\n");
+ break;
+ case -EINVAL:
+ case -ETIMEDOUT:
+ /* DMA error, dequeue the request */
+ xudc_done(ep, req, -ECONNRESET);
+ retval = 0;
+ break;
+ }
+
+ return retval;
+}
+
+/**
+ * xudc_write_fifo - Writes data into the given endpoint buffer.
+ * @ep: pointer to the usb device endpoint structure.
+ * @req: pointer to the usb request structure.
+ *
+ * Return: 0 if request is completed and -EAGAIN if not completed.
+ *
+ * Loads endpoint buffer for an IN packet.
+ */
+static int xudc_write_fifo(struct xusb_ep *ep, struct xusb_req *req)
+{
+ u32 max;
+ u32 length;
+ int ret;
+ int retval = -EAGAIN;
+ struct xusb_udc *udc = ep->udc;
+ int is_last, is_short = 0;
+ u8 *buf;
+
+ max = le16_to_cpu(ep->desc->wMaxPacketSize);
+ buf = req->usb_req.buf + req->usb_req.actual;
+ prefetch(buf);
+ length = req->usb_req.length - req->usb_req.actual;
+ length = min(length, max);
+
+ ret = xudc_eptxrx(ep, req, buf, length);
+ switch (ret) {
+ case 0:
+ req->usb_req.actual += length;
+ if (unlikely(length != max)) {
+ is_last = is_short = 1;
+ } else {
+ if (likely(req->usb_req.length !=
+ req->usb_req.actual) || req->usb_req.zero)
+ is_last = 0;
+ else
+ is_last = 1;
+ }
+ dev_dbg(udc->dev, "%s: wrote %s %d bytes%s%s %d left %p\n",
+ __func__, ep->ep_usb.name, length, is_last ? "/L" : "",
+ is_short ? "/S" : "",
+ req->usb_req.length - req->usb_req.actual, req);
+ /* completion */
+ if (is_last) {
+ xudc_done(ep, req, 0);
+ retval = 0;
+ }
+ break;
+ case -EAGAIN:
+ dev_dbg(udc->dev, "Send busy\n");
+ break;
+ case -EINVAL:
+ case -ETIMEDOUT:
+ /* DMA error, dequeue the request */
+ xudc_done(ep, req, -ECONNRESET);
+ retval = 0;
+ break;
+ }
+
+ return retval;
+}
+
+/**
+ * xudc_nuke - Cleans up the data transfer message list.
+ * @ep: pointer to the usb device endpoint structure.
+ * @status: Status of the data transfer.
+ */
+static void xudc_nuke(struct xusb_ep *ep, int status)
+{
+ struct xusb_req *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct xusb_req, queue);
+ xudc_done(ep, req, status);
+ }
+}
+
+/**
+ * xudc_ep_set_halt - Stalls/unstalls the given endpoint.
+ * @_ep: pointer to the usb device endpoint structure.
+ * @value: value to indicate stall/unstall.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ struct xusb_ep *ep = to_xusb_ep(_ep);
+ struct xusb_udc *udc;
+ unsigned long flags;
+ u32 epcfgreg;
+
+ if (!_ep || (!ep->desc && ep->epnumber)) {
+ pr_debug("%s: bad ep or descriptor\n", __func__);
+ return -EINVAL;
+ }
+ udc = ep->udc;
+
+ if (ep->is_in && (!list_empty(&ep->queue)) && value) {
+ dev_dbg(udc->dev, "requests pending can't halt\n");
+ return -EAGAIN;
+ }
+
+ if (ep->buffer0ready || ep->buffer1ready) {
+ dev_dbg(udc->dev, "HW buffers busy can't halt\n");
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (value) {
+ /* Stall the endpoint. */
+ epcfgreg = udc->read_fn(udc->addr + ep->offset);
+ epcfgreg |= XUSB_EP_CFG_STALL_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfgreg);
+ } else {
+ /* Unstall the endpoint. */
+ epcfgreg = udc->read_fn(udc->addr + ep->offset);
+ epcfgreg &= ~XUSB_EP_CFG_STALL_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfgreg);
+ if (ep->epnumber) {
+ /* Reset the toggle bit.*/
+ epcfgreg = udc->read_fn(udc->addr + ep->offset);
+ epcfgreg &= ~XUSB_EP_CFG_DATA_TOGGLE_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfgreg);
+ }
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
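+/*
+ * Function drivers reach this handler through the usb_ep_set_halt()
+ * wrapper, e.g. to stall a bulk endpoint on a protocol error (sketch
+ * with a hypothetical endpoint pointer):
+ *
+ *   if (invalid_command)
+ *       usb_ep_set_halt(bulk_in_ep);
+ *
+ * Note the -EAGAIN paths above: an IN endpoint with queued requests, or
+ * one whose hardware buffers are still busy, cannot be halted yet.
+ */
+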
+/**
+ * __xudc_ep_enable - Enables the given endpoint.
+ * @ep: pointer to the xusb endpoint structure.
+ * @desc: pointer to usb endpoint descriptor.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int __xudc_ep_enable(struct xusb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct xusb_udc *udc = ep->udc;
+ u32 tmp;
+ u32 epcfg;
+ u32 ier;
+ u16 maxpacket;
+
+ ep->is_in = ((desc->bEndpointAddress & USB_DIR_IN) != 0);
+ /* Bit 3...0:endpoint number */
+ ep->epnumber = (desc->bEndpointAddress & 0x0f);
+ ep->desc = desc;
+ ep->ep_usb.desc = desc;
+ tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ ep->ep_usb.maxpacket = maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+
+ switch (tmp) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ dev_dbg(udc->dev, "only one control endpoint\n");
+ /* NON-ISO */
+ ep->is_iso = 0;
+ return -EINVAL;
+ case USB_ENDPOINT_XFER_INT:
+ /* NON-ISO */
+ ep->is_iso = 0;
+ if (maxpacket > 64) {
+ dev_dbg(udc->dev, "bogus maxpacket %d\n", maxpacket);
+ return -EINVAL;
+ }
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ /* NON-ISO */
+ ep->is_iso = 0;
+ if (!(is_power_of_2(maxpacket) && maxpacket >= 8 &&
+ maxpacket <= 512)) {
+ dev_dbg(udc->dev, "bogus maxpacket %d\n", maxpacket);
+ return -EINVAL;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* ISO */
+ ep->is_iso = 1;
+ break;
+ }
+
+ ep->buffer0ready = false;
+ ep->buffer1ready = false;
+ ep->curbufnum = 0;
+ ep->rambase = rambase[ep->epnumber];
+ xudc_epconfig(ep, udc);
+
+ dev_dbg(udc->dev, "Enable Endpoint %d max pkt is %d\n",
+ ep->epnumber, maxpacket);
+
+ /* Enable the End point.*/
+ epcfg = udc->read_fn(udc->addr + ep->offset);
+ epcfg |= XUSB_EP_CFG_VALID_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfg);
+ if (ep->epnumber)
+ ep->rambase <<= 2;
+
+ /* Enable buffer completion interrupts for endpoint */
+ ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ ier |= (XUSB_STATUS_INTR_BUFF_COMP_SHIFT_MASK << ep->epnumber);
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);
+
+ /* for OUT endpoint set buffers ready to receive */
+ if (ep->epnumber && !ep->is_in) {
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
+ 1 << ep->epnumber);
+ ep->buffer0ready = true;
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET,
+ (1 << (ep->epnumber +
+ XUSB_STATUS_EP_BUFF2_SHIFT)));
+ ep->buffer1ready = true;
+ }
+
+ return 0;
+}
+
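+/*
+ * Summary of the wMaxPacketSize validation performed above:
+ *
+ *   control      - rejected here, ep0 is managed by the driver itself
+ *   interrupt    - up to 64 bytes
+ *   bulk         - a power of two between 8 and 512 bytes
+ *   isochronous  - accepted as-is
+ */
+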
+/**
+ * xudc_ep_enable - Enables the given endpoint.
+ * @_ep: pointer to the usb endpoint structure.
+ * @desc: pointer to usb endpoint descriptor.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct xusb_ep *ep;
+ struct xusb_udc *udc;
+ unsigned long flags;
+ int ret;
+
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("%s: bad ep or descriptor\n", __func__);
+ return -EINVAL;
+ }
+
+ ep = to_xusb_ep(_ep);
+ udc = ep->udc;
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_dbg(udc->dev, "bogus device state\n");
+ return -ESHUTDOWN;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ret = __xudc_ep_enable(ep, desc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * xudc_ep_disable - Disables the given endpoint.
+ * @_ep: pointer to the usb endpoint structure.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_ep_disable(struct usb_ep *_ep)
+{
+ struct xusb_ep *ep;
+ unsigned long flags;
+ u32 epcfg;
+ struct xusb_udc *udc;
+
+ if (!_ep) {
+ pr_debug("%s: invalid ep\n", __func__);
+ return -EINVAL;
+ }
+
+ ep = to_xusb_ep(_ep);
+ udc = ep->udc;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ xudc_nuke(ep, -ESHUTDOWN);
+
+ /* Restore the endpoint's pristine config */
+ ep->desc = NULL;
+ ep->ep_usb.desc = NULL;
+
+ dev_dbg(udc->dev, "USB Ep %d disable\n ", ep->epnumber);
+ /* Disable the endpoint.*/
+ epcfg = udc->read_fn(udc->addr + ep->offset);
+ epcfg &= ~XUSB_EP_CFG_VALID_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/**
+ * xudc_ep_alloc_request - Allocates a request for the endpoint.
+ * @_ep: pointer to the usb endpoint structure.
+ * @gfp_flags: Flags related to the request call.
+ *
+ * Return: pointer to request structure on success and a NULL on failure.
+ */
+static struct usb_request *xudc_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct xusb_ep *ep = to_xusb_ep(_ep);
+ struct xusb_req *req;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->ep = ep;
+ INIT_LIST_HEAD(&req->queue);
+ return &req->usb_req;
+}
+
+/**
+ * xudc_free_request - Frees a previously allocated request.
+ * @_ep: pointer to the usb device endpoint structure.
+ * @_req: pointer to the usb request structure.
+ */
+static void xudc_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct xusb_req *req = to_xusb_req(_req);
+
+ kfree(req);
+}
+
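+/*
+ * A minimal sketch of the request lifecycle as seen from a gadget
+ * driver; buf, len and sample_complete are hypothetical and error
+ * handling is elided:
+ *
+ *   struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ *
+ *   req->buf      = buf;
+ *   req->length   = len;
+ *   req->complete = sample_complete;
+ *   usb_ep_queue(ep, req, GFP_KERNEL);
+ *   ...
+ *   usb_ep_free_request(ep, req);
+ */
+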
+/**
+ * __xudc_ep0_queue - Adds the request to endpoint 0 queue.
+ * @ep0: pointer to the xusb endpoint 0 structure.
+ * @req: pointer to the xusb request structure.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
+{
+ struct xusb_udc *udc = ep0->udc;
+ u32 length;
+ u8 *corebuf;
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_dbg(udc->dev, "%s, bogus device state\n", __func__);
+ return -EINVAL;
+ }
+ if (!list_empty(&ep0->queue)) {
+ dev_dbg(udc->dev, "%s:ep0 busy\n", __func__);
+ return -EBUSY;
+ }
+
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ list_add_tail(&req->queue, &ep0->queue);
+
+ if (udc->setup.bRequestType & USB_DIR_IN) {
+ prefetch(req->usb_req.buf);
+ length = req->usb_req.length;
+ corebuf = (void __force *) ((ep0->rambase << 2) +
+ udc->addr);
+ length = req->usb_req.actual = min_t(u32, length,
+ EP0_MAX_PACKET);
+ memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
+ udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+ } else {
+ if (udc->setup.wLength) {
+ /* Enable EP0 buffer to receive data */
+ udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, 0);
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+ } else {
+ xudc_wrstatus(udc);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xudc_ep0_queue - Adds the request to endpoint 0 queue.
+ * @_ep: pointer to the usb endpoint 0 structure.
+ * @_req: pointer to the usb request structure.
+ * @gfp_flags: Flags related to the request call.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_ep0_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct xusb_req *req = to_xusb_req(_req);
+ struct xusb_ep *ep0 = to_xusb_ep(_ep);
+ struct xusb_udc *udc = ep0->udc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ret = __xudc_ep0_queue(ep0, req);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * xudc_ep_queue - Adds the request to endpoint queue.
+ * @_ep: pointer to the usb endpoint structure.
+ * @_req: pointer to the usb request structure.
+ * @gfp_flags: Flags related to the request call.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct xusb_req *req = to_xusb_req(_req);
+ struct xusb_ep *ep = to_xusb_ep(_ep);
+ struct xusb_udc *udc = ep->udc;
+ int ret;
+ unsigned long flags;
+
+ if (!ep->desc) {
+ dev_dbg(udc->dev, "%s: queuing request to disabled %s\n",
+ __func__, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_dbg(udc->dev, "%s, bogus device state\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ if (udc->dma_enabled) {
+ ret = usb_gadget_map_request(&udc->gadget, &req->usb_req,
+ ep->is_in);
+ if (ret) {
+ dev_dbg(udc->dev, "gadget_map failed ep%d\n",
+ ep->epnumber);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EAGAIN;
+ }
+ }
+
+ if (list_empty(&ep->queue)) {
+ if (ep->is_in) {
+ dev_dbg(udc->dev, "xudc_write_fifo from ep_queue\n");
+ if (!xudc_write_fifo(ep, req))
+ req = NULL;
+ } else {
+ dev_dbg(udc->dev, "xudc_read_fifo from ep_queue\n");
+ if (!xudc_read_fifo(ep, req))
+ req = NULL;
+ }
+ }
+
+ if (req != NULL)
+ list_add_tail(&req->queue, &ep->queue);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+/**
+ * xudc_ep_dequeue - Removes the request from the queue.
+ * @_ep: pointer to the usb device endpoint structure.
+ * @_req: pointer to the usb request structure.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct xusb_ep *ep = to_xusb_ep(_ep);
+ struct xusb_req *req = NULL;
+ struct xusb_req *iter;
+ struct xusb_udc *udc = ep->udc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* Make sure it's actually queued on this endpoint */
+ list_for_each_entry(iter, &ep->queue, queue) {
+ if (&iter->usb_req != _req)
+ continue;
+ req = iter;
+ break;
+ }
+ if (!req) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+ xudc_done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/**
+ * xudc_ep0_enable - Enables the given endpoint.
+ * @ep: pointer to the usb endpoint structure.
+ * @desc: pointer to usb endpoint descriptor.
+ *
+ * Return: error always.
+ *
+ * Endpoint 0 enable should not be called by the gadget layer.
+ */
+static int xudc_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EINVAL;
+}
+
+/**
+ * xudc_ep0_disable - Disables the given endpoint.
+ * @ep: pointer to the usb endpoint structure.
+ *
+ * Return: error always.
+ *
+ * Endpoint 0 disable should not be called by the gadget layer.
+ */
+static int xudc_ep0_disable(struct usb_ep *ep)
+{
+ return -EINVAL;
+}
+
+static const struct usb_ep_ops xusb_ep0_ops = {
+ .enable = xudc_ep0_enable,
+ .disable = xudc_ep0_disable,
+ .alloc_request = xudc_ep_alloc_request,
+ .free_request = xudc_free_request,
+ .queue = xudc_ep0_queue,
+ .dequeue = xudc_ep_dequeue,
+ .set_halt = xudc_ep_set_halt,
+};
+
+static const struct usb_ep_ops xusb_ep_ops = {
+ .enable = xudc_ep_enable,
+ .disable = xudc_ep_disable,
+ .alloc_request = xudc_ep_alloc_request,
+ .free_request = xudc_free_request,
+ .queue = xudc_ep_queue,
+ .dequeue = xudc_ep_dequeue,
+ .set_halt = xudc_ep_set_halt,
+};
+
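+/*
+ * The gadget core never calls these handlers directly; function drivers
+ * go through the usb_ep_*() wrappers, which dispatch via these ops:
+ *
+ *   usb_ep_enable(ep)            ->  xudc_ep_enable()
+ *   usb_ep_queue(ep, req, gfp)   ->  xudc_ep_queue() / xudc_ep0_queue()
+ *   usb_ep_dequeue(ep, req)      ->  xudc_ep_dequeue()
+ */
+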
+/**
+ * xudc_get_frame - Reads the current usb frame number.
+ * @gadget: pointer to the usb gadget structure.
+ *
+ * Return: current frame number for success and error value on failure.
+ */
+static int xudc_get_frame(struct usb_gadget *gadget)
+{
+ struct xusb_udc *udc;
+ int frame;
+
+ if (!gadget)
+ return -ENODEV;
+
+ udc = to_udc(gadget);
+ frame = udc->read_fn(udc->addr + XUSB_FRAMENUM_OFFSET);
+ return frame;
+}
+
+/**
+ * xudc_wakeup - Send remote wakeup signal to host
+ * @gadget: pointer to the usb gadget structure.
+ *
+ * Return: 0 on success and error on failure
+ */
+static int xudc_wakeup(struct usb_gadget *gadget)
+{
+ struct xusb_udc *udc = to_udc(gadget);
+ u32 ctrlreg;
+ int status = -EINVAL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Remote wake up not enabled by host */
+ if (!udc->remote_wkp)
+ goto done;
+
+ ctrlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ /* set remote wake up bit */
+ ctrlreg |= XUSB_CONTROL_USB_RMTWAKE_MASK;
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrlreg);
+ /*
+ * wait for a while and reset remote wake up bit since this bit
+ * is not cleared by HW after sending remote wakeup to host.
+ */
+ mdelay(2);
+
+ crtlreg &= ~XUSB_CONTROL_USB_RMTWAKE_MASK;
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+ status = 0;
+done:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return status;
+}
+
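+/*
+ * Gadget drivers trigger this path with usb_gadget_wakeup(gadget); it
+ * only succeeds after the host has enabled remote wakeup through
+ * SET_FEATURE(DEVICE_REMOTE_WAKEUP), which sets udc->remote_wkp in
+ * xudc_set_clear_feature() below.
+ */
+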
+/**
+ * xudc_pullup - start/stop USB traffic
+ * @gadget: pointer to the usb gadget structure.
+ * @is_on: flag to start or stop
+ *
+ * Return: 0 always
+ *
+ * This function starts/stops the SIE engine of the IP based on is_on.
+ */
+static int xudc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct xusb_udc *udc = to_udc(gadget);
+ unsigned long flags;
+ u32 ctrlreg;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ctrlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ if (is_on)
+ ctrlreg |= XUSB_CONTROL_USB_READY_MASK;
+ else
+ ctrlreg &= ~XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/**
+ * xudc_eps_init - initialize endpoints.
+ * @udc: pointer to the usb device controller structure.
+ */
+static void xudc_eps_init(struct xusb_udc *udc)
+{
+ u32 ep_number;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ for (ep_number = 0; ep_number < XUSB_MAX_ENDPOINTS; ep_number++) {
+ struct xusb_ep *ep = &udc->ep[ep_number];
+
+ if (ep_number) {
+ list_add_tail(&ep->ep_usb.ep_list,
+ &udc->gadget.ep_list);
+ usb_ep_set_maxpacket_limit(&ep->ep_usb,
+ (unsigned short) ~0);
+ snprintf(ep->name, EPNAME_SIZE, "ep%d", ep_number);
+ ep->ep_usb.name = ep->name;
+ ep->ep_usb.ops = &xusb_ep_ops;
+
+ ep->ep_usb.caps.type_iso = true;
+ ep->ep_usb.caps.type_bulk = true;
+ ep->ep_usb.caps.type_int = true;
+ } else {
+ ep->ep_usb.name = ep0name;
+ usb_ep_set_maxpacket_limit(&ep->ep_usb, EP0_MAX_PACKET);
+ ep->ep_usb.ops = &xusb_ep0_ops;
+
+ ep->ep_usb.caps.type_control = true;
+ }
+
+ ep->ep_usb.caps.dir_in = true;
+ ep->ep_usb.caps.dir_out = true;
+
+ ep->udc = udc;
+ ep->epnumber = ep_number;
+ ep->desc = NULL;
+ /*
+ * The configuration register address offset between
+ * each endpoint is 0x10.
+ */
+ ep->offset = XUSB_EP0_CONFIG_OFFSET + (ep_number * 0x10);
+ ep->is_in = 0;
+ ep->is_iso = 0;
+ ep->maxpacket = 0;
+ xudc_epconfig(ep, udc);
+
+ /* Initialize one queue per endpoint */
+ INIT_LIST_HEAD(&ep->queue);
+ }
+}
+
+/**
+ * xudc_stop_activity - Stops any further activity on the device.
+ * @udc: pointer to the usb device controller structure.
+ */
+static void xudc_stop_activity(struct xusb_udc *udc)
+{
+ int i;
+ struct xusb_ep *ep;
+
+ for (i = 0; i < XUSB_MAX_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ xudc_nuke(ep, -ESHUTDOWN);
+ }
+}
+
+/**
+ * xudc_start - Starts the device.
+ * @gadget: pointer to the usb gadget structure
+ * @driver: pointer to gadget driver structure
+ *
+ * Return: zero on success and error on failure
+ */
+static int xudc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct xusb_udc *udc = to_udc(gadget);
+ struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO];
+ const struct usb_endpoint_descriptor *desc = &config_bulk_out_desc;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (udc->driver) {
+ dev_err(udc->dev, "%s is already bound to %s\n",
+ udc->gadget.name, udc->driver->driver.name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* hook up the driver */
+ udc->driver = driver;
+ udc->gadget.speed = driver->max_speed;
+
+ /* Enable the control endpoint. */
+ ret = __xudc_ep_enable(ep0, desc);
+
+ /* Set device address and remote wakeup to 0 */
+ udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
+ udc->remote_wkp = 0;
+err:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+
+/**
+ * xudc_stop - stops the device.
+ * @gadget: pointer to the usb gadget structure
+ *
+ * Return: zero always
+ */
+static int xudc_stop(struct usb_gadget *gadget)
+{
+ struct xusb_udc *udc = to_udc(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->driver = NULL;
+
+ /* Set device address and remote wakeup to 0 */
+ udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
+ udc->remote_wkp = 0;
+
+ xudc_stop_activity(udc);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static const struct usb_gadget_ops xusb_udc_ops = {
+ .get_frame = xudc_get_frame,
+ .wakeup = xudc_wakeup,
+ .pullup = xudc_pullup,
+ .udc_start = xudc_start,
+ .udc_stop = xudc_stop,
+};
+
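+/*
+ * For reference, the UDC core maps its public entry points onto these
+ * ops: usb_gadget_connect()/usb_gadget_disconnect() end up in
+ * xudc_pullup() with is_on set to 1 and 0 respectively, and binding a
+ * gadget driver invokes xudc_start().
+ */
+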
+/**
+ * xudc_clear_stall_all_ep - clears stall of every endpoint.
+ * @udc: pointer to the udc structure.
+ */
+static void xudc_clear_stall_all_ep(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep;
+ u32 epcfgreg;
+ int i;
+
+ for (i = 0; i < XUSB_MAX_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ epcfgreg = udc->read_fn(udc->addr + ep->offset);
+ epcfgreg &= ~XUSB_EP_CFG_STALL_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfgreg);
+ if (ep->epnumber) {
+ /* Reset the toggle bit.*/
+ epcfgreg = udc->read_fn(udc->addr + ep->offset);
+ epcfgreg &= ~XUSB_EP_CFG_DATA_TOGGLE_MASK;
+ udc->write_fn(udc->addr, ep->offset, epcfgreg);
+ }
+ }
+}
+
+/**
+ * xudc_startup_handler - The usb device controller interrupt handler.
+ * @udc: pointer to the udc structure.
+ * @intrstatus: The mask value containing the interrupt sources.
+ *
+ * This function handles the RESET, SUSPEND, RESUME and DISCONNECT interrupts.
+ */
+static void xudc_startup_handler(struct xusb_udc *udc, u32 intrstatus)
+{
+ u32 intrreg;
+
+ if (intrstatus & XUSB_STATUS_RESET_MASK) {
+
+ dev_dbg(udc->dev, "Reset\n");
+
+ if (intrstatus & XUSB_STATUS_HIGH_SPEED_MASK)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ xudc_stop_activity(udc);
+ xudc_clear_stall_all_ep(udc);
+ udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, 0);
+
+ /* Set device address and remote wakeup to 0 */
+ udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
+ udc->remote_wkp = 0;
+
+ /* Enable the suspend, resume and disconnect */
+ intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ intrreg |= XUSB_STATUS_SUSPEND_MASK | XUSB_STATUS_RESUME_MASK |
+ XUSB_STATUS_DISCONNECT_MASK;
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
+ }
+ if (intrstatus & XUSB_STATUS_SUSPEND_MASK) {
+
+ dev_dbg(udc->dev, "Suspend\n");
+
+ /* Enable the reset, resume and disconnect */
+ intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ intrreg |= XUSB_STATUS_RESET_MASK | XUSB_STATUS_RESUME_MASK |
+ XUSB_STATUS_DISCONNECT_MASK;
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
+
+ udc->usb_state = USB_STATE_SUSPENDED;
+
+ if (udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+ if (intrstatus & XUSB_STATUS_RESUME_MASK) {
+ bool condition = (udc->usb_state != USB_STATE_SUSPENDED);
+
+ dev_WARN_ONCE(udc->dev, condition,
+ "Resume IRQ while not suspended\n");
+
+ dev_dbg(udc->dev, "Resume\n");
+
+ /* Enable the reset, suspend and disconnect */
+ intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ intrreg |= XUSB_STATUS_RESET_MASK | XUSB_STATUS_SUSPEND_MASK |
+ XUSB_STATUS_DISCONNECT_MASK;
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
+
+ udc->usb_state = 0;
+
+ if (udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+ if (intrstatus & XUSB_STATUS_DISCONNECT_MASK) {
+
+ dev_dbg(udc->dev, "Disconnect\n");
+
+ /* Enable the reset, resume and suspend */
+ intrreg = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ intrreg |= XUSB_STATUS_RESET_MASK | XUSB_STATUS_RESUME_MASK |
+ XUSB_STATUS_SUSPEND_MASK;
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, intrreg);
+
+ if (udc->driver && udc->driver->disconnect) {
+ spin_unlock(&udc->lock);
+ udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+ }
+}
+
+/**
+ * xudc_ep0_stall - Stall endpoint zero.
+ * @udc: pointer to the udc structure.
+ *
+ * This function stalls endpoint zero.
+ */
+static void xudc_ep0_stall(struct xusb_udc *udc)
+{
+ u32 epcfgreg;
+ struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO];
+
+ epcfgreg = udc->read_fn(udc->addr + ep0->offset);
+ epcfgreg |= XUSB_EP_CFG_STALL_MASK;
+ udc->write_fn(udc->addr, ep0->offset, epcfgreg);
+}
+
+/**
+ * xudc_setaddress - executes SET_ADDRESS command
+ * @udc: pointer to the udc structure.
+ *
+ * This function executes USB SET_ADDRESS command
+ */
+static void xudc_setaddress(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep0 = &udc->ep[0];
+ struct xusb_req *req = udc->req;
+ int ret;
+
+ req->usb_req.length = 0;
+ ret = __xudc_ep0_queue(ep0, req);
+ if (ret == 0)
+ return;
+
+ dev_err(udc->dev, "Can't respond to SET ADDRESS request\n");
+ xudc_ep0_stall(udc);
+}
+
+/**
+ * xudc_getstatus - executes GET_STATUS command
+ * @udc: pointer to the udc structure.
+ *
+ * This function executes USB GET_STATUS command
+ */
+static void xudc_getstatus(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep0 = &udc->ep[0];
+ struct xusb_req *req = udc->req;
+ struct xusb_ep *target_ep;
+ u16 status = 0;
+ u32 epcfgreg;
+ int epnum;
+ u32 halt;
+ int ret;
+
+ switch (udc->setup.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ /* Get device status */
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ if (udc->remote_wkp)
+ status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum >= XUSB_MAX_ENDPOINTS)
+ goto stall;
+ target_ep = &udc->ep[epnum];
+ epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
+ halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
+ if (udc->setup.wIndex & USB_DIR_IN) {
+ if (!target_ep->is_in)
+ goto stall;
+ } else {
+ if (target_ep->is_in)
+ goto stall;
+ }
+ if (halt)
+ status = 1 << USB_ENDPOINT_HALT;
+ break;
+ default:
+ goto stall;
+ }
+
+ req->usb_req.length = 2;
+ *(u16 *)req->usb_req.buf = cpu_to_le16(status);
+ ret = __xudc_ep0_queue(ep0, req);
+ if (ret == 0)
+ return;
+stall:
+ dev_err(udc->dev, "Can't respond to getstatus request\n");
+ xudc_ep0_stall(udc);
+}
+
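+/*
+ * Wire-format example for the two status bytes built above: a
+ * self-powered device with remote wakeup enabled answers
+ * GET_STATUS(device) with 03 00 (little-endian; bit 0 = self-powered,
+ * bit 1 = remote wakeup).
+ */
+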
+/**
+ * xudc_set_clear_feature - Executes the set feature and clear feature commands.
+ * @udc: pointer to the usb device controller structure.
+ *
+ * Processes the SET_FEATURE and CLEAR_FEATURE commands.
+ */
+static void xudc_set_clear_feature(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep0 = &udc->ep[0];
+ struct xusb_req *req = udc->req;
+ struct xusb_ep *target_ep;
+ u8 endpoint;
+ u8 outinbit;
+ u32 epcfgreg;
+ int flag = (udc->setup.bRequest == USB_REQ_SET_FEATURE ? 1 : 0);
+ int ret;
+
+ switch (udc->setup.bRequestType) {
+ case USB_RECIP_DEVICE:
+ switch (udc->setup.wValue) {
+ case USB_DEVICE_TEST_MODE:
+ /*
+ * The Test Mode will be executed
+ * after the status phase.
+ */
+ break;
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if (flag)
+ udc->remote_wkp = 1;
+ else
+ udc->remote_wkp = 0;
+ break;
+ default:
+ xudc_ep0_stall(udc);
+ break;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ if (!udc->setup.wValue) {
+ endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (endpoint >= XUSB_MAX_ENDPOINTS) {
+ xudc_ep0_stall(udc);
+ return;
+ }
+ target_ep = &udc->ep[endpoint];
+ outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
+ outinbit = outinbit >> 7;
+
+ /* Make sure direction matches.*/
+ if (outinbit != target_ep->is_in) {
+ xudc_ep0_stall(udc);
+ return;
+ }
+ epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
+ if (!endpoint) {
+ /* Clear the stall.*/
+ epcfgreg &= ~XUSB_EP_CFG_STALL_MASK;
+ udc->write_fn(udc->addr,
+ target_ep->offset, epcfgreg);
+ } else {
+ if (flag) {
+ epcfgreg |= XUSB_EP_CFG_STALL_MASK;
+ udc->write_fn(udc->addr,
+ target_ep->offset,
+ epcfgreg);
+ } else {
+ /* Unstall the endpoint.*/
+ epcfgreg &= ~(XUSB_EP_CFG_STALL_MASK |
+ XUSB_EP_CFG_DATA_TOGGLE_MASK);
+ udc->write_fn(udc->addr,
+ target_ep->offset,
+ epcfgreg);
+ }
+ }
+ }
+ break;
+ default:
+ xudc_ep0_stall(udc);
+ return;
+ }
+
+ req->usb_req.length = 0;
+ ret = __xudc_ep0_queue(ep0, req);
+ if (ret == 0)
+ return;
+
+ dev_err(udc->dev, "Can't respond to SET/CLEAR FEATURE\n");
+ xudc_ep0_stall(udc);
+}
+
+/**
+ * xudc_handle_setup - Processes the setup packet.
+ * @udc: pointer to the usb device controller structure.
+ *
+ * Process setup packet and delegate to gadget layer.
+ */
+static void xudc_handle_setup(struct xusb_udc *udc)
+ __must_hold(&udc->lock)
+{
+ struct xusb_ep *ep0 = &udc->ep[0];
+ struct usb_ctrlrequest setup;
+ u32 *ep0rambase;
+
+ /* Load up the chapter 9 command buffer. */
+ ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
+ memcpy_fromio(&setup, (void __iomem *)ep0rambase, 8);
+
+ udc->setup = setup;
+ udc->setup.wValue = le16_to_cpu(setup.wValue);
+ udc->setup.wIndex = le16_to_cpu(setup.wIndex);
+ udc->setup.wLength = le16_to_cpu(setup.wLength);
+
+ /* Clear previous requests */
+ xudc_nuke(ep0, -ECONNRESET);
+
+ if (udc->setup.bRequestType & USB_DIR_IN) {
+ /* Execute the get command.*/
+ udc->setupseqrx = STATUS_PHASE;
+ udc->setupseqtx = DATA_PHASE;
+ } else {
+ /* Execute the put command.*/
+ udc->setupseqrx = DATA_PHASE;
+ udc->setupseqtx = STATUS_PHASE;
+ }
+
+ switch (udc->setup.bRequest) {
+ case USB_REQ_GET_STATUS:
+ /* Data+Status phase from udc */
+ if ((udc->setup.bRequestType &
+ (USB_DIR_IN | USB_TYPE_MASK)) !=
+ (USB_DIR_IN | USB_TYPE_STANDARD))
+ break;
+ xudc_getstatus(udc);
+ return;
+ case USB_REQ_SET_ADDRESS:
+ /* Status phase from udc */
+ if (udc->setup.bRequestType != (USB_DIR_OUT |
+ USB_TYPE_STANDARD | USB_RECIP_DEVICE))
+ break;
+ xudc_setaddress(udc);
+ return;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ /* Requests with no data phase, status phase from udc */
+ if ((udc->setup.bRequestType & USB_TYPE_MASK)
+ != USB_TYPE_STANDARD)
+ break;
+ xudc_set_clear_feature(udc);
+ return;
+ default:
+ break;
+ }
+
+ spin_unlock(&udc->lock);
+ if (udc->driver->setup(&udc->gadget, &setup) < 0)
+ xudc_ep0_stall(udc);
+ spin_lock(&udc->lock);
+}
+
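+/*
+ * Example of the eight setup bytes read from the DPRAM above, for a
+ * standard GET_DESCRIPTOR(DEVICE) request:
+ *
+ *   80 06 00 01 00 00 40 00
+ *
+ *   bmRequestType = 0x80 (IN, standard, device)
+ *   bRequest      = 0x06 (GET_DESCRIPTOR)
+ *   wValue        = 0x0100 (device descriptor, index 0)
+ *   wIndex        = 0x0000
+ *   wLength       = 0x0040
+ */
+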
+/**
+ * xudc_ep0_out - Processes the endpoint 0 OUT token.
+ * @udc: pointer to the usb device controller structure.
+ */
+static void xudc_ep0_out(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep0 = &udc->ep[0];
+ struct xusb_req *req;
+ u8 *ep0rambase;
+ unsigned int bytes_to_rx;
+ void *buffer;
+
+ req = list_first_entry(&ep0->queue, struct xusb_req, queue);
+
+ switch (udc->setupseqrx) {
+ case STATUS_PHASE:
+ /*
+ * This resets both state machines for the next
+ * Setup packet.
+ */
+ udc->setupseqrx = SETUP_PHASE;
+ udc->setupseqtx = SETUP_PHASE;
+ req->usb_req.actual = req->usb_req.length;
+ xudc_done(ep0, req, 0);
+ break;
+ case DATA_PHASE:
+ bytes_to_rx = udc->read_fn(udc->addr +
+ XUSB_EP_BUF0COUNT_OFFSET);
+ /* Copy the data to be received from the DPRAM. */
+ ep0rambase = (u8 __force *) (udc->addr +
+ (ep0->rambase << 2));
+ buffer = req->usb_req.buf + req->usb_req.actual;
+ req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
+ memcpy_fromio(buffer, (void __iomem *)ep0rambase, bytes_to_rx);
+
+ if (req->usb_req.length == req->usb_req.actual) {
+ /* Data transfer completed get ready for Status stage */
+ xudc_wrstatus(udc);
+ } else {
+ /* Enable EP0 buffer to receive data */
+ udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, 0);
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * xudc_ep0_in - Processes the endpoint 0 IN token.
+ * @udc: pointer to the usb device controller structure.
+ */
+static void xudc_ep0_in(struct xusb_udc *udc)
+{
+ struct xusb_ep *ep0 = &udc->ep[0];
+ struct xusb_req *req;
+ unsigned int bytes_to_tx;
+ void *buffer;
+ u32 epcfgreg;
+ u16 count = 0;
+ u16 length;
+ u8 *ep0rambase;
+ u8 test_mode = udc->setup.wIndex >> 8;
+
+ req = list_first_entry(&ep0->queue, struct xusb_req, queue);
+ bytes_to_tx = req->usb_req.length - req->usb_req.actual;
+
+ switch (udc->setupseqtx) {
+ case STATUS_PHASE:
+ switch (udc->setup.bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* Set the address of the device.*/
+ udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET,
+ udc->setup.wValue);
+ break;
+ case USB_REQ_SET_FEATURE:
+ if (udc->setup.bRequestType ==
+ USB_RECIP_DEVICE) {
+ if (udc->setup.wValue ==
+ USB_DEVICE_TEST_MODE)
+ udc->write_fn(udc->addr,
+ XUSB_TESTMODE_OFFSET,
+ test_mode);
+ }
+ break;
+ }
+ req->usb_req.actual = req->usb_req.length;
+ xudc_done(ep0, req, 0);
+ break;
+ case DATA_PHASE:
+ if (!bytes_to_tx) {
+ /*
+ * We're done with data transfer, next
+ * will be zero length OUT with data toggle of
+ * 1. Setup data_toggle.
+ */
+ epcfgreg = udc->read_fn(udc->addr + ep0->offset);
+ epcfgreg |= XUSB_EP_CFG_DATA_TOGGLE_MASK;
+ udc->write_fn(udc->addr, ep0->offset, epcfgreg);
+ udc->setupseqtx = STATUS_PHASE;
+ } else {
+ length = count = min_t(u32, bytes_to_tx,
+ EP0_MAX_PACKET);
+ /* Copy the data to be transmitted into the DPRAM. */
+ ep0rambase = (u8 __force *) (udc->addr +
+ (ep0->rambase << 2));
+ buffer = req->usb_req.buf + req->usb_req.actual;
+ req->usb_req.actual = req->usb_req.actual + length;
+ memcpy_toio((void __iomem *)ep0rambase, buffer, length);
+ }
+ udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
+ udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * xudc_ctrl_ep_handler - Endpoint 0 interrupt handler.
+ * @udc: pointer to the udc structure.
+ * @intrstatus: mask value for the interrupt sources on endpoint 0.
+ *
+ * Processes the commands received during enumeration phase.
+ */
+static void xudc_ctrl_ep_handler(struct xusb_udc *udc, u32 intrstatus)
+{
+ if (intrstatus & XUSB_STATUS_SETUP_PACKET_MASK) {
+ xudc_handle_setup(udc);
+ } else {
+ if (intrstatus & XUSB_STATUS_FIFO_BUFF_RDY_MASK)
+ xudc_ep0_out(udc);
+ else if (intrstatus & XUSB_STATUS_FIFO_BUFF_FREE_MASK)
+ xudc_ep0_in(udc);
+ }
+}
+
+/**
+ * xudc_nonctrl_ep_handler - Non control endpoint interrupt handler.
+ * @udc: pointer to the udc structure.
+ * @epnum: endpoint number for which the interrupt is to be processed
+ * @intrstatus: mask value for interrupt sources of endpoints other
+ * than endpoint 0.
+ *
+ * Processes the buffer completion interrupts.
+ */
+static void xudc_nonctrl_ep_handler(struct xusb_udc *udc, u8 epnum,
+ u32 intrstatus)
+{
+ struct xusb_req *req;
+ struct xusb_ep *ep;
+
+ ep = &udc->ep[epnum];
+ /* Process the End point interrupts.*/
+ if (intrstatus & (XUSB_STATUS_EP0_BUFF1_COMP_MASK << epnum))
+ ep->buffer0ready = false;
+ if (intrstatus & (XUSB_STATUS_EP0_BUFF2_COMP_MASK << epnum))
+ ep->buffer1ready = false;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ req = list_first_entry(&ep->queue, struct xusb_req, queue);
+
+ if (ep->is_in)
+ xudc_write_fifo(ep, req);
+ else
+ xudc_read_fifo(ep, req);
+}
+
+/**
+ * xudc_irq - The main interrupt handler.
+ * @irq: The interrupt number.
+ * @_udc: pointer to the usb device controller structure.
+ *
+ * Return: IRQ_HANDLED after the interrupt is handled.
+ */
+static irqreturn_t xudc_irq(int irq, void *_udc)
+{
+ struct xusb_udc *udc = _udc;
+ u32 intrstatus;
+ u32 ier;
+ u8 index;
+ u32 bufintr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /*
+ * Event interrupts are level sensitive, hence first disable
+ * them in the IER, then read the ISR and figure out the
+ * active interrupts.
+ */
+ ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ ier &= ~XUSB_STATUS_INTR_EVENT_MASK;
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);
+
+ /* Read the Interrupt Status Register.*/
+ intrstatus = udc->read_fn(udc->addr + XUSB_STATUS_OFFSET);
+
+ /* Call the handler for the event interrupt.*/
+ if (intrstatus & XUSB_STATUS_INTR_EVENT_MASK) {
+ /*
+ * Check if there is any action to be done for :
+ * - USB Reset received {XUSB_STATUS_RESET_MASK}
+ * - USB Suspend received {XUSB_STATUS_SUSPEND_MASK}
+ * - USB Resume received {XUSB_STATUS_RESUME_MASK}
+ * - USB Disconnect received {XUSB_STATUS_DISCONNECT_MASK}
+ */
+ xudc_startup_handler(udc, intrstatus);
+ }
+
+ /* Check the buffer completion interrupts */
+ if (intrstatus & XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK) {
+ /* Enable Reset, Suspend, Resume and Disconnect */
+ ier = udc->read_fn(udc->addr + XUSB_IER_OFFSET);
+ ier |= XUSB_STATUS_INTR_EVENT_MASK;
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);
+
+ if (intrstatus & XUSB_STATUS_EP0_BUFF1_COMP_MASK)
+ xudc_ctrl_ep_handler(udc, intrstatus);
+
+ for (index = 1; index < 8; index++) {
+ bufintr = ((intrstatus &
+ (XUSB_STATUS_EP1_BUFF1_COMP_MASK <<
+ (index - 1))) || (intrstatus &
+ (XUSB_STATUS_EP1_BUFF2_COMP_MASK <<
+ (index - 1))));
+ if (bufintr) {
+ xudc_nonctrl_ep_handler(udc, index,
+ intrstatus);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return IRQ_HANDLED;
+}
+
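+/*
+ * Interrupt dispatch summary: the level-sensitive event interrupts
+ * (reset, suspend, resume, disconnect) are masked in the IER before
+ * being handled; endpoint 0 completions go to xudc_ctrl_ep_handler()
+ * and endpoints 1..7 to xudc_nonctrl_ep_handler().
+ */
+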
+/**
+ * xudc_probe - The device probe function for driver initialization.
+ * @pdev: pointer to the platform device structure.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xudc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct xusb_udc *udc;
+ int irq;
+ int ret;
+ u32 ier;
+ u8 *buff;
+
+ udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+
+ /* Create a dummy request for GET_STATUS, SET_ADDRESS */
+ udc->req = devm_kzalloc(&pdev->dev, sizeof(struct xusb_req),
+ GFP_KERNEL);
+ if (!udc->req)
+ return -ENOMEM;
+
+ buff = devm_kzalloc(&pdev->dev, STATUSBUFF_SIZE, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ udc->req->usb_req.buf = buff;
+
+ /* Map the registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ udc->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(udc->addr))
+ return PTR_ERR(udc->addr);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(&pdev->dev, irq, xudc_irq, 0,
+ dev_name(&pdev->dev), udc);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "unable to request irq %d", irq);
+ goto fail;
+ }
+
+ udc->dma_enabled = of_property_read_bool(np, "xlnx,has-builtin-dma");
+
+ /* Setup gadget structure */
+ udc->gadget.ops = &xusb_udc_ops;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb;
+ udc->gadget.name = driver_name;
+
+ udc->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(udc->clk)) {
+ if (PTR_ERR(udc->clk) != -ENOENT) {
+ ret = PTR_ERR(udc->clk);
+ goto fail;
+ }
+
+ /*
+ * Clock framework support is optional, so carry on
+ * anyway if we don't find a matching clock.
+ */
+ dev_warn(&pdev->dev, "s_axi_aclk clock property is not found\n");
+ udc->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(udc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ spin_lock_init(&udc->lock);
+
+ /* Check for IP endianness */
+ udc->write_fn = xudc_write32_be;
+ udc->read_fn = xudc_read32_be;
+ udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, USB_TEST_J);
+ if ((udc->read_fn(udc->addr + XUSB_TESTMODE_OFFSET))
+ != USB_TEST_J) {
+ udc->write_fn = xudc_write32;
+ udc->read_fn = xudc_read32;
+ }
+ udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, 0);
+
+ xudc_eps_init(udc);
+
+ /* Set device address to 0.*/
+ udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
+
+ ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (ret)
+ goto err_disable_unprepare_clk;
+
+ udc->dev = &udc->gadget.dev;
+
+ /* Enable the interrupts.*/
+ ier = XUSB_STATUS_GLOBAL_INTR_MASK | XUSB_STATUS_INTR_EVENT_MASK |
+ XUSB_STATUS_FIFO_BUFF_RDY_MASK | XUSB_STATUS_FIFO_BUFF_FREE_MASK |
+ XUSB_STATUS_SETUP_PACKET_MASK |
+ XUSB_STATUS_INTR_BUFF_COMP_ALL_MASK;
+
+ udc->write_fn(udc->addr, XUSB_IER_OFFSET, ier);
+
+ platform_set_drvdata(pdev, udc);
+
+ dev_vdbg(&pdev->dev, "%s at 0x%08X mapped to %p %s\n",
+ driver_name, (u32)res->start, udc->addr,
+ udc->dma_enabled ? "with DMA" : "without DMA");
+
+ return 0;
+
+err_disable_unprepare_clk:
+ clk_disable_unprepare(udc->clk);
+fail:
+ dev_err(&pdev->dev, "probe failed, %d\n", ret);
+ return ret;
+}
+
+/**
+ * xudc_remove - Releases the resources allocated during the initialization.
+ * @pdev: pointer to the platform device structure.
+ *
+ * Return: 0 always
+ */
+static int xudc_remove(struct platform_device *pdev)
+{
+ struct xusb_udc *udc = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+ clk_disable_unprepare(udc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int xudc_suspend(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 ctrlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ctrlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ ctrlreg &= ~XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (udc->driver && udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+
+ clk_disable(udc->clk);
+
+ return 0;
+}
+
+static int xudc_resume(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 ctrlreg;
+ unsigned long flags;
+ int ret;
+
+ udc = dev_get_drvdata(dev);
+
+ ret = clk_enable(udc->clk);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ctrlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ ctrlreg |= XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id usb_of_match[] = {
+ { .compatible = "xlnx,usb2-device-4.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, usb_of_match);
+
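+/*
+ * An illustrative device tree node that would bind to this driver; the
+ * address, interrupt specifier and clock wiring are examples only:
+ *
+ *   usb0: usb@44a00000 {
+ *           compatible = "xlnx,usb2-device-4.00.a";
+ *           reg = <0x44a00000 0x10000>;
+ *           interrupts = <0 57 4>;
+ *           clocks = <&clkc 15>;
+ *           clock-names = "s_axi_aclk";
+ *           xlnx,has-builtin-dma;
+ *   };
+ */
+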
+static struct platform_driver xudc_driver = {
+ .driver = {
+ .name = driver_name,
+ .of_match_table = usb_of_match,
+ .pm = &xudc_pm_ops,
+ },
+ .probe = xudc_probe,
+ .remove = xudc_remove,
+};
+
+module_platform_driver(xudc_driver);
+
+MODULE_DESCRIPTION("Xilinx udc driver");
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_LICENSE("GPL");