From 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 27 Apr 2024 12:05:51 +0200 Subject: Adding upstream version 5.10.209. Signed-off-by: Daniel Baumann --- drivers/mmc/host/Kconfig | 1107 ++++++ drivers/mmc/host/Makefile | 114 + drivers/mmc/host/alcor.c | 1192 ++++++ drivers/mmc/host/android-goldfish.c | 545 +++ drivers/mmc/host/atmel-mci.c | 2680 ++++++++++++++ drivers/mmc/host/au1xmmc.c | 1225 ++++++ drivers/mmc/host/bcm2835.c | 1486 ++++++++ drivers/mmc/host/cavium-octeon.c | 340 ++ drivers/mmc/host/cavium-thunderx.c | 205 + drivers/mmc/host/cavium.c | 1086 ++++++ drivers/mmc/host/cavium.h | 215 ++ drivers/mmc/host/cb710-mmc.c | 787 ++++ drivers/mmc/host/cb710-mmc.h | 100 + drivers/mmc/host/cqhci.c | 1168 ++++++ drivers/mmc/host/cqhci.h | 242 ++ drivers/mmc/host/davinci_mmc.c | 1417 +++++++ drivers/mmc/host/dw_mmc-bluefield.c | 68 + drivers/mmc/host/dw_mmc-exynos.c | 620 ++++ drivers/mmc/host/dw_mmc-exynos.h | 69 + drivers/mmc/host/dw_mmc-hi3798cv200.c | 211 ++ drivers/mmc/host/dw_mmc-k3.c | 486 +++ drivers/mmc/host/dw_mmc-pci.c | 105 + drivers/mmc/host/dw_mmc-pltfm.c | 112 + drivers/mmc/host/dw_mmc-pltfm.h | 16 + drivers/mmc/host/dw_mmc-rockchip.c | 397 ++ drivers/mmc/host/dw_mmc-zx.c | 234 ++ drivers/mmc/host/dw_mmc-zx.h | 32 + drivers/mmc/host/dw_mmc.c | 3493 ++++++++++++++++++ drivers/mmc/host/dw_mmc.h | 567 +++ drivers/mmc/host/jz4740_mmc.c | 1167 ++++++ drivers/mmc/host/meson-gx-mmc.c | 1341 +++++++ drivers/mmc/host/meson-mx-sdhc-clkc.c | 158 + drivers/mmc/host/meson-mx-sdhc-mmc.c | 920 +++++ drivers/mmc/host/meson-mx-sdhc.h | 141 + drivers/mmc/host/meson-mx-sdio.c | 773 ++++ drivers/mmc/host/mmc_hsq.c | 375 ++ drivers/mmc/host/mmc_hsq.h | 31 + drivers/mmc/host/mmc_spi.c | 1546 ++++++++ drivers/mmc/host/mmci.c | 2401 ++++++++++++ drivers/mmc/host/mmci.h | 475 +++ drivers/mmc/host/mmci_qcom_dml.c | 202 + drivers/mmc/host/mmci_stm32_sdmmc.c | 542 +++ drivers/mmc/host/moxart-mmc.c | 727 ++++ drivers/mmc/host/mtk-sd.c | 2735 ++++++++++++++ drivers/mmc/host/mvsdio.c | 843 +++++ drivers/mmc/host/mvsdio.h | 187 + drivers/mmc/host/mxcmmc.c | 1260 +++++++ drivers/mmc/host/mxs-mmc.c | 750 ++++ drivers/mmc/host/of_mmc_spi.c | 99 + drivers/mmc/host/omap.c | 1516 ++++++++ drivers/mmc/host/omap_hsmmc.c | 2182 +++++++++++ drivers/mmc/host/owl-mmc.c | 706 ++++ drivers/mmc/host/pxamci.c | 829 +++++ drivers/mmc/host/pxamci.h | 91 + drivers/mmc/host/renesas_sdhi.h | 81 + drivers/mmc/host/renesas_sdhi_core.c | 1165 ++++++ drivers/mmc/host/renesas_sdhi_internal_dmac.c | 368 ++ drivers/mmc/host/renesas_sdhi_sys_dmac.c | 479 +++ drivers/mmc/host/rtsx_pci_sdmmc.c | 1502 ++++++++ drivers/mmc/host/rtsx_usb_sdmmc.c | 1473 ++++++++ drivers/mmc/host/s3cmci.c | 1782 +++++++++ drivers/mmc/host/s3cmci.h | 75 + drivers/mmc/host/sdhci-acpi.c | 1090 ++++++ drivers/mmc/host/sdhci-bcm-kona.c | 338 ++ drivers/mmc/host/sdhci-brcmstb.c | 371 ++ drivers/mmc/host/sdhci-cadence.c | 477 +++ drivers/mmc/host/sdhci-cns3xxx.c | 113 + drivers/mmc/host/sdhci-cqhci.h | 24 + drivers/mmc/host/sdhci-dove.c | 121 + drivers/mmc/host/sdhci-esdhc-imx.c | 1881 ++++++++++ drivers/mmc/host/sdhci-esdhc-mcf.c | 522 +++ drivers/mmc/host/sdhci-esdhc.h | 109 + drivers/mmc/host/sdhci-iproc.c | 420 +++ drivers/mmc/host/sdhci-milbeaut.c | 348 ++ drivers/mmc/host/sdhci-msm.c | 2628 +++++++++++++ drivers/mmc/host/sdhci-of-arasan.c | 1729 +++++++++ drivers/mmc/host/sdhci-of-aspeed.c | 356 ++ drivers/mmc/host/sdhci-of-at91.c | 485 +++ drivers/mmc/host/sdhci-of-dwcmshc.c | 256 ++ 
drivers/mmc/host/sdhci-of-esdhc.c | 1523 ++++++++ drivers/mmc/host/sdhci-of-hlwd.c | 95 + drivers/mmc/host/sdhci-of-sparx5.c | 270 ++ drivers/mmc/host/sdhci-omap.c | 1325 +++++++ drivers/mmc/host/sdhci-pci-arasan.c | 331 ++ drivers/mmc/host/sdhci-pci-core.c | 2401 ++++++++++++ drivers/mmc/host/sdhci-pci-data.c | 6 + drivers/mmc/host/sdhci-pci-dwc-mshc.c | 84 + drivers/mmc/host/sdhci-pci-gli.c | 865 +++++ drivers/mmc/host/sdhci-pci-o2micro.c | 871 +++++ drivers/mmc/host/sdhci-pci.h | 204 + drivers/mmc/host/sdhci-pic32.c | 255 ++ drivers/mmc/host/sdhci-pltfm.c | 267 ++ drivers/mmc/host/sdhci-pltfm.h | 123 + drivers/mmc/host/sdhci-pxav2.c | 242 ++ drivers/mmc/host/sdhci-pxav3.c | 583 +++ drivers/mmc/host/sdhci-s3c.c | 800 ++++ drivers/mmc/host/sdhci-sirf.c | 235 ++ drivers/mmc/host/sdhci-spear.c | 196 + drivers/mmc/host/sdhci-sprd.c | 833 +++++ drivers/mmc/host/sdhci-st.c | 535 +++ drivers/mmc/host/sdhci-tegra.c | 1839 +++++++++ drivers/mmc/host/sdhci-xenon-phy.c | 844 +++++ drivers/mmc/host/sdhci-xenon.c | 687 ++++ drivers/mmc/host/sdhci-xenon.h | 100 + drivers/mmc/host/sdhci.c | 4929 +++++++++++++++++++++++++ drivers/mmc/host/sdhci.h | 816 ++++ drivers/mmc/host/sdhci_am654.c | 881 +++++ drivers/mmc/host/sdhci_f_sdh30.c | 235 ++ drivers/mmc/host/sdhci_f_sdh30.h | 32 + drivers/mmc/host/sdricoh_cs.c | 536 +++ drivers/mmc/host/sh_mmcif.c | 1576 ++++++++ drivers/mmc/host/sunxi-mmc.c | 1531 ++++++++ drivers/mmc/host/tifm_sd.c | 1092 ++++++ drivers/mmc/host/tmio_mmc.c | 228 ++ drivers/mmc/host/tmio_mmc.h | 284 ++ drivers/mmc/host/tmio_mmc_core.c | 1281 +++++++ drivers/mmc/host/toshsd.c | 709 ++++ drivers/mmc/host/toshsd.h | 172 + drivers/mmc/host/uniphier-sd.c | 699 ++++ drivers/mmc/host/usdhi6rol0.c | 1908 ++++++++++ drivers/mmc/host/ushc.c | 568 +++ drivers/mmc/host/via-sdmmc.c | 1320 +++++++ drivers/mmc/host/vub300.c | 2481 +++++++++++++ drivers/mmc/host/wbsd.c | 2013 ++++++++++ drivers/mmc/host/wbsd.h | 181 + drivers/mmc/host/wmt-sdmmc.c | 1011 +++++ 126 files changed, 100506 insertions(+) create mode 100644 drivers/mmc/host/Kconfig create mode 100644 drivers/mmc/host/Makefile create mode 100644 drivers/mmc/host/alcor.c create mode 100644 drivers/mmc/host/android-goldfish.c create mode 100644 drivers/mmc/host/atmel-mci.c create mode 100644 drivers/mmc/host/au1xmmc.c create mode 100644 drivers/mmc/host/bcm2835.c create mode 100644 drivers/mmc/host/cavium-octeon.c create mode 100644 drivers/mmc/host/cavium-thunderx.c create mode 100644 drivers/mmc/host/cavium.c create mode 100644 drivers/mmc/host/cavium.h create mode 100644 drivers/mmc/host/cb710-mmc.c create mode 100644 drivers/mmc/host/cb710-mmc.h create mode 100644 drivers/mmc/host/cqhci.c create mode 100644 drivers/mmc/host/cqhci.h create mode 100644 drivers/mmc/host/davinci_mmc.c create mode 100644 drivers/mmc/host/dw_mmc-bluefield.c create mode 100644 drivers/mmc/host/dw_mmc-exynos.c create mode 100644 drivers/mmc/host/dw_mmc-exynos.h create mode 100644 drivers/mmc/host/dw_mmc-hi3798cv200.c create mode 100644 drivers/mmc/host/dw_mmc-k3.c create mode 100644 drivers/mmc/host/dw_mmc-pci.c create mode 100644 drivers/mmc/host/dw_mmc-pltfm.c create mode 100644 drivers/mmc/host/dw_mmc-pltfm.h create mode 100644 drivers/mmc/host/dw_mmc-rockchip.c create mode 100644 drivers/mmc/host/dw_mmc-zx.c create mode 100644 drivers/mmc/host/dw_mmc-zx.h create mode 100644 drivers/mmc/host/dw_mmc.c create mode 100644 drivers/mmc/host/dw_mmc.h create mode 100644 drivers/mmc/host/jz4740_mmc.c create mode 100644 drivers/mmc/host/meson-gx-mmc.c create mode 100644 
drivers/mmc/host/meson-mx-sdhc-clkc.c create mode 100644 drivers/mmc/host/meson-mx-sdhc-mmc.c create mode 100644 drivers/mmc/host/meson-mx-sdhc.h create mode 100644 drivers/mmc/host/meson-mx-sdio.c create mode 100644 drivers/mmc/host/mmc_hsq.c create mode 100644 drivers/mmc/host/mmc_hsq.h create mode 100644 drivers/mmc/host/mmc_spi.c create mode 100644 drivers/mmc/host/mmci.c create mode 100644 drivers/mmc/host/mmci.h create mode 100644 drivers/mmc/host/mmci_qcom_dml.c create mode 100644 drivers/mmc/host/mmci_stm32_sdmmc.c create mode 100644 drivers/mmc/host/moxart-mmc.c create mode 100644 drivers/mmc/host/mtk-sd.c create mode 100644 drivers/mmc/host/mvsdio.c create mode 100644 drivers/mmc/host/mvsdio.h create mode 100644 drivers/mmc/host/mxcmmc.c create mode 100644 drivers/mmc/host/mxs-mmc.c create mode 100644 drivers/mmc/host/of_mmc_spi.c create mode 100644 drivers/mmc/host/omap.c create mode 100644 drivers/mmc/host/omap_hsmmc.c create mode 100644 drivers/mmc/host/owl-mmc.c create mode 100644 drivers/mmc/host/pxamci.c create mode 100644 drivers/mmc/host/pxamci.h create mode 100644 drivers/mmc/host/renesas_sdhi.h create mode 100644 drivers/mmc/host/renesas_sdhi_core.c create mode 100644 drivers/mmc/host/renesas_sdhi_internal_dmac.c create mode 100644 drivers/mmc/host/renesas_sdhi_sys_dmac.c create mode 100644 drivers/mmc/host/rtsx_pci_sdmmc.c create mode 100644 drivers/mmc/host/rtsx_usb_sdmmc.c create mode 100644 drivers/mmc/host/s3cmci.c create mode 100644 drivers/mmc/host/s3cmci.h create mode 100644 drivers/mmc/host/sdhci-acpi.c create mode 100644 drivers/mmc/host/sdhci-bcm-kona.c create mode 100644 drivers/mmc/host/sdhci-brcmstb.c create mode 100644 drivers/mmc/host/sdhci-cadence.c create mode 100644 drivers/mmc/host/sdhci-cns3xxx.c create mode 100644 drivers/mmc/host/sdhci-cqhci.h create mode 100644 drivers/mmc/host/sdhci-dove.c create mode 100644 drivers/mmc/host/sdhci-esdhc-imx.c create mode 100644 drivers/mmc/host/sdhci-esdhc-mcf.c create mode 100644 drivers/mmc/host/sdhci-esdhc.h create mode 100644 drivers/mmc/host/sdhci-iproc.c create mode 100644 drivers/mmc/host/sdhci-milbeaut.c create mode 100644 drivers/mmc/host/sdhci-msm.c create mode 100644 drivers/mmc/host/sdhci-of-arasan.c create mode 100644 drivers/mmc/host/sdhci-of-aspeed.c create mode 100644 drivers/mmc/host/sdhci-of-at91.c create mode 100644 drivers/mmc/host/sdhci-of-dwcmshc.c create mode 100644 drivers/mmc/host/sdhci-of-esdhc.c create mode 100644 drivers/mmc/host/sdhci-of-hlwd.c create mode 100644 drivers/mmc/host/sdhci-of-sparx5.c create mode 100644 drivers/mmc/host/sdhci-omap.c create mode 100644 drivers/mmc/host/sdhci-pci-arasan.c create mode 100644 drivers/mmc/host/sdhci-pci-core.c create mode 100644 drivers/mmc/host/sdhci-pci-data.c create mode 100644 drivers/mmc/host/sdhci-pci-dwc-mshc.c create mode 100644 drivers/mmc/host/sdhci-pci-gli.c create mode 100644 drivers/mmc/host/sdhci-pci-o2micro.c create mode 100644 drivers/mmc/host/sdhci-pci.h create mode 100644 drivers/mmc/host/sdhci-pic32.c create mode 100644 drivers/mmc/host/sdhci-pltfm.c create mode 100644 drivers/mmc/host/sdhci-pltfm.h create mode 100644 drivers/mmc/host/sdhci-pxav2.c create mode 100644 drivers/mmc/host/sdhci-pxav3.c create mode 100644 drivers/mmc/host/sdhci-s3c.c create mode 100644 drivers/mmc/host/sdhci-sirf.c create mode 100644 drivers/mmc/host/sdhci-spear.c create mode 100644 drivers/mmc/host/sdhci-sprd.c create mode 100644 drivers/mmc/host/sdhci-st.c create mode 100644 drivers/mmc/host/sdhci-tegra.c create mode 100644 
drivers/mmc/host/sdhci-xenon-phy.c create mode 100644 drivers/mmc/host/sdhci-xenon.c create mode 100644 drivers/mmc/host/sdhci-xenon.h create mode 100644 drivers/mmc/host/sdhci.c create mode 100644 drivers/mmc/host/sdhci.h create mode 100644 drivers/mmc/host/sdhci_am654.c create mode 100644 drivers/mmc/host/sdhci_f_sdh30.c create mode 100644 drivers/mmc/host/sdhci_f_sdh30.h create mode 100644 drivers/mmc/host/sdricoh_cs.c create mode 100644 drivers/mmc/host/sh_mmcif.c create mode 100644 drivers/mmc/host/sunxi-mmc.c create mode 100644 drivers/mmc/host/tifm_sd.c create mode 100644 drivers/mmc/host/tmio_mmc.c create mode 100644 drivers/mmc/host/tmio_mmc.h create mode 100644 drivers/mmc/host/tmio_mmc_core.c create mode 100644 drivers/mmc/host/toshsd.c create mode 100644 drivers/mmc/host/toshsd.h create mode 100644 drivers/mmc/host/uniphier-sd.c create mode 100644 drivers/mmc/host/usdhi6rol0.c create mode 100644 drivers/mmc/host/ushc.c create mode 100644 drivers/mmc/host/via-sdmmc.c create mode 100644 drivers/mmc/host/vub300.c create mode 100644 drivers/mmc/host/wbsd.c create mode 100644 drivers/mmc/host/wbsd.h create mode 100644 drivers/mmc/host/wmt-sdmmc.c diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig new file mode 100644 index 000000000..8fe4a0fd6 --- /dev/null +++ b/drivers/mmc/host/Kconfig @@ -0,0 +1,1107 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# MMC/SD host controller drivers +# + +comment "MMC/SD/SDIO Host Controller Drivers" + +config MMC_DEBUG + bool "MMC host drivers debugging" + depends on MMC != n + help + This is an option for use by developers; most people should + say N here. This enables MMC host driver debugging. Newly added + host drivers should not invent their own private debugging macros. + +config MMC_ARMMMCI + tristate "ARM AMBA Multimedia Card Interface support" + depends on ARM_AMBA + help + This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card + Interface (PL180 and PL181) support. If you have an ARM(R) + platform with a Multimedia Card slot, say Y or M here. + + If unsure, say N. + +config MMC_QCOM_DML + bool "Qualcomm Data Mover for SD Card Controller" + depends on MMC_ARMMMCI && QCOM_BAM_DMA + default y + help + This selects the Qualcomm Data Mover lite/local on SD Card controller. + This option enables DMA to work correctly; if you are using MMC on + Qualcomm SoCs, you will probably need this option to get DMA working. + + If unsure, say N. + +config MMC_STM32_SDMMC + bool "STMicroelectronics STM32 SDMMC Controller" + depends on MMC_ARMMMCI + default y + help + This selects the STMicroelectronics STM32 SDMMC host controller. + If you have an STM32 SDMMC host with internal DMA, say Y here. + + If unsure, say N. + +config MMC_PXA + tristate "Intel PXA25x/26x/27x Multimedia Card Interface support" + depends on ARCH_PXA + help + This selects the Intel(R) PXA(R) Multimedia card Interface. + If you have a PXA(R) platform with a Multimedia Card slot, + say Y or M here. + + If unsure, say N. + +config MMC_SDHCI + tristate "Secure Digital Host Controller Interface support" + depends on HAS_DMA + help + This selects the generic Secure Digital Host Controller Interface. + It is used by manufacturers such as Texas Instruments(R), Ricoh(R) + and Toshiba(R). Most controllers found in laptops are of this type. + + If you have a controller with this interface, say Y or M here. You + also need to enable an appropriate bus interface. + + If unsure, say N.
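For illustration (not part of the patch itself): the MMC_SDHCI entry above notes that the SDHCI core alone is not enough and that an appropriate bus interface must be enabled with it. In .config terms, a PCI-attached controller would be enabled with a stack of options like:

    CONFIG_MMC=y
    CONFIG_MMC_SDHCI=m
    CONFIG_MMC_SDHCI_PCI=m

The bus glue (here MMC_SDHCI_PCI) pulls in what else it needs through the select/depends lines of the entries that follow.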
+ +config MMC_SDHCI_IO_ACCESSORS + bool + depends on MMC_SDHCI + help + This is a silent Kconfig symbol that is selected by drivers that + need to override the SDHCI IO memory accessors. + +config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + bool + depends on MMC_SDHCI + select MMC_SDHCI_IO_ACCESSORS + help + This option is selected by drivers running on big endian hosts + and performing I/O to a SDHCI controller through a bus that + implements a hardware byte swapper using a 32-bit datum. + This endian mapping mode is called "data invariance" and + has the effect of scrambling the addresses and formats of data + accessed in sizes other than the datum size. + + This is the case for the Nintendo Wii SDHCI. + +config MMC_SDHCI_PCI + tristate "SDHCI support on PCI bus" + depends on MMC_SDHCI && PCI + select MMC_CQHCI + select IOSF_MBI if X86 + select MMC_SDHCI_IO_ACCESSORS + help + This selects the PCI Secure Digital Host Controller Interface. + Most controllers found today are PCI devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_RICOH_MMC + bool "Ricoh MMC Controller Disabler" + depends on MMC_SDHCI_PCI + default y + help + This adds a PCI quirk to disable the Ricoh MMC Controller. This + proprietary controller is unnecessary because the SDHCI driver + supports MMC cards on the SD controller, but if it is not + disabled, it will steal the MMC cards away - rendering them + useless. It is safe to select this even if you don't + have a Ricoh based card reader. + + If unsure, say Y. + +config MMC_SDHCI_ACPI + tristate "SDHCI support for ACPI enumerated SDHCI controllers" + depends on MMC_SDHCI && ACPI && PCI + select IOSF_MBI if X86 + help + This selects support for ACPI enumerated SDHCI controllers, + identified by ACPI Compatibility ID PNP0D40 or specific + ACPI Hardware IDs. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_PLTFM + tristate "SDHCI platform and OF driver helper" + depends on MMC_SDHCI + help + This selects the common helper functions support for Secure Digital + Host Controller Interface based platform and OF drivers. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_OF_ARASAN + tristate "SDHCI OF support for the Arasan SDHCI controllers" + depends on MMC_SDHCI_PLTFM + depends on OF + depends on COMMON_CLK + select MMC_CQHCI + help + This selects the Arasan Secure Digital Host Controller Interface + (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_OF_ASPEED + tristate "SDHCI OF support for the ASPEED SDHCI controller" + depends on MMC_SDHCI_PLTFM + depends on OF && OF_ADDRESS + select MMC_SDHCI_IO_ACCESSORS + help + This selects the ASPEED Secure Digital Host Controller Interface. + + If you have a controller with this interface, say Y or M here. You + also need to enable an appropriate bus interface. + + If unsure, say N.
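The "data invariance" scrambling described in the MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER entry above is easiest to see in code. A minimal sketch of read accessors behind such a swapper (illustrative names, PowerPC I/O helpers as on the Wii; not code from this patch): the full 32-bit datum passes through unchanged, while narrower accesses must flip the low address bits to reach the bytes where the swapper actually put them.

    static u32 swap32_readl(void __iomem *base, int reg)
    {
    	/* a full 32-bit datum is unaffected by the swapper */
    	return in_be32(base + reg);
    }

    static u16 swap32_readw(void __iomem *base, int reg)
    {
    	/* halfwords land at the byte-swapped offset within the datum */
    	return in_be16(base + (reg ^ 0x2));
    }

    static u8 swap32_readb(void __iomem *base, int reg)
    {
    	/* single bytes have both low address bits flipped */
    	return in_8(base + (reg ^ 0x3));
    }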
+ +config MMC_SDHCI_OF_AT91 + tristate "SDHCI OF support for the Atmel SDMMC controller" + depends on MMC_SDHCI_PLTFM + depends on OF && HAVE_CLK + help + This selects the Atmel SDMMC driver + +config MMC_SDHCI_OF_ESDHC + tristate "SDHCI OF support for the Freescale eSDHC controller" + depends on MMC_SDHCI_PLTFM + depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST + select MMC_SDHCI_IO_ACCESSORS + select FSL_GUTS + help + This selects the Freescale eSDHC controller support. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_OF_HLWD + tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" + depends on MMC_SDHCI_PLTFM + depends on PPC + select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + help + This selects the Secure Digital Host Controller Interface (SDHCI) + found in the "Hollywood" chipset of the Nintendo Wii video game + console. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_OF_DWCMSHC + tristate "SDHCI OF support for the Synopsys DWC MSHC" + depends on MMC_SDHCI_PLTFM + depends on OF + depends on COMMON_CLK + help + This selects Synopsys DesignWare Cores Mobile Storage Controller + support. + If you have a controller with this interface, say Y or M here. + If unsure, say N. + +config MMC_SDHCI_OF_SPARX5 + tristate "SDHCI OF support for the MCHP Sparx5 SoC" + depends on MMC_SDHCI_PLTFM + depends on ARCH_SPARX5 || COMPILE_TEST + help + This selects the Secure Digital Host Controller Interface (SDHCI) + found in the MCHP Sparx5 SoC. + + If you have a Sparx5 SoC with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_CADENCE + tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller" + depends on MMC_SDHCI_PLTFM + depends on OF + help + This selects the Cadence SD/SDIO/eMMC driver. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_CNS3XXX + tristate "SDHCI support on the Cavium Networks CNS3xxx SoC" + depends on ARCH_CNS3XXX || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + help + This selects the SDHCI support for CNS3xxx System-on-Chip devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_ESDHC_MCF + tristate "SDHCI support for the Freescale eSDHC ColdFire controller" + depends on M5441x + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the Freescale eSDHC controller support for + ColdFire mcf5441x devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_ESDHC_IMX + tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller" + depends on ARCH_MXC || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI + help + This selects the Freescale eSDHC/uSDHC controller support + found on i.MX25, i.MX35 i.MX5x and i.MX6x. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_DOVE + tristate "SDHCI support on Marvell's Dove SoC" + depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the Secure Digital Host Controller Interface in + Marvell's Dove SoC. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
+ +config MMC_SDHCI_TEGRA + tristate "SDHCI platform support for the Tegra SD/MMC Controller" + depends on ARCH_TEGRA || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI + help + This selects the Tegra SD/MMC controller. If you have a Tegra + platform with SD or MMC devices, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_S3C + tristate "SDHCI support on Samsung S3C SoC" + depends on MMC_SDHCI + depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST + help + This selects the Secure Digital Host Controller Interface (SDHCI) + often referred to as the HSMMC block in some of the Samsung S3C + range of SoCs. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_SIRF + tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs" + depends on ARCH_SIRF || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the SDHCI support for SiRF System-on-Chip devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_PXAV3 + tristate "Marvell MMP2 SD Host Controller support (PXAV3)" + depends on CLKDEV_LOOKUP + depends on MMC_SDHCI_PLTFM + depends on ARCH_BERLIN || ARCH_MMP || ARCH_MVEBU || COMPILE_TEST + default CPU_MMP2 + help + This selects the Marvell(R) PXAV3 SD Host Controller. + If you have a MMP2 platform with SD Host Controller + and a card slot, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_PXAV2 + tristate "Marvell PXA9XX SD Host Controller support (PXAV2)" + depends on CLKDEV_LOOKUP + depends on MMC_SDHCI_PLTFM + depends on ARCH_MMP || COMPILE_TEST + default CPU_PXA910 + help + This selects the Marvell(R) PXAV2 SD Host Controller. + If you have a PXA9XX platform with SD Host Controller + and a card slot, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_SPEAR + tristate "SDHCI support on ST SPEAr platform" + depends on MMC_SDHCI + depends on PLAT_SPEAR || COMPILE_TEST + depends on OF + help + This selects the Secure Digital Host Controller Interface (SDHCI) + often referred to as the HSMMC block in some of the ST SPEAR range + of SoCs. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_S3C_DMA + bool "DMA support on S3C SDHCI" + depends on MMC_SDHCI_S3C + help + Enable DMA support on the Samsung S3C SDHCI glue. The DMA + has proved to be problematic if the controller encounters + certain errors, and thus should be treated with care. + + YMMV. + +config MMC_SDHCI_BCM_KONA + tristate "SDHCI support on Broadcom KONA platform" + depends on ARCH_BCM_MOBILE || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + help + This selects the Broadcom Kona Secure Digital Host Controller + Interface (SDHCI) support. + This is used in Broadcom mobile SoCs. + + If you have a controller with this interface, say Y or M here. + +config MMC_SDHCI_F_SDH30 + tristate "SDHCI support for Fujitsu Semiconductor F_SDH30" + depends on MMC_SDHCI_PLTFM + depends on OF || ACPI + help + This selects the Secure Digital Host Controller Interface (SDHCI) + needed by some Fujitsu SoCs for MMC / SD / SDIO support. + If you have a controller with this interface, say Y or M here. + + If unsure, say N.
+ +config MMC_SDHCI_MILBEAUT + tristate "SDHCI support for Socionext Milbeaut Series using F_SDH30" + depends on MMC_SDHCI_PLTFM + depends on OF + help + This selects the Secure Digital Host Controller Interface (SDHCI) + needed by the Milbeaut SoC for MMC / SD / SDIO support. + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_IPROC + tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller" + depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + depends on OF || ACPI + default ARCH_BCM_IPROC + select MMC_SDHCI_IO_ACCESSORS + help + This selects the iProc SD/MMC controller. + + If you have a BCM2835 or IPROC platform with SD or MMC devices, + say Y or M here. + + If unsure, say N. + +config MMC_MESON_GX + tristate "Amlogic S905/GX*/AXG SD/MMC Host Controller support" + depends on ARCH_MESON || COMPILE_TEST + depends on COMMON_CLK + help + This selects support for the Amlogic SD/MMC Host Controller + found on the S905/GX*/AXG family of SoCs. This controller is + MMC 5.1 compliant and supports SD, eMMC and SDIO interfaces. + + If you have a controller with this interface, say Y here. + +config MMC_MESON_MX_SDHC + tristate "Amlogic Meson SDHC Host Controller support" + depends on (ARM && ARCH_MESON) || COMPILE_TEST + depends on COMMON_CLK + depends on OF + help + This selects support for the SDHC Host Controller on + Amlogic Meson6, Meson8, Meson8b and Meson8m2 SoCs. + The controller supports the SD/SDIO Spec 3.x and eMMC Spec 4.5x + with 1, 4, and 8 bit bus widths. + + If you have a controller with this interface, say Y or M here. + If unsure, say N. + +config MMC_MESON_MX_SDIO + tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support" + depends on ARCH_MESON || COMPILE_TEST + depends on COMMON_CLK + depends on OF_ADDRESS + help + This selects support for the SD/MMC Host Controller on + Amlogic Meson6, Meson8 and Meson8b SoCs. + + If you have a controller with this interface, say Y or M here. + If unsure, say N. + +config MMC_MOXART + tristate "MOXART SD/MMC Host Controller support" + depends on ARCH_MOXART || COMPILE_TEST + help + This selects support for the MOXART SD/MMC Host Controller. + MOXA provides one multi-functional card reader which can + be found on some embedded hardware such as UC-7112-LX. + If you have a controller with this interface, say Y here. + +config MMC_SDHCI_ST + tristate "SDHCI support on STMicroelectronics SoC" + depends on ARCH_STI || FSP2 || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the Secure Digital Host Controller Interface in + STMicroelectronics SoCs. + + If you have a controller with this interface, say Y or M here. + If unsure, say N. + +config MMC_OMAP + tristate "TI OMAP Multimedia Card Interface support" + depends on ARCH_OMAP + depends on TPS65010 || !MACH_OMAP_H2 + help + This selects the TI OMAP Multimedia card Interface. + If you have an OMAP board with a Multimedia Card slot, + say Y or M here. + + If unsure, say N. + +config MMC_OMAP_HS + tristate "TI OMAP High Speed Multimedia Card Interface support" + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST + help + This selects the TI OMAP High Speed Multimedia card Interface. + If you have an omap2plus board with a Multimedia Card slot, + say Y or M here. + + If unsure, say N.
+ +config MMC_WBSD + tristate "Winbond W83L51xD SD/MMC Card Interface support" + depends on ISA_DMA_API && !M68K + help + This selects the Winbond(R) W83L51xD Secure digital and + Multimedia card Interface. + If you have a machine with an integrated W83L518D or W83L519D + SD/MMC card reader, say Y or M here. + + If unsure, say N. + +config MMC_ALCOR + tristate "Alcor Micro/Alcor Link SD/MMC controller" + depends on MISC_ALCOR_PCI + help + Say Y here to include driver code to support the SD/MMC card interface + of an Alcor Micro PCI-E card reader. + +config MMC_AU1X + bool "Alchemy AU1XX0 MMC Card Interface support" + depends on MIPS_ALCHEMY + depends on MMC=y + help + This selects the AMD Alchemy(R) Multimedia card interface. + If you have an Alchemy platform with an MMC slot, say Y here. + + If unsure, say N. + +config MMC_ATMELMCI + tristate "Atmel SD/MMC Driver (Multimedia Card Interface)" + depends on ARCH_AT91 + help + This selects the Atmel Multimedia Card Interface driver. + If you have an AT91 platform with a Multimedia Card slot, + say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_MSM + tristate "Qualcomm SDHCI Controller Support" + depends on ARCH_QCOM || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI + help + This selects the Secure Digital Host Controller Interface (SDHCI) + support present in Qualcomm SOCs. The controller supports + SD/MMC/SDIO devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_MXC + tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support" + depends on ARCH_MXC || PPC_MPC512x + help + This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x + Multimedia Card Interface. If you have an i.MX or MPC512x platform + with a Multimedia Card slot, say Y or M here. + + If unsure, say N. + +config MMC_MXS + tristate "Freescale MXS Multimedia Card Interface support" + depends on ARCH_MXS && MXS_DMA + help + This selects the Freescale SSP MMC controller found on MXS based + platforms like mx23/28. + + If unsure, say N. + +config MMC_TIFM_SD + tristate "TI Flash Media MMC/SD Interface support" + depends on PCI + select TIFM_CORE + help + Say Y here if you want to be able to access MMC/SD cards with + the Texas Instruments(R) Flash Media card reader, found in many + laptops. + This option 'selects' (turns on, enables) 'TIFM_CORE', but you + probably also need an appropriate card reader host adapter, such as + 'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support + (TIFM_7XX1)'. + + To compile this driver as a module, choose M here: the + module will be called tifm_sd. + +config MMC_MVSDIO + tristate "Marvell MMC/SD/SDIO host driver" + depends on PLAT_ORION || (COMPILE_TEST && ARM) + depends on OF + help + This selects the Marvell SDIO host driver. + SDIO may currently be found on the Kirkwood 88F6281 and 88F6192 + SoC controllers. + + To compile this driver as a module, choose M here: the + module will be called mvsdio. + +config MMC_DAVINCI + tristate "TI DAVINCI Multimedia Card Interface support" + depends on ARCH_DAVINCI || COMPILE_TEST + help + This selects the TI DAVINCI Multimedia card Interface. + If you have a DAVINCI board with a Multimedia Card slot, + say Y or M here. If unsure, say N. + +config MMC_GOLDFISH + tristate "goldfish qemu Multimedia Card Interface support" + depends on GOLDFISH || COMPILE_TEST + help + This selects the Goldfish Multimedia card Interface emulation + found on the Goldfish Android virtual device.
+ +config MMC_SPI + tristate "MMC/SD/SDIO over SPI" + depends on SPI_MASTER + select CRC7 + select CRC_ITU_T + help + Some systems access MMC/SD/SDIO cards using a SPI controller + instead of using a "native" MMC/SD/SDIO controller. This has a + disadvantage of being relatively high overhead, but a compensating + advantage of working on many systems without dedicated MMC/SD/SDIO + controllers. + + If unsure, or if your system has no SPI master driver, say N. + +config MMC_S3C + tristate "Samsung S3C SD/MMC Card Interface support" + depends on ARCH_S3C24XX + depends on S3C24XX_DMAC + help + This selects a driver for the MCI interface found in + Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. + If you have a board based on one of those and an MMC/SD + slot, say Y or M here. + + If unsure, say N. + +config MMC_S3C_HW_SDIO_IRQ + bool "Hardware support for SDIO IRQ" + depends on MMC_S3C + help + Enable the hardware support for SDIO interrupts instead of using + the generic polling code. + +choice + prompt "Samsung S3C SD/MMC transfer code" + depends on MMC_S3C + +config MMC_S3C_PIO + bool "Use PIO transfers only" + help + Use PIO to transfer data between memory and the hardware. + + PIO is slower than DMA as it requires CPU instructions to + move the data. This has been the traditional default for + the S3C MCI driver. + +config MMC_S3C_DMA + bool "Use DMA transfers only" + help + Use DMA to transfer data between memory and the hardware. + + Currently, the DMA support in this driver seems to not be + working properly and needs to be debugged before this + option is useful. + +endchoice + +config MMC_SDRICOH_CS + tristate "MMC/SD driver for Ricoh Bay1Controllers" + depends on PCI && PCMCIA + help + Say Y here if your notebook reports a Ricoh Bay1Controller PCMCIA + card whenever you insert an MMC or SD card into the card slot. + + To compile this driver as a module, choose M here: the + module will be called sdricoh_cs. + +config MMC_SDHCI_SPRD + tristate "Spreadtrum SDIO host Controller" + depends on ARCH_SPRD || COMPILE_TEST + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + select MMC_HSQ + help + This selects the SDIO Host Controller in Spreadtrum + SoCs; this driver supports R11 (IP version: R11P0). + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_TMIO_CORE + tristate + +config MMC_TMIO + tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" + depends on MFD_TMIO || MFD_ASIC3 || COMPILE_TEST + select MMC_TMIO_CORE + help + This provides support for the SD/MMC cell found in TC6393XB, + T7L66XB and also HTC ASIC3. + +config MMC_SDHI + tristate "Renesas SDHI SD/SDIO controller support" + depends on SUPERH || ARCH_RENESAS || COMPILE_TEST + select MMC_TMIO_CORE + help + This provides support for the SDHI SD/SDIO controller found in + Renesas SuperH, ARM and ARM64 based SoCs. + +config MMC_SDHI_SYS_DMAC + tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC" + depends on MMC_SDHI + default MMC_SDHI if (SUPERH || ARM) + help + This provides DMA support for SDHI SD/SDIO controllers + using SYS-DMAC via DMA Engine. This supports the controllers + found in SuperH and Renesas ARM based SoCs.
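Stepping back to the MMC_SPI option at the top of this group: such SPI-attached slots are normally described in the devicetree, which of_mmc_spi.c (added later in this patch) parses. A sketch of a slot node (controller label and values are illustrative; the compatible string is the documented "mmc-spi-slot" binding):

    &spi0 {
    	mmc-slot@0 {
    		compatible = "mmc-spi-slot";
    		reg = <0>;
    		spi-max-frequency = <25000000>;
    		voltage-ranges = <3300 3300>;
    	};
    };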
+ +config MMC_SDHI_INTERNAL_DMAC + tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering" + depends on ARM64 || ARCH_R7S9210 || ARCH_R8A77470 || COMPILE_TEST + depends on MMC_SDHI + default MMC_SDHI if (ARM64 || ARCH_R7S9210 || ARCH_R8A77470) + help + This provides DMA support for SDHI SD/SDIO controllers + using on-chip bus mastering. This supports the controllers + found in arm64 based SoCs. This controller is also found in + some RZ family SoCs. + +config MMC_UNIPHIER + tristate "UniPhier SD/eMMC Host Controller support" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF + select MMC_TMIO_CORE + help + This provides support for the SD/eMMC controller found in + UniPhier SoCs. The eMMC variant of this controller is used + only for 32-bit SoCs. + +config MMC_CB710 + tristate "ENE CB710 MMC/SD Interface support" + depends on PCI + select CB710_CORE + help + This option enables support for MMC/SD part of ENE CB710/720 Flash + memory card reader found in some laptops (i.e. some versions of + HP Compaq nx9500). + + This driver can also be built as a module. If so, the module + will be called cb710-mmc. + +config MMC_VIA_SDMMC + tristate "VIA SD/MMC Card Reader Driver" + depends on PCI + help + This selects the VIA SD/MMC Card Reader driver, say Y or M here. + VIA provides one multi-functional card reader which is integrated into + some motherboards manufactured by VIA. This card reader supports + SD/MMC/SDHC. + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_CAVIUM_OCTEON + tristate "Cavium OCTEON SD/MMC Card Interface support" + depends on CAVIUM_OCTEON_SOC + help + This selects Cavium OCTEON SD/MMC card Interface. + If you have an OCTEON board with a Multimedia Card slot, + say Y or M here. + + If unsure, say N. + +config MMC_CAVIUM_THUNDERX + tristate "Cavium ThunderX SD/MMC Card Interface support" + depends on PCI && 64BIT && (ARM64 || COMPILE_TEST) + depends on GPIO_THUNDERX + depends on OF_ADDRESS + help + This selects Cavium ThunderX SD/MMC Card Interface. + If you have a Cavium ARM64 board with a Multimedia Card slot + or built-in eMMC chip, say Y or M here. If built as a module, + the module will be called thunderx_mmc.ko. + +config MMC_DW + tristate "Synopsys DesignWare Memory Card Interface" + depends on ARC || ARM || ARM64 || MIPS || RISCV || CSKY || COMPILE_TEST + help + This selects support for the Synopsys DesignWare Mobile Storage IP + block; this provides host support for SD and MMC interfaces, in + PIO, internal DMA and external DMA modes. + +config MMC_DW_PLTFM + tristate "Synopsys Designware MCI Support as platform device" + depends on MMC_DW + default y + help + This selects the common helper functions support for Host Controller + Interface based platform driver. Please select this option if the IP + is present as a platform device. This is the common interface for the + Synopsys Designware IP. + + If you have a controller with this interface, say Y or M here. + + If unsure, say Y. + +config MMC_DW_BLUEFIELD + tristate "BlueField specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW + select MMC_DW_PLTFM + help + This selects support for Mellanox BlueField SoC specific extensions to + the Synopsys DesignWare Memory Card Interface driver. Select this + option for platforms based on Mellanox BlueField SoC's.
+ +config MMC_DW_EXYNOS + tristate "Exynos specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW + select MMC_DW_PLTFM + help + This selects support for Samsung Exynos SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on Exynos4 and Exynos5 SoC's. + +config MMC_DW_HI3798CV200 + tristate "Hi3798CV200 specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW + select MMC_DW_PLTFM + help + This selects support for HiSilicon Hi3798CV200 SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on HiSilicon Hi3798CV200 SoC. + +config MMC_DW_K3 + tristate "K3 specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW + select MMC_DW_PLTFM + help + This selects support for Hisilicon K3 SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on Hisilicon K3 SoC's. + +config MMC_DW_PCI + tristate "Synopsys Designware MCI support on PCI bus" + depends on MMC_DW && PCI + help + This selects the PCI bus for the Synopsys Designware Mobile Storage IP. + Select this option if the IP is present on PCI platform. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_DW_ROCKCHIP + tristate "Rockchip specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW && ARCH_ROCKCHIP + select MMC_DW_PLTFM + help + This selects support for Rockchip SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on RK3066, RK3188 and RK3288 SoC's. + +config MMC_DW_ZX + tristate "ZTE specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW && ARCH_ZX + select MMC_DW_PLTFM + help + This selects support for ZTE SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on ZX296718 SoC's. + +config MMC_SH_MMCIF + tristate "SuperH Internal MMCIF support" + depends on SUPERH || ARCH_RENESAS || COMPILE_TEST + help + This selects the MMC Host Interface controller (MMCIF) found in various + Renesas SoCs for SH and ARM architectures. + + +config MMC_JZ4740 + tristate "Ingenic JZ47xx SD/Multimedia Card Interface support" + depends on MIPS + help + This selects support for the SD/MMC controller on Ingenic + JZ4740, JZ4750, JZ4770 and JZ4780 SoCs. + + If you have a board based on such a SoC and with a SD/MMC slot, + say Y or M here. + +config MMC_VUB300 + tristate "VUB300 USB to SDIO/SD/MMC Host Controller support" + depends on USB + help + This selects support for Elan Digital Systems' VUB300 chip. + + The VUB300 is a USB-SDIO Host Controller Interface chip + that enables the host computer to use SDIO/SD/MMC cards + via a USB 2.0 or USB 1.1 host. + + The VUB300 chip will be found in both physically separate + USB to SDIO/SD/MMC adapters and embedded on some motherboards. + + The VUB300 chip supports SD and MMC memory cards in addition + to single and multifunction SDIO cards. + + Some SDIO cards will need a firmware file to be loaded and + sent to VUB300 chip in order to achieve better data throughput. + Download these "Offload Pseudocode" from Elan Digital Systems' + web-site http://www.elandigitalsystems.com/support/downloads.php + and put them in /lib/firmware. 
Note that without these additional + firmware files the VUB300 chip will still function, but not at + the best obtainable data rate. + + To compile this mmc host controller driver as a module, + choose M here: the module will be called vub300. + + If you have a computer with an embedded VUB300 chip + or if you intend to connect a USB adapter based on a + VUB300 chip, say Y or M here. + +config MMC_USHC + tristate "USB SD Host Controller (USHC) support" + depends on USB + help + This selects support for USB SD Host Controllers based on + the Cypress Astoria chip with firmware compliant with CSR's + USB SD Host Controller specification (CS-118793-SP). + + CSR boards with this device include: USB<>SDIO (M1985v2), + and Ultrasira. + + Note: These controllers only support SDIO cards and do not + support MMC or SD memory cards. + +config MMC_WMT + tristate "Wondermedia SD/MMC Host Controller support" + depends on ARCH_VT8500 + default y + help + This selects support for the SD/MMC Host Controller on + Wondermedia WM8505/WM8650 based SoCs. + + To compile this driver as a module, choose M here: the + module will be called wmt-sdmmc. + +config MMC_USDHI6ROL0 + tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support" + depends on HAS_DMA + help + This selects support for the Renesas USDHI6ROL0 SD/SDIO + Host Controller. + +config MMC_REALTEK_PCI + tristate "Realtek PCI-E SD/MMC Card Interface Driver" + depends on MISC_RTSX_PCI + help + Say Y here to include driver code to support the SD/MMC card interface + of a Realtek PCI-E card reader. + +config MMC_REALTEK_USB + tristate "Realtek USB SD/MMC Card Interface Driver" + depends on MISC_RTSX_USB + help + Say Y here to include driver code to support the SD/MMC card interface + of a Realtek RTS5129/39 series card reader. + +config MMC_SUNXI + tristate "Allwinner sunxi SD/MMC Host Controller support" + depends on ARCH_SUNXI || COMPILE_TEST + help + This selects support for the SD/MMC Host Controller on + Allwinner sunxi SoCs. + +config MMC_CQHCI + tristate "Command Queue Host Controller Interface support" + depends on HAS_DMA + help + This selects the Command Queue Host Controller Interface (CQHCI) + support present in host controllers of Qualcomm Technologies, Inc., + amongst others. + This controller supports eMMC devices with command queue support. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_HSQ + tristate "MMC Host Software Queue support" + help + This selects the MMC Host Software Queue support. This may increase + performance if the host controller and its driver support it. + + If you have a controller/driver supporting this interface, say Y or M + here. + + If unsure, say N. + +config MMC_TOSHIBA_PCI + tristate "Toshiba Type A SD/MMC Card Interface Driver" + depends on PCI + +config MMC_BCM2835 + tristate "Broadcom BCM2835 SDHOST MMC Controller support" + depends on ARCH_BCM2835 || COMPILE_TEST + help + This selects the BCM2835 SDHOST MMC controller. If you have + a BCM2835 platform with SD or MMC devices, say Y or M here. + + Note that the BCM2835 has two SD controllers: The Arasan + sdhci controller (supported by MMC_SDHCI_IPROC) and a custom + sdhost controller (supported by this driver). + + If unsure, say N. + +config MMC_MTK + tristate "MediaTek SD/MMC Card Interface support" + depends on HAS_DMA + select REGULATOR + select MMC_CQHCI + help + This selects the MediaTek(R) Secure digital and Multimedia card Interface. + If you have a machine with an integrated SD/MMC card reader, say Y or M here.
+ This is needed if support for any SD/SDIO/MMC devices is required. + If unsure, say N. + +config MMC_SDHCI_MICROCHIP_PIC32 + tristate "Microchip PIC32MZDA SDHCI support" + depends on MMC_SDHCI && PIC32MZDA && MMC_SDHCI_PLTFM + help + This selects the Secure Digital Host Controller Interface (SDHCI) + for PIC32MZDA platform. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_BRCMSTB + tristate "Broadcom SDIO/SD/MMC support" + depends on ARCH_BRCMSTB || BMIPS_GENERIC + depends on MMC_SDHCI_PLTFM + select MMC_CQHCI + default y + help + This selects support for the SDIO/SD/MMC Host Controller on + Broadcom STB SoCs. + + If unsure, say Y. + +config MMC_SDHCI_XENON + tristate "Marvell Xenon eMMC/SD/SDIO SDHCI driver" + depends on MMC_SDHCI_PLTFM + help + This selects Marvell Xenon eMMC/SD/SDIO SDHCI. + If you have a controller with this interface, say Y or M here. + If unsure, say N. + +config MMC_SDHCI_OMAP + tristate "TI SDHCI Controller Support" + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST + depends on MMC_SDHCI_PLTFM && OF + select THERMAL + imply TI_SOC_THERMAL + select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE + help + This selects the Secure Digital Host Controller Interface (SDHCI) + support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller + supports SD/MMC/SDIO devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_SDHCI_AM654 + tristate "Support for the SDHCI Controller in TI's AM654 SOCs" + depends on ARCH_K3 || COMPILE_TEST + depends on MMC_SDHCI_PLTFM && OF + select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI + select REGMAP_MMIO + help + This selects the Secure Digital Host Controller Interface (SDHCI) + support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller + supports SD/MMC/SDIO devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_OWL + tristate "Actions Semi Owl SD/MMC Host Controller support" + depends on HAS_DMA + depends on ARCH_ACTIONS || COMPILE_TEST + help + This selects support for the SD/MMC Host Controller on + Actions Semi Owl SoCs. 
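Most of the "SDHCI support on X" entries above are thin glue drivers layered on MMC_SDHCI_PLTFM. As a rough sketch of that pattern (driver name, compatible string and quirk are hypothetical, not taken from this patch), a minimal glue driver amounts to a platform-data table plus registration with the sdhci-pltfm helpers:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    #include "sdhci-pltfm.h"

    static const struct sdhci_pltfm_data foo_sdhci_pdata = {
    	/* quirk chosen purely as an example */
    	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
    };

    static int foo_sdhci_probe(struct platform_device *pdev)
    {
    	/* allocates the host, parses common DT properties,
    	 * maps the registers and registers with the MMC core
    	 */
    	return sdhci_pltfm_register(pdev, &foo_sdhci_pdata, 0);
    }

    static int foo_sdhci_remove(struct platform_device *pdev)
    {
    	return sdhci_pltfm_unregister(pdev);
    }

    static const struct of_device_id foo_sdhci_of_match[] = {
    	{ .compatible = "acme,foo-sdhci" },
    	{ }
    };
    MODULE_DEVICE_TABLE(of, foo_sdhci_of_match);

    static struct platform_driver foo_sdhci_driver = {
    	.driver = {
    		.name = "foo-sdhci",
    		.of_match_table = foo_sdhci_of_match,
    	},
    	.probe = foo_sdhci_probe,
    	.remove = foo_sdhci_remove,
    };
    module_platform_driver(foo_sdhci_driver);

    MODULE_LICENSE("GPL");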
+ +config MMC_SDHCI_EXTERNAL_DMA + bool diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile new file mode 100644 index 000000000..451c25fc2 --- /dev/null +++ b/drivers/mmc/host/Makefile @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for MMC/SD host controller drivers +# + +obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o +armmmci-y := mmci.o +armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o +armmmci-$(CONFIG_MMC_STM32_SDMMC) += mmci_stm32_sdmmc.o +obj-$(CONFIG_MMC_PXA) += pxamci.o +obj-$(CONFIG_MMC_MXC) += mxcmmc.o +obj-$(CONFIG_MMC_MXS) += mxs-mmc.o +obj-$(CONFIG_MMC_SDHCI) += sdhci.o +obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o +sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \ + sdhci-pci-dwc-mshc.o sdhci-pci-gli.o +obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o +obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o +obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o +obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o +obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o +obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o +obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o +obj-$(CONFIG_MMC_SDHCI_MILBEAUT) += sdhci-milbeaut.o +obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o +obj-$(CONFIG_MMC_SDHCI_AM654) += sdhci_am654.o +obj-$(CONFIG_MMC_WBSD) += wbsd.o +obj-$(CONFIG_MMC_AU1X) += au1xmmc.o +obj-$(CONFIG_MMC_ALCOR) += alcor.o +obj-$(CONFIG_MMC_MTK) += mtk-sd.o +obj-$(CONFIG_MMC_OMAP) += omap.o +obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o +obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o +obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o +obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o +obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o +obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o +obj-$(CONFIG_MMC_SPI) += mmc_spi.o +ifeq ($(CONFIG_OF),y) +obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o +endif +obj-$(CONFIG_MMC_S3C) += s3cmci.o +obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o +obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o +obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o +obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o +obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o +obj-$(CONFIG_MMC_UNIPHIER) += uniphier-sd.o +obj-$(CONFIG_MMC_CB710) += cb710-mmc.o +obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o +octeon-mmc-objs := cavium.o cavium-octeon.o +obj-$(CONFIG_MMC_CAVIUM_OCTEON) += octeon-mmc.o +thunderx-mmc-objs := cavium.o cavium-thunderx.o +obj-$(CONFIG_MMC_CAVIUM_THUNDERX) += thunderx-mmc.o +obj-$(CONFIG_MMC_DW) += dw_mmc.o +obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o +obj-$(CONFIG_MMC_DW_BLUEFIELD) += dw_mmc-bluefield.o +obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o +obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o +obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o +obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o +obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o +obj-$(CONFIG_MMC_DW_ZX) += dw_mmc-zx.o +obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o +obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o +obj-$(CONFIG_MMC_VUB300) += vub300.o +obj-$(CONFIG_MMC_USHC) += ushc.o +obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o +obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o +meson-mx-sdhc-objs := meson-mx-sdhc-clkc.o meson-mx-sdhc-mmc.o +obj-$(CONFIG_MMC_MESON_MX_SDHC) += meson-mx-sdhc.o +obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o +obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o +obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o +obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o +obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o +obj-$(CONFIG_MMC_BCM2835) += bcm2835.o +obj-$(CONFIG_MMC_OWL) += owl-mmc.o + 
+ +obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o +obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o + +obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o +obj-$(CONFIG_MMC_SDHCI_CADENCE) += sdhci-cadence.o +obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o +obj-$(CONFIG_MMC_SDHCI_ESDHC_MCF) += sdhci-esdhc-mcf.o +obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o +obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o +obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o +obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o +obj-$(CONFIG_MMC_SDHCI_OF_ASPEED) += sdhci-of-aspeed.o +obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o +obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o +obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o +obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o +obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o +obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o +obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o +obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o +obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o +obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o +obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o +obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o +obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o +obj-$(CONFIG_MMC_CQHCI) += cqhci.o +obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o + +ifeq ($(CONFIG_CB710_DEBUG),y) + CFLAGS-cb710-mmc += -DDEBUG +endif + +obj-$(CONFIG_MMC_SDHCI_XENON) += sdhci-xenon-driver.o +sdhci-xenon-driver-y += sdhci-xenon.o sdhci-xenon-phy.o diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c new file mode 100644 index 000000000..d01df01d4 --- /dev/null +++ b/drivers/mmc/host/alcor.c @@ -0,0 +1,1192 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Oleksij Rempel + * + * Driver for Alcor Micro AU6601 and AU6621 controllers + */ + +/* Note: this driver was created without any documentation, based + * on sniffing, testing and in some cases mimicking the original driver. + * If someone with documentation or more experience in SD/MMC or + * reverse engineering than me reads this, please review this driver + * and question everything I did.
2018 Oleksij Rempel + */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/pm.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> + +#include <linux/alcor_pci.h> + +enum alcor_cookie { + COOKIE_UNMAPPED, + COOKIE_PRE_MAPPED, + COOKIE_MAPPED, +}; + +struct alcor_pll_conf { + unsigned int clk_src_freq; + unsigned int clk_src_reg; + unsigned int min_div; + unsigned int max_div; +}; + +struct alcor_sdmmc_host { + struct device *dev; + struct alcor_pci_priv *alcor_pci; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + unsigned int dma_on:1; + + struct mutex cmd_mutex; + + struct delayed_work timeout_work; + + struct sg_mapping_iter sg_miter; /* SG state for PIO */ + struct scatterlist *sg; + unsigned int blocks; /* remaining PIO blocks */ + int sg_count; + + u32 irq_status_sd; + unsigned char cur_power_mode; +}; + +static const struct alcor_pll_conf alcor_pll_cfg[] = { + /* MHZ, CLK src, min div, max div */ + { 31250000, AU6601_CLK_31_25_MHZ, 1, 511}, + { 48000000, AU6601_CLK_48_MHZ, 1, 511}, + {125000000, AU6601_CLK_125_MHZ, 1, 511}, + {384000000, AU6601_CLK_384_MHZ, 1, 511}, +}; + +static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr, + u8 clear, u8 set) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + u32 var; + + var = alcor_read8(priv, addr); + var &= ~clear; + var |= set; + alcor_write8(priv, var, addr); +} + +/* As soon as irqs are masked, some status updates may be missed. + * Use this with care. + */ +static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + + alcor_write32(priv, 0, AU6601_REG_INT_ENABLE); +} + +static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + + alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK | + AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE | + AU6601_INT_OVER_CURRENT_ERR, + AU6601_REG_INT_ENABLE); +} + +static void alcor_reset(struct alcor_sdmmc_host *host, u8 val) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + int i; + + alcor_write8(priv, val | AU6601_BUF_CTRL_RESET, + AU6601_REG_SW_RESET); + for (i = 0; i < 100; i++) { + if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val)) + return; + udelay(50); + } + dev_err(host->dev, "%s: timeout\n", __func__); +} + +/* + * Perform DMA I/O of a single page. + */ +static void alcor_data_set_dma(struct alcor_sdmmc_host *host) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + u32 addr; + + if (!host->sg_count) + return; + + if (!host->sg) { + dev_err(host->dev, "have blocks, but no SG\n"); + return; + } + + if (!sg_dma_len(host->sg)) { + dev_err(host->dev, "DMA SG len == 0\n"); + return; + } + + + addr = (u32)sg_dma_address(host->sg); + + alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR); + host->sg = sg_next(host->sg); + host->sg_count--; +} + +static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + struct mmc_data *data = host->data; + u8 ctrl = 0; + + if (data->flags & MMC_DATA_WRITE) + ctrl |= AU6601_DATA_WRITE; + + if (data->host_cookie == COOKIE_MAPPED) { + /* + * For DMA transfers, this function is called just once, + * at the start of the operation. The hardware can only + * perform DMA I/O on a single page at a time, so here + * we kick off the transfer with the first page, and expect + * subsequent pages to be transferred upon IRQ events + * indicating that the single-page DMA was completed.
+ */ + alcor_data_set_dma(host); + ctrl |= AU6601_DATA_DMA_MODE; + host->dma_on = 1; + alcor_write32(priv, data->sg_count * 0x1000, + AU6601_REG_BLOCK_SIZE); + } else { + /* + * For PIO transfers, we break down each operation + * into several sector-sized transfers. When one sector is + * complete, the IRQ handler will call this function again + * to kick off the transfer of the next sector. + */ + alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE); + } + + alcor_write8(priv, ctrl | AU6601_DATA_START_XFER, + AU6601_DATA_XFER_CTRL); +} + +static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + size_t blksize, len; + u8 *buf; + + if (!host->blocks) + return; + + if (host->dma_on) { + dev_err(host->dev, "configured DMA but got PIO request.\n"); + return; + } + + if (!!(host->data->flags & MMC_DATA_READ) != read) { + dev_err(host->dev, "got unexpected direction %i != %i\n", + !!(host->data->flags & MMC_DATA_READ), read); + } + + if (!sg_miter_next(&host->sg_miter)) + return; + + blksize = host->data->blksz; + len = min(host->sg_miter.length, blksize); + + dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n", + read ? "read" : "write", blksize); + + host->sg_miter.consumed = len; + host->blocks--; + + buf = host->sg_miter.addr; + + if (read) + ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2); + else + iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2); + + sg_miter_stop(&host->sg_miter); +} + +static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host) +{ + unsigned int flags = SG_MITER_ATOMIC; + struct mmc_data *data = host->data; + + if (data->flags & MMC_DATA_READ) + flags |= SG_MITER_TO_SG; + else + flags |= SG_MITER_FROM_SG; + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); +} + +static void alcor_prepare_data(struct alcor_sdmmc_host *host, + struct mmc_command *cmd) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + struct mmc_data *data = cmd->data; + + if (!data) + return; + + + host->data = data; + host->data->bytes_xfered = 0; + host->blocks = data->blocks; + host->sg = data->sg; + host->sg_count = data->sg_count; + dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n", + host->sg_count, host->blocks); + + if (data->host_cookie != COOKIE_MAPPED) + alcor_prepare_sg_miter(host); + + alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL); +} + +static void alcor_send_cmd(struct alcor_sdmmc_host *host, + struct mmc_command *cmd, bool set_timeout) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + unsigned long timeout = 0; + u8 ctrl = 0; + + host->cmd = cmd; + alcor_prepare_data(host, cmd); + + dev_dbg(host->dev, "send CMD.
opcode: 0x%02x, arg: 0x%08x\n", + cmd->opcode, cmd->arg); + alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE); + alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG); + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + ctrl = AU6601_CMD_NO_RESP; + break; + case MMC_RSP_R1: + ctrl = AU6601_CMD_6_BYTE_CRC; + break; + case MMC_RSP_R1B: + ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY; + break; + case MMC_RSP_R2: + ctrl = AU6601_CMD_17_BYTE_CRC; + break; + case MMC_RSP_R3: + ctrl = AU6601_CMD_6_BYTE_WO_CRC; + break; + default: + dev_err(host->dev, "%s: cmd->flags (0x%02x) is not valid\n", + mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd)); + break; + } + + if (set_timeout) { + if (!cmd->data && cmd->busy_timeout) + timeout = cmd->busy_timeout; + else + timeout = 10000; + + schedule_delayed_work(&host->timeout_work, + msecs_to_jiffies(timeout)); + } + + dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout); + alcor_write8(priv, ctrl | AU6601_CMD_START_XFER, + AU6601_CMD_XFER_CTRL); +} + +static void alcor_request_complete(struct alcor_sdmmc_host *host, + bool cancel_timeout) +{ + struct mmc_request *mrq; + + /* + * If this work gets rescheduled while running, it will + * be run again afterwards but without any active request. + */ + if (!host->mrq) + return; + + if (cancel_timeout) + cancel_delayed_work(&host->timeout_work); + + mrq = host->mrq; + + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + host->dma_on = 0; + + mmc_request_done(mmc_from_priv(host), mrq); +} + +static void alcor_finish_data(struct alcor_sdmmc_host *host) +{ + struct mmc_data *data; + + data = host->data; + host->data = NULL; + host->dma_on = 0; + + /* + * The specification states that the block count register must + * be updated, but it does not specify at what point in the + * data flow. That makes the register entirely useless to read + * back so we have to assume that nothing made it to the card + * in the event of an error. + */ + if (data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blksz * data->blocks; + + /* + * Need to send CMD12 if - + * a) open-ended multiblock transfer (no CMD23) + * b) error in multiblock transfer + */ + if (data->stop && + (data->error || + !host->mrq->sbc)) { + + /* + * The controller needs a reset of internal state machines + * upon error conditions.
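+ * Both the CMD and DATA state machines are reset below before the + * stop command is sent.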
+ */ + if (data->error) + alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA); + + alcor_unmask_sd_irqs(host); + alcor_send_cmd(host, data->stop, false); + return; + } + + alcor_request_complete(host, 1); +} + +static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask) +{ + dev_dbg(host->dev, "ERR IRQ %x\n", intmask); + + if (host->cmd) { + if (intmask & AU6601_INT_CMD_TIMEOUT_ERR) + host->cmd->error = -ETIMEDOUT; + else + host->cmd->error = -EILSEQ; + } + + if (host->data) { + if (intmask & AU6601_INT_DATA_TIMEOUT_ERR) + host->data->error = -ETIMEDOUT; + else + host->data->error = -EILSEQ; + + host->data->bytes_xfered = 0; + } + + alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA); + alcor_request_complete(host, 1); +} + +static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + + intmask &= AU6601_INT_CMD_END; + + if (!intmask) + return true; + + /* got CMD_END but no CMD is in progress, wake the thread and + * process the error + */ + if (!host->cmd) + return false; + + if (host->cmd->flags & MMC_RSP_PRESENT) { + struct mmc_command *cmd = host->cmd; + + cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0); + dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]); + if (host->cmd->flags & MMC_RSP_136) { + cmd->resp[1] = + alcor_read32be(priv, AU6601_REG_CMD_RSP1); + cmd->resp[2] = + alcor_read32be(priv, AU6601_REG_CMD_RSP2); + cmd->resp[3] = + alcor_read32be(priv, AU6601_REG_CMD_RSP3); + dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n", + cmd->resp[1], cmd->resp[2], cmd->resp[3]); + } + + } + + host->cmd->error = 0; + + /* Processed actual command. */ + if (!host->data) + return false; + + alcor_trigger_data_transfer(host); + host->cmd = NULL; + return true; +} + +static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask) +{ + intmask &= AU6601_INT_CMD_END; + + if (!intmask) + return; + + if (!host->cmd && intmask & AU6601_INT_CMD_END) { + dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n", + intmask); + } + + /* Processed actual command. */ + if (!host->data) + alcor_request_complete(host, 1); + else + alcor_trigger_data_transfer(host); + host->cmd = NULL; +} + +static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) +{ + u32 tmp; + + intmask &= AU6601_INT_DATA_MASK; + + /* nothing to do here */ + if (!intmask) + return 1; + + /* we were too fast and got DATA_END after it was processed? + * let's ignore it for now. + */ + if (!host->data && intmask == AU6601_INT_DATA_END) + return 1; + + /* looks like an error, so let's handle it.
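+ * Returning 0 makes the hard-IRQ fast path wake the IRQ thread, which + * will reset the DATA state machine (see alcor_data_irq_thread()).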
*/ + if (!host->data) + return 0; + + tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY + | AU6601_INT_DMA_END); + switch (tmp) { + case 0: + break; + case AU6601_INT_READ_BUF_RDY: + alcor_trf_block_pio(host, true); + return 1; + case AU6601_INT_WRITE_BUF_RDY: + alcor_trf_block_pio(host, false); + return 1; + case AU6601_INT_DMA_END: + if (!host->sg_count) + break; + + alcor_data_set_dma(host); + break; + default: + dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at the same time\n"); + break; + } + + if (intmask & AU6601_INT_DATA_END) { + if (!host->dma_on && host->blocks) { + alcor_trigger_data_transfer(host); + return 1; + } else { + return 0; + } + } + + return 1; +} + +static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask) +{ + intmask &= AU6601_INT_DATA_MASK; + + if (!intmask) + return; + + if (!host->data) { + dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n", + intmask); + alcor_reset(host, AU6601_RESET_DATA); + return; + } + + if (alcor_data_irq_done(host, intmask)) + return; + + if ((intmask & AU6601_INT_DATA_END) || !host->blocks || + (host->dma_on && !host->sg_count)) + alcor_finish_data(host); +} + +static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask) +{ + dev_dbg(host->dev, "card %s\n", + intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted"); + + if (host->mrq) { + dev_dbg(host->dev, "cancel all pending tasks.\n"); + + if (host->data) + host->data->error = -ENOMEDIUM; + + if (host->cmd) + host->cmd->error = -ENOMEDIUM; + else + host->mrq->cmd->error = -ENOMEDIUM; + + alcor_request_complete(host, 1); + } + + mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1)); +} + +static irqreturn_t alcor_irq_thread(int irq, void *d) +{ + struct alcor_sdmmc_host *host = d; + irqreturn_t ret = IRQ_HANDLED; + u32 intmask, tmp; + + mutex_lock(&host->cmd_mutex); + + intmask = host->irq_status_sd; + + /* something bad happened */ + if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) { + dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask); + ret = IRQ_NONE; + goto exit; + } + + tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK); + if (tmp) { + if (tmp & AU6601_INT_ERROR_MASK) + alcor_err_irq(host, tmp); + else { + alcor_cmd_irq_thread(host, tmp); + alcor_data_irq_thread(host, tmp); + } + intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK); + } + + if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) { + alcor_cd_irq(host, intmask); + intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE); + } + + if (intmask & AU6601_INT_OVER_CURRENT_ERR) { + dev_warn(host->dev, + "warning: over-current detected!\n"); + intmask &= ~AU6601_INT_OVER_CURRENT_ERR; + } + + if (intmask) + dev_dbg(host->dev, "got unhandled IRQ: 0x%04x\n", intmask); + +exit: + mutex_unlock(&host->cmd_mutex); + alcor_unmask_sd_irqs(host); + return ret; +} + + +static irqreturn_t alcor_irq(int irq, void *d) +{ + struct alcor_sdmmc_host *host = d; + struct alcor_pci_priv *priv = host->alcor_pci; + u32 status, tmp; + irqreturn_t ret; + int cmd_done, data_done; + + status = alcor_read32(priv, AU6601_REG_INT_STATUS); + if (!status) + return IRQ_NONE; + + alcor_write32(priv, status, AU6601_REG_INT_STATUS); + + tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY + | AU6601_INT_DATA_END | AU6601_INT_DMA_END + | AU6601_INT_CMD_END); + if (tmp == status) { + cmd_done = alcor_cmd_irq_done(host, tmp); + data_done = alcor_data_irq_done(host, tmp); + /* use fast path for simple tasks
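+ * (pure CMD/DATA completion and buffer-ready events); anything more + * involved is deferred to the IRQ thread with SD interrupts masked.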
*/ + if (cmd_done && data_done) { + ret = IRQ_HANDLED; + goto alcor_irq_done; + } + } + + host->irq_status_sd = status; + ret = IRQ_WAKE_THREAD; + alcor_mask_sd_irqs(host); +alcor_irq_done: + return ret; +} + +static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + int i, diff = 0x7fffffff, tmp_clock = 0; + u16 clk_src = 0; + u8 clk_div = 0; + + if (clock == 0) { + alcor_write16(priv, 0, AU6601_CLK_SELECT); + return; + } + + for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) { + unsigned int tmp_div, tmp_diff; + const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i]; + + tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock); + if (cfg->min_div > tmp_div || tmp_div > cfg->max_div) + continue; + + tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div); + tmp_diff = abs(clock - tmp_clock); + + if (tmp_diff < diff) { + diff = tmp_diff; + clk_src = cfg->clk_src_reg; + clk_div = tmp_div; + } + } + + clk_src |= ((clk_div - 1) << 8); + clk_src |= AU6601_CLK_ENABLE; + + dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n", + clock, tmp_clock, clk_div, clk_src); + + alcor_write16(priv, clk_src, AU6601_CLK_SELECT); + +} + +static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + + if (ios->timing == MMC_TIMING_LEGACY) { + alcor_rmw8(host, AU6601_CLK_DELAY, + AU6601_CLK_POSITIVE_EDGE_ALL, 0); + } else { + alcor_rmw8(host, AU6601_CLK_DELAY, + 0, AU6601_CLK_POSITIVE_EDGE_ALL); + } +} + +static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct alcor_pci_priv *priv = host->alcor_pci; + + if (ios->bus_width == MMC_BUS_WIDTH_1) { + alcor_write8(priv, 0, AU6601_REG_BUS_CTRL); + } else if (ios->bus_width == MMC_BUS_WIDTH_4) { + alcor_write8(priv, AU6601_BUS_WIDTH_4BIT, + AU6601_REG_BUS_CTRL); + } else + dev_err(host->dev, "Unknown BUS mode\n"); + +} + +static int alcor_card_busy(struct mmc_host *mmc) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct alcor_pci_priv *priv = host->alcor_pci; + u8 status; + + /* Check whether dat[0:3] low */ + status = alcor_read8(priv, AU6601_DATA_PIN_STATE); + + return !(status & AU6601_BUS_STAT_DAT_MASK); +} + +static int alcor_get_cd(struct mmc_host *mmc) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct alcor_pci_priv *priv = host->alcor_pci; + u8 detect; + + detect = alcor_read8(priv, AU6601_DETECT_STATUS) + & AU6601_DETECT_STATUS_M; + /* check if card is present then send command and data */ + return (detect == AU6601_SD_DETECTED); +} + +static int alcor_get_ro(struct mmc_host *mmc) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct alcor_pci_priv *priv = host->alcor_pci; + u8 status; + + /* get write protect pin status */ + status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL); + + return !!(status & AU6601_SD_CARD_WP); +} + +static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + + mutex_lock(&host->cmd_mutex); + + host->mrq = mrq; + + /* check if card is present then send command and data */ + if (alcor_get_cd(mmc)) + alcor_send_cmd(host, mrq->cmd, true); + else { + mrq->cmd->error = -ENOMEDIUM; + alcor_request_complete(host, 1); + } + + mutex_unlock(&host->cmd_mutex); +} + +static void alcor_pre_req(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + struct 
mmc_command *cmd = mrq->cmd; + struct scatterlist *sg; + unsigned int i, sg_len; + + if (!data || !cmd) + return; + + data->host_cookie = COOKIE_UNMAPPED; + + /* FIXME: looks like the DMA engine works only with CMD18 */ + if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK + && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK) + return; + /* + * We don't do DMA on "complex" transfers, i.e. with + * non-word-aligned buffers or lengths. A future improvement + * could be made to use temporary DMA bounce-buffers when these + * requirements are not met. + * + * Also, we don't bother with all the DMA setup overhead for + * short transfers. + */ + if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE) + return; + + if (data->blksz & 3) + return; + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE) + return; + if (sg->offset != 0) + return; + } + + /* This data might be unmapped at this time */ + + sg_len = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (sg_len) + data->host_cookie = COOKIE_MAPPED; + + data->sg_count = sg_len; +} + +static void alcor_post_req(struct mmc_host *mmc, + struct mmc_request *mrq, + int err) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + if (data->host_cookie == COOKIE_MAPPED) { + dma_unmap_sg(host->dev, + data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + } + + data->host_cookie = COOKIE_UNMAPPED; +} + +static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + struct alcor_pci_priv *priv = host->alcor_pci; + + switch (ios->power_mode) { + case MMC_POWER_OFF: + alcor_set_clock(host, ios->clock); + /* set all pins to input */ + alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE); + /* turn off VDD */ + alcor_write8(priv, 0, AU6601_POWER_CONTROL); + break; + case MMC_POWER_UP: + break; + case MMC_POWER_ON: + /* This is the trickiest part. The order and timing of the + * instructions seem to play an important role. Any change may + * confuse the internal state engine of this HW. + * FIXME: if we ever get access to documentation, this part + * should be reviewed again. + */ + + /* enable SD card mode */ + alcor_write8(priv, AU6601_SD_CARD, + AU6601_ACTIVE_CTRL); + /* set signal voltage to 3.3V */ + alcor_write8(priv, 0, AU6601_OPT); + /* there is no documentation about the clk delay; for now, just + * try to mimic the original driver. + */ + alcor_write8(priv, 0x20, AU6601_CLK_DELAY); + /* set BUS width to 1 bit */ + alcor_write8(priv, 0, AU6601_REG_BUS_CTRL); + /* set CLK first time */ + alcor_set_clock(host, ios->clock); + /* power on VDD */ + alcor_write8(priv, AU6601_SD_CARD, + AU6601_POWER_CONTROL); + /* wait until the CLK gets stable */ + mdelay(20); + /* set CLK again, mimic original driver. */ + alcor_set_clock(host, ios->clock); + + /* enable output */ + alcor_write8(priv, AU6601_SD_CARD, + AU6601_OUTPUT_ENABLE); + /* The clk will not work on the au6621; we need to trigger a + * data transfer. + */ + alcor_write8(priv, AU6601_DATA_WRITE, + AU6601_DATA_XFER_CTRL); + /* configure the timeout; it is not clear what exactly it means. */ + alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL); + mdelay(100); + break; + default: + dev_err(host->dev, "Unknown power parameter\n"); + } +} + +static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + + mutex_lock(&host->cmd_mutex); + + dev_dbg(host->dev, "set ios.
bus width: %x, power mode: %x\n", + ios->bus_width, ios->power_mode); + + if (ios->power_mode != host->cur_power_mode) { + alcor_set_power_mode(mmc, ios); + host->cur_power_mode = ios->power_mode; + } else { + alcor_set_timing(mmc, ios); + alcor_set_bus_width(mmc, ios); + alcor_set_clock(host, ios->clock); + } + + mutex_unlock(&host->cmd_mutex); +} + +static int alcor_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct alcor_sdmmc_host *host = mmc_priv(mmc); + + mutex_lock(&host->cmd_mutex); + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0); + break; + case MMC_SIGNAL_VOLTAGE_180: + alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V); + break; + default: + /* No signal voltage switch required */ + break; + } + + mutex_unlock(&host->cmd_mutex); + return 0; +} + +static const struct mmc_host_ops alcor_sdc_ops = { + .card_busy = alcor_card_busy, + .get_cd = alcor_get_cd, + .get_ro = alcor_get_ro, + .post_req = alcor_post_req, + .pre_req = alcor_pre_req, + .request = alcor_request, + .set_ios = alcor_set_ios, + .start_signal_voltage_switch = alcor_signal_voltage_switch, +}; + +static void alcor_timeout_timer(struct work_struct *work) +{ + struct delayed_work *d = to_delayed_work(work); + struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host, + timeout_work); + mutex_lock(&host->cmd_mutex); + + dev_dbg(host->dev, "triggered timeout\n"); + if (host->mrq) { + dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); + + if (host->data) { + host->data->error = -ETIMEDOUT; + } else { + if (host->cmd) + host->cmd->error = -ETIMEDOUT; + else + host->mrq->cmd->error = -ETIMEDOUT; + } + + alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA); + alcor_request_complete(host, 0); + } + + mutex_unlock(&host->cmd_mutex); +} + +static void alcor_hw_init(struct alcor_sdmmc_host *host) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + struct alcor_dev_cfg *cfg = priv->cfg; + + /* FIXME: This part mimics the HW init of the original driver. + * If we ever get access to documentation, this part should be + * reviewed again. + */ + + /* reset command state engine */ + alcor_reset(host, AU6601_RESET_CMD); + + alcor_write8(priv, 0, AU6601_DMA_BOUNDARY); + /* enable sd card mode */ + alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL); + + /* set BUS width to 1 bit */ + alcor_write8(priv, 0, AU6601_REG_BUS_CTRL); + + /* reset data state engine */ + alcor_reset(host, AU6601_RESET_DATA); + /* Not sure if the voodoo with AU6601_DMA_BOUNDARY is really needed */ + alcor_write8(priv, 0, AU6601_DMA_BOUNDARY); + + alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL); + /* not clear what we are doing here. */ + alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0); + alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1); + alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2); + + /* for 6601 - dma_boundary; for 6621 - dma_page_cnt + * the exact meaning of this register is not clear.
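+ * The value is the per-variant cfg->dma field of the alcor_dev_cfg + * presumably supplied by the parent alcor_pci driver.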
+ */ + alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY); + + /* make sure all pins are set to input and VDD is off */ + alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE); + alcor_write8(priv, 0, AU6601_POWER_CONTROL); + + alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS); + /* now we should be safe to enable IRQs */ + alcor_unmask_sd_irqs(host); +} + +static void alcor_hw_uninit(struct alcor_sdmmc_host *host) +{ + struct alcor_pci_priv *priv = host->alcor_pci; + + alcor_mask_sd_irqs(host); + alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA); + + alcor_write8(priv, 0, AU6601_DETECT_STATUS); + + alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE); + alcor_write8(priv, 0, AU6601_POWER_CONTROL); + + alcor_write8(priv, 0, AU6601_OPT); +} + +static void alcor_init_mmc(struct alcor_sdmmc_host *host) +{ + struct mmc_host *mmc = mmc_from_priv(host); + + mmc->f_min = AU6601_MIN_CLOCK; + mmc->f_max = AU6601_MAX_CLOCK; + mmc->ocr_avail = MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED + | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 + | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50; + mmc->caps2 = MMC_CAP2_NO_SDIO; + mmc->ops = &alcor_sdc_ops; + + /* The hardware does DMA data transfer of 4096 bytes to/from a single + * buffer address. Scatterlists are not supported at the hardware + * level, however we can work with them at the driver level, + * provided that each segment is exactly 4096 bytes in size. + * Upon DMA completion of a single segment (signalled via IRQ), we + * immediately proceed to transfer the next segment from the + * scatterlist. + * + * The overall request is limited to 240 sectors, matching the + * original vendor driver. + */ + mmc->max_segs = AU6601_MAX_DMA_SEGMENTS; + mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE; + mmc->max_blk_count = 240; + mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size; + dma_set_max_seg_size(host->dev, mmc->max_seg_size); +} + +static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) +{ + struct alcor_pci_priv *priv = pdev->dev.platform_data; + struct mmc_host *mmc; + struct alcor_sdmmc_host *host; + int ret; + + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "Can't allocate MMC\n"); + return -ENOMEM; + } + + host = mmc_priv(mmc); + host->dev = &pdev->dev; + host->cur_power_mode = MMC_POWER_UNDEFINED; + host->alcor_pci = priv; + + /* make sure irqs are disabled */ + alcor_write32(priv, 0, AU6601_REG_INT_ENABLE); + alcor_write32(priv, 0, AU6601_MS_INT_ENABLE); + + ret = devm_request_threaded_irq(&pdev->dev, priv->irq, + alcor_irq, alcor_irq_thread, IRQF_SHARED, + DRV_NAME_ALCOR_PCI_SDMMC, host); + + if (ret) { + dev_err(&pdev->dev, "Failed to get irq for data line\n"); + goto free_host; + } + + mutex_init(&host->cmd_mutex); + INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer); + + alcor_init_mmc(host); + alcor_hw_init(host); + + dev_set_drvdata(&pdev->dev, host); + ret = mmc_add_host(mmc); + if (ret) + goto free_host; + + return 0; + +free_host: + mmc_free_host(mmc); + return ret; +} + +static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev) +{ + struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev); + struct mmc_host *mmc = mmc_from_priv(host); + + if (cancel_delayed_work_sync(&host->timeout_work)) + alcor_request_complete(host, 0); + + alcor_hw_uninit(host); + mmc_remove_host(mmc); + mmc_free_host(mmc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int alcor_pci_sdmmc_suspend(struct device *dev) +{ + struct alcor_sdmmc_host *host = 
dev_get_drvdata(dev); + + if (cancel_delayed_work_sync(&host->timeout_work)) + alcor_request_complete(host, 0); + + alcor_hw_uninit(host); + + return 0; +} + +static int alcor_pci_sdmmc_resume(struct device *dev) +{ + struct alcor_sdmmc_host *host = dev_get_drvdata(dev); + + alcor_hw_init(host); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend, + alcor_pci_sdmmc_resume); + +static const struct platform_device_id alcor_pci_sdmmc_ids[] = { + { + .name = DRV_NAME_ALCOR_PCI_SDMMC, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids); + +static struct platform_driver alcor_pci_sdmmc_driver = { + .probe = alcor_pci_sdmmc_drv_probe, + .remove = alcor_pci_sdmmc_drv_remove, + .id_table = alcor_pci_sdmmc_ids, + .driver = { + .name = DRV_NAME_ALCOR_PCI_SDMMC, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &alcor_mmc_pm_ops + }, +}; +module_platform_driver(alcor_pci_sdmmc_driver); + +MODULE_AUTHOR("Oleksij Rempel "); +MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c new file mode 100644 index 000000000..e878fdf8f --- /dev/null +++ b/drivers/mmc/host/android-goldfish.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2007, Google Inc. + * Copyright 2012, Intel Inc. + * + * based on omap.c driver, which was + * Copyright (C) 2004 Nokia Corporation + * Written by Tuukka Tikkanen and Juha Yrjölä + * Misc hacks here and there by Tony Lindgren + * Other hacks (DMA, SD, etc) by David Brownell + */ + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define DRIVER_NAME "goldfish_mmc" + +#define BUFFER_SIZE 16384 + +#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr)) +#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr)) + +enum { + /* status register */ + MMC_INT_STATUS = 0x00, + /* set this to enable IRQ */ + MMC_INT_ENABLE = 0x04, + /* set this to specify buffer address */ + MMC_SET_BUFFER = 0x08, + + /* MMC command number */ + MMC_CMD = 0x0C, + + /* MMC argument */ + MMC_ARG = 0x10, + + /* MMC response (or R2 bits 0 - 31) */ + MMC_RESP_0 = 0x14, + + /* MMC R2 response bits 32 - 63 */ + MMC_RESP_1 = 0x18, + + /* MMC R2 response bits 64 - 95 */ + MMC_RESP_2 = 0x1C, + + /* MMC R2 response bits 96 - 127 */ + MMC_RESP_3 = 0x20, + + MMC_BLOCK_LENGTH = 0x24, + MMC_BLOCK_COUNT = 0x28, + + /* MMC state flags */ + MMC_STATE = 0x2C, + + /* MMC_INT_STATUS bits */ + + MMC_STAT_END_OF_CMD = 1U << 0, + MMC_STAT_END_OF_DATA = 1U << 1, + MMC_STAT_STATE_CHANGE = 1U << 2, + MMC_STAT_CMD_TIMEOUT = 1U << 3, + + /* MMC_STATE bits */ + MMC_STATE_INSERTED = 1U << 0, + MMC_STATE_READ_ONLY = 1U << 1, +}; + +/* + * Command types + */ +#define OMAP_MMC_CMDTYPE_BC 0 +#define OMAP_MMC_CMDTYPE_BCR 1 +#define OMAP_MMC_CMDTYPE_AC 2 +#define OMAP_MMC_CMDTYPE_ADTC 3 + + +struct goldfish_mmc_host { + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + struct device *dev; + unsigned char id; /* 16xx chips have 2 MMC blocks */ + void *virt_base; + unsigned int phys_base; + int irq; + unsigned char bus_mode; + unsigned char hw_bus_mode; + + 
unsigned int sg_len; + unsigned dma_done:1; + unsigned dma_in_use:1; + + void __iomem *reg_base; +}; + +static inline int +goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host) +{ + return 0; +} + +static ssize_t +goldfish_mmc_show_cover_switch(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct goldfish_mmc_host *host = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" : + "closed"); +} + +static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL); + +static void +goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd) +{ + u32 cmdreg; + u32 resptype; + u32 cmdtype; + + host->cmd = cmd; + + resptype = 0; + cmdtype = 0; + + /* Our hardware needs to know exact type */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + break; + case MMC_RSP_R1: + case MMC_RSP_R1B: + /* resp 1, 1b, 6, 7 */ + resptype = 1; + break; + case MMC_RSP_R2: + resptype = 2; + break; + case MMC_RSP_R3: + resptype = 3; + break; + default: + dev_err(mmc_dev(mmc_from_priv(host)), + "Invalid response type: %04x\n", mmc_resp_type(cmd)); + break; + } + + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) + cmdtype = OMAP_MMC_CMDTYPE_ADTC; + else if (mmc_cmd_type(cmd) == MMC_CMD_BC) + cmdtype = OMAP_MMC_CMDTYPE_BC; + else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) + cmdtype = OMAP_MMC_CMDTYPE_BCR; + else + cmdtype = OMAP_MMC_CMDTYPE_AC; + + cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); + + if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) + cmdreg |= 1 << 6; + + if (cmd->flags & MMC_RSP_BUSY) + cmdreg |= 1 << 11; + + if (host->data && !(host->data->flags & MMC_DATA_WRITE)) + cmdreg |= 1 << 15; + + GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg); + GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg); +} + +static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, + struct mmc_data *data) +{ + if (host->dma_in_use) { + enum dma_data_direction dma_data_dir; + + dma_data_dir = mmc_get_dma_dir(data); + + if (dma_data_dir == DMA_FROM_DEVICE) { + /* + * We don't really have DMA, so we need + * to copy from our platform driver buffer + */ + sg_copy_from_buffer(data->sg, 1, host->virt_base, + data->sg->length); + } + host->data->bytes_xfered += data->sg->length; + dma_unmap_sg(mmc_dev(mmc_from_priv(host)), data->sg, + host->sg_len, dma_data_dir); + } + + host->data = NULL; + host->sg_len = 0; + + /* + * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing + * dozens of requests until the card finishes writing data. + * It'd be cheaper to just wait till an EOFB interrupt arrives... 
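+ * On the emulated goldfish device the write usually completes almost + * immediately, so the polling overhead is presumably small.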
+ */ + + if (!data->stop) { + host->mrq = NULL; + mmc_request_done(mmc_from_priv(host), data->mrq); + return; + } + + goldfish_mmc_start_command(host, data->stop); +} + +static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host, + struct mmc_data *data) +{ + if (!host->dma_in_use) { + goldfish_mmc_xfer_done(host, data); + return; + } + if (host->dma_done) + goldfish_mmc_xfer_done(host, data); +} + +static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host, + struct mmc_command *cmd) +{ + host->cmd = NULL; + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = + GOLDFISH_MMC_READ(host, MMC_RESP_0); + cmd->resp[2] = + GOLDFISH_MMC_READ(host, MMC_RESP_1); + cmd->resp[1] = + GOLDFISH_MMC_READ(host, MMC_RESP_2); + cmd->resp[0] = + GOLDFISH_MMC_READ(host, MMC_RESP_3); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + cmd->resp[0] = + GOLDFISH_MMC_READ(host, MMC_RESP_0); + } + } + + if (host->data == NULL || cmd->error) { + host->mrq = NULL; + mmc_request_done(mmc_from_priv(host), cmd->mrq); + } +} + +static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) +{ + struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id; + u16 status; + int end_command = 0; + int end_transfer = 0; + int state_changed = 0; + int cmd_timeout = 0; + + while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) { + GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); + + if (status & MMC_STAT_END_OF_CMD) + end_command = 1; + + if (status & MMC_STAT_END_OF_DATA) + end_transfer = 1; + + if (status & MMC_STAT_STATE_CHANGE) + state_changed = 1; + + if (status & MMC_STAT_CMD_TIMEOUT) { + end_command = 0; + cmd_timeout = 1; + } + } + + if (cmd_timeout) { + struct mmc_request *mrq = host->mrq; + mrq->cmd->error = -ETIMEDOUT; + host->mrq = NULL; + mmc_request_done(mmc_from_priv(host), mrq); + } + + if (end_command) + goldfish_mmc_cmd_done(host, host->cmd); + + if (end_transfer) { + host->dma_done = 1; + goldfish_mmc_end_of_data(host, host->data); + } else if (host->data != NULL) { + /* + * WORKAROUND -- after porting this driver from 2.6 to 3.4, + * during device initialization, cases where host->data is + * non-null but end_transfer is false would occur. Doing + * nothing in such cases results in no further interrupts, + * and initialization failure. + * TODO -- find the real cause. 
+ */ + host->dma_done = 1; + goldfish_mmc_end_of_data(host, host->data); + } + + if (state_changed) { + u32 state = GOLDFISH_MMC_READ(host, MMC_STATE); + pr_info("%s: Card detect now %d\n", __func__, + (state & MMC_STATE_INSERTED)); + mmc_detect_change(mmc_from_priv(host), 0); + } + + if (!end_command && !end_transfer && !state_changed && !cmd_timeout) { + status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); + dev_info(mmc_dev(mmc_from_priv(host)), "spurious irq 0x%04x\n", + status); + if (status != 0) { + GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); + GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0); + } + } + + return IRQ_HANDLED; +} + +static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, + struct mmc_request *req) +{ + struct mmc_data *data = req->data; + int block_size; + unsigned sg_len; + enum dma_data_direction dma_data_dir; + + host->data = data; + if (data == NULL) { + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0); + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0); + host->dma_in_use = 0; + return; + } + + block_size = data->blksz; + + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1); + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1); + + /* + * Cope with calling layer confusion; it issues "single + * block" writes using multi-block scatterlists. + */ + sg_len = (data->blocks == 1) ? 1 : data->sg_len; + + dma_data_dir = mmc_get_dma_dir(data); + + host->sg_len = dma_map_sg(mmc_dev(mmc_from_priv(host)), data->sg, + sg_len, dma_data_dir); + host->dma_done = 0; + host->dma_in_use = 1; + + if (dma_data_dir == DMA_TO_DEVICE) { + /* + * We don't really have DMA, so we need to copy to our + * platform driver buffer + */ + sg_copy_to_buffer(data->sg, 1, host->virt_base, + data->sg->length); + } +} + +static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct goldfish_mmc_host *host = mmc_priv(mmc); + + WARN_ON(host->mrq != NULL); + + host->mrq = req; + goldfish_mmc_prepare_data(host, req); + goldfish_mmc_start_command(host, req->cmd); +} + +static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct goldfish_mmc_host *host = mmc_priv(mmc); + + host->bus_mode = ios->bus_mode; + host->hw_bus_mode = host->bus_mode; +} + +static int goldfish_mmc_get_ro(struct mmc_host *mmc) +{ + uint32_t state; + struct goldfish_mmc_host *host = mmc_priv(mmc); + + state = GOLDFISH_MMC_READ(host, MMC_STATE); + return ((state & MMC_STATE_READ_ONLY) != 0); +} + +static const struct mmc_host_ops goldfish_mmc_ops = { + .request = goldfish_mmc_request, + .set_ios = goldfish_mmc_set_ios, + .get_ro = goldfish_mmc_get_ro, +}; + +static int goldfish_mmc_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct goldfish_mmc_host *host = NULL; + struct resource *res; + int ret = 0; + int irq; + dma_addr_t buf_addr; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (res == NULL || irq < 0) + return -ENXIO; + + mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev); + if (mmc == NULL) { + ret = -ENOMEM; + goto err_alloc_host_failed; + } + + host = mmc_priv(mmc); + + pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end); + host->reg_base = ioremap(res->start, resource_size(res)); + if (host->reg_base == NULL) { + ret = -ENOMEM; + goto ioremap_failed; + } + host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, + &buf_addr, GFP_KERNEL); + + if (host->virt_base == 0) { + ret = -ENOMEM; + goto dma_alloc_failed; + } + host->phys_base = 
buf_addr; + + host->id = pdev->id; + host->irq = irq; + + mmc->ops = &goldfish_mmc_ops; + mmc->f_min = 400000; + mmc->f_max = 24000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + mmc->caps2 = MMC_CAP2_NO_SDIO; + + /* Use scatterlist DMA to reduce per-transfer costs. + * NOTE max_seg_size assumption that small blocks aren't + * normally used (except e.g. for reading SD registers). + */ + mmc->max_segs = 32; + mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */ + mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */ + mmc->max_req_size = BUFFER_SIZE; + mmc->max_seg_size = mmc->max_req_size; + + ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host); + if (ret) { + dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n"); + goto err_request_irq_failed; + } + + host->dev = &pdev->dev; + platform_set_drvdata(pdev, host); + + ret = device_create_file(&pdev->dev, &dev_attr_cover_switch); + if (ret) + dev_warn(mmc_dev(mmc), "Unable to create sysfs attributes\n"); + + GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base); + GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, + MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA | + MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT); + + mmc_add_host(mmc); + return 0; + +err_request_irq_failed: + dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, + host->phys_base); +dma_alloc_failed: + iounmap(host->reg_base); +ioremap_failed: + mmc_free_host(mmc); +err_alloc_host_failed: + return ret; +} + +static int goldfish_mmc_remove(struct platform_device *pdev) +{ + struct goldfish_mmc_host *host = platform_get_drvdata(pdev); + struct mmc_host *mmc = mmc_from_priv(host); + + BUG_ON(host == NULL); + + mmc_remove_host(mmc); + free_irq(host->irq, host); + dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base); + iounmap(host->reg_base); + mmc_free_host(mmc); + return 0; +} + +static struct platform_driver goldfish_mmc_driver = { + .probe = goldfish_mmc_probe, + .remove = goldfish_mmc_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +module_platform_driver(goldfish_mmc_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c new file mode 100644 index 000000000..c468f9a02 --- /dev/null +++ b/drivers/mmc/host/atmel-mci.c @@ -0,0 +1,2680 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Atmel MultiMedia Card Interface driver + * + * Copyright (C) 2004-2008 Atmel Corporation + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Superset of MCI IP registers integrated in Atmel AT91 Processor + * Registers and bitfields marked with [2] are only available in MCI2 + */ + +/* MCI Register Definitions */ +#define ATMCI_CR 0x0000 /* Control */ +#define ATMCI_CR_MCIEN BIT(0) /* MCI Enable */ +#define ATMCI_CR_MCIDIS BIT(1) /* MCI Disable */ +#define ATMCI_CR_PWSEN BIT(2) /* Power Save Enable */ +#define ATMCI_CR_PWSDIS BIT(3) /* Power Save Disable */ +#define ATMCI_CR_SWRST BIT(7) /* Software Reset */ +#define ATMCI_MR 0x0004 /* Mode */ +#define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ +#define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */ +#define ATMCI_MR_RDPROOF BIT(11) /* Read 
Proof */ +#define ATMCI_MR_WRPROOF BIT(12) /* Write Proof */ +#define ATMCI_MR_PDCFBYTE BIT(13) /* Force Byte Transfer */ +#define ATMCI_MR_PDCPADV BIT(14) /* Padding Value */ +#define ATMCI_MR_PDCMODE BIT(15) /* PDC-oriented Mode */ +#define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */ +#define ATMCI_DTOR 0x0008 /* Data Timeout */ +#define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ +#define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ +#define ATMCI_SDCR 0x000c /* SD Card / SDIO */ +#define ATMCI_SDCSEL_SLOT_A (0 << 0) /* Select SD slot A */ +#define ATMCI_SDCSEL_SLOT_B (1 << 0) /* Select SD slot B */ +#define ATMCI_SDCSEL_MASK (3 << 0) +#define ATMCI_SDCBUS_1BIT (0 << 6) /* 1-bit data bus */ +#define ATMCI_SDCBUS_4BIT (2 << 6) /* 4-bit data bus */ +#define ATMCI_SDCBUS_8BIT (3 << 6) /* 8-bit data bus[2] */ +#define ATMCI_SDCBUS_MASK (3 << 6) +#define ATMCI_ARGR 0x0010 /* Command Argument */ +#define ATMCI_CMDR 0x0014 /* Command */ +#define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ +#define ATMCI_CMDR_RSPTYP_NONE (0 << 6) /* No response */ +#define ATMCI_CMDR_RSPTYP_48BIT (1 << 6) /* 48-bit response */ +#define ATMCI_CMDR_RSPTYP_136BIT (2 << 6) /* 136-bit response */ +#define ATMCI_CMDR_SPCMD_INIT (1 << 8) /* Initialization command */ +#define ATMCI_CMDR_SPCMD_SYNC (2 << 8) /* Synchronized command */ +#define ATMCI_CMDR_SPCMD_INT (4 << 8) /* Interrupt command */ +#define ATMCI_CMDR_SPCMD_INTRESP (5 << 8) /* Interrupt response */ +#define ATMCI_CMDR_OPDCMD (1 << 11) /* Open Drain */ +#define ATMCI_CMDR_MAXLAT_5CYC (0 << 12) /* Max latency 5 cycles */ +#define ATMCI_CMDR_MAXLAT_64CYC (1 << 12) /* Max latency 64 cycles */ +#define ATMCI_CMDR_START_XFER (1 << 16) /* Start data transfer */ +#define ATMCI_CMDR_STOP_XFER (2 << 16) /* Stop data transfer */ +#define ATMCI_CMDR_TRDIR_WRITE (0 << 18) /* Write data */ +#define ATMCI_CMDR_TRDIR_READ (1 << 18) /* Read data */ +#define ATMCI_CMDR_BLOCK (0 << 19) /* Single-block transfer */ +#define ATMCI_CMDR_MULTI_BLOCK (1 << 19) /* Multi-block transfer */ +#define ATMCI_CMDR_STREAM (2 << 19) /* MMC Stream transfer */ +#define ATMCI_CMDR_SDIO_BYTE (4 << 19) /* SDIO Byte transfer */ +#define ATMCI_CMDR_SDIO_BLOCK (5 << 19) /* SDIO Block transfer */ +#define ATMCI_CMDR_SDIO_SUSPEND (1 << 24) /* SDIO Suspend Command */ +#define ATMCI_CMDR_SDIO_RESUME (2 << 24) /* SDIO Resume Command */ +#define ATMCI_BLKR 0x0018 /* Block */ +#define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */ +#define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ +#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */ +#define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */ +#define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */ +#define ATMCI_RSPR 0x0020 /* Response 0 */ +#define ATMCI_RSPR1 0x0024 /* Response 1 */ +#define ATMCI_RSPR2 0x0028 /* Response 2 */ +#define ATMCI_RSPR3 0x002c /* Response 3 */ +#define ATMCI_RDR 0x0030 /* Receive Data */ +#define ATMCI_TDR 0x0034 /* Transmit Data */ +#define ATMCI_SR 0x0040 /* Status */ +#define ATMCI_IER 0x0044 /* Interrupt Enable */ +#define ATMCI_IDR 0x0048 /* Interrupt Disable */ +#define ATMCI_IMR 0x004c /* Interrupt Mask */ +#define ATMCI_CMDRDY BIT(0) /* Command Ready */ +#define ATMCI_RXRDY BIT(1) /* Receiver Ready */ +#define ATMCI_TXRDY BIT(2) /* Transmitter Ready */ +#define ATMCI_BLKE BIT(3) /* Data Block Ended */ +#define ATMCI_DTIP BIT(4) /* Data Transfer In Progress */ +#define ATMCI_NOTBUSY BIT(5) /* Data Not Busy */ +#define ATMCI_ENDRX BIT(6) /* End of RX Buffer */
+#define ATMCI_ENDTX BIT(7) /* End of TX Buffer */ +#define ATMCI_SDIOIRQA BIT(8) /* SDIO IRQ in slot A */ +#define ATMCI_SDIOIRQB BIT(9) /* SDIO IRQ in slot B */ +#define ATMCI_SDIOWAIT BIT(12) /* SDIO Read Wait Operation Status */ +#define ATMCI_CSRCV BIT(13) /* CE-ATA Completion Signal Received */ +#define ATMCI_RXBUFF BIT(14) /* RX Buffer Full */ +#define ATMCI_TXBUFE BIT(15) /* TX Buffer Empty */ +#define ATMCI_RINDE BIT(16) /* Response Index Error */ +#define ATMCI_RDIRE BIT(17) /* Response Direction Error */ +#define ATMCI_RCRCE BIT(18) /* Response CRC Error */ +#define ATMCI_RENDE BIT(19) /* Response End Bit Error */ +#define ATMCI_RTOE BIT(20) /* Response Time-Out Error */ +#define ATMCI_DCRCE BIT(21) /* Data CRC Error */ +#define ATMCI_DTOE BIT(22) /* Data Time-Out Error */ +#define ATMCI_CSTOE BIT(23) /* Completion Signal Time-out Error */ +#define ATMCI_BLKOVRE BIT(24) /* DMA Block Overrun Error */ +#define ATMCI_DMADONE BIT(25) /* DMA Transfer Done */ +#define ATMCI_FIFOEMPTY BIT(26) /* FIFO Empty Flag */ +#define ATMCI_XFRDONE BIT(27) /* Transfer Done Flag */ +#define ATMCI_ACKRCV BIT(28) /* Boot Operation Acknowledge Received */ +#define ATMCI_ACKRCVE BIT(29) /* Boot Operation Acknowledge Error */ +#define ATMCI_OVRE BIT(30) /* RX Overrun Error */ +#define ATMCI_UNRE BIT(31) /* TX Underrun Error */ +#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */ +#define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */ +#define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */ +#define ATMCI_DMAEN BIT(8) /* DMA Hardware Handshaking Enable */ +#define ATMCI_CFG 0x0054 /* Configuration[2] */ +#define ATMCI_CFG_FIFOMODE_1DATA BIT(0) /* MCI Internal FIFO control mode */ +#define ATMCI_CFG_FERRCTRL_COR BIT(4) /* Flow Error flag reset control mode */ +#define ATMCI_CFG_HSMODE BIT(8) /* High Speed Mode */ +#define ATMCI_CFG_LSYNC BIT(12) /* Synchronize on the last block */ +#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */ +#define ATMCI_WP_EN BIT(0) /* WP Enable */ +#define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */ +#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */ +#define ATMCI_GET_WP_VS(x) ((x) & 0x0f) +#define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff) +#define ATMCI_VERSION 0x00FC /* Version */ +#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */ + +/* This is not including the FIFO Aperture on MCI2 */ +#define ATMCI_REGS_SIZE 0x100 + +/* Register access macros */ +#define atmci_readl(port, reg) \ + __raw_readl((port)->regs + reg) +#define atmci_writel(port, reg, value) \ + __raw_writel((value), (port)->regs + reg) + +#define ATMCI_CMD_TIMEOUT_MS 2000 +#define AUTOSUSPEND_DELAY 50 + +#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE) +#define ATMCI_DMA_THRESHOLD 16 + +enum { + EVENT_CMD_RDY = 0, + EVENT_XFER_COMPLETE, + EVENT_NOTBUSY, + EVENT_DATA_ERROR, +}; + +enum atmel_mci_state { + STATE_IDLE = 0, + STATE_SENDING_CMD, + STATE_DATA_XFER, + STATE_WAITING_NOTBUSY, + STATE_SENDING_STOP, + STATE_END_REQUEST, +}; + +enum atmci_xfer_dir { + XFER_RECEIVE = 0, + XFER_TRANSMIT, +}; + +enum atmci_pdc_buf { + PDC_FIRST_BUF = 0, + PDC_SECOND_BUF, +}; + +struct atmel_mci_caps { + bool has_dma_conf_reg; + bool has_pdc; + bool has_cfg_reg; + bool has_cstor_reg; + bool has_highspeed; + bool has_rwproof; + bool has_odd_clk_div; + bool has_bad_data_ordering; + bool need_reset_after_xfer; + bool need_blksz_mul_4; + bool need_notbusy_for_read_ops; +}; + +struct atmel_mci_dma { + struct dma_chan *chan; + struct 
dma_async_tx_descriptor *data_desc; +}; + +/** + * struct atmel_mci - MMC controller state shared between all slots + * @lock: Spinlock protecting the queue and associated data. + * @regs: Pointer to MMIO registers. + * @sg: Scatterlist entry currently being processed by PIO or PDC code. + * @sg_len: Size of the scatterlist + * @pio_offset: Offset into the current scatterlist entry. + * @buffer: Buffer used if we don't have the r/w proof capability. We + * don't have the time to switch pdc buffers so we have to use only + * one buffer for the full transaction. + * @buf_size: size of the buffer. + * @buf_phys_addr: buffer address needed for pdc. + * @cur_slot: The slot which is currently using the controller. + * @mrq: The request currently being processed on @cur_slot, + * or NULL if the controller is idle. + * @cmd: The command currently being sent to the card, or NULL. + * @data: The data currently being transferred, or NULL if no data + * transfer is in progress. + * @data_size: just data->blocks * data->blksz. + * @dma: DMA client state. + * @data_chan: DMA channel being used for the current data transfer. + * @dma_conf: Configuration for the DMA slave + * @cmd_status: Snapshot of SR taken upon completion of the current + * command. Only valid when EVENT_CMD_COMPLETE is pending. + * @data_status: Snapshot of SR taken upon completion of the current + * data transfer. Only valid when EVENT_DATA_COMPLETE or + * EVENT_DATA_ERROR is pending. + * @stop_cmdr: Value to be loaded into CMDR when the stop command is + * to be sent. + * @tasklet: Tasklet running the request state machine. + * @pending_events: Bitmask of events flagged by the interrupt handler + * to be processed by the tasklet. + * @completed_events: Bitmask of events which the state machine has + * processed. + * @state: Tasklet state. + * @queue: List of slots waiting for access to the controller. + * @need_clock_update: Update the clock rate before the next request. + * @need_reset: Reset controller before next request. + * @timer: Timer to balance the data timeout error flag which cannot rise. + * @mode_reg: Value of the MR register. + * @cfg_reg: Value of the CFG register. + * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus + * rate and timeout calculations. + * @mapbase: Physical address of the MMIO registers. + * @mck: The peripheral bus clock hooked up to the MMC controller. + * @pdev: Platform device associated with the MMC controller. + * @slot: Slots sharing this MMC controller. + * @caps: MCI capabilities depending on MCI version. + * @prepare_data: function to setup MCI before data transfer which + * depends on MCI capabilities. + * @submit_data: function to start data transfer which depends on MCI + * capabilities. + * @stop_transfer: function to stop data transfer which depends on MCI + * capabilities. + * + * Locking + * ======= + * + * @lock is a softirq-safe spinlock protecting @queue as well as + * @cur_slot, @mrq and @state. These must always be updated + * at the same time while holding @lock. + * + * @lock also protects mode_reg and need_clock_update since these are + * used to synchronize mode register updates with the queue + * processing. + * + * The @mrq field of struct atmel_mci_slot is also protected by @lock, + * and must always be written at the same time as the slot is added to + * @queue. + * + * @pending_events and @completed_events are accessed using atomic bit + * operations, so they don't need any locking. 
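+ * (see the atmci_set_pending() and atmci_test_and_clear_pending() + * helpers defined below).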
+ * + * None of the fields touched by the interrupt handler need any + * locking. However, ordering is important: Before EVENT_DATA_ERROR or + * EVENT_DATA_COMPLETE is set in @pending_events, all data-related + * interrupts must be disabled and @data_status updated with a + * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the + * CMDRDY interrupt must be disabled and @cmd_status updated with a + * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the + * bytes_xfered field of @data must be written. This is ensured by + * using barriers. + */ +struct atmel_mci { + spinlock_t lock; + void __iomem *regs; + + struct scatterlist *sg; + unsigned int sg_len; + unsigned int pio_offset; + unsigned int *buffer; + unsigned int buf_size; + dma_addr_t buf_phys_addr; + + struct atmel_mci_slot *cur_slot; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + unsigned int data_size; + + struct atmel_mci_dma dma; + struct dma_chan *data_chan; + struct dma_slave_config dma_conf; + + u32 cmd_status; + u32 data_status; + u32 stop_cmdr; + + struct tasklet_struct tasklet; + unsigned long pending_events; + unsigned long completed_events; + enum atmel_mci_state state; + struct list_head queue; + + bool need_clock_update; + bool need_reset; + struct timer_list timer; + u32 mode_reg; + u32 cfg_reg; + unsigned long bus_hz; + unsigned long mapbase; + struct clk *mck; + struct platform_device *pdev; + + struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS]; + + struct atmel_mci_caps caps; + + u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data); + void (*submit_data)(struct atmel_mci *host, struct mmc_data *data); + void (*stop_transfer)(struct atmel_mci *host); +}; + +/** + * struct atmel_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @sdc_reg: Value of SDCR to be written before using this slot. + * @sdio_irq: SDIO irq mask for this slot. + * @mrq: mmc_request currently being processed or waiting to be + * processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + * &struct atmel_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @flags: Random state bits associated with the slot. + * @detect_pin: GPIO pin used for card detection, or negative if not + * available. + * @wp_pin: GPIO pin used for card write protect sending, or negative + * if not available. + * @detect_is_active_high: The state of the detect pin when it is active. + * @detect_timer: Timer used for debouncing @detect_pin interrupts. + */ +struct atmel_mci_slot { + struct mmc_host *mmc; + struct atmel_mci *host; + + u32 sdc_reg; + u32 sdio_irq; + + struct mmc_request *mrq; + struct list_head queue_node; + + unsigned int clock; + unsigned long flags; +#define ATMCI_CARD_PRESENT 0 +#define ATMCI_CARD_NEED_INIT 1 +#define ATMCI_SHUTDOWN 2 + + int detect_pin; + int wp_pin; + bool detect_is_active_high; + + struct timer_list detect_timer; +}; + +#define atmci_test_and_clear_pending(host, event) \ + test_and_clear_bit(event, &host->pending_events) +#define atmci_set_completed(host, event) \ + set_bit(event, &host->completed_events) +#define atmci_set_pending(host, event) \ + set_bit(event, &host->pending_events) + +/* + * The debugfs stuff below is mostly optimized away when + * CONFIG_DEBUG_FS is not set. 
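+ * It exposes a register snapshot ("regs"), the current request ("req") + * and the tasklet state under the mmc host's debugfs directory.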
+ */ +static int atmci_req_show(struct seq_file *s, void *v) +{ + struct atmel_mci_slot *slot = s->private; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_command *stop; + struct mmc_data *data; + + /* Make sure we get a consistent snapshot */ + spin_lock_bh(&slot->host->lock); + mrq = slot->mrq; + + if (mrq) { + cmd = mrq->cmd; + data = mrq->data; + stop = mrq->stop; + + if (cmd) + seq_printf(s, + "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", + cmd->opcode, cmd->arg, cmd->flags, + cmd->resp[0], cmd->resp[1], cmd->resp[2], + cmd->resp[3], cmd->error); + if (data) + seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", + data->bytes_xfered, data->blocks, + data->blksz, data->flags, data->error); + if (stop) + seq_printf(s, + "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", + stop->opcode, stop->arg, stop->flags, + stop->resp[0], stop->resp[1], stop->resp[2], + stop->resp[3], stop->error); + } + + spin_unlock_bh(&slot->host->lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(atmci_req); + +static void atmci_show_status_reg(struct seq_file *s, + const char *regname, u32 value) +{ + static const char *sr_bit[] = { + [0] = "CMDRDY", + [1] = "RXRDY", + [2] = "TXRDY", + [3] = "BLKE", + [4] = "DTIP", + [5] = "NOTBUSY", + [6] = "ENDRX", + [7] = "ENDTX", + [8] = "SDIOIRQA", + [9] = "SDIOIRQB", + [12] = "SDIOWAIT", + [14] = "RXBUFF", + [15] = "TXBUFE", + [16] = "RINDE", + [17] = "RDIRE", + [18] = "RCRCE", + [19] = "RENDE", + [20] = "RTOE", + [21] = "DCRCE", + [22] = "DTOE", + [23] = "CSTOE", + [24] = "BLKOVRE", + [25] = "DMADONE", + [26] = "FIFOEMPTY", + [27] = "XFRDONE", + [30] = "OVRE", + [31] = "UNRE", + }; + unsigned int i; + + seq_printf(s, "%s:\t0x%08x", regname, value); + for (i = 0; i < ARRAY_SIZE(sr_bit); i++) { + if (value & (1 << i)) { + if (sr_bit[i]) + seq_printf(s, " %s", sr_bit[i]); + else + seq_puts(s, " UNKNOWN"); + } + } + seq_putc(s, '\n'); +} + +static int atmci_regs_show(struct seq_file *s, void *v) +{ + struct atmel_mci *host = s->private; + u32 *buf; + int ret = 0; + + + buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pm_runtime_get_sync(&host->pdev->dev); + + /* + * Grab a more or less consistent snapshot. Note that we're + * not disabling interrupts, so IMR and SR may not be + * consistent. + */ + spin_lock_bh(&host->lock); + memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE); + spin_unlock_bh(&host->lock); + + pm_runtime_mark_last_busy(&host->pdev->dev); + pm_runtime_put_autosuspend(&host->pdev->dev); + + seq_printf(s, "MR:\t0x%08x%s%s ", + buf[ATMCI_MR / 4], + buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "", + buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? 
" WRPROOF" : ""); + if (host->caps.has_odd_clk_div) + seq_printf(s, "{CLKDIV,CLKODD}=%u\n", + ((buf[ATMCI_MR / 4] & 0xff) << 1) + | ((buf[ATMCI_MR / 4] >> 16) & 1)); + else + seq_printf(s, "CLKDIV=%u\n", + (buf[ATMCI_MR / 4] & 0xff)); + seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]); + seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]); + seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]); + seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", + buf[ATMCI_BLKR / 4], + buf[ATMCI_BLKR / 4] & 0xffff, + (buf[ATMCI_BLKR / 4] >> 16) & 0xffff); + if (host->caps.has_cstor_reg) + seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]); + + /* Don't read RSPR and RDR; it will consume the data there */ + + atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); + atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); + + if (host->caps.has_dma_conf_reg) { + u32 val; + + val = buf[ATMCI_DMA / 4]; + seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", + val, val & 3, + ((val >> 4) & 3) ? + 1 << (((val >> 4) & 3) + 1) : 1, + val & ATMCI_DMAEN ? " DMAEN" : ""); + } + if (host->caps.has_cfg_reg) { + u32 val; + + val = buf[ATMCI_CFG / 4]; + seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", + val, + val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", + val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", + val & ATMCI_CFG_HSMODE ? " HSMODE" : "", + val & ATMCI_CFG_LSYNC ? " LSYNC" : ""); + } + + kfree(buf); + + return ret; +} + +DEFINE_SHOW_ATTRIBUTE(atmci_regs); + +static void atmci_init_debugfs(struct atmel_mci_slot *slot) +{ + struct mmc_host *mmc = slot->mmc; + struct atmel_mci *host = slot->host; + struct dentry *root; + + root = mmc->debugfs_root; + if (!root) + return; + + debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops); + debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops); + debugfs_create_u32("state", S_IRUSR, root, &host->state); + debugfs_create_xul("pending_events", S_IRUSR, root, + &host->pending_events); + debugfs_create_xul("completed_events", S_IRUSR, root, + &host->completed_events); +} + +#if defined(CONFIG_OF) +static const struct of_device_id atmci_dt_ids[] = { + { .compatible = "atmel,hsmci" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atmci_dt_ids); + +static struct mci_platform_data* +atmci_of_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *cnp; + struct mci_platform_data *pdata; + u32 slot_id; + + if (!np) { + dev_err(&pdev->dev, "device node not found\n"); + return ERR_PTR(-EINVAL); + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + for_each_child_of_node(np, cnp) { + if (of_property_read_u32(cnp, "reg", &slot_id)) { + dev_warn(&pdev->dev, "reg property is missing for %pOF\n", + cnp); + continue; + } + + if (slot_id >= ATMCI_MAX_NR_SLOTS) { + dev_warn(&pdev->dev, "can't have more than %d slots\n", + ATMCI_MAX_NR_SLOTS); + of_node_put(cnp); + break; + } + + if (of_property_read_u32(cnp, "bus-width", + &pdata->slot[slot_id].bus_width)) + pdata->slot[slot_id].bus_width = 1; + + pdata->slot[slot_id].detect_pin = + of_get_named_gpio(cnp, "cd-gpios", 0); + + pdata->slot[slot_id].detect_is_active_high = + of_property_read_bool(cnp, "cd-inverted"); + + pdata->slot[slot_id].non_removable = + of_property_read_bool(cnp, "non-removable"); + + pdata->slot[slot_id].wp_pin = + of_get_named_gpio(cnp, "wp-gpios", 0); + } + + return pdata; +} +#else /* CONFIG_OF */ +static inline struct mci_platform_data* 
+atmci_of_init(struct platform_device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static inline unsigned int atmci_get_version(struct atmel_mci *host) +{ + return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; +} + +/* + * Fix sconfig's burst size according to atmel MCI. We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2, + * 8 -> 3, 16 -> 4. + * + * This can be done by finding most significant bit set. + */ +static inline unsigned int atmci_convert_chksize(struct atmel_mci *host, + unsigned int maxburst) +{ + unsigned int version = atmci_get_version(host); + unsigned int offset = 2; + + if (version >= 0x600) + offset = 1; + + if (maxburst > 1) + return fls(maxburst) - offset; + else + return 0; +} + +static void atmci_timeout_timer(struct timer_list *t) +{ + struct atmel_mci *host; + + host = from_timer(host, t, timer); + + dev_dbg(&host->pdev->dev, "software timeout\n"); + + if (host->mrq->cmd->data) { + host->mrq->cmd->data->error = -ETIMEDOUT; + host->data = NULL; + /* + * With some SDIO modules, sometimes DMA transfer hangs. If + * stop_transfer() is not called then the DMA request is not + * removed, following ones are queued and never computed. + */ + if (host->state == STATE_DATA_XFER) + host->stop_transfer(host); + } else { + host->mrq->cmd->error = -ETIMEDOUT; + host->cmd = NULL; + } + host->need_reset = 1; + host->state = STATE_END_REQUEST; + smp_wmb(); + tasklet_schedule(&host->tasklet); +} + +static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, + unsigned int ns) +{ + /* + * It is easier here to use us instead of ns for the timeout, + * it prevents from overflows during calculation. + */ + unsigned int us = DIV_ROUND_UP(ns, 1000); + + /* Maximum clock frequency is host->bus_hz/2 */ + return us * (DIV_ROUND_UP(host->bus_hz, 2000000)); +} + +static void atmci_set_timeout(struct atmel_mci *host, + struct atmel_mci_slot *slot, struct mmc_data *data) +{ + static unsigned dtomul_to_shift[] = { + 0, 4, 7, 8, 10, 12, 16, 20 + }; + unsigned timeout; + unsigned dtocyc; + unsigned dtomul; + + timeout = atmci_ns_to_clocks(host, data->timeout_ns) + + data->timeout_clks; + + for (dtomul = 0; dtomul < 8; dtomul++) { + unsigned shift = dtomul_to_shift[dtomul]; + dtocyc = (timeout + (1 << shift) - 1) >> shift; + if (dtocyc < 15) + break; + } + + if (dtomul >= 8) { + dtomul = 7; + dtocyc = 15; + } + + dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", + dtocyc << dtomul_to_shift[dtomul]); + atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc))); +} + +/* + * Return mask with command flags to be enabled for this command. + */ +static u32 atmci_prepare_command(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct mmc_data *data; + u32 cmdr; + + cmd->error = -EINPROGRESS; + + cmdr = ATMCI_CMDR_CMDNB(cmd->opcode); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + cmdr |= ATMCI_CMDR_RSPTYP_136BIT; + else + cmdr |= ATMCI_CMDR_RSPTYP_48BIT; + } + + /* + * This should really be MAXLAT_5 for CMD2 and ACMD41, but + * it's too difficult to determine whether this is an ACMD or + * not. Better make it 64. 
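+	 * Using the maximum latency (64 cycles) is safe in any case: it
+	 * only widens the window in which a response may arrive, at worst
+	 * delaying the detection of a response timeout.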
+	 */
+	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
+
+	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
+		cmdr |= ATMCI_CMDR_OPDCMD;
+
+	data = cmd->data;
+	if (data) {
+		cmdr |= ATMCI_CMDR_START_XFER;
+
+		if (cmd->opcode == SD_IO_RW_EXTENDED) {
+			cmdr |= ATMCI_CMDR_SDIO_BLOCK;
+		} else {
+			if (data->blocks > 1)
+				cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+			else
+				cmdr |= ATMCI_CMDR_BLOCK;
+		}
+
+		if (data->flags & MMC_DATA_READ)
+			cmdr |= ATMCI_CMDR_TRDIR_READ;
+	}
+
+	return cmdr;
+}
+
+static void atmci_send_command(struct atmel_mci *host,
+		struct mmc_command *cmd, u32 cmd_flags)
+{
+	unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
+		ATMCI_CMD_TIMEOUT_MS;
+
+	WARN_ON(host->cmd);
+	host->cmd = cmd;
+
+	dev_vdbg(&host->pdev->dev,
+			"start command: ARGR=0x%08x CMDR=0x%08x\n",
+			cmd->arg, cmd_flags);
+
+	atmci_writel(host, ATMCI_ARGR, cmd->arg);
+	atmci_writel(host, ATMCI_CMDR, cmd_flags);
+
+	mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
+}
+
+static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
+{
+	dev_dbg(&host->pdev->dev, "send stop command\n");
+	atmci_send_command(host, data->stop, host->stop_cmdr);
+	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+}
+
+/*
+ * Configure given PDC buffer taking care of alignment issues.
+ * Update host->data_size and host->sg.
+ */
+static void atmci_pdc_set_single_buf(struct atmel_mci *host,
+	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
+{
+	u32 pointer_reg, counter_reg;
+	unsigned int buf_size;
+
+	if (dir == XFER_RECEIVE) {
+		pointer_reg = ATMEL_PDC_RPR;
+		counter_reg = ATMEL_PDC_RCR;
+	} else {
+		pointer_reg = ATMEL_PDC_TPR;
+		counter_reg = ATMEL_PDC_TCR;
+	}
+
+	if (buf_nb == PDC_SECOND_BUF) {
+		pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
+		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
+	}
+
+	if (!host->caps.has_rwproof) {
+		buf_size = host->buf_size;
+		atmci_writel(host, pointer_reg, host->buf_phys_addr);
+	} else {
+		buf_size = sg_dma_len(host->sg);
+		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+	}
+
+	if (host->data_size <= buf_size) {
+		if (host->data_size & 0x3) {
+			/* If the size is not a multiple of 4, transfer in byte mode */
+			atmci_writel(host, counter_reg, host->data_size);
+			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
+		} else {
+			/* Otherwise transfer 32-bit words */
+			atmci_writel(host, counter_reg, host->data_size / 4);
+		}
+		host->data_size = 0;
+	} else {
+		/* We assume the size of a page is 32-bit aligned */
+		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
+		host->data_size -= sg_dma_len(host->sg);
+		if (host->data_size)
+			host->sg = sg_next(host->sg);
+	}
+}
+
+/*
+ * Configure PDC buffers according to the data size, i.e. configuring one or
+ * two buffers. Don't use this function if you want to configure only the
+ * second buffer. In this case, use atmci_pdc_set_single_buf.
+ */
+static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
+{
+	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
+	if (host->data_size)
+		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
+}
+
+/*
+ * Unmap sg lists, called when transfer is finished.
+ */
+static void atmci_pdc_cleanup(struct atmel_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (data)
+		dma_unmap_sg(&host->pdev->dev,
+				data->sg, data->sg_len,
+				mmc_get_dma_dir(data));
+}
+
+/*
+ * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
+ * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
+ * interrupt needed for both transfer directions.
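+ * For controllers without the read/write proof capability, read data was
+ * bounced through host->buffer, so it is copied back (and, on versions with
+ * broken data ordering, byte-swapped) into the scatterlist here.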
+ */
+static void atmci_pdc_complete(struct atmel_mci *host)
+{
+	int transfer_size = host->data->blocks * host->data->blksz;
+	int i;
+
+	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
+	if ((!host->caps.has_rwproof)
+	    && (host->data->flags & MMC_DATA_READ)) {
+		if (host->caps.has_bad_data_ordering)
+			for (i = 0; i < transfer_size; i++)
+				host->buffer[i] = swab32(host->buffer[i]);
+		sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+				host->buffer, transfer_size);
+	}
+
+	atmci_pdc_cleanup(host);
+
+	dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
+	atmci_set_pending(host, EVENT_XFER_COMPLETE);
+	tasklet_schedule(&host->tasklet);
+}
+
+static void atmci_dma_cleanup(struct atmel_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (data)
+		dma_unmap_sg(host->dma.chan->device->dev,
+				data->sg, data->sg_len,
+				mmc_get_dma_dir(data));
+}
+
+/*
+ * This function is called by the DMA driver from tasklet context.
+ */
+static void atmci_dma_complete(void *arg)
+{
+	struct atmel_mci *host = arg;
+	struct mmc_data *data = host->data;
+
+	dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+	if (host->caps.has_dma_conf_reg)
+		/* Disable DMA hardware handshaking on MCI */
+		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
+
+	atmci_dma_cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point trying
+	 * to send the stop command or waiting for NBUSY in this case.
+	 */
+	if (data) {
+		dev_dbg(&host->pdev->dev,
+			"(%s) set pending xfer complete\n", __func__);
+		atmci_set_pending(host, EVENT_XFER_COMPLETE);
+		tasklet_schedule(&host->tasklet);
+
+		/*
+		 * Regardless of what the documentation says, we have
+		 * to wait for NOTBUSY even after block read
+		 * operations.
+		 *
+		 * When the DMA transfer is complete, the controller
+		 * may still be reading the CRC from the card, i.e.
+		 * the data transfer is still in progress and we
+		 * haven't seen all the potential error bits yet.
+		 *
+		 * The interrupt handler will schedule a different
+		 * tasklet to finish things up when the data transfer
+		 * is completely done.
+		 *
+		 * We may not complete the mmc request here anyway
+		 * because the mmc layer may call back and cause us to
+		 * violate the "don't submit new operations from the
+		 * completion callback" rule of the dma engine
+		 * framework.
+		 */
+		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+	}
+}
+
+/*
+ * Returns a mask of interrupt flags to be enabled after the whole
+ * request has been prepared.
+ */
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+{
+	u32 iflags;
+
+	data->error = -EINPROGRESS;
+
+	host->sg = data->sg;
+	host->sg_len = data->sg_len;
+	host->data = data;
+	host->data_chan = NULL;
+
+	iflags = ATMCI_DATA_ERROR_FLAGS;
+
+	/*
+	 * Errata: MMC data write operation with less than 12
+	 * bytes is impossible.
+	 *
+	 * Errata: MCI Transmit Data Register (TDR) FIFO
+	 * corruption when length is not multiple of 4.
+	 */
+	if (data->blocks * data->blksz < 12
+			|| (data->blocks * data->blksz) & 3)
+		host->need_reset = true;
+
+	host->pio_offset = 0;
+	if (data->flags & MMC_DATA_READ)
+		iflags |= ATMCI_RXRDY;
+	else
+		iflags |= ATMCI_TXRDY;
+
+	return iflags;
+}
+
+/*
+ * Set interrupt flags and set the block length in the MCI mode register even
+ * though this value is also accessible in the MCI block register. This seems
+ * to be necessary for MCI versions prior to the High Speed one. It also maps
+ * the scatterlist and configures the PDC registers.
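+ * On the write path the data is staged into the bounce buffer here (and
+ * byte-swapped on versions with broken data ordering) before the PDC is
+ * programmed.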
+ */ +static u32 +atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) +{ + u32 iflags, tmp; + int i; + + data->error = -EINPROGRESS; + + host->data = data; + host->sg = data->sg; + iflags = ATMCI_DATA_ERROR_FLAGS; + + /* Enable pdc mode */ + atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE); + + if (data->flags & MMC_DATA_READ) + iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; + else + iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE; + + /* Set BLKLEN */ + tmp = atmci_readl(host, ATMCI_MR); + tmp &= 0x0000ffff; + tmp |= ATMCI_BLKLEN(data->blksz); + atmci_writel(host, ATMCI_MR, tmp); + + /* Configure PDC */ + host->data_size = data->blocks * data->blksz; + dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + if ((!host->caps.has_rwproof) + && (host->data->flags & MMC_DATA_WRITE)) { + sg_copy_to_buffer(host->data->sg, host->data->sg_len, + host->buffer, host->data_size); + if (host->caps.has_bad_data_ordering) + for (i = 0; i < host->data_size; i++) + host->buffer[i] = swab32(host->buffer[i]); + } + + if (host->data_size) + atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ? + XFER_RECEIVE : XFER_TRANSMIT); + return iflags; +} + +static u32 +atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) +{ + struct dma_chan *chan; + struct dma_async_tx_descriptor *desc; + struct scatterlist *sg; + unsigned int i; + enum dma_transfer_direction slave_dirn; + unsigned int sglen; + u32 maxburst; + u32 iflags; + + data->error = -EINPROGRESS; + + WARN_ON(host->data); + host->sg = NULL; + host->data = data; + + iflags = ATMCI_DATA_ERROR_FLAGS; + + /* + * We don't do DMA on "complex" transfers, i.e. with + * non-word-aligned buffers or lengths. Also, we don't bother + * with all the DMA setup overhead for short transfers. + */ + if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) + return atmci_prepare_data(host, data); + if (data->blksz & 3) + return atmci_prepare_data(host, data); + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 3 || sg->length & 3) + return atmci_prepare_data(host, data); + } + + /* If we don't have a channel, we can't do DMA */ + chan = host->dma.chan; + if (chan) + host->data_chan = chan; + + if (!chan) + return -ENODEV; + + if (data->flags & MMC_DATA_READ) { + host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; + maxburst = atmci_convert_chksize(host, + host->dma_conf.src_maxburst); + } else { + host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; + maxburst = atmci_convert_chksize(host, + host->dma_conf.dst_maxburst); + } + + if (host->caps.has_dma_conf_reg) + atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | + ATMCI_DMAEN); + + sglen = dma_map_sg(chan->device->dev, data->sg, + data->sg_len, mmc_get_dma_dir(data)); + + dmaengine_slave_config(chan, &host->dma_conf); + desc = dmaengine_prep_slave_sg(chan, + data->sg, sglen, slave_dirn, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + goto unmap_exit; + + host->dma.data_desc = desc; + desc->callback = atmci_dma_complete; + desc->callback_param = host; + + return iflags; +unmap_exit: + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + return -ENOMEM; +} + +static void +atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) +{ + return; +} + +/* + * Start PDC according to transfer direction. 
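+ * RXTEN/TXTEN only starts the PDC counters that were programmed in
+ * atmci_pdc_set_single_buf()/atmci_pdc_set_both_buf() beforehand.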
+ */ +static void +atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data) +{ + if (data->flags & MMC_DATA_READ) + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); + else + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); +} + +static void +atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) +{ + struct dma_chan *chan = host->data_chan; + struct dma_async_tx_descriptor *desc = host->dma.data_desc; + + if (chan) { + dmaengine_submit(desc); + dma_async_issue_pending(chan); + } +} + +static void atmci_stop_transfer(struct atmel_mci *host) +{ + dev_dbg(&host->pdev->dev, + "(%s) set pending xfer complete\n", __func__); + atmci_set_pending(host, EVENT_XFER_COMPLETE); + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); +} + +/* + * Stop data transfer because error(s) occurred. + */ +static void atmci_stop_transfer_pdc(struct atmel_mci *host) +{ + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); +} + +static void atmci_stop_transfer_dma(struct atmel_mci *host) +{ + struct dma_chan *chan = host->data_chan; + + if (chan) { + dmaengine_terminate_all(chan); + atmci_dma_cleanup(host); + } else { + /* Data transfer was stopped by the interrupt handler */ + dev_dbg(&host->pdev->dev, + "(%s) set pending xfer complete\n", __func__); + atmci_set_pending(host, EVENT_XFER_COMPLETE); + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + } +} + +/* + * Start a request: prepare data if needed, prepare the command and activate + * interrupts. + */ +static void atmci_start_request(struct atmel_mci *host, + struct atmel_mci_slot *slot) +{ + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 iflags; + u32 cmdflags; + + mrq = slot->mrq; + host->cur_slot = slot; + host->mrq = mrq; + + host->pending_events = 0; + host->completed_events = 0; + host->cmd_status = 0; + host->data_status = 0; + + dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode); + + if (host->need_reset || host->caps.need_reset_after_xfer) { + iflags = atmci_readl(host, ATMCI_IMR); + iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); + atmci_writel(host, ATMCI_MR, host->mode_reg); + if (host->caps.has_cfg_reg) + atmci_writel(host, ATMCI_CFG, host->cfg_reg); + atmci_writel(host, ATMCI_IER, iflags); + host->need_reset = false; + } + atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); + + iflags = atmci_readl(host, ATMCI_IMR); + if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) + dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", + iflags); + + if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { + /* Send init sequence (74 clock cycles) */ + atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT); + while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY)) + cpu_relax(); + } + iflags = 0; + data = mrq->data; + if (data) { + atmci_set_timeout(host, slot, data); + + /* Must set block count/size before sending command */ + atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks) + | ATMCI_BLKLEN(data->blksz)); + dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", + ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz)); + + iflags |= host->prepare_data(host, data); + } + + iflags |= ATMCI_CMDRDY; + cmd = mrq->cmd; + cmdflags = atmci_prepare_command(slot->mmc, cmd); + + /* + * DMA transfer should be started before sending the command to avoid + * unexpected errors especially for read operations in SDIO mode. 
+ * Unfortunately, in PDC mode, the command has to be sent before starting
+ * the transfer.
+ */
+	if (host->submit_data != &atmci_submit_data_dma)
+		atmci_send_command(host, cmd, cmdflags);
+
+	if (data)
+		host->submit_data(host, data);
+
+	if (host->submit_data == &atmci_submit_data_dma)
+		atmci_send_command(host, cmd, cmdflags);
+
+	if (mrq->stop) {
+		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
+		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
+		if (!(data->flags & MMC_DATA_WRITE))
+			host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
+		host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+	}
+
+	/*
+	 * We could have enabled interrupts earlier, but I suspect
+	 * that would open up a nice can of interesting race
+	 * conditions (e.g. command and data complete, but stop not
+	 * prepared yet.)
+	 */
+	atmci_writel(host, ATMCI_IER, iflags);
+}
+
+static void atmci_queue_request(struct atmel_mci *host,
+		struct atmel_mci_slot *slot, struct mmc_request *mrq)
+{
+	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+			host->state);
+
+	spin_lock_bh(&host->lock);
+	slot->mrq = mrq;
+	if (host->state == STATE_IDLE) {
+		host->state = STATE_SENDING_CMD;
+		atmci_start_request(host, slot);
+	} else {
+		dev_dbg(&host->pdev->dev, "queue request\n");
+		list_add_tail(&slot->queue_node, &host->queue);
+	}
+	spin_unlock_bh(&host->lock);
+}
+
+static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct atmel_mci_slot *slot = mmc_priv(mmc);
+	struct atmel_mci *host = slot->host;
+	struct mmc_data *data;
+
+	WARN_ON(slot->mrq);
+	dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
+
+	/*
+	 * We may "know" the card is gone even though there's still an
+	 * electrical connection. If so, we really need to communicate
+	 * this to the MMC core since there won't be any more
+	 * interrupts as the card is completely removed. Otherwise,
+	 * the MMC core might believe the card is still there even
+	 * though the card was just removed very slowly.
+	 */
+	if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
+		mrq->cmd->error = -ENOMEDIUM;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+	/* We don't support multiple blocks of weird lengths. */
+	data = mrq->data;
+	if (data && data->blocks > 1 && data->blksz & 3) {
+		mrq->cmd->error = -EINVAL;
+		mmc_request_done(mmc, mrq);
+		/* Don't queue a request that has already been completed */
+		return;
+	}
+
+	atmci_queue_request(host, slot, mrq);
+}
+
+static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct atmel_mci_slot *slot = mmc_priv(mmc);
+	struct atmel_mci *host = slot->host;
+	unsigned int i;
+
+	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_1:
+		slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
+		break;
+	case MMC_BUS_WIDTH_4:
+		slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
+		break;
+	case MMC_BUS_WIDTH_8:
+		slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
+		break;
+	}
+
+	if (ios->clock) {
+		unsigned int clock_min = ~0U;
+		int clkdiv;
+
+		spin_lock_bh(&host->lock);
+		if (!host->mode_reg) {
+			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+			if (host->caps.has_cfg_reg)
+				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+		}
+
+		/*
+		 * Use mirror of ios->clock to prevent race with mmc
+		 * core ios update when finding the minimum.
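+		 * All slots share a single clock divider, so the controller
+		 * has to run at the rate requested by the slowest slot.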
+ */ + slot->clock = ios->clock; + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { + if (host->slot[i] && host->slot[i]->clock + && host->slot[i]->clock < clock_min) + clock_min = host->slot[i]->clock; + } + + /* Calculate clock divider */ + if (host->caps.has_odd_clk_div) { + clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; + if (clkdiv < 0) { + dev_warn(&mmc->class_dev, + "clock %u too fast; using %lu\n", + clock_min, host->bus_hz / 2); + clkdiv = 0; + } else if (clkdiv > 511) { + dev_warn(&mmc->class_dev, + "clock %u too slow; using %lu\n", + clock_min, host->bus_hz / (511 + 2)); + clkdiv = 511; + } + host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1) + | ATMCI_MR_CLKODD(clkdiv & 1); + } else { + clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; + if (clkdiv > 255) { + dev_warn(&mmc->class_dev, + "clock %u too slow; using %lu\n", + clock_min, host->bus_hz / (2 * 256)); + clkdiv = 255; + } + host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); + } + + /* + * WRPROOF and RDPROOF prevent overruns/underruns by + * stopping the clock when the FIFO is full/empty. + * This state is not expected to last for long. + */ + if (host->caps.has_rwproof) + host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF); + + if (host->caps.has_cfg_reg) { + /* setup High Speed mode in relation with card capacity */ + if (ios->timing == MMC_TIMING_SD_HS) + host->cfg_reg |= ATMCI_CFG_HSMODE; + else + host->cfg_reg &= ~ATMCI_CFG_HSMODE; + } + + if (list_empty(&host->queue)) { + atmci_writel(host, ATMCI_MR, host->mode_reg); + if (host->caps.has_cfg_reg) + atmci_writel(host, ATMCI_CFG, host->cfg_reg); + } else { + host->need_clock_update = true; + } + + spin_unlock_bh(&host->lock); + } else { + bool any_slot_active = false; + + spin_lock_bh(&host->lock); + slot->clock = 0; + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { + if (host->slot[i] && host->slot[i]->clock) { + any_slot_active = true; + break; + } + } + if (!any_slot_active) { + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); + if (host->mode_reg) { + atmci_readl(host, ATMCI_MR); + } + host->mode_reg = 0; + } + spin_unlock_bh(&host->lock); + } + + switch (ios->power_mode) { + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + break; + case MMC_POWER_UP: + set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + break; + default: + break; + } +} + +static int atmci_get_ro(struct mmc_host *mmc) +{ + int read_only = -ENOSYS; + struct atmel_mci_slot *slot = mmc_priv(mmc); + + if (gpio_is_valid(slot->wp_pin)) { + read_only = gpio_get_value(slot->wp_pin); + dev_dbg(&mmc->class_dev, "card is %s\n", + read_only ? "read-only" : "read-write"); + } + + return read_only; +} + +static int atmci_get_cd(struct mmc_host *mmc) +{ + int present = -ENOSYS; + struct atmel_mci_slot *slot = mmc_priv(mmc); + + if (gpio_is_valid(slot->detect_pin)) { + present = !(gpio_get_value(slot->detect_pin) ^ + slot->detect_is_active_high); + dev_dbg(&mmc->class_dev, "card is %spresent\n", + present ? 
"" : "not "); + } + + return present; +} + +static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct atmel_mci_slot *slot = mmc_priv(mmc); + struct atmel_mci *host = slot->host; + + if (enable) + atmci_writel(host, ATMCI_IER, slot->sdio_irq); + else + atmci_writel(host, ATMCI_IDR, slot->sdio_irq); +} + +static const struct mmc_host_ops atmci_ops = { + .request = atmci_request, + .set_ios = atmci_set_ios, + .get_ro = atmci_get_ro, + .get_cd = atmci_get_cd, + .enable_sdio_irq = atmci_enable_sdio_irq, +}; + +/* Called with host->lock held */ +static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) + __releases(&host->lock) + __acquires(&host->lock) +{ + struct atmel_mci_slot *slot = NULL; + struct mmc_host *prev_mmc = host->cur_slot->mmc; + + WARN_ON(host->cmd || host->data); + + del_timer(&host->timer); + + /* + * Update the MMC clock rate if necessary. This may be + * necessary if set_ios() is called when a different slot is + * busy transferring data. + */ + if (host->need_clock_update) { + atmci_writel(host, ATMCI_MR, host->mode_reg); + if (host->caps.has_cfg_reg) + atmci_writel(host, ATMCI_CFG, host->cfg_reg); + } + + host->cur_slot->mrq = NULL; + host->mrq = NULL; + if (!list_empty(&host->queue)) { + slot = list_entry(host->queue.next, + struct atmel_mci_slot, queue_node); + list_del(&slot->queue_node); + dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", + mmc_hostname(slot->mmc)); + host->state = STATE_SENDING_CMD; + atmci_start_request(host, slot); + } else { + dev_vdbg(&host->pdev->dev, "list empty\n"); + host->state = STATE_IDLE; + } + + spin_unlock(&host->lock); + mmc_request_done(prev_mmc, mrq); + spin_lock(&host->lock); +} + +static void atmci_command_complete(struct atmel_mci *host, + struct mmc_command *cmd) +{ + u32 status = host->cmd_status; + + /* Read the response from the card (up to 16 bytes) */ + cmd->resp[0] = atmci_readl(host, ATMCI_RSPR); + cmd->resp[1] = atmci_readl(host, ATMCI_RSPR); + cmd->resp[2] = atmci_readl(host, ATMCI_RSPR); + cmd->resp[3] = atmci_readl(host, ATMCI_RSPR); + + if (status & ATMCI_RTOE) + cmd->error = -ETIMEDOUT; + else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE)) + cmd->error = -EILSEQ; + else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) + cmd->error = -EIO; + else if (host->mrq->data && (host->mrq->data->blksz & 3)) { + if (host->caps.need_blksz_mul_4) { + cmd->error = -EINVAL; + host->need_reset = 1; + } + } else + cmd->error = 0; +} + +static void atmci_detect_change(struct timer_list *t) +{ + struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer); + bool present; + bool present_old; + + /* + * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before + * freeing the interrupt. We must not re-enable the interrupt + * if it has been freed, and if we're shutting down, it + * doesn't really matter whether the card is present or not. + */ + smp_rmb(); + if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) + return; + + enable_irq(gpio_to_irq(slot->detect_pin)); + present = !(gpio_get_value(slot->detect_pin) ^ + slot->detect_is_active_high); + present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); + + dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", + present, present_old); + + if (present != present_old) { + struct atmel_mci *host = slot->host; + struct mmc_request *mrq; + + dev_dbg(&slot->mmc->class_dev, "card %s\n", + present ? 
"inserted" : "removed"); + + spin_lock(&host->lock); + + if (!present) + clear_bit(ATMCI_CARD_PRESENT, &slot->flags); + else + set_bit(ATMCI_CARD_PRESENT, &slot->flags); + + /* Clean up queue if present */ + mrq = slot->mrq; + if (mrq) { + if (mrq == host->mrq) { + /* + * Reset controller to terminate any ongoing + * commands or data transfers. + */ + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); + atmci_writel(host, ATMCI_MR, host->mode_reg); + if (host->caps.has_cfg_reg) + atmci_writel(host, ATMCI_CFG, host->cfg_reg); + + host->data = NULL; + host->cmd = NULL; + + switch (host->state) { + case STATE_IDLE: + break; + case STATE_SENDING_CMD: + mrq->cmd->error = -ENOMEDIUM; + if (mrq->data) + host->stop_transfer(host); + break; + case STATE_DATA_XFER: + mrq->data->error = -ENOMEDIUM; + host->stop_transfer(host); + break; + case STATE_WAITING_NOTBUSY: + mrq->data->error = -ENOMEDIUM; + break; + case STATE_SENDING_STOP: + mrq->stop->error = -ENOMEDIUM; + break; + case STATE_END_REQUEST: + break; + } + + atmci_request_end(host, mrq); + } else { + list_del(&slot->queue_node); + mrq->cmd->error = -ENOMEDIUM; + if (mrq->data) + mrq->data->error = -ENOMEDIUM; + if (mrq->stop) + mrq->stop->error = -ENOMEDIUM; + + spin_unlock(&host->lock); + mmc_request_done(slot->mmc, mrq); + spin_lock(&host->lock); + } + } + spin_unlock(&host->lock); + + mmc_detect_change(slot->mmc, 0); + } +} + +static void atmci_tasklet_func(unsigned long priv) +{ + struct atmel_mci *host = (struct atmel_mci *)priv; + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = host->data; + enum atmel_mci_state state = host->state; + enum atmel_mci_state prev_state; + u32 status; + + spin_lock(&host->lock); + + state = host->state; + + dev_vdbg(&host->pdev->dev, + "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", + state, host->pending_events, host->completed_events, + atmci_readl(host, ATMCI_IMR)); + + do { + prev_state = state; + dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state); + + switch (state) { + case STATE_IDLE: + break; + + case STATE_SENDING_CMD: + /* + * Command has been sent, we are waiting for command + * ready. Then we have three next states possible: + * END_REQUEST by default, WAITING_NOTBUSY if it's a + * command needing it or DATA_XFER if there is data. + */ + dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); + if (!atmci_test_and_clear_pending(host, + EVENT_CMD_RDY)) + break; + + dev_dbg(&host->pdev->dev, "set completed cmd ready\n"); + host->cmd = NULL; + atmci_set_completed(host, EVENT_CMD_RDY); + atmci_command_complete(host, mrq->cmd); + if (mrq->data) { + dev_dbg(&host->pdev->dev, + "command with data transfer"); + /* + * If there is a command error don't start + * data transfer. + */ + if (mrq->cmd->error) { + host->stop_transfer(host); + host->data = NULL; + atmci_writel(host, ATMCI_IDR, + ATMCI_TXRDY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + state = STATE_END_REQUEST; + } else + state = STATE_DATA_XFER; + } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) { + dev_dbg(&host->pdev->dev, + "command response need waiting notbusy"); + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + state = STATE_WAITING_NOTBUSY; + } else + state = STATE_END_REQUEST; + + break; + + case STATE_DATA_XFER: + if (atmci_test_and_clear_pending(host, + EVENT_DATA_ERROR)) { + dev_dbg(&host->pdev->dev, "set completed data error\n"); + atmci_set_completed(host, EVENT_DATA_ERROR); + state = STATE_END_REQUEST; + break; + } + + /* + * A data transfer is in progress. 
The event expected
+			 * to move to the next state depends on the data transfer
+			 * type (PDC or DMA). Once the transfer is done we can move
+			 * to the next step, which is WAITING_NOTBUSY in the write
+			 * case and directly SENDING_STOP in the read case.
+			 */
+			dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
+			if (!atmci_test_and_clear_pending(host,
+						EVENT_XFER_COMPLETE))
+				break;
+
+			dev_dbg(&host->pdev->dev,
+				"(%s) set completed xfer complete\n",
+				__func__);
+			atmci_set_completed(host, EVENT_XFER_COMPLETE);
+
+			if (host->caps.need_notbusy_for_read_ops ||
+			    (host->data->flags & MMC_DATA_WRITE)) {
+				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+				state = STATE_WAITING_NOTBUSY;
+			} else if (host->mrq->stop) {
+				atmci_send_stop_cmd(host, data);
+				state = STATE_SENDING_STOP;
+			} else {
+				host->data = NULL;
+				data->bytes_xfered = data->blocks * data->blksz;
+				data->error = 0;
+				state = STATE_END_REQUEST;
+			}
+			break;
+
+		case STATE_WAITING_NOTBUSY:
+			/*
+			 * We can be in this state for two reasons: a command
+			 * requiring the not-busy signal (stop command
+			 * included) or a write operation. In the latter case,
+			 * we need to send a stop command.
+			 */
+			dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+			if (!atmci_test_and_clear_pending(host,
+						EVENT_NOTBUSY))
+				break;
+
+			dev_dbg(&host->pdev->dev, "set completed not busy\n");
+			atmci_set_completed(host, EVENT_NOTBUSY);
+
+			if (host->data) {
+				/*
+				 * For some commands such as CMD53, even if
+				 * there is data transfer, there is no stop
+				 * command to send.
+				 */
+				if (host->mrq->stop) {
+					atmci_send_stop_cmd(host, data);
+					state = STATE_SENDING_STOP;
+				} else {
+					host->data = NULL;
+					data->bytes_xfered = data->blocks
+							* data->blksz;
+					data->error = 0;
+					state = STATE_END_REQUEST;
+				}
+			} else
+				state = STATE_END_REQUEST;
+			break;
+
+		case STATE_SENDING_STOP:
+			/*
+			 * In this state, it is important to set host->data to
+			 * NULL (which is tested in the waiting notbusy state)
+			 * in order to go to the end request state instead of
+			 * sending stop again.
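+			 * On success we still wait for NOTBUSY below, so the
+			 * card has released the bus before the request is
+			 * completed.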
+ */ + dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); + if (!atmci_test_and_clear_pending(host, + EVENT_CMD_RDY)) + break; + + dev_dbg(&host->pdev->dev, "FSM: cmd ready\n"); + host->cmd = NULL; + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; + atmci_command_complete(host, mrq->stop); + if (mrq->stop->error) { + host->stop_transfer(host); + atmci_writel(host, ATMCI_IDR, + ATMCI_TXRDY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + state = STATE_END_REQUEST; + } else { + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + state = STATE_WAITING_NOTBUSY; + } + host->data = NULL; + break; + + case STATE_END_REQUEST: + atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + status = host->data_status; + if (unlikely(status)) { + host->stop_transfer(host); + host->data = NULL; + if (data) { + if (status & ATMCI_DTOE) { + data->error = -ETIMEDOUT; + } else if (status & ATMCI_DCRCE) { + data->error = -EILSEQ; + } else { + data->error = -EIO; + } + } + } + + atmci_request_end(host, host->mrq); + goto unlock; /* atmci_request_end() sets host->state */ + break; + } + } while (state != prev_state); + + host->state = state; + +unlock: + spin_unlock(&host->lock); +} + +static void atmci_read_data_pio(struct atmel_mci *host) +{ + struct scatterlist *sg = host->sg; + unsigned int offset = host->pio_offset; + struct mmc_data *data = host->data; + u32 value; + u32 status; + unsigned int nbytes = 0; + + do { + value = atmci_readl(host, ATMCI_RDR); + if (likely(offset + 4 <= sg->length)) { + sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); + + offset += 4; + nbytes += 4; + + if (offset == sg->length) { + flush_dcache_page(sg_page(sg)); + host->sg = sg = sg_next(sg); + host->sg_len--; + if (!sg || !host->sg_len) + goto done; + + offset = 0; + } + } else { + unsigned int remaining = sg->length - offset; + + sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); + nbytes += remaining; + + flush_dcache_page(sg_page(sg)); + host->sg = sg = sg_next(sg); + host->sg_len--; + if (!sg || !host->sg_len) + goto done; + + offset = 4 - remaining; + sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, + offset, 0); + nbytes += offset; + } + + status = atmci_readl(host, ATMCI_SR); + if (status & ATMCI_DATA_ERROR_FLAGS) { + atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS)); + host->data_status = status; + data->bytes_xfered += nbytes; + return; + } + } while (status & ATMCI_RXRDY); + + host->pio_offset = offset; + data->bytes_xfered += nbytes; + + return; + +done: + atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY); + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + data->bytes_xfered += nbytes; + smp_wmb(); + atmci_set_pending(host, EVENT_XFER_COMPLETE); +} + +static void atmci_write_data_pio(struct atmel_mci *host) +{ + struct scatterlist *sg = host->sg; + unsigned int offset = host->pio_offset; + struct mmc_data *data = host->data; + u32 value; + u32 status; + unsigned int nbytes = 0; + + do { + if (likely(offset + 4 <= sg->length)) { + sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); + atmci_writel(host, ATMCI_TDR, value); + + offset += 4; + nbytes += 4; + if (offset == sg->length) { + host->sg = sg = sg_next(sg); + host->sg_len--; + if (!sg || !host->sg_len) + goto done; + + offset = 0; + } + } else { + unsigned int remaining = sg->length - offset; + + value = 0; + sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); + nbytes += remaining; + + host->sg = sg = sg_next(sg); + host->sg_len--; + if (!sg || !host->sg_len) { + 
atmci_writel(host, ATMCI_TDR, value); /* flush the last partial word */
+				goto done;
+			}
+
+			offset = 4 - remaining;
+			sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
+					offset, 0);
+			atmci_writel(host, ATMCI_TDR, value);
+			nbytes += offset;
+		}
+
+		status = atmci_readl(host, ATMCI_SR);
+		if (status & ATMCI_DATA_ERROR_FLAGS) {
+			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
+						| ATMCI_DATA_ERROR_FLAGS));
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+			return;
+		}
+	} while (status & ATMCI_TXRDY);
+
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+
+	return;
+
+done:
+	atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
+	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	atmci_set_pending(host, EVENT_XFER_COMPLETE);
+}
+
+static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
+{
+	int i;
+
+	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+		struct atmel_mci_slot *slot = host->slot[i];
+		if (slot && (status & slot->sdio_irq)) {
+			mmc_signal_sdio_irq(slot->mmc);
+		}
+	}
+}
+
+
+static irqreturn_t atmci_interrupt(int irq, void *dev_id)
+{
+	struct atmel_mci *host = dev_id;
+	u32 status, mask, pending;
+	unsigned int pass_count = 0;
+
+	do {
+		status = atmci_readl(host, ATMCI_SR);
+		mask = atmci_readl(host, ATMCI_IMR);
+		pending = status & mask;
+		if (!pending)
+			break;
+
+		if (pending & ATMCI_DATA_ERROR_FLAGS) {
+			dev_dbg(&host->pdev->dev, "IRQ: data error\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
+				| ATMCI_RXRDY | ATMCI_TXRDY
+				| ATMCI_ENDRX | ATMCI_ENDTX
+				| ATMCI_RXBUFF | ATMCI_TXBUFE);
+
+			host->data_status = status;
+			dev_dbg(&host->pdev->dev, "set pending data error\n");
+			smp_wmb();
+			atmci_set_pending(host, EVENT_DATA_ERROR);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & ATMCI_TXBUFE) {
+			dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
+			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+			/*
+			 * We can receive this interrupt before having
+			 * configured the second pdc buffer, so we need to
+			 * reconfigure the first and second buffers again.
+			 */
+			if (host->data_size) {
+				atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
+				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+				atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
+			} else {
+				atmci_pdc_complete(host);
+			}
+		} else if (pending & ATMCI_ENDTX) {
+			dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+
+			if (host->data_size) {
+				atmci_pdc_set_single_buf(host,
+						XFER_TRANSMIT, PDC_SECOND_BUF);
+				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+			}
+		}
+
+		if (pending & ATMCI_RXBUFF) {
+			dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
+			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+			/*
+			 * We can receive this interrupt before having
+			 * configured the second pdc buffer, so we need to
+			 * reconfigure the first and second buffers again.
+			 */
+			if (host->data_size) {
+				atmci_pdc_set_both_buf(host, XFER_RECEIVE);
+				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+				atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
+			} else {
+				atmci_pdc_complete(host);
+			}
+		} else if (pending & ATMCI_ENDRX) {
+			dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
+			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+
+			if (host->data_size) {
+				atmci_pdc_set_single_buf(host,
+						XFER_RECEIVE, PDC_SECOND_BUF);
+				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+			}
+		}
+
+		/*
+		 * The first MCI IP revisions, mainly the ones using the PDC,
+		 * have some issues with the notbusy signal.
You can't get it after + * data transmission if you have not sent a stop command. + * The appropriate workaround is to use the BLKE signal. + */ + if (pending & ATMCI_BLKE) { + dev_dbg(&host->pdev->dev, "IRQ: blke\n"); + atmci_writel(host, ATMCI_IDR, ATMCI_BLKE); + smp_wmb(); + dev_dbg(&host->pdev->dev, "set pending notbusy\n"); + atmci_set_pending(host, EVENT_NOTBUSY); + tasklet_schedule(&host->tasklet); + } + + if (pending & ATMCI_NOTBUSY) { + dev_dbg(&host->pdev->dev, "IRQ: not_busy\n"); + atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY); + smp_wmb(); + dev_dbg(&host->pdev->dev, "set pending notbusy\n"); + atmci_set_pending(host, EVENT_NOTBUSY); + tasklet_schedule(&host->tasklet); + } + + if (pending & ATMCI_RXRDY) + atmci_read_data_pio(host); + if (pending & ATMCI_TXRDY) + atmci_write_data_pio(host); + + if (pending & ATMCI_CMDRDY) { + dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n"); + atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); + host->cmd_status = status; + smp_wmb(); + dev_dbg(&host->pdev->dev, "set pending cmd rdy\n"); + atmci_set_pending(host, EVENT_CMD_RDY); + tasklet_schedule(&host->tasklet); + } + + if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) + atmci_sdio_interrupt(host, status); + + } while (pass_count++ < 5); + + return pass_count ? IRQ_HANDLED : IRQ_NONE; +} + +static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) +{ + struct atmel_mci_slot *slot = dev_id; + + /* + * Disable interrupts until the pin has stabilized and check + * the state then. Use mod_timer() since we may be in the + * middle of the timer routine when this interrupt triggers. + */ + disable_irq_nosync(irq); + mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20)); + + return IRQ_HANDLED; +} + +static int atmci_init_slot(struct atmel_mci *host, + struct mci_slot_pdata *slot_data, unsigned int id, + u32 sdc_reg, u32 sdio_irq) +{ + struct mmc_host *mmc; + struct atmel_mci_slot *slot; + int ret; + + mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); + if (!mmc) + return -ENOMEM; + + slot = mmc_priv(mmc); + slot->mmc = mmc; + slot->host = host; + slot->detect_pin = slot_data->detect_pin; + slot->wp_pin = slot_data->wp_pin; + slot->detect_is_active_high = slot_data->detect_is_active_high; + slot->sdc_reg = sdc_reg; + slot->sdio_irq = sdio_irq; + + dev_dbg(&mmc->class_dev, + "slot[%u]: bus_width=%u, detect_pin=%d, " + "detect_is_active_high=%s, wp_pin=%d\n", + id, slot_data->bus_width, slot_data->detect_pin, + slot_data->detect_is_active_high ? "true" : "false", + slot_data->wp_pin); + + mmc->ops = &atmci_ops; + mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); + mmc->f_max = host->bus_hz / 2; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + if (sdio_irq) + mmc->caps |= MMC_CAP_SDIO_IRQ; + if (host->caps.has_highspeed) + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + /* + * Without the read/write proof capability, it is strongly suggested to + * use only one bit for data to prevent fifo underruns and overruns + * which will corrupt data. 
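+	 * This is why 4- and 8-bit bus widths are only advertised below when
+	 * has_rwproof is set.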
+ */ + if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) { + mmc->caps |= MMC_CAP_4_BIT_DATA; + if (slot_data->bus_width >= 8) + mmc->caps |= MMC_CAP_8_BIT_DATA; + } + + if (atmci_get_version(host) < 0x200) { + mmc->max_segs = 256; + mmc->max_blk_size = 4095; + mmc->max_blk_count = 256; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs; + } else { + mmc->max_segs = 64; + mmc->max_req_size = 32768 * 512; + mmc->max_blk_size = 32768; + mmc->max_blk_count = 512; + } + + /* Assume card is present initially */ + set_bit(ATMCI_CARD_PRESENT, &slot->flags); + if (gpio_is_valid(slot->detect_pin)) { + if (devm_gpio_request(&host->pdev->dev, slot->detect_pin, + "mmc_detect")) { + dev_dbg(&mmc->class_dev, "no detect pin available\n"); + slot->detect_pin = -EBUSY; + } else if (gpio_get_value(slot->detect_pin) ^ + slot->detect_is_active_high) { + clear_bit(ATMCI_CARD_PRESENT, &slot->flags); + } + } + + if (!gpio_is_valid(slot->detect_pin)) { + if (slot_data->non_removable) + mmc->caps |= MMC_CAP_NONREMOVABLE; + else + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + + if (gpio_is_valid(slot->wp_pin)) { + if (devm_gpio_request(&host->pdev->dev, slot->wp_pin, + "mmc_wp")) { + dev_dbg(&mmc->class_dev, "no WP pin available\n"); + slot->wp_pin = -EBUSY; + } + } + + host->slot[id] = slot; + mmc_regulator_get_supply(mmc); + ret = mmc_add_host(mmc); + if (ret) { + mmc_free_host(mmc); + return ret; + } + + if (gpio_is_valid(slot->detect_pin)) { + timer_setup(&slot->detect_timer, atmci_detect_change, 0); + + ret = request_irq(gpio_to_irq(slot->detect_pin), + atmci_detect_interrupt, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "mmc-detect", slot); + if (ret) { + dev_dbg(&mmc->class_dev, + "could not request IRQ %d for detect pin\n", + gpio_to_irq(slot->detect_pin)); + slot->detect_pin = -EBUSY; + } + } + + atmci_init_debugfs(slot); + + return 0; +} + +static void atmci_cleanup_slot(struct atmel_mci_slot *slot, + unsigned int id) +{ + /* Debugfs stuff is cleaned up by mmc core */ + + set_bit(ATMCI_SHUTDOWN, &slot->flags); + smp_wmb(); + + mmc_remove_host(slot->mmc); + + if (gpio_is_valid(slot->detect_pin)) { + int pin = slot->detect_pin; + + free_irq(gpio_to_irq(pin), slot); + del_timer_sync(&slot->detect_timer); + } + + slot->host->slot[id] = NULL; + mmc_free_host(slot->mmc); +} + +static int atmci_configure_dma(struct atmel_mci *host) +{ + host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx"); + + if (PTR_ERR(host->dma.chan) == -ENODEV) { + struct mci_platform_data *pdata = host->pdev->dev.platform_data; + dma_cap_mask_t mask; + + if (!pdata || !pdata->dma_filter) + return -ENODEV; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma.chan = dma_request_channel(mask, pdata->dma_filter, + pdata->dma_slave); + if (!host->dma.chan) + host->dma.chan = ERR_PTR(-ENODEV); + } + + if (IS_ERR(host->dma.chan)) + return PTR_ERR(host->dma.chan); + + dev_info(&host->pdev->dev, "using %s for DMA transfers\n", + dma_chan_name(host->dma.chan)); + + host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; + host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_conf.src_maxburst = 1; + host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; + host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_conf.dst_maxburst = 1; + host->dma_conf.device_fc = false; + + return 0; +} + +/* + * HSMCI (High Speed MCI) module is not fully compatible with MCI module. 
+ * HSMCI provides DMA support and a new config register but no more supports + * PDC. + */ +static void atmci_get_cap(struct atmel_mci *host) +{ + unsigned int version; + + version = atmci_get_version(host); + dev_info(&host->pdev->dev, + "version: 0x%x\n", version); + + host->caps.has_dma_conf_reg = 0; + host->caps.has_pdc = 1; + host->caps.has_cfg_reg = 0; + host->caps.has_cstor_reg = 0; + host->caps.has_highspeed = 0; + host->caps.has_rwproof = 0; + host->caps.has_odd_clk_div = 0; + host->caps.has_bad_data_ordering = 1; + host->caps.need_reset_after_xfer = 1; + host->caps.need_blksz_mul_4 = 1; + host->caps.need_notbusy_for_read_ops = 0; + + /* keep only major version number */ + switch (version & 0xf00) { + case 0x600: + case 0x500: + host->caps.has_odd_clk_div = 1; + fallthrough; + case 0x400: + case 0x300: + host->caps.has_dma_conf_reg = 1; + host->caps.has_pdc = 0; + host->caps.has_cfg_reg = 1; + host->caps.has_cstor_reg = 1; + host->caps.has_highspeed = 1; + fallthrough; + case 0x200: + host->caps.has_rwproof = 1; + host->caps.need_blksz_mul_4 = 0; + host->caps.need_notbusy_for_read_ops = 1; + fallthrough; + case 0x100: + host->caps.has_bad_data_ordering = 0; + host->caps.need_reset_after_xfer = 0; + fallthrough; + case 0x0: + break; + default: + host->caps.has_pdc = 0; + dev_warn(&host->pdev->dev, + "Unmanaged mci version, set minimum capabilities\n"); + break; + } +} + +static int atmci_probe(struct platform_device *pdev) +{ + struct mci_platform_data *pdata; + struct atmel_mci *host; + struct resource *regs; + unsigned int nr_slots; + int irq; + int ret, i; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + pdata = pdev->dev.platform_data; + if (!pdata) { + pdata = atmci_of_init(pdev); + if (IS_ERR(pdata)) { + dev_err(&pdev->dev, "platform data not available\n"); + return PTR_ERR(pdata); + } + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->pdev = pdev; + spin_lock_init(&host->lock); + INIT_LIST_HEAD(&host->queue); + + host->mck = devm_clk_get(&pdev->dev, "mci_clk"); + if (IS_ERR(host->mck)) + return PTR_ERR(host->mck); + + host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); + if (!host->regs) + return -ENOMEM; + + ret = clk_prepare_enable(host->mck); + if (ret) + return ret; + + atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); + host->bus_hz = clk_get_rate(host->mck); + + host->mapbase = regs->start; + + tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); + + ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host); + if (ret) { + clk_disable_unprepare(host->mck); + return ret; + } + + /* Get MCI capabilities and set operations according to it */ + atmci_get_cap(host); + ret = atmci_configure_dma(host); + if (ret == -EPROBE_DEFER) + goto err_dma_probe_defer; + if (ret == 0) { + host->prepare_data = &atmci_prepare_data_dma; + host->submit_data = &atmci_submit_data_dma; + host->stop_transfer = &atmci_stop_transfer_dma; + } else if (host->caps.has_pdc) { + dev_info(&pdev->dev, "using PDC\n"); + host->prepare_data = &atmci_prepare_data_pdc; + host->submit_data = &atmci_submit_data_pdc; + host->stop_transfer = &atmci_stop_transfer_pdc; + } else { + dev_info(&pdev->dev, "using PIO\n"); + host->prepare_data = &atmci_prepare_data; + host->submit_data = &atmci_submit_data; + host->stop_transfer = &atmci_stop_transfer; + } + + platform_set_drvdata(pdev, host); + + 
timer_setup(&host->timer, atmci_timeout_timer, 0); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + /* We need at least one slot to succeed */ + nr_slots = 0; + ret = -ENODEV; + if (pdata->slot[0].bus_width) { + ret = atmci_init_slot(host, &pdata->slot[0], + 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); + if (!ret) { + nr_slots++; + host->buf_size = host->slot[0]->mmc->max_req_size; + } + } + if (pdata->slot[1].bus_width) { + ret = atmci_init_slot(host, &pdata->slot[1], + 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); + if (!ret) { + nr_slots++; + if (host->slot[1]->mmc->max_req_size > host->buf_size) + host->buf_size = + host->slot[1]->mmc->max_req_size; + } + } + + if (!nr_slots) { + dev_err(&pdev->dev, "init failed: no slot defined\n"); + goto err_init_slot; + } + + if (!host->caps.has_rwproof) { + host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size, + &host->buf_phys_addr, + GFP_KERNEL); + if (!host->buffer) { + ret = -ENOMEM; + dev_err(&pdev->dev, "buffer allocation failed\n"); + goto err_dma_alloc; + } + } + + dev_info(&pdev->dev, + "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", + host->mapbase, irq, nr_slots); + + pm_runtime_mark_last_busy(&host->pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +err_dma_alloc: + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { + if (host->slot[i]) + atmci_cleanup_slot(host->slot[i], i); + } +err_init_slot: + clk_disable_unprepare(host->mck); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + del_timer_sync(&host->timer); + if (!IS_ERR(host->dma.chan)) + dma_release_channel(host->dma.chan); +err_dma_probe_defer: + free_irq(irq, host); + return ret; +} + +static int atmci_remove(struct platform_device *pdev) +{ + struct atmel_mci *host = platform_get_drvdata(pdev); + unsigned int i; + + pm_runtime_get_sync(&pdev->dev); + + if (host->buffer) + dma_free_coherent(&pdev->dev, host->buf_size, + host->buffer, host->buf_phys_addr); + + for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { + if (host->slot[i]) + atmci_cleanup_slot(host->slot[i], i); + } + + atmci_writel(host, ATMCI_IDR, ~0UL); + atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); + atmci_readl(host, ATMCI_SR); + + del_timer_sync(&host->timer); + if (!IS_ERR(host->dma.chan)) + dma_release_channel(host->dma.chan); + + free_irq(platform_get_irq(pdev, 0), host); + + clk_disable_unprepare(host->mck); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM +static int atmci_runtime_suspend(struct device *dev) +{ + struct atmel_mci *host = dev_get_drvdata(dev); + + clk_disable_unprepare(host->mck); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int atmci_runtime_resume(struct device *dev) +{ + struct atmel_mci *host = dev_get_drvdata(dev); + + pinctrl_select_default_state(dev); + + return clk_prepare_enable(host->mck); +} +#endif + +static const struct dev_pm_ops atmci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL) +}; + +static struct platform_driver atmci_driver = { + .probe = atmci_probe, + .remove = atmci_remove, + .driver = { + .name = "atmel_mci", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(atmci_dt_ids), + .pm = &atmci_dev_pm_ops, + }, +}; 
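+
+/*
+ * module_platform_driver() expands to the usual module_init()/module_exit()
+ * boilerplate that registers and unregisters atmci_driver.
+ */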
+module_platform_driver(atmci_driver);
+
+MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
new file mode 100644
index 000000000..56a3bf51d
--- /dev/null
+++ b/drivers/mmc/host/au1xmmc.c
@@ -0,0 +1,1225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
+ *
+ * Copyright (c) 2005, Advanced Micro Devices, Inc.
+ *
+ * Developed with help from the 2.4.30 MMC AU1XXX controller including
+ * the following copyright notices:
+ *     Copyright (c) 2003-2004 Embedded Edge, LLC.
+ *     Portions Copyright (C) 2002 Embedix, Inc
+ *     Copyright 2002 Hewlett-Packard Company
+
+ * 2.6 version of this driver inspired by:
+ * (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
+ * All Rights Reserved.
+ * (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
+ * All Rights Reserved.
+ *
+
+ */
+
+/* Why don't we use the SD controllers' carddetect feature?
+ *
+ * From the AU1100 MMC application guide:
+ * If the Au1100-based design is intended to support both MultiMediaCards
+ * and 1- or 4-data bit SecureDigital cards, then the solution is to
+ * connect a weak (560KOhm) pull-up resistor to connector pin 1.
+ * In doing so, an MMC card never enters SPI-mode communications,
+ * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
+ * (the low to high transition will not occur).
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <linux/leds.h>
+#include <linux/mmc/host.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+
+#define DRIVER_NAME "au1xxx-mmc"
+
+/* Set this to enable special debugging macros */
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define DBG(fmt, idx, args...)	\
+	pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
+#else
+#define DBG(fmt, idx, args...) do {} while (0)
+#endif
+
+/* Hardware definitions */
+#define AU1XMMC_DESCRIPTOR_COUNT 1
+
+/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
+#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
+#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff
+
+#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
+		     MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
+		     MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
+
+/* This gives us a hard value for the stop command that we can write directly
+ * to the command register.
+ */
+#define STOP_CMD	\
+	(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
+
+/* This is the set of interrupts that we configure by default. */
+#define AU1XMMC_INTERRUPTS \
+	(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
+	 SD_CONFIG_CR | SD_CONFIG_I)
+
+/* The poll event (looking for insert/remove events) runs twice a second.
*/ +#define AU1XMMC_DETECT_TIMEOUT (HZ/2) + +struct au1xmmc_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + + u32 flags; + void __iomem *iobase; + u32 clock; + u32 bus_width; + u32 power_mode; + + int status; + + struct { + int len; + int dir; + } dma; + + struct { + int index; + int offset; + int len; + } pio; + + u32 tx_chan; + u32 rx_chan; + + int irq; + + struct tasklet_struct finish_task; + struct tasklet_struct data_task; + struct au1xmmc_platform_data *platdata; + struct platform_device *pdev; + struct resource *ioarea; + struct clk *clk; +}; + +/* Status flags used by the host structure */ +#define HOST_F_XMIT 0x0001 +#define HOST_F_RECV 0x0002 +#define HOST_F_DMA 0x0010 +#define HOST_F_DBDMA 0x0020 +#define HOST_F_ACTIVE 0x0100 +#define HOST_F_STOP 0x1000 + +#define HOST_S_IDLE 0x0001 +#define HOST_S_CMD 0x0002 +#define HOST_S_DATA 0x0003 +#define HOST_S_STOP 0x0004 + +/* Easy access macros */ +#define HOST_STATUS(h) ((h)->iobase + SD_STATUS) +#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) +#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) +#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) +#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) +#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) +#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) +#define HOST_CMD(h) ((h)->iobase + SD_CMD) +#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) +#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) +#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) + +#define DMA_CHANNEL(h) \ + (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) + +static inline int has_dbdma(void) +{ + switch (alchemy_get_cputype()) { + case ALCHEMY_CPU_AU1200: + case ALCHEMY_CPU_AU1300: + return 1; + default: + return 0; + } +} + +static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) +{ + u32 val = __raw_readl(HOST_CONFIG(host)); + val |= mask; + __raw_writel(val, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ +} + +static inline void FLUSH_FIFO(struct au1xmmc_host *host) +{ + u32 val = __raw_readl(HOST_CONFIG2(host)); + + __raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + mdelay(1); + + /* SEND_STOP will turn off clock control - this re-enables it */ + val &= ~SD_CONFIG2_DF; + + __raw_writel(val, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ +} + +static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask) +{ + u32 val = __raw_readl(HOST_CONFIG(host)); + val &= ~mask; + __raw_writel(val, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ +} + +static inline void SEND_STOP(struct au1xmmc_host *host) +{ + u32 config2; + + WARN_ON(host->status != HOST_S_DATA); + host->status = HOST_S_STOP; + + config2 = __raw_readl(HOST_CONFIG2(host)); + __raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + + /* Send the stop command */ + __raw_writel(STOP_CMD, HOST_CMD(host)); + wmb(); /* drain writebuffer */ +} + +static void au1xmmc_set_power(struct au1xmmc_host *host, int state) +{ + if (host->platdata && host->platdata->set_power) + host->platdata->set_power(host->mmc, state); +} + +static int au1xmmc_card_inserted(struct mmc_host *mmc) +{ + struct au1xmmc_host *host = mmc_priv(mmc); + + if (host->platdata && host->platdata->card_inserted) + return !!host->platdata->card_inserted(host->mmc); + + return -ENOSYS; +} + +static int au1xmmc_card_readonly(struct mmc_host *mmc) +{ + struct au1xmmc_host *host = mmc_priv(mmc); + + if (host->platdata && host->platdata->card_readonly) + return 
!!host->platdata->card_readonly(mmc); + + return -ENOSYS; +} + +static void au1xmmc_finish_request(struct au1xmmc_host *host) +{ + struct mmc_request *mrq = host->mrq; + + host->mrq = NULL; + host->flags &= HOST_F_ACTIVE | HOST_F_DMA; + + host->dma.len = 0; + host->dma.dir = 0; + + host->pio.index = 0; + host->pio.offset = 0; + host->pio.len = 0; + + host->status = HOST_S_IDLE; + + mmc_request_done(host->mmc, mrq); +} + +static void au1xmmc_tasklet_finish(unsigned long param) +{ + struct au1xmmc_host *host = (struct au1xmmc_host *) param; + au1xmmc_finish_request(host); +} + +static int au1xmmc_send_command(struct au1xmmc_host *host, + struct mmc_command *cmd, struct mmc_data *data) +{ + u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + break; + case MMC_RSP_R1: + mmccmd |= SD_CMD_RT_1; + break; + case MMC_RSP_R1B: + mmccmd |= SD_CMD_RT_1B; + break; + case MMC_RSP_R2: + mmccmd |= SD_CMD_RT_2; + break; + case MMC_RSP_R3: + mmccmd |= SD_CMD_RT_3; + break; + default: + pr_info("au1xmmc: unhandled response type %02x\n", + mmc_resp_type(cmd)); + return -EINVAL; + } + + if (data) { + if (data->flags & MMC_DATA_READ) { + if (data->blocks > 1) + mmccmd |= SD_CMD_CT_4; + else + mmccmd |= SD_CMD_CT_2; + } else if (data->flags & MMC_DATA_WRITE) { + if (data->blocks > 1) + mmccmd |= SD_CMD_CT_3; + else + mmccmd |= SD_CMD_CT_1; + } + } + + __raw_writel(cmd->arg, HOST_CMDARG(host)); + wmb(); /* drain writebuffer */ + + __raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host)); + wmb(); /* drain writebuffer */ + + /* Wait for the command to go on the line */ + while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO) + /* nop */; + + return 0; +} + +static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_data *data; + u32 crc; + + WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP)); + + if (host->mrq == NULL) + return; + + data = mrq->cmd->data; + + if (status == 0) + status = __raw_readl(HOST_STATUS(host)); + + /* The transaction is really over when the SD_STATUS_DB bit is clear */ + while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) + status = __raw_readl(HOST_STATUS(host)); + + data->error = 0; + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); + + /* Process any errors */ + crc = (status & (SD_STATUS_WC | SD_STATUS_RC)); + if (host->flags & HOST_F_XMIT) + crc |= ((status & 0x07) == 0x02) ? 
0 : 1; + + if (crc) + data->error = -EILSEQ; + + /* Clear the CRC bits */ + __raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host)); + + data->bytes_xfered = 0; + + if (!data->error) { + if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { + u32 chan = DMA_CHANNEL(host); + + chan_tab_t *c = *((chan_tab_t **)chan); + au1x_dma_chan_t *cp = c->chan_ptr; + data->bytes_xfered = cp->ddma_bytecnt; + } else + data->bytes_xfered = + (data->blocks * data->blksz) - host->pio.len; + } + + au1xmmc_finish_request(host); +} + +static void au1xmmc_tasklet_data(unsigned long param) +{ + struct au1xmmc_host *host = (struct au1xmmc_host *)param; + + u32 status = __raw_readl(HOST_STATUS(host)); + au1xmmc_data_complete(host, status); +} + +#define AU1XMMC_MAX_TRANSFER 8 + +static void au1xmmc_send_pio(struct au1xmmc_host *host) +{ + struct mmc_data *data; + int sg_len, max, count; + unsigned char *sg_ptr, val; + u32 status; + struct scatterlist *sg; + + data = host->mrq->data; + + if (!(host->flags & HOST_F_XMIT)) + return; + + /* This is the pointer to the data buffer */ + sg = &data->sg[host->pio.index]; + sg_ptr = kmap_atomic(sg_page(sg)) + sg->offset + host->pio.offset; + + /* This is the space left inside the buffer */ + sg_len = data->sg[host->pio.index].length - host->pio.offset; + + /* Check if we need less than the size of the sg_buffer */ + max = (sg_len > host->pio.len) ? host->pio.len : sg_len; + if (max > AU1XMMC_MAX_TRANSFER) + max = AU1XMMC_MAX_TRANSFER; + + for (count = 0; count < max; count++) { + status = __raw_readl(HOST_STATUS(host)); + + if (!(status & SD_STATUS_TH)) + break; + + val = sg_ptr[count]; + + __raw_writel((unsigned long)val, HOST_TXPORT(host)); + wmb(); /* drain writebuffer */ + } + kunmap_atomic(sg_ptr); + + host->pio.len -= count; + host->pio.offset += count; + + if (count == sg_len) { + host->pio.index++; + host->pio.offset = 0; + } + + if (host->pio.len == 0) { + IRQ_OFF(host, SD_CONFIG_TH); + + if (host->flags & HOST_F_STOP) + SEND_STOP(host); + + tasklet_schedule(&host->data_task); + } +} + +static void au1xmmc_receive_pio(struct au1xmmc_host *host) +{ + struct mmc_data *data; + int max, count, sg_len = 0; + unsigned char *sg_ptr = NULL; + u32 status, val; + struct scatterlist *sg; + + data = host->mrq->data; + + if (!(host->flags & HOST_F_RECV)) + return; + + max = host->pio.len; + + if (host->pio.index < host->dma.len) { + sg = &data->sg[host->pio.index]; + sg_ptr = kmap_atomic(sg_page(sg)) + sg->offset + host->pio.offset; + + /* This is the space left inside the buffer */ + sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; + + /* Check if we need less than the size of the sg_buffer */ + if (sg_len < max) + max = sg_len; + } + + if (max > AU1XMMC_MAX_TRANSFER) + max = AU1XMMC_MAX_TRANSFER; + + for (count = 0; count < max; count++) { + status = __raw_readl(HOST_STATUS(host)); + + if (!(status & SD_STATUS_NE)) + break; + + if (status & SD_STATUS_RC) { + DBG("RX CRC Error [%d + %d].\n", host->pdev->id, + host->pio.len, count); + break; + } + + if (status & SD_STATUS_RO) { + DBG("RX Overrun [%d + %d]\n", host->pdev->id, + host->pio.len, count); + break; + } + else if (status & SD_STATUS_RU) { + DBG("RX Underrun [%d + %d]\n", host->pdev->id, + host->pio.len, count); + break; + } + + val = __raw_readl(HOST_RXPORT(host)); + + if (sg_ptr) + sg_ptr[count] = (unsigned char)(val & 0xFF); + } + if (sg_ptr) + kunmap_atomic(sg_ptr); + + host->pio.len -= count; + host->pio.offset += count; + + if (sg_len && count == sg_len) { + host->pio.index++; + host->pio.offset 
= 0;
+	}
+
+	if (host->pio.len == 0) {
+		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
+		IRQ_OFF(host, SD_CONFIG_NE);
+
+		if (host->flags & HOST_F_STOP)
+			SEND_STOP(host);
+
+		tasklet_schedule(&host->data_task);
+	}
+}
+
+/* This is called when a command has been completed - grab the response
+ * and check for errors.  Then start the data transfer if it is indicated.
+ */
+static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
+{
+	struct mmc_request *mrq = host->mrq;
+	struct mmc_command *cmd;
+	u32 r[4];
+	int i, trans;
+
+	if (!host->mrq)
+		return;
+
+	cmd = mrq->cmd;
+	cmd->error = 0;
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			r[0] = __raw_readl(host->iobase + SD_RESP3);
+			r[1] = __raw_readl(host->iobase + SD_RESP2);
+			r[2] = __raw_readl(host->iobase + SD_RESP1);
+			r[3] = __raw_readl(host->iobase + SD_RESP0);
+
+			/* The CRC is omitted from the response, so really
+			 * we only got 120 bits, but the engine expects
+			 * 128 bits, so we have to shift things up.
+			 */
+			for (i = 0; i < 4; i++) {
+				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
+				if (i != 3)
+					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
+			}
+		} else {
+			/* Technically, we should be getting all 48 bits of
+			 * the response (SD_RESP1 + SD_RESP2), but because
+			 * our response omits the CRC, our data ends up
+			 * being shifted 8 bits to the right.  In this case,
+			 * that means that the OSR data starts at bit 31,
+			 * so we can just read RESP0 and return that.
+			 */
+			cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
+		}
+	}
+
+	/* Figure out errors */
+	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
+		cmd->error = -EILSEQ;
+
+	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
+
+	if (!trans || cmd->error) {
+		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
+		tasklet_schedule(&host->finish_task);
+		return;
+	}
+
+	host->status = HOST_S_DATA;
+
+	if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
+		u32 channel = DMA_CHANNEL(host);
+
+		/* Start the DBDMA as soon as the buffer gets something in it */
+
+		if (host->flags & HOST_F_RECV) {
+			u32 mask = SD_STATUS_DB | SD_STATUS_NE;
+
+			while ((status & mask) != mask)
+				status = __raw_readl(HOST_STATUS(host));
+		}
+
+		au1xxx_dbdma_start(channel);
+	}
+}
+
+static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
+{
+	unsigned int pbus = clk_get_rate(host->clk);
+	unsigned int divisor = ((pbus / rate) / 2) - 1;
+	u32 config;
+
+	config = __raw_readl(HOST_CONFIG(host));
+
+	config &= ~(SD_CONFIG_DIV);
+	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
+
+	__raw_writel(config, HOST_CONFIG(host));
+	wmb(); /* drain writebuffer */
+}
+
+static int au1xmmc_prepare_data(struct au1xmmc_host *host,
+				struct mmc_data *data)
+{
+	int datalen = data->blocks * data->blksz;
+
+	if (data->flags & MMC_DATA_READ)
+		host->flags |= HOST_F_RECV;
+	else
+		host->flags |= HOST_F_XMIT;
+
+	if (host->mrq->stop)
+		host->flags |= HOST_F_STOP;
+
+	host->dma.dir = DMA_BIDIRECTIONAL;
+
+	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+				   data->sg_len, host->dma.dir);
+
+	if (host->dma.len == 0)
+		return -ETIMEDOUT;
+
+	__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
+
+	if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
+		int i;
+		u32 channel = DMA_CHANNEL(host);
+
+		au1xxx_dbdma_stop(channel);
+
+		for (i = 0; i < host->dma.len; i++) {
+			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
+			struct scatterlist *sg = &data->sg[i];
+			int sg_len = sg->length;
+
+			int len = (datalen > sg_len) ?
sg_len : datalen; + + if (i == host->dma.len - 1) + flags = DDMA_FLAGS_IE; + + if (host->flags & HOST_F_XMIT) { + ret = au1xxx_dbdma_put_source(channel, + sg_phys(sg), len, flags); + } else { + ret = au1xxx_dbdma_put_dest(channel, + sg_phys(sg), len, flags); + } + + if (!ret) + goto dataerr; + + datalen -= len; + } + } else { + host->pio.index = 0; + host->pio.offset = 0; + host->pio.len = datalen; + + if (host->flags & HOST_F_XMIT) + IRQ_ON(host, SD_CONFIG_TH); + else + IRQ_ON(host, SD_CONFIG_NE); + /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */ + } + + return 0; + +dataerr: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + host->dma.dir); + return -ETIMEDOUT; +} + +/* This actually starts a command or data transaction */ +static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) +{ + struct au1xmmc_host *host = mmc_priv(mmc); + int ret = 0; + + WARN_ON(irqs_disabled()); + WARN_ON(host->status != HOST_S_IDLE); + + host->mrq = mrq; + host->status = HOST_S_CMD; + + /* fail request immediately if no card is present */ + if (0 == au1xmmc_card_inserted(mmc)) { + mrq->cmd->error = -ENOMEDIUM; + au1xmmc_finish_request(host); + return; + } + + if (mrq->data) { + FLUSH_FIFO(host); + ret = au1xmmc_prepare_data(host, mrq->data); + } + + if (!ret) + ret = au1xmmc_send_command(host, mrq->cmd, mrq->data); + + if (ret) { + mrq->cmd->error = ret; + au1xmmc_finish_request(host); + } +} + +static void au1xmmc_reset_controller(struct au1xmmc_host *host) +{ + /* Apply the clock */ + __raw_writel(SD_ENABLE_CE, HOST_ENABLE(host)); + wmb(); /* drain writebuffer */ + mdelay(1); + + __raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host)); + wmb(); /* drain writebuffer */ + mdelay(5); + + __raw_writel(~0, HOST_STATUS(host)); + wmb(); /* drain writebuffer */ + + __raw_writel(0, HOST_BLKSIZE(host)); + __raw_writel(0x001fffff, HOST_TIMEOUT(host)); + wmb(); /* drain writebuffer */ + + __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + + __raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + mdelay(1); + + __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + + /* Configure interrupts */ + __raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ +} + + +static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct au1xmmc_host *host = mmc_priv(mmc); + u32 config2; + + if (ios->power_mode == MMC_POWER_OFF) + au1xmmc_set_power(host, 0); + else if (ios->power_mode == MMC_POWER_ON) { + au1xmmc_set_power(host, 1); + } + + if (ios->clock && ios->clock != host->clock) { + au1xmmc_set_clock(host, ios->clock); + host->clock = ios->clock; + } + + config2 = __raw_readl(HOST_CONFIG2(host)); + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + config2 |= SD_CONFIG2_BB; + break; + case MMC_BUS_WIDTH_4: + config2 &= ~SD_CONFIG2_BB; + config2 |= SD_CONFIG2_WB; + break; + case MMC_BUS_WIDTH_1: + config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB); + break; + } + __raw_writel(config2, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ +} + +#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) +#define STATUS_DATA_IN (SD_STATUS_NE) +#define STATUS_DATA_OUT (SD_STATUS_TH) + +static irqreturn_t au1xmmc_irq(int irq, void *dev_id) +{ + struct au1xmmc_host *host = dev_id; + u32 status; + + status = __raw_readl(HOST_STATUS(host)); + + if (!(status & SD_STATUS_I)) + return IRQ_NONE; /* not ours */ + + if (status & SD_STATUS_SI) /* SDIO */ + 
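+		/* The controller only reports the card interrupt here;
+		 * mmc_signal_sdio_irq() wakes the core's SDIO IRQ thread,
+		 * which talks to the card.  Masking/unmasking of the
+		 * SD_CONFIG_SI source is handled in
+		 * au1xmmc_enable_sdio_irq() below.
+		 */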
mmc_signal_sdio_irq(host->mmc); + + if (host->mrq && (status & STATUS_TIMEOUT)) { + if (status & SD_STATUS_RAT) + host->mrq->cmd->error = -ETIMEDOUT; + else if (status & SD_STATUS_DT) + host->mrq->data->error = -ETIMEDOUT; + + /* In PIO mode, interrupts might still be enabled */ + IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); + + /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ + tasklet_schedule(&host->finish_task); + } +#if 0 + else if (status & SD_STATUS_DD) { + /* Sometimes we get a DD before a NE in PIO mode */ + if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE)) + au1xmmc_receive_pio(host); + else { + au1xmmc_data_complete(host, status); + /* tasklet_schedule(&host->data_task); */ + } + } +#endif + else if (status & SD_STATUS_CR) { + if (host->status == HOST_S_CMD) + au1xmmc_cmd_complete(host, status); + + } else if (!(host->flags & HOST_F_DMA)) { + if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT)) + au1xmmc_send_pio(host); + else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN)) + au1xmmc_receive_pio(host); + + } else if (status & 0x203F3C70) { + DBG("Unhandled status %8.8x\n", host->pdev->id, + status); + } + + __raw_writel(status, HOST_STATUS(host)); + wmb(); /* drain writebuffer */ + + return IRQ_HANDLED; +} + +/* 8bit memory DMA device */ +static dbdev_tab_t au1xmmc_mem_dbdev = { + .dev_id = DSCR_CMD0_ALWAYS, + .dev_flags = DEV_FLAGS_ANYUSE, + .dev_tsize = 0, + .dev_devwidth = 8, + .dev_physaddr = 0x00000000, + .dev_intlevel = 0, + .dev_intpolarity = 0, +}; +static int memid; + +static void au1xmmc_dbdma_callback(int irq, void *dev_id) +{ + struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id; + + /* Avoid spurious interrupts */ + if (!host->mrq) + return; + + if (host->flags & HOST_F_STOP) + SEND_STOP(host); + + tasklet_schedule(&host->data_task); +} + +static int au1xmmc_dbdma_init(struct au1xmmc_host *host) +{ + struct resource *res; + int txid, rxid; + + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0); + if (!res) + return -ENODEV; + txid = res->start; + + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1); + if (!res) + return -ENODEV; + rxid = res->start; + + if (!memid) + return -ENODEV; + + host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid, + au1xmmc_dbdma_callback, (void *)host); + if (!host->tx_chan) { + dev_err(&host->pdev->dev, "cannot allocate TX DMA\n"); + return -ENODEV; + } + + host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid, + au1xmmc_dbdma_callback, (void *)host); + if (!host->rx_chan) { + dev_err(&host->pdev->dev, "cannot allocate RX DMA\n"); + au1xxx_dbdma_chan_free(host->tx_chan); + return -ENODEV; + } + + au1xxx_dbdma_set_devwidth(host->tx_chan, 8); + au1xxx_dbdma_set_devwidth(host->rx_chan, 8); + + au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT); + au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); + + /* DBDMA is good to go */ + host->flags |= HOST_F_DMA | HOST_F_DBDMA; + + return 0; +} + +static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) +{ + if (host->flags & HOST_F_DMA) { + host->flags &= ~HOST_F_DMA; + au1xxx_dbdma_chan_free(host->tx_chan); + au1xxx_dbdma_chan_free(host->rx_chan); + } +} + +static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) +{ + struct au1xmmc_host *host = mmc_priv(mmc); + + if (en) + IRQ_ON(host, SD_CONFIG_SI); + else + IRQ_OFF(host, SD_CONFIG_SI); +} + +static const struct mmc_host_ops au1xmmc_ops = { + .request = au1xmmc_request, + .set_ios = au1xmmc_set_ios, + .get_ro = au1xmmc_card_readonly, + 
.get_cd = au1xmmc_card_inserted, + .enable_sdio_irq = au1xmmc_enable_sdio_irq, +}; + +static int au1xmmc_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct au1xmmc_host *host; + struct resource *r; + int ret, iflag; + + mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "no memory for mmc_host\n"); + ret = -ENOMEM; + goto out0; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->platdata = pdev->dev.platform_data; + host->pdev = pdev; + + ret = -ENODEV; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no mmio defined\n"); + goto out1; + } + + host->ioarea = request_mem_region(r->start, resource_size(r), + pdev->name); + if (!host->ioarea) { + dev_err(&pdev->dev, "mmio already in use\n"); + goto out1; + } + + host->iobase = ioremap(r->start, 0x3c); + if (!host->iobase) { + dev_err(&pdev->dev, "cannot remap mmio\n"); + goto out2; + } + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) + goto out3; + + mmc->ops = &au1xmmc_ops; + + mmc->f_min = 450000; + mmc->f_max = 24000000; + + mmc->max_blk_size = 2048; + mmc->max_blk_count = 512; + + mmc->ocr_avail = AU1XMMC_OCR; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; + + iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */ + + switch (alchemy_get_cputype()) { + case ALCHEMY_CPU_AU1100: + mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE; + break; + case ALCHEMY_CPU_AU1200: + mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; + break; + case ALCHEMY_CPU_AU1300: + iflag = 0; /* nothing is shared */ + mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; + mmc->f_max = 52000000; + if (host->ioarea->start == AU1100_SD0_PHYS_ADDR) + mmc->caps |= MMC_CAP_8_BIT_DATA; + break; + } + + ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host); + if (ret) { + dev_err(&pdev->dev, "cannot grab IRQ\n"); + goto out3; + } + + host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "cannot find clock\n"); + ret = PTR_ERR(host->clk); + goto out_irq; + } + + ret = clk_prepare_enable(host->clk); + if (ret) { + dev_err(&pdev->dev, "cannot enable clock\n"); + goto out_clk; + } + + host->status = HOST_S_IDLE; + + /* board-specific carddetect setup, if any */ + if (host->platdata && host->platdata->cd_setup) { + ret = host->platdata->cd_setup(mmc, 1); + if (ret) { + dev_warn(&pdev->dev, "board CD setup failed\n"); + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + } else + mmc->caps |= MMC_CAP_NEEDS_POLL; + + /* platform may not be able to use all advertised caps */ + if (host->platdata) + mmc->caps &= ~(host->platdata->mask_host_caps); + + tasklet_init(&host->data_task, au1xmmc_tasklet_data, + (unsigned long)host); + + tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, + (unsigned long)host); + + if (has_dbdma()) { + ret = au1xmmc_dbdma_init(host); + if (ret) + pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n"); + } + +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) { + struct led_classdev *led = host->platdata->led; + led->name = mmc_hostname(mmc); + led->brightness = LED_OFF; + led->default_trigger = mmc_hostname(mmc); + ret = led_classdev_register(mmc_dev(mmc), led); + if (ret) + goto out5; + } +#endif + + au1xmmc_reset_controller(host); + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "cannot add mmc host\n"); + goto out6; + } + + platform_set_drvdata(pdev, host); + + pr_info(DRIVER_NAME ": MMC 
Controller %d set up at %p" + " (mode=%s)\n", pdev->id, host->iobase, + host->flags & HOST_F_DMA ? "dma" : "pio"); + + return 0; /* all ok */ + +out6: +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) + led_classdev_unregister(host->platdata->led); +out5: +#endif + __raw_writel(0, HOST_ENABLE(host)); + __raw_writel(0, HOST_CONFIG(host)); + __raw_writel(0, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + + if (host->flags & HOST_F_DBDMA) + au1xmmc_dbdma_shutdown(host); + + tasklet_kill(&host->data_task); + tasklet_kill(&host->finish_task); + + if (host->platdata && host->platdata->cd_setup && + !(mmc->caps & MMC_CAP_NEEDS_POLL)) + host->platdata->cd_setup(mmc, 0); + + clk_disable_unprepare(host->clk); +out_clk: + clk_put(host->clk); +out_irq: + free_irq(host->irq, host); +out3: + iounmap((void *)host->iobase); +out2: + release_resource(host->ioarea); + kfree(host->ioarea); +out1: + mmc_free_host(mmc); +out0: + return ret; +} + +static int au1xmmc_remove(struct platform_device *pdev) +{ + struct au1xmmc_host *host = platform_get_drvdata(pdev); + + if (host) { + mmc_remove_host(host->mmc); + +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) + led_classdev_unregister(host->platdata->led); +#endif + + if (host->platdata && host->platdata->cd_setup && + !(host->mmc->caps & MMC_CAP_NEEDS_POLL)) + host->platdata->cd_setup(host->mmc, 0); + + __raw_writel(0, HOST_ENABLE(host)); + __raw_writel(0, HOST_CONFIG(host)); + __raw_writel(0, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + + tasklet_kill(&host->data_task); + tasklet_kill(&host->finish_task); + + if (host->flags & HOST_F_DBDMA) + au1xmmc_dbdma_shutdown(host); + + au1xmmc_set_power(host, 0); + + clk_disable_unprepare(host->clk); + clk_put(host->clk); + + free_irq(host->irq, host); + iounmap((void *)host->iobase); + release_resource(host->ioarea); + kfree(host->ioarea); + + mmc_free_host(host->mmc); + } + return 0; +} + +#ifdef CONFIG_PM +static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct au1xmmc_host *host = platform_get_drvdata(pdev); + + __raw_writel(0, HOST_CONFIG2(host)); + __raw_writel(0, HOST_CONFIG(host)); + __raw_writel(0xffffffff, HOST_STATUS(host)); + __raw_writel(0, HOST_ENABLE(host)); + wmb(); /* drain writebuffer */ + + return 0; +} + +static int au1xmmc_resume(struct platform_device *pdev) +{ + struct au1xmmc_host *host = platform_get_drvdata(pdev); + + au1xmmc_reset_controller(host); + + return 0; +} +#else +#define au1xmmc_suspend NULL +#define au1xmmc_resume NULL +#endif + +static struct platform_driver au1xmmc_driver = { + .probe = au1xmmc_probe, + .remove = au1xmmc_remove, + .suspend = au1xmmc_suspend, + .resume = au1xmmc_resume, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +static int __init au1xmmc_init(void) +{ + if (has_dbdma()) { + /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride + * of 8 bits. And since devices are shared, we need to create + * our own to avoid freaking out other devices. 
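+		 * au1xmmc_mem_dbdev above is that private device: its
+		 * dev_devwidth of 8 matches the 8-bit TX/RX data ports, and
+		 * au1xmmc_dbdma_init() also sets each channel to an 8-bit
+		 * device width via au1xxx_dbdma_set_devwidth().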
+		 */
+		memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
+		if (!memid)
+			pr_err("au1xmmc: cannot add memory dbdma\n");
+	}
+	return platform_driver_register(&au1xmmc_driver);
+}
+
+static void __exit au1xmmc_exit(void)
+{
+	if (has_dbdma() && memid)
+		au1xxx_ddma_del_device(memid);
+
+	platform_driver_unregister(&au1xmmc_driver);
+}
+
+module_init(au1xmmc_init);
+module_exit(au1xmmc_exit);
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:au1xxx-mmc");
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
new file mode 100644
index 000000000..985079943
--- /dev/null
+++ b/drivers/mmc/host/bcm2835.c
@@ -0,0 +1,1486 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcm2835 sdhost driver.
+ *
+ * The 2835 has two SD controllers: the Arasan sdhci controller
+ * (supported by the iproc driver) and a custom sdhost controller
+ * (supported by this driver).
+ *
+ * The sdhci controller supports both sdcard and sdio.  The sdhost
+ * controller supports the sdcard only, but has better performance.
+ * Also note that the rpi3 has sdio wifi, so driving the sdcard with
+ * the sdhost controller leaves the sdhci controller free for wifi
+ * support.
+ *
+ * The configuration is done by devicetree via pin muxing.  Both
+ * SD controllers are available on the same pins (2 pin groups = pin 22
+ * to 27 + pin 48 to 53).  So it's possible to use both SD controllers
+ * at the same time with different pin groups.
+ *
+ * Author:      Phil Elwell
+ * Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
+ *
+ * Based on
+ *  mmc-bcm2835.c by Gellert Weisz
+ * which is, in turn, based on
+ *  sdhci-bcm2708.c by Broadcom
+ *  sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
+ *  sdhci.c and sdhci-pci.c by Pierre Ossman
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/time.h>
+#include <linux/workqueue.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#define SDCMD  0x00 /* Command to SD card              - 16 R/W */
+#define SDARG  0x04 /* Argument to SD card             - 32 R/W */
+#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
+#define SDCDIV 0x0c /* Start value for clock divider   - 11 R/W */
+#define SDRSP0 0x10 /* SD card response (31:0)         - 32 R   */
+#define SDRSP1 0x14 /* SD card response (63:32)        - 32 R   */
+#define SDRSP2 0x18 /* SD card response (95:64)        - 32 R   */
+#define SDRSP3 0x1c /* SD card response (127:96)       - 32 R   */
+#define SDHSTS 0x20 /* SD host status                  - 11 R/W */
+#define SDVDD  0x30 /* SD card power control           -  1 R/W */
+#define SDEDM  0x34 /* Emergency Debug Mode            - 13 R/W */
+#define SDHCFG 0x38 /* Host configuration              -  2 R/W */
+#define SDHBCT 0x3c /* Host byte count (debug)         - 32 R/W */
+#define SDDATA 0x40 /* Data to/from SD card            - 32 R/W */
+#define SDHBLC 0x50 /* Host block count (SDIO/SDHC)    -  9 R/W */
+
+#define SDCMD_NEW_FLAG		0x8000
+#define SDCMD_FAIL_FLAG		0x4000
+#define SDCMD_BUSYWAIT		0x800
+#define SDCMD_NO_RESPONSE	0x400
+#define SDCMD_LONG_RESPONSE	0x200
+#define SDCMD_WRITE_CMD		0x80
+#define SDCMD_READ_CMD		0x40
+#define SDCMD_CMD_MASK		0x3f
+
+#define SDCDIV_MAX_CDIV		0x7ff
+
+#define SDHSTS_BUSY_IRPT	0x400
+#define SDHSTS_BLOCK_IRPT	0x200
+#define SDHSTS_SDIO_IRPT	0x100
+#define SDHSTS_REW_TIME_OUT	0x80
+#define SDHSTS_CMD_TIME_OUT	0x40
+#define SDHSTS_CRC16_ERROR	0x20
+#define SDHSTS_CRC7_ERROR	0x10
+#define SDHSTS_FIFO_ERROR	0x08
+/* Reserved */
+/* Reserved */
+#define SDHSTS_DATA_FLAG	0x01
+
+#define
SDHSTS_TRANSFER_ERROR_MASK (SDHSTS_CRC7_ERROR | \ + SDHSTS_CRC16_ERROR | \ + SDHSTS_REW_TIME_OUT | \ + SDHSTS_FIFO_ERROR) + +#define SDHSTS_ERROR_MASK (SDHSTS_CMD_TIME_OUT | \ + SDHSTS_TRANSFER_ERROR_MASK) + +#define SDHCFG_BUSY_IRPT_EN BIT(10) +#define SDHCFG_BLOCK_IRPT_EN BIT(8) +#define SDHCFG_SDIO_IRPT_EN BIT(5) +#define SDHCFG_DATA_IRPT_EN BIT(4) +#define SDHCFG_SLOW_CARD BIT(3) +#define SDHCFG_WIDE_EXT_BUS BIT(2) +#define SDHCFG_WIDE_INT_BUS BIT(1) +#define SDHCFG_REL_CMD_LINE BIT(0) + +#define SDVDD_POWER_OFF 0 +#define SDVDD_POWER_ON 1 + +#define SDEDM_FORCE_DATA_MODE BIT(19) +#define SDEDM_CLOCK_PULSE BIT(20) +#define SDEDM_BYPASS BIT(21) + +#define SDEDM_WRITE_THRESHOLD_SHIFT 9 +#define SDEDM_READ_THRESHOLD_SHIFT 14 +#define SDEDM_THRESHOLD_MASK 0x1f + +#define SDEDM_FSM_MASK 0xf +#define SDEDM_FSM_IDENTMODE 0x0 +#define SDEDM_FSM_DATAMODE 0x1 +#define SDEDM_FSM_READDATA 0x2 +#define SDEDM_FSM_WRITEDATA 0x3 +#define SDEDM_FSM_READWAIT 0x4 +#define SDEDM_FSM_READCRC 0x5 +#define SDEDM_FSM_WRITECRC 0x6 +#define SDEDM_FSM_WRITEWAIT1 0x7 +#define SDEDM_FSM_POWERDOWN 0x8 +#define SDEDM_FSM_POWERUP 0x9 +#define SDEDM_FSM_WRITESTART1 0xa +#define SDEDM_FSM_WRITESTART2 0xb +#define SDEDM_FSM_GENPULSES 0xc +#define SDEDM_FSM_WRITEWAIT2 0xd +#define SDEDM_FSM_STARTPOWDOWN 0xf + +#define SDDATA_FIFO_WORDS 16 + +#define FIFO_READ_THRESHOLD 4 +#define FIFO_WRITE_THRESHOLD 4 +#define SDDATA_FIFO_PIO_BURST 8 + +#define PIO_THRESHOLD 1 /* Maximum block count for PIO (0 = always DMA) */ + +struct bcm2835_host { + spinlock_t lock; + struct mutex mutex; + + void __iomem *ioaddr; + u32 phys_addr; + + struct platform_device *pdev; + + int clock; /* Current clock speed */ + unsigned int max_clk; /* Max possible freq */ + struct work_struct dma_work; + struct delayed_work timeout_work; /* Timer for timeouts */ + struct sg_mapping_iter sg_miter; /* SG state for PIO */ + unsigned int blocks; /* remaining PIO blocks */ + int irq; /* Device IRQ */ + + u32 ns_per_fifo_word; + + /* cached registers */ + u32 hcfg; + u32 cdiv; + + struct mmc_request *mrq; /* Current request */ + struct mmc_command *cmd; /* Current command */ + struct mmc_data *data; /* Current data request */ + bool data_complete:1;/* Data finished before cmd */ + bool use_busy:1; /* Wait for busy interrupt */ + bool use_sbc:1; /* Send CMD23 */ + + /* for threaded irq handler */ + bool irq_block; + bool irq_busy; + bool irq_data; + + /* DMA part */ + struct dma_chan *dma_chan_rxtx; + struct dma_chan *dma_chan; + struct dma_slave_config dma_cfg_rx; + struct dma_slave_config dma_cfg_tx; + struct dma_async_tx_descriptor *dma_desc; + u32 dma_dir; + u32 drain_words; + struct page *drain_page; + u32 drain_offset; + bool use_dma; +}; + +static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd, + const char *label) +{ + struct device *dev = &host->pdev->dev; + + if (!cmd) + return; + + dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n", + (cmd == host->cmd) ? 
'>' : ' ', + label, cmd->opcode, cmd->arg, cmd->flags, + cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], + cmd->error); +} + +static void bcm2835_dumpregs(struct bcm2835_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct device *dev = &host->pdev->dev; + + if (mrq) { + bcm2835_dumpcmd(host, mrq->sbc, "sbc"); + bcm2835_dumpcmd(host, mrq->cmd, "cmd"); + if (mrq->data) { + dev_dbg(dev, "data blocks %x blksz %x - err %d\n", + mrq->data->blocks, + mrq->data->blksz, + mrq->data->error); + } + bcm2835_dumpcmd(host, mrq->stop, "stop"); + } + + dev_dbg(dev, "=========== REGISTER DUMP ===========\n"); + dev_dbg(dev, "SDCMD 0x%08x\n", readl(host->ioaddr + SDCMD)); + dev_dbg(dev, "SDARG 0x%08x\n", readl(host->ioaddr + SDARG)); + dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT)); + dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV)); + dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0)); + dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1)); + dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2)); + dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3)); + dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS)); + dev_dbg(dev, "SDVDD 0x%08x\n", readl(host->ioaddr + SDVDD)); + dev_dbg(dev, "SDEDM 0x%08x\n", readl(host->ioaddr + SDEDM)); + dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG)); + dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT)); + dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC)); + dev_dbg(dev, "===========================================\n"); +} + +static void bcm2835_reset_internal(struct bcm2835_host *host) +{ + u32 temp; + + writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD); + writel(0, host->ioaddr + SDCMD); + writel(0, host->ioaddr + SDARG); + writel(0xf00000, host->ioaddr + SDTOUT); + writel(0, host->ioaddr + SDCDIV); + writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */ + writel(0, host->ioaddr + SDHCFG); + writel(0, host->ioaddr + SDHBCT); + writel(0, host->ioaddr + SDHBLC); + + /* Limit fifo usage due to silicon bug */ + temp = readl(host->ioaddr + SDEDM); + temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) | + (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT)); + temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) | + (FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT); + writel(temp, host->ioaddr + SDEDM); + msleep(20); + writel(SDVDD_POWER_ON, host->ioaddr + SDVDD); + msleep(20); + host->clock = 0; + writel(host->hcfg, host->ioaddr + SDHCFG); + writel(host->cdiv, host->ioaddr + SDCDIV); +} + +static void bcm2835_reset(struct mmc_host *mmc) +{ + struct bcm2835_host *host = mmc_priv(mmc); + + if (host->dma_chan) + dmaengine_terminate_sync(host->dma_chan); + host->dma_chan = NULL; + bcm2835_reset_internal(host); +} + +static void bcm2835_finish_command(struct bcm2835_host *host); + +static void bcm2835_wait_transfer_complete(struct bcm2835_host *host) +{ + int timediff; + u32 alternate_idle; + + alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ? 
+ SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1; + + timediff = 0; + + while (1) { + u32 edm, fsm; + + edm = readl(host->ioaddr + SDEDM); + fsm = edm & SDEDM_FSM_MASK; + + if ((fsm == SDEDM_FSM_IDENTMODE) || + (fsm == SDEDM_FSM_DATAMODE)) + break; + if (fsm == alternate_idle) { + writel(edm | SDEDM_FORCE_DATA_MODE, + host->ioaddr + SDEDM); + break; + } + + timediff++; + if (timediff == 100000) { + dev_err(&host->pdev->dev, + "wait_transfer_complete - still waiting after %d retries\n", + timediff); + bcm2835_dumpregs(host); + host->mrq->data->error = -ETIMEDOUT; + return; + } + cpu_relax(); + } +} + +static void bcm2835_dma_complete(void *param) +{ + struct bcm2835_host *host = param; + + schedule_work(&host->dma_work); +} + +static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read) +{ + unsigned long flags; + size_t blksize; + unsigned long wait_max; + + blksize = host->data->blksz; + + wait_max = jiffies + msecs_to_jiffies(500); + + local_irq_save(flags); + + while (blksize) { + int copy_words; + u32 hsts = 0; + size_t len; + u32 *buf; + + if (!sg_miter_next(&host->sg_miter)) { + host->data->error = -EINVAL; + break; + } + + len = min(host->sg_miter.length, blksize); + if (len % 4) { + host->data->error = -EINVAL; + break; + } + + blksize -= len; + host->sg_miter.consumed = len; + + buf = (u32 *)host->sg_miter.addr; + + copy_words = len / 4; + + while (copy_words) { + int burst_words, words; + u32 edm; + + burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words); + edm = readl(host->ioaddr + SDEDM); + if (is_read) + words = ((edm >> 4) & 0x1f); + else + words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f); + + if (words < burst_words) { + int fsm_state = (edm & SDEDM_FSM_MASK); + struct device *dev = &host->pdev->dev; + + if ((is_read && + (fsm_state != SDEDM_FSM_READDATA && + fsm_state != SDEDM_FSM_READWAIT && + fsm_state != SDEDM_FSM_READCRC)) || + (!is_read && + (fsm_state != SDEDM_FSM_WRITEDATA && + fsm_state != SDEDM_FSM_WRITESTART1 && + fsm_state != SDEDM_FSM_WRITESTART2))) { + hsts = readl(host->ioaddr + SDHSTS); + dev_err(dev, "fsm %x, hsts %08x\n", + fsm_state, hsts); + if (hsts & SDHSTS_ERROR_MASK) + break; + } + + if (time_after(jiffies, wait_max)) { + dev_err(dev, "PIO %s timeout - EDM %08x\n", + is_read ? "read" : "write", + edm); + hsts = SDHSTS_REW_TIME_OUT; + break; + } + ndelay((burst_words - words) * + host->ns_per_fifo_word); + continue; + } else if (words > copy_words) { + words = copy_words; + } + + copy_words -= words; + + while (words) { + if (is_read) + *(buf++) = readl(host->ioaddr + SDDATA); + else + writel(*(buf++), host->ioaddr + SDDATA); + words--; + } + } + + if (hsts & SDHSTS_ERROR_MASK) + break; + } + + sg_miter_stop(&host->sg_miter); + + local_irq_restore(flags); +} + +static void bcm2835_transfer_pio(struct bcm2835_host *host) +{ + struct device *dev = &host->pdev->dev; + u32 sdhsts; + bool is_read; + + is_read = (host->data->flags & MMC_DATA_READ) != 0; + bcm2835_transfer_block_pio(host, is_read); + + sdhsts = readl(host->ioaddr + SDHSTS); + if (sdhsts & (SDHSTS_CRC16_ERROR | + SDHSTS_CRC7_ERROR | + SDHSTS_FIFO_ERROR)) { + dev_err(dev, "%s transfer error - HSTS %08x\n", + is_read ? "read" : "write", sdhsts); + host->data->error = -EILSEQ; + } else if ((sdhsts & (SDHSTS_CMD_TIME_OUT | + SDHSTS_REW_TIME_OUT))) { + dev_err(dev, "%s timeout error - HSTS %08x\n", + is_read ? 
"read" : "write", sdhsts); + host->data->error = -ETIMEDOUT; + } +} + +static +void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data) +{ + int sg_len, dir_data, dir_slave; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *dma_chan; + + dma_chan = host->dma_chan_rxtx; + if (data->flags & MMC_DATA_READ) { + dir_data = DMA_FROM_DEVICE; + dir_slave = DMA_DEV_TO_MEM; + } else { + dir_data = DMA_TO_DEVICE; + dir_slave = DMA_MEM_TO_DEV; + } + + /* The block doesn't manage the FIFO DREQs properly for + * multi-block transfers, so don't attempt to DMA the final + * few words. Unfortunately this requires the final sg entry + * to be trimmed. N.B. This code demands that the overspill + * is contained in a single sg entry. + */ + + host->drain_words = 0; + if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) { + struct scatterlist *sg; + u32 len; + int i; + + len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4, + (u32)data->blocks * data->blksz); + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg_is_last(sg)) { + WARN_ON(sg->length < len); + sg->length -= len; + host->drain_page = sg_page(sg); + host->drain_offset = sg->offset + sg->length; + } + } + host->drain_words = len / 4; + } + + /* The parameters have already been validated, so this will not fail */ + (void)dmaengine_slave_config(dma_chan, + (dir_data == DMA_FROM_DEVICE) ? + &host->dma_cfg_rx : + &host->dma_cfg_tx); + + sg_len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len, + dir_data); + if (!sg_len) + return; + + desc = dmaengine_prep_slave_sg(dma_chan, data->sg, sg_len, dir_slave, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (!desc) { + dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data); + return; + } + + desc->callback = bcm2835_dma_complete; + desc->callback_param = host; + host->dma_desc = desc; + host->dma_chan = dma_chan; + host->dma_dir = dir_data; +} + +static void bcm2835_start_dma(struct bcm2835_host *host) +{ + dmaengine_submit(host->dma_desc); + dma_async_issue_pending(host->dma_chan); +} + +static void bcm2835_set_transfer_irqs(struct bcm2835_host *host) +{ + u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN | + SDHCFG_BUSY_IRPT_EN; + + if (host->dma_desc) { + host->hcfg = (host->hcfg & ~all_irqs) | + SDHCFG_BUSY_IRPT_EN; + } else { + host->hcfg = (host->hcfg & ~all_irqs) | + SDHCFG_DATA_IRPT_EN | + SDHCFG_BUSY_IRPT_EN; + } + + writel(host->hcfg, host->ioaddr + SDHCFG); +} + +static +void bcm2835_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + WARN_ON(host->data); + + host->data = data; + if (!data) + return; + + host->data_complete = false; + host->data->bytes_xfered = 0; + + if (!host->dma_desc) { + /* Use PIO */ + int flags = SG_MITER_ATOMIC; + + if (data->flags & MMC_DATA_READ) + flags |= SG_MITER_TO_SG; + else + flags |= SG_MITER_FROM_SG; + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); + host->blocks = data->blocks; + } + + bcm2835_set_transfer_irqs(host); + + writel(data->blksz, host->ioaddr + SDHBCT); + writel(data->blocks, host->ioaddr + SDHBLC); +} + +static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms) +{ + struct device *dev = &host->pdev->dev; + u32 value; + int ret; + + ret = readl_poll_timeout(host->ioaddr + SDCMD, value, + !(value & SDCMD_NEW_FLAG), 1, 10); + if (ret == -ETIMEDOUT) + /* if it takes a while make poll interval bigger */ + ret = readl_poll_timeout(host->ioaddr + SDCMD, value, + !(value & SDCMD_NEW_FLAG), + 10, max_ms * 1000); + if 
(ret == -ETIMEDOUT) + dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms); + + return value; +} + +static void bcm2835_finish_request(struct bcm2835_host *host) +{ + struct dma_chan *terminate_chan = NULL; + struct mmc_request *mrq; + + cancel_delayed_work(&host->timeout_work); + + mrq = host->mrq; + + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + + host->dma_desc = NULL; + terminate_chan = host->dma_chan; + host->dma_chan = NULL; + + if (terminate_chan) { + int err = dmaengine_terminate_all(terminate_chan); + + if (err) + dev_err(&host->pdev->dev, + "failed to terminate DMA (%d)\n", err); + } + + mmc_request_done(mmc_from_priv(host), mrq); +} + +static +bool bcm2835_send_command(struct bcm2835_host *host, struct mmc_command *cmd) +{ + struct device *dev = &host->pdev->dev; + u32 sdcmd, sdhsts; + unsigned long timeout; + + WARN_ON(host->cmd); + + sdcmd = bcm2835_read_wait_sdcmd(host, 100); + if (sdcmd & SDCMD_NEW_FLAG) { + dev_err(dev, "previous command never completed.\n"); + bcm2835_dumpregs(host); + cmd->error = -EILSEQ; + bcm2835_finish_request(host); + return false; + } + + if (!cmd->data && cmd->busy_timeout > 9000) + timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; + else + timeout = 10 * HZ; + schedule_delayed_work(&host->timeout_work, timeout); + + host->cmd = cmd; + + /* Clear any error flags */ + sdhsts = readl(host->ioaddr + SDHSTS); + if (sdhsts & SDHSTS_ERROR_MASK) + writel(sdhsts, host->ioaddr + SDHSTS); + + if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { + dev_err(dev, "unsupported response type!\n"); + cmd->error = -EINVAL; + bcm2835_finish_request(host); + return false; + } + + bcm2835_prepare_data(host, cmd); + + writel(cmd->arg, host->ioaddr + SDARG); + + sdcmd = cmd->opcode & SDCMD_CMD_MASK; + + host->use_busy = false; + if (!(cmd->flags & MMC_RSP_PRESENT)) { + sdcmd |= SDCMD_NO_RESPONSE; + } else { + if (cmd->flags & MMC_RSP_136) + sdcmd |= SDCMD_LONG_RESPONSE; + if (cmd->flags & MMC_RSP_BUSY) { + sdcmd |= SDCMD_BUSYWAIT; + host->use_busy = true; + } + } + + if (cmd->data) { + if (cmd->data->flags & MMC_DATA_WRITE) + sdcmd |= SDCMD_WRITE_CMD; + if (cmd->data->flags & MMC_DATA_READ) + sdcmd |= SDCMD_READ_CMD; + } + + writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD); + + return true; +} + +static void bcm2835_transfer_complete(struct bcm2835_host *host) +{ + struct mmc_data *data; + + WARN_ON(!host->data_complete); + + data = host->data; + host->data = NULL; + + /* Need to send CMD12 if - + * a) open-ended multiblock transfer (no CMD23) + * b) error in multiblock transfer + */ + if (host->mrq->stop && (data->error || !host->use_sbc)) { + if (bcm2835_send_command(host, host->mrq->stop)) { + /* No busy, so poll for completion */ + if (!host->use_busy) + bcm2835_finish_command(host); + } + } else { + bcm2835_wait_transfer_complete(host); + bcm2835_finish_request(host); + } +} + +static void bcm2835_finish_data(struct bcm2835_host *host) +{ + struct device *dev = &host->pdev->dev; + struct mmc_data *data; + + data = host->data; + + host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN); + writel(host->hcfg, host->ioaddr + SDHCFG); + + data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks); + + host->data_complete = true; + + if (host->cmd) { + /* Data managed to finish before the + * command completed. Make sure we do + * things in the proper order. 
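+		 * host->data_complete is already set, so when the busy
+		 * interrupt later completes the command,
+		 * bcm2835_finish_command() will see the flag and call
+		 * bcm2835_transfer_complete() itself; the request is
+		 * finished exactly once either way.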
+ */ + dev_dbg(dev, "Finished early - HSTS %08x\n", + readl(host->ioaddr + SDHSTS)); + } else { + bcm2835_transfer_complete(host); + } +} + +static void bcm2835_finish_command(struct bcm2835_host *host) +{ + struct device *dev = &host->pdev->dev; + struct mmc_command *cmd = host->cmd; + u32 sdcmd; + + sdcmd = bcm2835_read_wait_sdcmd(host, 100); + + /* Check for errors */ + if (sdcmd & SDCMD_NEW_FLAG) { + dev_err(dev, "command never completed.\n"); + bcm2835_dumpregs(host); + host->cmd->error = -EIO; + bcm2835_finish_request(host); + return; + } else if (sdcmd & SDCMD_FAIL_FLAG) { + u32 sdhsts = readl(host->ioaddr + SDHSTS); + + /* Clear the errors */ + writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS); + + if (!(sdhsts & SDHSTS_CRC7_ERROR) || + (host->cmd->opcode != MMC_SEND_OP_COND)) { + u32 edm, fsm; + + if (sdhsts & SDHSTS_CMD_TIME_OUT) { + host->cmd->error = -ETIMEDOUT; + } else { + dev_err(dev, "unexpected command %d error\n", + host->cmd->opcode); + bcm2835_dumpregs(host); + host->cmd->error = -EILSEQ; + } + edm = readl(host->ioaddr + SDEDM); + fsm = edm & SDEDM_FSM_MASK; + if (fsm == SDEDM_FSM_READWAIT || + fsm == SDEDM_FSM_WRITESTART1) + /* Kick the FSM out of its wait */ + writel(edm | SDEDM_FORCE_DATA_MODE, + host->ioaddr + SDEDM); + bcm2835_finish_request(host); + return; + } + } + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + int i; + + for (i = 0; i < 4; i++) { + cmd->resp[3 - i] = + readl(host->ioaddr + SDRSP0 + i * 4); + } + } else { + cmd->resp[0] = readl(host->ioaddr + SDRSP0); + } + } + + if (cmd == host->mrq->sbc) { + /* Finished CMD23, now send actual command. */ + host->cmd = NULL; + if (bcm2835_send_command(host, host->mrq->cmd)) { + if (host->data && host->dma_desc) + /* DMA transfer starts now, PIO starts + * after irq + */ + bcm2835_start_dma(host); + + if (!host->use_busy) + bcm2835_finish_command(host); + } + } else if (cmd == host->mrq->stop) { + /* Finished CMD12 */ + bcm2835_finish_request(host); + } else { + /* Processed actual command. 
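+		 * Three possible outcomes from here: no data phase, so
+		 * finish the request now; data already complete, so run
+		 * the transfer completion; otherwise leave completion to
+		 * the data/block interrupt path.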
*/ + host->cmd = NULL; + if (!host->data) + bcm2835_finish_request(host); + else if (host->data_complete) + bcm2835_transfer_complete(host); + } +} + +static void bcm2835_timeout(struct work_struct *work) +{ + struct delayed_work *d = to_delayed_work(work); + struct bcm2835_host *host = + container_of(d, struct bcm2835_host, timeout_work); + struct device *dev = &host->pdev->dev; + + mutex_lock(&host->mutex); + + if (host->mrq) { + dev_err(dev, "timeout waiting for hardware interrupt.\n"); + bcm2835_dumpregs(host); + + bcm2835_reset(mmc_from_priv(host)); + + if (host->data) { + host->data->error = -ETIMEDOUT; + bcm2835_finish_data(host); + } else { + if (host->cmd) + host->cmd->error = -ETIMEDOUT; + else + host->mrq->cmd->error = -ETIMEDOUT; + + bcm2835_finish_request(host); + } + } + + mutex_unlock(&host->mutex); +} + +static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask) +{ + struct device *dev = &host->pdev->dev; + + if (!(intmask & SDHSTS_ERROR_MASK)) + return false; + + if (!host->cmd) + return true; + + dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask); + if (intmask & SDHSTS_CRC7_ERROR) { + host->cmd->error = -EILSEQ; + } else if (intmask & (SDHSTS_CRC16_ERROR | + SDHSTS_FIFO_ERROR)) { + if (host->mrq->data) + host->mrq->data->error = -EILSEQ; + else + host->cmd->error = -EILSEQ; + } else if (intmask & SDHSTS_REW_TIME_OUT) { + if (host->mrq->data) + host->mrq->data->error = -ETIMEDOUT; + else + host->cmd->error = -ETIMEDOUT; + } else if (intmask & SDHSTS_CMD_TIME_OUT) { + host->cmd->error = -ETIMEDOUT; + } + bcm2835_dumpregs(host); + return true; +} + +static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask) +{ + if (!host->data) + return; + if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR)) + host->data->error = -EILSEQ; + if (intmask & SDHSTS_REW_TIME_OUT) + host->data->error = -ETIMEDOUT; +} + +static void bcm2835_busy_irq(struct bcm2835_host *host) +{ + if (WARN_ON(!host->cmd)) { + bcm2835_dumpregs(host); + return; + } + + if (WARN_ON(!host->use_busy)) { + bcm2835_dumpregs(host); + return; + } + host->use_busy = false; + + bcm2835_finish_command(host); +} + +static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask) +{ + /* There are no dedicated data/space available interrupt + * status bits, so it is necessary to use the single shared + * data/space available FIFO status bits. It is therefore not + * an error to get here when there is no data transfer in + * progress. 
+ */ + if (!host->data) + return; + + bcm2835_check_data_error(host, intmask); + if (host->data->error) + goto finished; + + if (host->data->flags & MMC_DATA_WRITE) { + /* Use the block interrupt for writes after the first block */ + host->hcfg &= ~(SDHCFG_DATA_IRPT_EN); + host->hcfg |= SDHCFG_BLOCK_IRPT_EN; + writel(host->hcfg, host->ioaddr + SDHCFG); + bcm2835_transfer_pio(host); + } else { + bcm2835_transfer_pio(host); + host->blocks--; + if ((host->blocks == 0) || host->data->error) + goto finished; + } + return; + +finished: + host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN); + writel(host->hcfg, host->ioaddr + SDHCFG); +} + +static void bcm2835_data_threaded_irq(struct bcm2835_host *host) +{ + if (!host->data) + return; + if ((host->blocks == 0) || host->data->error) + bcm2835_finish_data(host); +} + +static void bcm2835_block_irq(struct bcm2835_host *host) +{ + if (WARN_ON(!host->data)) { + bcm2835_dumpregs(host); + return; + } + + if (!host->dma_desc) { + WARN_ON(!host->blocks); + if (host->data->error || (--host->blocks == 0)) + bcm2835_finish_data(host); + else + bcm2835_transfer_pio(host); + } else if (host->data->flags & MMC_DATA_WRITE) { + bcm2835_finish_data(host); + } +} + +static irqreturn_t bcm2835_irq(int irq, void *dev_id) +{ + irqreturn_t result = IRQ_NONE; + struct bcm2835_host *host = dev_id; + u32 intmask; + + spin_lock(&host->lock); + + intmask = readl(host->ioaddr + SDHSTS); + + writel(SDHSTS_BUSY_IRPT | + SDHSTS_BLOCK_IRPT | + SDHSTS_SDIO_IRPT | + SDHSTS_DATA_FLAG, + host->ioaddr + SDHSTS); + + if (intmask & SDHSTS_BLOCK_IRPT) { + bcm2835_check_data_error(host, intmask); + host->irq_block = true; + result = IRQ_WAKE_THREAD; + } + + if (intmask & SDHSTS_BUSY_IRPT) { + if (!bcm2835_check_cmd_error(host, intmask)) { + host->irq_busy = true; + result = IRQ_WAKE_THREAD; + } else { + result = IRQ_HANDLED; + } + } + + /* There is no true data interrupt status bit, so it is + * necessary to qualify the data flag with the interrupt + * enable bit. 
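+	 * SDHSTS_DATA_FLAG doubles as the FIFO data/space indicator, so
+	 * without the SDHCFG_DATA_IRPT_EN check every transfer with a
+	 * non-empty FIFO would wrongly be steered down the PIO path.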
+	 */
+	if ((intmask & SDHSTS_DATA_FLAG) &&
+	    (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
+		bcm2835_data_irq(host, intmask);
+		host->irq_data = true;
+		result = IRQ_WAKE_THREAD;
+	}
+
+	spin_unlock(&host->lock);
+
+	return result;
+}
+
+static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
+{
+	struct bcm2835_host *host = dev_id;
+	unsigned long flags;
+	bool block, busy, data;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	block = host->irq_block;
+	busy = host->irq_busy;
+	data = host->irq_data;
+	host->irq_block = false;
+	host->irq_busy = false;
+	host->irq_data = false;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	mutex_lock(&host->mutex);
+
+	if (block)
+		bcm2835_block_irq(host);
+	if (busy)
+		bcm2835_busy_irq(host);
+	if (data)
+		bcm2835_data_threaded_irq(host);
+
+	mutex_unlock(&host->mutex);
+
+	return IRQ_HANDLED;
+}
+
+static void bcm2835_dma_complete_work(struct work_struct *work)
+{
+	struct bcm2835_host *host =
+		container_of(work, struct bcm2835_host, dma_work);
+	struct mmc_data *data;
+
+	mutex_lock(&host->mutex);
+
+	data = host->data;
+
+	if (host->dma_chan) {
+		dma_unmap_sg(host->dma_chan->device->dev,
+			     data->sg, data->sg_len,
+			     host->dma_dir);
+
+		host->dma_chan = NULL;
+	}
+
+	if (host->drain_words) {
+		unsigned long flags;
+		void *page;
+		u32 *buf;
+
+		if (host->drain_offset & PAGE_MASK) {
+			host->drain_page += host->drain_offset >> PAGE_SHIFT;
+			host->drain_offset &= ~PAGE_MASK;
+		}
+		local_irq_save(flags);
+		page = kmap_atomic(host->drain_page);
+		buf = page + host->drain_offset;
+
+		while (host->drain_words) {
+			u32 edm = readl(host->ioaddr + SDEDM);
+
+			if ((edm >> 4) & 0x1f)
+				*(buf++) = readl(host->ioaddr + SDDATA);
+			host->drain_words--;
+		}
+
+		kunmap_atomic(page);
+		local_irq_restore(flags);
+	}
+
+	bcm2835_finish_data(host);
+
+	mutex_unlock(&host->mutex);
+}
+
+static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
+{
+	struct mmc_host *mmc = mmc_from_priv(host);
+	int div;
+
+	/* The SDCDIV register has 11 bits, and holds (div - 2).  But
+	 * in data mode the max is 50MHz without a minimum, and only
+	 * the bottom 3 bits are used.  Since the switch over is
+	 * automatic (unless we have marked the card as slow...),
+	 * chosen values have to make sense in both modes.  Ident mode
+	 * must be 100-400KHz, so can range check the requested
+	 * clock.  CMD15 must be used to return to data mode, so this
+	 * can be monitored.
+	 *
+	 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
+	 *                 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
+	 *
+	 *               623->400KHz/27.8MHz
+	 *               reset value (507)->491159/50MHz
+	 *
+	 * BUT, the 3-bit clock divisor in data mode is too small if
+	 * the core clock is higher than 250MHz, so instead use the
+	 * SLOW_CARD configuration bit to force the use of the ident
+	 * clock divisor at all times.
+	 */
+
+	if (clock < 100000) {
+		/* Can't stop the clock, but make it as slow as possible
+		 * to show willing
+		 */
+		host->cdiv = SDCDIV_MAX_CDIV;
+		writel(host->cdiv, host->ioaddr + SDCDIV);
+		return;
+	}
+
+	div = host->max_clk / clock;
+	if (div < 2)
+		div = 2;
+	if ((host->max_clk / div) > clock)
+		div++;
+	div -= 2;
+
+	if (div > SDCDIV_MAX_CDIV)
+		div = SDCDIV_MAX_CDIV;
+
+	clock = host->max_clk / (div + 2);
+	mmc->actual_clock = clock;
+
+	/* Calibrate some delays */
+
+	host->ns_per_fifo_word = (1000000000 / clock) *
+		((mmc->caps & MMC_CAP_4_BIT_DATA) ?
8 : 32); + + host->cdiv = div; + writel(host->cdiv, host->ioaddr + SDCDIV); + + /* Set the timeout to 500ms */ + writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT); +} + +static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct bcm2835_host *host = mmc_priv(mmc); + struct device *dev = &host->pdev->dev; + u32 edm, fsm; + + /* Reset the error statuses in case this is a retry */ + if (mrq->sbc) + mrq->sbc->error = 0; + if (mrq->cmd) + mrq->cmd->error = 0; + if (mrq->data) + mrq->data->error = 0; + if (mrq->stop) + mrq->stop->error = 0; + + if (mrq->data && !is_power_of_2(mrq->data->blksz)) { + dev_err(dev, "unsupported block size (%d bytes)\n", + mrq->data->blksz); + + if (mrq->cmd) + mrq->cmd->error = -EINVAL; + + mmc_request_done(mmc, mrq); + return; + } + + mutex_lock(&host->mutex); + + WARN_ON(host->mrq); + host->mrq = mrq; + + edm = readl(host->ioaddr + SDEDM); + fsm = edm & SDEDM_FSM_MASK; + + if ((fsm != SDEDM_FSM_IDENTMODE) && + (fsm != SDEDM_FSM_DATAMODE)) { + dev_err(dev, "previous command (%d) not complete (EDM %08x)\n", + readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK, + edm); + bcm2835_dumpregs(host); + + if (mrq->cmd) + mrq->cmd->error = -EILSEQ; + + bcm2835_finish_request(host); + mutex_unlock(&host->mutex); + return; + } + + if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD)) + bcm2835_prepare_dma(host, mrq->data); + + host->use_sbc = !!mrq->sbc && host->mrq->data && + (host->mrq->data->flags & MMC_DATA_READ); + if (host->use_sbc) { + if (bcm2835_send_command(host, mrq->sbc)) { + if (!host->use_busy) + bcm2835_finish_command(host); + } + } else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) { + if (host->data && host->dma_desc) { + /* DMA transfer starts now, PIO starts after irq */ + bcm2835_start_dma(host); + } + + if (!host->use_busy) + bcm2835_finish_command(host); + } + + mutex_unlock(&host->mutex); +} + +static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct bcm2835_host *host = mmc_priv(mmc); + + mutex_lock(&host->mutex); + + if (!ios->clock || ios->clock != host->clock) { + bcm2835_set_clock(host, ios->clock); + host->clock = ios->clock; + } + + /* set bus width */ + host->hcfg &= ~SDHCFG_WIDE_EXT_BUS; + if (ios->bus_width == MMC_BUS_WIDTH_4) + host->hcfg |= SDHCFG_WIDE_EXT_BUS; + + host->hcfg |= SDHCFG_WIDE_INT_BUS; + + /* Disable clever clock switching, to cope with fast core clocks */ + host->hcfg |= SDHCFG_SLOW_CARD; + + writel(host->hcfg, host->ioaddr + SDHCFG); + + mutex_unlock(&host->mutex); +} + +static const struct mmc_host_ops bcm2835_ops = { + .request = bcm2835_request, + .set_ios = bcm2835_set_ios, + .hw_reset = bcm2835_reset, +}; + +static int bcm2835_add_host(struct bcm2835_host *host) +{ + struct mmc_host *mmc = mmc_from_priv(host); + struct device *dev = &host->pdev->dev; + char pio_limit_string[20]; + int ret; + + if (!mmc->f_max || mmc->f_max > host->max_clk) + mmc->f_max = host->max_clk; + mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; + + mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); + + dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n", + mmc->f_max, mmc->f_min, mmc->max_busy_timeout); + + /* host controller capabilities */ + mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_CMD23; + + spin_lock_init(&host->lock); + mutex_init(&host->mutex); + + if (!host->dma_chan_rxtx) { + dev_warn(dev, "unable to initialise DMA channel. 
Falling back to PIO\n"); + host->use_dma = false; + } else { + host->use_dma = true; + + host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_cfg_tx.slave_id = 13; /* DREQ channel */ + host->dma_cfg_tx.direction = DMA_MEM_TO_DEV; + host->dma_cfg_tx.src_addr = 0; + host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA; + + host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_cfg_rx.slave_id = 13; /* DREQ channel */ + host->dma_cfg_rx.direction = DMA_DEV_TO_MEM; + host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA; + host->dma_cfg_rx.dst_addr = 0; + + if (dmaengine_slave_config(host->dma_chan_rxtx, + &host->dma_cfg_tx) != 0 || + dmaengine_slave_config(host->dma_chan_rxtx, + &host->dma_cfg_rx) != 0) + host->use_dma = false; + } + + mmc->max_segs = 128; + mmc->max_req_size = min_t(size_t, 524288, dma_max_mapping_size(dev)); + mmc->max_seg_size = mmc->max_req_size; + mmc->max_blk_size = 1024; + mmc->max_blk_count = 65535; + + /* report supported voltage ranges */ + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + INIT_WORK(&host->dma_work, bcm2835_dma_complete_work); + INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout); + + /* Set interrupt enables */ + host->hcfg = SDHCFG_BUSY_IRPT_EN; + + bcm2835_reset_internal(host); + + ret = request_threaded_irq(host->irq, bcm2835_irq, + bcm2835_threaded_irq, + 0, mmc_hostname(mmc), host); + if (ret) { + dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret); + return ret; + } + + ret = mmc_add_host(mmc); + if (ret) { + free_irq(host->irq, host); + return ret; + } + + pio_limit_string[0] = '\0'; + if (host->use_dma && (PIO_THRESHOLD > 0)) + sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD); + dev_info(dev, "loaded - DMA %s%s\n", + host->use_dma ? "enabled" : "disabled", pio_limit_string); + + return 0; +} + +static int bcm2835_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct clk *clk; + struct bcm2835_host *host; + struct mmc_host *mmc; + const __be32 *regaddr_p; + int ret; + + dev_dbg(dev, "%s\n", __func__); + mmc = mmc_alloc_host(sizeof(*host), dev); + if (!mmc) + return -ENOMEM; + + mmc->ops = &bcm2835_ops; + host = mmc_priv(mmc); + host->pdev = pdev; + spin_lock_init(&host->lock); + + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->ioaddr)) { + ret = PTR_ERR(host->ioaddr); + goto err; + } + + /* Parse OF address directly to get the physical address for + * DMA to our registers. 
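+	 * devm_platform_ioremap_resource() above only yields a CPU
+	 * virtual address; the slave DMA configuration in
+	 * bcm2835_add_host() needs the bus-visible address of the SDDATA
+	 * register, which is derived from this phys_addr.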
+ */ + regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL); + if (!regaddr_p) { + dev_err(dev, "Can't get phys address\n"); + ret = -EINVAL; + goto err; + } + + host->phys_addr = be32_to_cpup(regaddr_p); + + host->dma_chan = NULL; + host->dma_desc = NULL; + + host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx"); + if (IS_ERR(host->dma_chan_rxtx)) { + ret = PTR_ERR(host->dma_chan_rxtx); + host->dma_chan_rxtx = NULL; + + if (ret == -EPROBE_DEFER) + goto err; + + /* Ignore errors to fall back to PIO mode */ + } + + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + ret = dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n"); + goto err; + } + + host->max_clk = clk_get_rate(clk); + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + ret = host->irq; + goto err; + } + + ret = mmc_of_parse(mmc); + if (ret) + goto err; + + ret = bcm2835_add_host(host); + if (ret) + goto err; + + platform_set_drvdata(pdev, host); + + dev_dbg(dev, "%s -> OK\n", __func__); + + return 0; + +err: + dev_dbg(dev, "%s -> err %d\n", __func__, ret); + if (host->dma_chan_rxtx) + dma_release_channel(host->dma_chan_rxtx); + mmc_free_host(mmc); + + return ret; +} + +static int bcm2835_remove(struct platform_device *pdev) +{ + struct bcm2835_host *host = platform_get_drvdata(pdev); + struct mmc_host *mmc = mmc_from_priv(host); + + mmc_remove_host(mmc); + + writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD); + + free_irq(host->irq, host); + + cancel_work_sync(&host->dma_work); + cancel_delayed_work_sync(&host->timeout_work); + + if (host->dma_chan_rxtx) + dma_release_channel(host->dma_chan_rxtx); + + mmc_free_host(mmc); + + return 0; +} + +static const struct of_device_id bcm2835_match[] = { + { .compatible = "brcm,bcm2835-sdhost" }, + { } +}; +MODULE_DEVICE_TABLE(of, bcm2835_match); + +static struct platform_driver bcm2835_driver = { + .probe = bcm2835_probe, + .remove = bcm2835_remove, + .driver = { + .name = "sdhost-bcm2835", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = bcm2835_match, + }, +}; +module_platform_driver(bcm2835_driver); + +MODULE_ALIAS("platform:sdhost-bcm2835"); +MODULE_DESCRIPTION("BCM2835 SDHost driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Phil Elwell"); diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c new file mode 100644 index 000000000..12dca91a8 --- /dev/null +++ b/drivers/mmc/host/cavium-octeon.c @@ -0,0 +1,340 @@ +/* + * Driver for MMC and SSD cards for Cavium OCTEON SOCs. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012-2017 Cavium Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "cavium.h" + +#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull) + +/* + * The l2c* functions below are used for the EMMC-17978 workaround. + * + * Due to a bug in the design of the MMC bus hardware, the 2nd to last + * cache block of a DMA read must be locked into the L2 Cache. + * Otherwise, data corruption may occur. + */ +static inline void *phys_to_ptr(u64 address) +{ + return (void *)(address | (1ull << 63)); /* XKPHYS */ +} + +/* + * Lock a single line into L2. The line is zeroed before locking + * to make sure no dram accesses are made. 
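Before the locking helpers below, the arithmetic of this workaround is worth making concrete: the pinned window is derived purely from the DMA address and the transfer length. A standalone sketch in plain C (example values only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x80000000ULL;		/* example DMA address */
		uint64_t total = 512ULL * 8;		/* 8 blocks of 512 bytes */

		/* same computation as octeon_mmc_dmar_fixup() below: pin 512
		 * bytes starting 1024 bytes before the end of the transfer */
		uint64_t n_minus_one = addr + total - 1024;

		printf("lock [%#llx, %#llx)\n",
		       (unsigned long long)n_minus_one,
		       (unsigned long long)(n_minus_one + 512));
		return 0;
	}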
+ */ +static void l2c_lock_line(u64 addr) +{ + char *addr_ptr = phys_to_ptr(addr); + + asm volatile ( + "cache 31, %[line]" /* Lock the line */ + ::[line] "m" (*addr_ptr)); +} + +/* Unlock a single line in the L2 cache. */ +static void l2c_unlock_line(u64 addr) +{ + char *addr_ptr = phys_to_ptr(addr); + + asm volatile ( + "cache 23, %[line]" /* Unlock the line */ + ::[line] "m" (*addr_ptr)); +} + +/* Lock a memory region in the L2 cache. */ +static void l2c_lock_mem_region(u64 start, u64 len) +{ + u64 end; + + /* Round start/end to cache line boundaries */ + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE); + start = ALIGN(start, CVMX_CACHE_LINE_SIZE); + + while (start <= end) { + l2c_lock_line(start); + start += CVMX_CACHE_LINE_SIZE; + } + asm volatile("sync"); +} + +/* Unlock a memory region in the L2 cache. */ +static void l2c_unlock_mem_region(u64 start, u64 len) +{ + u64 end; + + /* Round start/end to cache line boundaries */ + end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE); + start = ALIGN(start, CVMX_CACHE_LINE_SIZE); + + while (start <= end) { + l2c_unlock_line(start); + start += CVMX_CACHE_LINE_SIZE; + } +} + +static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host) +{ + if (!host->has_ciu3) { + down(&octeon_bootbus_sem); + /* For CN70XX, switch the MMC controller onto the bus. */ + if (OCTEON_IS_MODEL(OCTEON_CN70XX)) + writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL); + } else { + down(&host->mmc_serializer); + } +} + +static void octeon_mmc_release_bus(struct cvm_mmc_host *host) +{ + if (!host->has_ciu3) + up(&octeon_bootbus_sem); + else + up(&host->mmc_serializer); +} + +static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val) +{ + writeq(val, host->base + MIO_EMM_INT(host)); + if (!host->has_ciu3) + writeq(val, host->base + MIO_EMM_INT_EN(host)); +} + +static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir) +{ + if (dir == 0) + if (!atomic_dec_return(&host->shared_power_users)) + gpiod_set_value_cansleep(host->global_pwr_gpiod, 0); + if (dir == 1) + if (atomic_inc_return(&host->shared_power_users) == 1) + gpiod_set_value_cansleep(host->global_pwr_gpiod, 1); +} + +static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host, + struct mmc_command *cmd, + struct mmc_data *data, + u64 addr) +{ + if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK) + return; + if (data->blksz * data->blocks <= 1024) + return; + + host->n_minus_one = addr + (data->blksz * data->blocks) - 1024; + l2c_lock_mem_region(host->n_minus_one, 512); +} + +static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host) +{ + if (!host->n_minus_one) + return; + l2c_unlock_mem_region(host->n_minus_one, 512); + host->n_minus_one = 0; +} + +static int octeon_mmc_probe(struct platform_device *pdev) +{ + struct device_node *cn, *node = pdev->dev.of_node; + struct cvm_mmc_host *host; + void __iomem *base; + int mmc_irq[9]; + int i, ret = 0; + u64 val; + + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + spin_lock_init(&host->irq_handler_lock); + sema_init(&host->mmc_serializer, 1); + + host->dev = &pdev->dev; + host->acquire_bus = octeon_mmc_acquire_bus; + host->release_bus = octeon_mmc_release_bus; + host->int_enable = octeon_mmc_int_enable; + host->set_shared_power = octeon_mmc_set_shared_power; + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || + OCTEON_IS_MODEL(OCTEON_CNF7XXX)) { + host->dmar_fixup = octeon_mmc_dmar_fixup; + host->dmar_fixup_done = octeon_mmc_dmar_fixup_done; + } + + host->sys_freq = octeon_get_io_clock_rate(); + + if 
(of_device_is_compatible(node, "cavium,octeon-7890-mmc")) { + host->big_dma_addr = true; + host->need_irq_handler_lock = true; + host->has_ciu3 = true; + host->use_sg = true; + /* + * First seven are the EMM_INT bits 0..6, then two for + * the EMM_DMA_INT bits + */ + for (i = 0; i < 9; i++) { + mmc_irq[i] = platform_get_irq(pdev, i); + if (mmc_irq[i] < 0) + return mmc_irq[i]; + + /* work around legacy u-boot device trees */ + irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING); + } + } else { + host->big_dma_addr = false; + host->need_irq_handler_lock = false; + host->has_ciu3 = false; + /* The first IRQ is EMM, the second is DMA */ + for (i = 0; i < 2; i++) { + mmc_irq[i] = platform_get_irq(pdev, i); + if (mmc_irq[i] < 0) + return mmc_irq[i]; + } + } + + host->last_slot = -1; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + host->base = base; + host->reg_off = 0; + + base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(base)) + return PTR_ERR(base); + host->dma_base = base; + /* + * To keep the register addresses shared we intentionally use + * a negative offset here, first register used on Octeon therefore + * starts at 0x20 (MIO_EMM_DMA_CFG). + */ + host->reg_off_dma = -0x20; + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + /* + * Clear out any pending interrupts that may be left over from + * the bootloader. + */ + val = readq(host->base + MIO_EMM_INT(host)); + writeq(val, host->base + MIO_EMM_INT(host)); + + if (host->has_ciu3) { + /* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */ + for (i = 1; i <= 4; i++) { + ret = devm_request_irq(&pdev->dev, mmc_irq[i], + cvm_mmc_interrupt, + 0, cvm_mmc_irq_names[i], host); + if (ret < 0) { + dev_err(&pdev->dev, "Error: devm_request_irq %d\n", + mmc_irq[i]); + return ret; + } + } + } else { + ret = devm_request_irq(&pdev->dev, mmc_irq[0], + cvm_mmc_interrupt, 0, KBUILD_MODNAME, + host); + if (ret < 0) { + dev_err(&pdev->dev, "Error: devm_request_irq %d\n", + mmc_irq[0]); + return ret; + } + } + + host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev, + "power", + GPIOD_OUT_HIGH); + if (IS_ERR(host->global_pwr_gpiod)) { + dev_err(&pdev->dev, "Invalid power GPIO\n"); + return PTR_ERR(host->global_pwr_gpiod); + } + + platform_set_drvdata(pdev, host); + + i = 0; + for_each_child_of_node(node, cn) { + host->slot_pdev[i] = + of_platform_device_create(cn, NULL, &pdev->dev); + if (!host->slot_pdev[i]) { + i++; + continue; + } + ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host); + if (ret) { + dev_err(&pdev->dev, "Error populating slots\n"); + octeon_mmc_set_shared_power(host, 0); + of_node_put(cn); + goto error; + } + i++; + } + return 0; + +error: + for (i = 0; i < CAVIUM_MAX_MMC; i++) { + if (host->slot[i]) + cvm_mmc_of_slot_remove(host->slot[i]); + if (host->slot_pdev[i]) + of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + } + return ret; +} + +static int octeon_mmc_remove(struct platform_device *pdev) +{ + struct cvm_mmc_host *host = platform_get_drvdata(pdev); + u64 dma_cfg; + int i; + + for (i = 0; i < CAVIUM_MAX_MMC; i++) + if (host->slot[i]) + cvm_mmc_of_slot_remove(host->slot[i]); + + dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host)); + dma_cfg &= ~MIO_EMM_DMA_CFG_EN; + writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); + + octeon_mmc_set_shared_power(host, 0); + return 0; +} + +static const struct of_device_id octeon_mmc_match[] = { + { + .compatible = "cavium,octeon-6130-mmc", + }, + { + .compatible = "cavium,octeon-7890-mmc", + }, + 
{}, +}; +MODULE_DEVICE_TABLE(of, octeon_mmc_match); + +static struct platform_driver octeon_mmc_driver = { + .probe = octeon_mmc_probe, + .remove = octeon_mmc_remove, + .driver = { + .name = KBUILD_MODNAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = octeon_mmc_match, + }, +}; + +module_platform_driver(octeon_mmc_driver); + +MODULE_AUTHOR("Cavium Inc. "); +MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c new file mode 100644 index 000000000..202b1d6da --- /dev/null +++ b/drivers/mmc/host/cavium-thunderx.c @@ -0,0 +1,205 @@ +/* + * Driver for MMC and SSD cards for Cavium ThunderX SOCs. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2016 Cavium Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "cavium.h" + +static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host) +{ + down(&host->mmc_serializer); +} + +static void thunder_mmc_release_bus(struct cvm_mmc_host *host) +{ + up(&host->mmc_serializer); +} + +static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val) +{ + writeq(val, host->base + MIO_EMM_INT(host)); + writeq(val, host->base + MIO_EMM_INT_EN_SET(host)); +} + +static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host, + struct pci_dev *pdev) +{ + int nvec, ret, i; + + nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX); + if (nvec < 0) + return nvec; + + /* register interrupts */ + for (i = 0; i < nvec; i++) { + ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + cvm_mmc_interrupt, + 0, cvm_mmc_irq_names[i], host); + if (ret) + return ret; + } + return 0; +} + +static int thunder_mmc_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *child_node; + struct cvm_mmc_host *host; + int ret, i = 0; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + pci_set_drvdata(pdev, host); + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) + return ret; + + host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!host->base) { + ret = -EINVAL; + goto error; + } + + /* On ThunderX these are identical */ + host->dma_base = host->base; + + host->reg_off = 0x2000; + host->reg_off_dma = 0x160; + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto error; + } + + ret = clk_prepare_enable(host->clk); + if (ret) + goto error; + host->sys_freq = clk_get_rate(host->clk); + + spin_lock_init(&host->irq_handler_lock); + sema_init(&host->mmc_serializer, 1); + + host->dev = dev; + host->acquire_bus = thunder_mmc_acquire_bus; + host->release_bus = thunder_mmc_release_bus; + host->int_enable = thunder_mmc_int_enable; + + host->use_sg = true; + host->big_dma_addr = true; + host->need_irq_handler_lock = true; + host->last_slot = -1; + + ret = dma_set_mask(dev, DMA_BIT_MASK(48)); + if (ret) + goto error; + + /* + * Clear out any pending interrupts that may be left over from + * bootloader. Writing 1 to the bits clears them. 
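That write-one-to-clear (W1C) convention is easy to get backwards, so here is a standalone model of what the hardware does with the written value (plain C, example bit pattern):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t pending = 0x45;	/* bits 0, 2 and 6 pending */

		/* the readq()-then-writeq() idiom acks exactly what was
		 * pending, without racing against later arrivals */
		uint64_t ack = pending;
		pending &= ~ack;		/* what a W1C register does */
		assert(pending == 0);

		/* a partial mask clears only the named bits */
		pending = 0x45;
		pending &= ~(1ULL << 2);
		assert(pending == 0x41);	/* bits 0 and 6 still pending */
		return 0;
	}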
+ */ + writeq(127, host->base + MIO_EMM_INT_EN(host)); + writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host)); + /* Clear DMA FIFO */ + writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host)); + + ret = thunder_mmc_register_interrupts(host, pdev); + if (ret) + goto error; + + for_each_child_of_node(node, child_node) { + /* + * mmc_of_parse and devm* require one device per slot. + * Create a dummy device per slot and set the node pointer to + * the slot. The easiest way to get this is using + * of_platform_device_create. + */ + if (of_device_is_compatible(child_node, "mmc-slot")) { + host->slot_pdev[i] = of_platform_device_create(child_node, NULL, + &pdev->dev); + if (!host->slot_pdev[i]) + continue; + + ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host); + if (ret) { + of_node_put(child_node); + goto error; + } + } + i++; + } + dev_info(dev, "probed\n"); + return 0; + +error: + for (i = 0; i < CAVIUM_MAX_MMC; i++) { + if (host->slot[i]) + cvm_mmc_of_slot_remove(host->slot[i]); + if (host->slot_pdev[i]) { + get_device(&host->slot_pdev[i]->dev); + of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + put_device(&host->slot_pdev[i]->dev); + } + } + clk_disable_unprepare(host->clk); + pci_release_regions(pdev); + return ret; +} + +static void thunder_mmc_remove(struct pci_dev *pdev) +{ + struct cvm_mmc_host *host = pci_get_drvdata(pdev); + u64 dma_cfg; + int i; + + for (i = 0; i < CAVIUM_MAX_MMC; i++) + if (host->slot[i]) + cvm_mmc_of_slot_remove(host->slot[i]); + + dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host)); + dma_cfg &= ~MIO_EMM_DMA_CFG_EN; + writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); + + clk_disable_unprepare(host->clk); + pci_release_regions(pdev); +} + +static const struct pci_device_id thunder_mmc_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) }, + { 0, } /* end of table */ +}; + +static struct pci_driver thunder_mmc_driver = { + .name = KBUILD_MODNAME, + .id_table = thunder_mmc_id_table, + .probe = thunder_mmc_probe, + .remove = thunder_mmc_remove, +}; + +module_pci_driver(thunder_mmc_driver); + +MODULE_AUTHOR("Cavium Inc."); +MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table); diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c new file mode 100644 index 000000000..c5da3aaee --- /dev/null +++ b/drivers/mmc/host/cavium.c @@ -0,0 +1,1086 @@ +/* + * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and + * ThunderX SOCs. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012-2017 Cavium Inc. + * Authors: + * David Daney + * Peter Swain + * Steven J. Hill + * Jan Glauber + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cavium.h" + +const char *cvm_mmc_irq_names[] = { + "MMC Buffer", + "MMC Command", + "MMC DMA", + "MMC Command Error", + "MMC DMA Error", + "MMC Switch", + "MMC Switch Error", + "MMC DMA int Fifo", + "MMC DMA int", +}; + +/* + * The Cavium MMC host hardware assumes that all commands have fixed + * command and response types. These are correct if MMC devices are + * being used. However, non-MMC devices like SD use command and + * response types that are unexpected by the host hardware. 
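As the remainder of this comment explains, the mismatch is resolved by XOR-ing the hardware's per-opcode assumption into the type the core actually wants. A standalone walk-through for SD's CMD8 (SEND_IF_COND), whose table entry below is {1, 1} because on eMMC the same opcode is SEND_EXT_CSD, a data read with an R1 response:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t hw_ctype = 1, hw_rtype = 1;	/* CMD8 table entry */
		uint8_t want_ctype = 0;	/* SEND_IF_COND carries no data (bcr) */
		uint8_t want_rtype = 1;	/* R7 is an R1-format response */

		uint8_t ctype_xor = want_ctype ^ hw_ctype;	/* 1 */
		uint8_t rtype_xor = want_rtype ^ hw_rtype;	/* 0 */

		/* the controller applies the XOR on top of its table */
		assert((hw_ctype ^ ctype_xor) == want_ctype);
		assert((hw_rtype ^ rtype_xor) == want_rtype);
		return 0;
	}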
+ * + * The command and response types can be overridden by supplying an + * XOR value that is applied to the type. We calculate the XOR value + * from the values in this table and the flags passed from the MMC + * core. + */ +static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = { + {0, 0}, /* CMD0 */ + {0, 3}, /* CMD1 */ + {0, 2}, /* CMD2 */ + {0, 1}, /* CMD3 */ + {0, 0}, /* CMD4 */ + {0, 1}, /* CMD5 */ + {0, 1}, /* CMD6 */ + {0, 1}, /* CMD7 */ + {1, 1}, /* CMD8 */ + {0, 2}, /* CMD9 */ + {0, 2}, /* CMD10 */ + {1, 1}, /* CMD11 */ + {0, 1}, /* CMD12 */ + {0, 1}, /* CMD13 */ + {1, 1}, /* CMD14 */ + {0, 0}, /* CMD15 */ + {0, 1}, /* CMD16 */ + {1, 1}, /* CMD17 */ + {1, 1}, /* CMD18 */ + {3, 1}, /* CMD19 */ + {2, 1}, /* CMD20 */ + {0, 0}, /* CMD21 */ + {0, 0}, /* CMD22 */ + {0, 1}, /* CMD23 */ + {2, 1}, /* CMD24 */ + {2, 1}, /* CMD25 */ + {2, 1}, /* CMD26 */ + {2, 1}, /* CMD27 */ + {0, 1}, /* CMD28 */ + {0, 1}, /* CMD29 */ + {1, 1}, /* CMD30 */ + {1, 1}, /* CMD31 */ + {0, 0}, /* CMD32 */ + {0, 0}, /* CMD33 */ + {0, 0}, /* CMD34 */ + {0, 1}, /* CMD35 */ + {0, 1}, /* CMD36 */ + {0, 0}, /* CMD37 */ + {0, 1}, /* CMD38 */ + {0, 4}, /* CMD39 */ + {0, 5}, /* CMD40 */ + {0, 0}, /* CMD41 */ + {2, 1}, /* CMD42 */ + {0, 0}, /* CMD43 */ + {0, 0}, /* CMD44 */ + {0, 0}, /* CMD45 */ + {0, 0}, /* CMD46 */ + {0, 0}, /* CMD47 */ + {0, 0}, /* CMD48 */ + {0, 0}, /* CMD49 */ + {0, 0}, /* CMD50 */ + {0, 0}, /* CMD51 */ + {0, 0}, /* CMD52 */ + {0, 0}, /* CMD53 */ + {0, 0}, /* CMD54 */ + {0, 1}, /* CMD55 */ + {0xff, 0xff}, /* CMD56 */ + {0, 0}, /* CMD57 */ + {0, 0}, /* CMD58 */ + {0, 0}, /* CMD59 */ + {0, 0}, /* CMD60 */ + {0, 0}, /* CMD61 */ + {0, 0}, /* CMD62 */ + {0, 0} /* CMD63 */ +}; + +static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd) +{ + struct cvm_mmc_cr_type *cr; + u8 hardware_ctype, hardware_rtype; + u8 desired_ctype = 0, desired_rtype = 0; + struct cvm_mmc_cr_mods r; + + cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f); + hardware_ctype = cr->ctype; + hardware_rtype = cr->rtype; + if (cmd->opcode == MMC_GEN_CMD) + hardware_ctype = (cmd->arg & 1) ? 1 : 2; + + switch (mmc_cmd_type(cmd)) { + case MMC_CMD_ADTC: + desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 
2 : 1; + break; + case MMC_CMD_AC: + case MMC_CMD_BC: + case MMC_CMD_BCR: + desired_ctype = 0; + break; + } + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + desired_rtype = 0; + break; + case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */ + case MMC_RSP_R1B: + desired_rtype = 1; + break; + case MMC_RSP_R2: + desired_rtype = 2; + break; + case MMC_RSP_R3: /* MMC_RSP_R4 */ + desired_rtype = 3; + break; + } + r.ctype_xor = desired_ctype ^ hardware_ctype; + r.rtype_xor = desired_rtype ^ hardware_rtype; + return r; +} + +static void check_switch_errors(struct cvm_mmc_host *host) +{ + u64 emm_switch; + + emm_switch = readq(host->base + MIO_EMM_SWITCH(host)); + if (emm_switch & MIO_EMM_SWITCH_ERR0) + dev_err(host->dev, "Switch power class error\n"); + if (emm_switch & MIO_EMM_SWITCH_ERR1) + dev_err(host->dev, "Switch hs timing error\n"); + if (emm_switch & MIO_EMM_SWITCH_ERR2) + dev_err(host->dev, "Switch bus width error\n"); +} + +static void clear_bus_id(u64 *reg) +{ + u64 bus_id_mask = GENMASK_ULL(61, 60); + + *reg &= ~bus_id_mask; +} + +static void set_bus_id(u64 *reg, int bus_id) +{ + clear_bus_id(reg); + *reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id); +} + +static int get_bus_id(u64 reg) +{ + return FIELD_GET(GENMASK_ULL(61, 60), reg); +} + +/* + * We never set the switch_exe bit since that would interfere + * with the commands sent by the MMC core. + */ +static void do_switch(struct cvm_mmc_host *host, u64 emm_switch) +{ + int retries = 100; + u64 rsp_sts; + int bus_id; + + /* + * Mode settings are only taken from slot 0. Work around that hardware + * issue by first switching to slot 0. + */ + bus_id = get_bus_id(emm_switch); + clear_bus_id(&emm_switch); + writeq(emm_switch, host->base + MIO_EMM_SWITCH(host)); + + set_bus_id(&emm_switch, bus_id); + writeq(emm_switch, host->base + MIO_EMM_SWITCH(host)); + + /* wait for the switch to finish */ + do { + rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host)); + if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL)) + break; + udelay(10); + } while (--retries); + + check_switch_errors(host); +} + +static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val) +{ + /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */ + u64 match = 0x3001070fffffffffull; + + return (slot->cached_switch & match) != (new_val & match); +} + +static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns) +{ + u64 timeout; + + if (!slot->clock) + return; + + if (ns) + timeout = (slot->clock * ns) / NSEC_PER_SEC; + else + timeout = (slot->clock * 850ull) / 1000ull; + writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host)); +} + +static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot) +{ + struct cvm_mmc_host *host = slot->host; + u64 emm_switch, wdog; + + emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host)); + emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 | + MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2); + set_bus_id(&emm_switch, slot->bus_id); + + wdog = readq(slot->host->base + MIO_EMM_WDOG(host)); + do_switch(slot->host, emm_switch); + + slot->cached_switch = emm_switch; + + msleep(20); + + writeq(wdog, slot->host->base + MIO_EMM_WDOG(host)); +} + +/* Switch to another slot if needed */ +static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot) +{ + struct cvm_mmc_host *host = slot->host; + struct cvm_mmc_slot *old_slot; + u64 emm_sample, emm_switch; + + if (slot->bus_id == host->last_slot) + return; + + if (host->last_slot >= 0 && host->slot[host->last_slot]) { + old_slot = host->slot[host->last_slot]; + 
old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host)); + old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host)); + } + + writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host)); + emm_switch = slot->cached_switch; + set_bus_id(&emm_switch, slot->bus_id); + do_switch(host, emm_switch); + + emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) | + FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt); + writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host)); + + host->last_slot = slot->bus_id; +} + +static void do_read(struct cvm_mmc_host *host, struct mmc_request *req, + u64 dbuf) +{ + struct sg_mapping_iter *smi = &host->smi; + int data_len = req->data->blocks * req->data->blksz; + int bytes_xfered, shift = -1; + u64 dat = 0; + + /* Auto inc from offset zero */ + writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host)); + + for (bytes_xfered = 0; bytes_xfered < data_len;) { + if (smi->consumed >= smi->length) { + if (!sg_miter_next(smi)) + break; + smi->consumed = 0; + } + + if (shift < 0) { + dat = readq(host->base + MIO_EMM_BUF_DAT(host)); + shift = 56; + } + + while (smi->consumed < smi->length && shift >= 0) { + ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff; + bytes_xfered++; + smi->consumed++; + shift -= 8; + } + } + + sg_miter_stop(smi); + req->data->bytes_xfered = bytes_xfered; + req->data->error = 0; +} + +static void do_write(struct mmc_request *req) +{ + req->data->bytes_xfered = req->data->blocks * req->data->blksz; + req->data->error = 0; +} + +static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req, + u64 rsp_sts) +{ + u64 rsp_hi, rsp_lo; + + if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL)) + return; + + rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host)); + + switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) { + case 1: + case 3: + req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff; + req->cmd->resp[1] = 0; + req->cmd->resp[2] = 0; + req->cmd->resp[3] = 0; + break; + case 2: + req->cmd->resp[3] = rsp_lo & 0xffffffff; + req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff; + rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host)); + req->cmd->resp[1] = rsp_hi & 0xffffffff; + req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff; + break; + } +} + +static int get_dma_dir(struct mmc_data *data) +{ + return (data->flags & MMC_DATA_WRITE) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + +static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data) +{ + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); + return 1; +} + +static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data) +{ + u64 fifo_cfg; + int count; + + /* Check if there are any pending requests left */ + fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); + count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg); + if (count) + dev_err(host->dev, "%u requests still pending\n", count); + + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; + + /* Clear and disable FIFO */ + writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); + return 1; +} + +static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data) +{ + if (host->use_sg && data->sg_len > 1) + return finish_dma_sg(host, data); + else + return finish_dma_single(host, data); +} + +static int check_status(u64 rsp_sts) +{ + if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS || + rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR || + rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR) + return -EILSEQ; + if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT || + rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT) + return -ETIMEDOUT; + if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR) + return -EIO; + return 0; +} + +/* Try to clean up failed DMA. */ +static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts) +{ + u64 emm_dma; + + emm_dma = readq(host->base + MIO_EMM_DMA(host)); + emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) | + FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1); + set_bus_id(&emm_dma, get_bus_id(rsp_sts)); + writeq(emm_dma, host->base + MIO_EMM_DMA(host)); +} + +irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id) +{ + struct cvm_mmc_host *host = dev_id; + struct mmc_request *req; + unsigned long flags = 0; + u64 emm_int, rsp_sts; + bool host_done; + + if (host->need_irq_handler_lock) + spin_lock_irqsave(&host->irq_handler_lock, flags); + else + __acquire(&host->irq_handler_lock); + + /* Clear interrupt bits (write 1 clears ). */ + emm_int = readq(host->base + MIO_EMM_INT(host)); + writeq(emm_int, host->base + MIO_EMM_INT(host)); + + if (emm_int & MIO_EMM_INT_SWITCH_ERR) + check_switch_errors(host); + + req = host->current_req; + if (!req) + goto out; + + rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host)); + /* + * dma_val set means DMA is still in progress. Don't touch + * the request and wait for the interrupt indicating that + * the DMA is finished. 
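Once the handler does decide a request has finished, check_status() above collapses the status word into a single errno, with illegal-sequence errors taking precedence over timeouts, and those over buffer errors. A compilable sketch of that priority, using the bit positions from cavium.h:

	#include <assert.h>
	#include <errno.h>
	#include <stdint.h>

	#define RSP_TIMEOUT	(1ULL << 15)	/* MIO_EMM_RSP_STS_RSP_TIMEOUT */
	#define RSP_CRC_ERR	(1ULL << 14)	/* MIO_EMM_RSP_STS_RSP_CRC_ERR */

	static int to_errno(uint64_t rsp_sts)
	{
		if (rsp_sts & RSP_CRC_ERR)
			return -EILSEQ;
		if (rsp_sts & RSP_TIMEOUT)
			return -ETIMEDOUT;
		return 0;
	}

	int main(void)
	{
		assert(to_errno(RSP_CRC_ERR | RSP_TIMEOUT) == -EILSEQ);
		assert(to_errno(RSP_TIMEOUT) == -ETIMEDOUT);
		assert(to_errno(0) == 0);
		return 0;
	}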
+ */ + if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active) + goto out; + + if (!host->dma_active && req->data && + (emm_int & MIO_EMM_INT_BUF_DONE)) { + unsigned int type = (rsp_sts >> 7) & 3; + + if (type == 1) + do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF); + else if (type == 2) + do_write(req); + } + + host_done = emm_int & MIO_EMM_INT_CMD_DONE || + emm_int & MIO_EMM_INT_DMA_DONE || + emm_int & MIO_EMM_INT_CMD_ERR || + emm_int & MIO_EMM_INT_DMA_ERR; + + if (!(host_done && req->done)) + goto no_req_done; + + req->cmd->error = check_status(rsp_sts); + + if (host->dma_active && req->data) + if (!finish_dma(host, req->data)) + goto no_req_done; + + set_cmd_response(host, req, rsp_sts); + if ((emm_int & MIO_EMM_INT_DMA_ERR) && + (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND)) + cleanup_dma(host, rsp_sts); + + host->current_req = NULL; + req->done(req); + +no_req_done: + if (host->dmar_fixup_done) + host->dmar_fixup_done(host); + if (host_done) + host->release_bus(host); +out: + if (host->need_irq_handler_lock) + spin_unlock_irqrestore(&host->irq_handler_lock, flags); + else + __release(&host->irq_handler_lock); + return IRQ_RETVAL(emm_int != 0); +} + +/* + * Program DMA_CFG and if needed DMA_ADR. + * Returns 0 on error, DMA address otherwise. + */ +static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data) +{ + u64 dma_cfg, addr; + int count, rw; + + count = dma_map_sg(host->dev, data->sg, data->sg_len, + get_dma_dir(data)); + if (!count) + return 0; + + rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) | + FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw); +#ifdef __LITTLE_ENDIAN + dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1); +#endif + dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE, + (sg_dma_len(&data->sg[0]) / 8) - 1); + + addr = sg_dma_address(&data->sg[0]); + if (!host->big_dma_addr) + dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr); + writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); + + pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n", + (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count); + + if (host->big_dma_addr) + writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host)); + return addr; +} + +/* + * Queue complete sg list into the FIFO. + * Returns 0 on error, 1 otherwise. + */ +static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data) +{ + struct scatterlist *sg; + u64 fifo_cmd, addr; + int count, i, rw; + + count = dma_map_sg(host->dev, data->sg, data->sg_len, + get_dma_dir(data)); + if (!count) + return 0; + if (count > 16) + goto error; + + /* Enable FIFO by removing CLR bit */ + writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); + + for_each_sg(data->sg, sg, count, i) { + /* Program DMA address */ + addr = sg_dma_address(sg); + if (addr & 7) + goto error; + writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host)); + + /* + * If we have scatter-gather support we also have an extra + * register for the DMA addr, so no need to check + * host->big_dma_addr here. + */ + rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw); + + /* enable interrupts on the last element */ + fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS, + (i + 1 == count) ? 0 : 1); + +#ifdef __LITTLE_ENDIAN + fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1); +#endif + fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE, + sg_dma_len(sg) / 8 - 1); + /* + * The write copies the address and the command to the FIFO + * and increments the FIFO's COUNT field. 
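A standalone sketch of that packing, using the field positions from cavium.h (RW at bit 62, INTDIS at bit 60, SIZE in bits 55:36, counted in 8-byte words minus one):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t fifo_cmd = 0;
		unsigned int seg_bytes = 4096;	/* one example sg segment */
		int is_write = 1, is_last = 0;

		fifo_cmd |= (uint64_t)is_write << 62;		 /* RW */
		fifo_cmd |= (uint64_t)(is_last ? 0 : 1) << 60;	 /* INTDIS */
		fifo_cmd |= ((uint64_t)seg_bytes / 8 - 1) << 36; /* SIZE */

		/* 4096 bytes -> 512 dwords -> SIZE field of 511 */
		assert(((fifo_cmd >> 36) & 0xfffff) == 511);
		return 0;
	}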
+ */ + writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host)); + pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n", + (rw) ? "W" : "R", sg_dma_len(sg), i, count); + } + + /* + * Unlike prepare_dma_single we don't return the + * address here, as it would not make sense for scatter-gather. + * The DMA fixup is only required on models that don't support + * scatter-gather, so that is not a problem. + */ + return 1; + +error: + WARN_ON_ONCE(1); + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); + /* Disable FIFO */ + writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); + return 0; +} + +static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data) +{ + if (host->use_sg && data->sg_len > 1) + return prepare_dma_sg(host, data); + else + return prepare_dma_single(host, data); +} + +static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct cvm_mmc_slot *slot = mmc_priv(mmc); + u64 emm_dma; + + emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) | + FIELD_PREP(MIO_EMM_DMA_SECTOR, + mmc_card_is_blockaddr(mmc->card) ? 1 : 0) | + FIELD_PREP(MIO_EMM_DMA_RW, + (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) | + FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) | + FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg); + set_bus_id(&emm_dma, slot->bus_id); + + if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) && + (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT))) + emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1); + + pr_debug("[%s] blocks: %u multi: %d\n", + (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R", + mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0); + return emm_dma; +} + +static void cvm_mmc_dma_request(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct cvm_mmc_slot *slot = mmc_priv(mmc); + struct cvm_mmc_host *host = slot->host; + struct mmc_data *data; + u64 emm_dma, addr; + + if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len || + !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) { + dev_err(&mmc->card->dev, + "Error: cvm_mmc_dma_request no data\n"); + goto error; + } + + cvm_mmc_switch_to(slot); + + data = mrq->data; + pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n", + data->blocks, data->blksz, data->blocks * data->blksz); + if (data->timeout_ns) + set_wdog(slot, data->timeout_ns); + + WARN_ON(host->current_req); + host->current_req = mrq; + + emm_dma = prepare_ext_dma(mmc, mrq); + addr = prepare_dma(host, data); + if (!addr) { + dev_err(host->dev, "prepare_dma failed\n"); + goto error; + } + + host->dma_active = true; + host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE | + MIO_EMM_INT_DMA_ERR); + + if (host->dmar_fixup) + host->dmar_fixup(host, mrq->cmd, data, addr); + + /* + * If we have a valid SD card in the slot, we set the response + * bit mask to check for CRC errors and timeouts only. + * Otherwise, use the default power reset value. 
+ */ + if (mmc_card_sd(mmc->card)) + writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host)); + else + writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host)); + writeq(emm_dma, host->base + MIO_EMM_DMA(host)); + return; + +error: + mrq->cmd->error = -EINVAL; + if (mrq->done) + mrq->done(mrq); + host->release_bus(host); +} + +static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq) +{ + sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len, + SG_MITER_ATOMIC | SG_MITER_TO_SG); +} + +static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq) +{ + unsigned int data_len = mrq->data->blocks * mrq->data->blksz; + struct sg_mapping_iter *smi = &host->smi; + unsigned int bytes_xfered; + int shift = 56; + u64 dat = 0; + + /* Copy data to the xmit buffer before issuing the command. */ + sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG); + + /* Auto inc from offset zero, dbuf zero */ + writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host)); + + for (bytes_xfered = 0; bytes_xfered < data_len;) { + if (smi->consumed >= smi->length) { + if (!sg_miter_next(smi)) + break; + smi->consumed = 0; + } + + while (smi->consumed < smi->length && shift >= 0) { + dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift; + bytes_xfered++; + smi->consumed++; + shift -= 8; + } + + if (shift < 0) { + writeq(dat, host->base + MIO_EMM_BUF_DAT(host)); + shift = 56; + dat = 0; + } + } + sg_miter_stop(smi); +} + +static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct cvm_mmc_slot *slot = mmc_priv(mmc); + struct cvm_mmc_host *host = slot->host; + struct mmc_command *cmd = mrq->cmd; + struct cvm_mmc_cr_mods mods; + u64 emm_cmd, rsp_sts; + int retries = 100; + + /* + * Note about locking: + * All MMC devices share the same bus and controller. Allow only a + * single user of the bootbus/MMC bus at a time. The lock is acquired + * on all entry points from the MMC layer. + * + * For requests the lock is only released after the completion + * interrupt! 
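The shape of that scheme, reduced to a hypothetical skeleton (foo_* names are illustrative, not this driver's code):

	#include <linux/interrupt.h>
	#include <linux/mmc/core.h>
	#include <linux/mmc/host.h>
	#include <linux/semaphore.h>

	struct foo_host {
		struct semaphore bus_sem;
		struct mmc_request *current_req;
	};

	static void foo_fire_command(struct foo_host *host,
				     struct mmc_request *mrq)
	{
		/* writes the command register and returns immediately */
	}

	static void foo_request(struct mmc_host *mmc, struct mmc_request *mrq)
	{
		struct foo_host *host = mmc_priv(mmc);

		down(&host->bus_sem);		/* acquire_bus() */
		host->current_req = mrq;
		foo_fire_command(host, mrq);
		/* no up() here - the command is still in flight */
	}

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;
		struct mmc_request *mrq = host->current_req;

		host->current_req = NULL;
		mrq->done(mrq);
		up(&host->bus_sem);		/* release_bus() at completion */
		return IRQ_HANDLED;
	}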
+ */ + host->acquire_bus(host); + + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) + return cvm_mmc_dma_request(mmc, mrq); + + cvm_mmc_switch_to(slot); + + mods = cvm_mmc_get_cr_mods(cmd); + + WARN_ON(host->current_req); + host->current_req = mrq; + + if (cmd->data) { + if (cmd->data->flags & MMC_DATA_READ) + do_read_request(host, mrq); + else + do_write_request(host, mrq); + + if (cmd->data->timeout_ns) + set_wdog(slot, cmd->data->timeout_ns); + } else + set_wdog(slot, 0); + + host->dma_active = false; + host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR); + + emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) | + FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) | + FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) | + FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) | + FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg); + set_bus_id(&emm_cmd, slot->bus_id); + if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC) + emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET, + 64 - ((cmd->data->blocks * cmd->data->blksz) / 8)); + + writeq(0, host->base + MIO_EMM_STS_MASK(host)); + +retry: + rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host)); + if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL || + rsp_sts & MIO_EMM_RSP_STS_CMD_VAL || + rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL || + rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) { + udelay(10); + if (--retries) + goto retry; + } + if (!retries) + dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts); + writeq(emm_cmd, host->base + MIO_EMM_CMD(host)); +} + +static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct cvm_mmc_slot *slot = mmc_priv(mmc); + struct cvm_mmc_host *host = slot->host; + int clk_period = 0, power_class = 10, bus_width = 0; + u64 clock, emm_switch; + + host->acquire_bus(host); + cvm_mmc_switch_to(slot); + + /* Set the power state */ + switch (ios->power_mode) { + case MMC_POWER_ON: + break; + + case MMC_POWER_OFF: + cvm_mmc_reset_bus(slot); + if (host->global_pwr_gpiod) + host->set_shared_power(host, 0); + else if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + break; + + case MMC_POWER_UP: + if (host->global_pwr_gpiod) + host->set_shared_power(host, 1); + else if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + break; + } + + /* Convert bus width to HW definition */ + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + bus_width = 2; + break; + case MMC_BUS_WIDTH_4: + bus_width = 1; + break; + case MMC_BUS_WIDTH_1: + bus_width = 0; + break; + } + + /* DDR is available for 4/8 bit bus width */ + if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52) + bus_width |= 4; + + /* Change the clock frequency. 
*/ + clock = ios->clock; + if (clock > 52000000) + clock = 52000000; + slot->clock = clock; + + if (clock) + clk_period = (host->sys_freq + clock - 1) / (2 * clock); + + emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING, + (ios->timing == MMC_TIMING_MMC_HS)) | + FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) | + FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) | + FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) | + FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period); + set_bus_id(&emm_switch, slot->bus_id); + + if (!switch_val_changed(slot, emm_switch)) + goto out; + + set_wdog(slot, 0); + do_switch(host, emm_switch); + slot->cached_switch = emm_switch; +out: + host->release_bus(host); +} + +static const struct mmc_host_ops cvm_mmc_ops = { + .request = cvm_mmc_request, + .set_ios = cvm_mmc_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, +}; + +static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock) +{ + struct mmc_host *mmc = slot->mmc; + + clock = min(clock, mmc->f_max); + clock = max(clock, mmc->f_min); + slot->clock = clock; +} + +static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot) +{ + struct cvm_mmc_host *host = slot->host; + u64 emm_switch; + + /* Enable this bus slot. */ + host->emm_cfg |= (1ull << slot->bus_id); + writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host)); + udelay(10); + + /* Program initial clock speed and power. */ + cvm_mmc_set_clock(slot, slot->mmc->f_min); + emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10); + emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, + (host->sys_freq / slot->clock) / 2); + emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, + (host->sys_freq / slot->clock) / 2); + + /* Make the changes take effect on this bus slot. */ + set_bus_id(&emm_switch, slot->bus_id); + do_switch(host, emm_switch); + + slot->cached_switch = emm_switch; + + /* + * Set watchdog timeout value and default reset value + * for the mask register. Finally, set the CARD_RCA + * bit so that we can get the card address relative + * to the CMD register for CMD7 transactions. + */ + set_wdog(slot, 0); + writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host)); + writeq(1, host->base + MIO_EMM_RCA(host)); + return 0; +} + +static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot) +{ + u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0; + struct device_node *node = dev->of_node; + struct mmc_host *mmc = slot->mmc; + u64 clock_period; + int ret; + + ret = of_property_read_u32(node, "reg", &id); + if (ret) { + dev_err(dev, "Missing or invalid reg property on %pOF\n", node); + return ret; + } + + if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) { + dev_err(dev, "Invalid reg property on %pOF\n", node); + return -EINVAL; + } + + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + /* + * Legacy Octeon firmware has no regulator entry, fall-back to + * a hard-coded voltage to get a sane OCR. 
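The divider rounding in cvm_mmc_set_ios() above is worth pinning down with numbers: CLK_HI and CLK_LO each hold half the clock period in input-clock cycles, and the round-up guarantees the card is never driven above the requested frequency. A compilable check, assuming an example 800 MHz input clock:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t sys_freq = 800000000, clock = 52000000;

		/* same rounding as cvm_mmc_set_ios() */
		uint64_t clk_period = (sys_freq + clock - 1) / (2 * clock);
		uint64_t actual = sys_freq / (2 * clk_period);

		assert(clk_period == 8);
		assert(actual == 50000000);	/* 50 MHz <= 52 MHz requested */
		return 0;
	}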
+ */ + if (IS_ERR(mmc->supply.vmmc)) + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + /* Common MMC bindings */ + ret = mmc_of_parse(mmc); + if (ret) + return ret; + + /* Set bus width */ + if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) { + of_property_read_u32(node, "cavium,bus-max-width", &bus_width); + if (bus_width == 8) + mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; + else if (bus_width == 4) + mmc->caps |= MMC_CAP_4_BIT_DATA; + } + + /* Set maximum and minimum frequency */ + if (!mmc->f_max) + of_property_read_u32(node, "spi-max-frequency", &mmc->f_max); + if (!mmc->f_max || mmc->f_max > 52000000) + mmc->f_max = 52000000; + mmc->f_min = 400000; + + /* Sampling register settings, period in picoseconds */ + clock_period = 1000000000000ull / slot->host->sys_freq; + of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew); + of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew); + slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period; + slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period; + + return id; +} + +int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) +{ + struct cvm_mmc_slot *slot; + struct mmc_host *mmc; + int ret, id; + + mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev); + if (!mmc) + return -ENOMEM; + + slot = mmc_priv(mmc); + slot->mmc = mmc; + slot->host = host; + + ret = cvm_mmc_of_parse(dev, slot); + if (ret < 0) + goto error; + id = ret; + + /* Set up host parameters */ + mmc->ops = &cvm_mmc_ops; + + /* + * We only have a 3.3v supply, we cannot support any + * of the UHS modes. We do support the high speed DDR + * modes up to 52MHz. + * + * Disable bounce buffers for max_segs = 1 + */ + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR; + + if (host->use_sg) + mmc->max_segs = 16; + else + mmc->max_segs = 1; + + /* DMA size field can address up to 8 MB */ + mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024, + dma_get_max_seg_size(host->dev)); + mmc->max_req_size = mmc->max_seg_size; + /* External DMA is in 512 byte blocks */ + mmc->max_blk_size = 512; + /* DMA block count field is 15 bits */ + mmc->max_blk_count = 32767; + + slot->clock = mmc->f_min; + slot->bus_id = id; + slot->cached_rca = 1; + + host->acquire_bus(host); + host->slot[id] = slot; + cvm_mmc_switch_to(slot); + cvm_mmc_init_lowlevel(slot); + host->release_bus(host); + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(dev, "mmc_add_host() returned %d\n", ret); + slot->host->slot[id] = NULL; + goto error; + } + return 0; + +error: + mmc_free_host(slot->mmc); + return ret; +} + +int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot) +{ + mmc_remove_host(slot->mmc); + slot->host->slot[slot->bus_id] = NULL; + mmc_free_host(slot->mmc); + return 0; +} diff --git a/drivers/mmc/host/cavium.h b/drivers/mmc/host/cavium.h new file mode 100644 index 000000000..f3eea5eaa --- /dev/null +++ b/drivers/mmc/host/cavium.h @@ -0,0 +1,215 @@ +/* + * Driver for MMC and SSD cards for Cavium OCTEON and ThunderX SOCs. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012-2017 Cavium Inc. 
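Returning to the sample-skew conversion in cvm_mmc_of_parse() above: the device-tree skews are given in picoseconds and rounded to the nearest whole input-clock cycle. A compilable example, assuming a 700 MHz input clock and a 2500 ps data skew:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t sys_freq = 700000000;
		uint64_t period_ps = 1000000000000ULL / sys_freq;
		uint64_t dat_skew = 2500;	/* picoseconds, from the DT */

		/* adding half a period before dividing rounds to nearest */
		uint64_t dat_cnt = (dat_skew + period_ps / 2) / period_ps;

		assert(period_ps == 1428);
		assert(dat_cnt == 2);	/* 2856 ps is closer than 1428 ps */
		return 0;
	}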
+ */ + +#ifndef _CAVIUM_MMC_H_ +#define _CAVIUM_MMC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CAVIUM_MAX_MMC 4 + +/* DMA register addresses */ +#define MIO_EMM_DMA_FIFO_CFG(x) (0x00 + x->reg_off_dma) +#define MIO_EMM_DMA_FIFO_ADR(x) (0x10 + x->reg_off_dma) +#define MIO_EMM_DMA_FIFO_CMD(x) (0x18 + x->reg_off_dma) +#define MIO_EMM_DMA_CFG(x) (0x20 + x->reg_off_dma) +#define MIO_EMM_DMA_ADR(x) (0x28 + x->reg_off_dma) +#define MIO_EMM_DMA_INT(x) (0x30 + x->reg_off_dma) +#define MIO_EMM_DMA_INT_W1S(x) (0x38 + x->reg_off_dma) +#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma) +#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma) + +/* register addresses */ +#define MIO_EMM_CFG(x) (0x00 + x->reg_off) +#define MIO_EMM_SWITCH(x) (0x48 + x->reg_off) +#define MIO_EMM_DMA(x) (0x50 + x->reg_off) +#define MIO_EMM_CMD(x) (0x58 + x->reg_off) +#define MIO_EMM_RSP_STS(x) (0x60 + x->reg_off) +#define MIO_EMM_RSP_LO(x) (0x68 + x->reg_off) +#define MIO_EMM_RSP_HI(x) (0x70 + x->reg_off) +#define MIO_EMM_INT(x) (0x78 + x->reg_off) +#define MIO_EMM_INT_EN(x) (0x80 + x->reg_off) +#define MIO_EMM_WDOG(x) (0x88 + x->reg_off) +#define MIO_EMM_SAMPLE(x) (0x90 + x->reg_off) +#define MIO_EMM_STS_MASK(x) (0x98 + x->reg_off) +#define MIO_EMM_RCA(x) (0xa0 + x->reg_off) +#define MIO_EMM_INT_EN_SET(x) (0xb0 + x->reg_off) +#define MIO_EMM_INT_EN_CLR(x) (0xb8 + x->reg_off) +#define MIO_EMM_BUF_IDX(x) (0xe0 + x->reg_off) +#define MIO_EMM_BUF_DAT(x) (0xe8 + x->reg_off) + +struct cvm_mmc_host { + struct device *dev; + void __iomem *base; + void __iomem *dma_base; + int reg_off; + int reg_off_dma; + u64 emm_cfg; + u64 n_minus_one; /* OCTEON II workaround location */ + int last_slot; + struct clk *clk; + int sys_freq; + + struct mmc_request *current_req; + struct sg_mapping_iter smi; + bool dma_active; + bool use_sg; + + bool has_ciu3; + bool big_dma_addr; + bool need_irq_handler_lock; + spinlock_t irq_handler_lock; + struct semaphore mmc_serializer; + + struct gpio_desc *global_pwr_gpiod; + atomic_t shared_power_users; + + struct cvm_mmc_slot *slot[CAVIUM_MAX_MMC]; + struct platform_device *slot_pdev[CAVIUM_MAX_MMC]; + + void (*set_shared_power)(struct cvm_mmc_host *, int); + void (*acquire_bus)(struct cvm_mmc_host *); + void (*release_bus)(struct cvm_mmc_host *); + void (*int_enable)(struct cvm_mmc_host *, u64); + /* required on some MIPS models */ + void (*dmar_fixup)(struct cvm_mmc_host *, struct mmc_command *, + struct mmc_data *, u64); + void (*dmar_fixup_done)(struct cvm_mmc_host *); +}; + +struct cvm_mmc_slot { + struct mmc_host *mmc; /* slot-level mmc_core object */ + struct cvm_mmc_host *host; /* common hw for all slots */ + + u64 clock; + + u64 cached_switch; + u64 cached_rca; + + unsigned int cmd_cnt; /* sample delay */ + unsigned int dat_cnt; /* sample delay */ + + int bus_id; +}; + +struct cvm_mmc_cr_type { + u8 ctype; + u8 rtype; +}; + +struct cvm_mmc_cr_mods { + u8 ctype_xor; + u8 rtype_xor; +}; + +/* Bitfield definitions */ +#define MIO_EMM_DMA_FIFO_CFG_CLR BIT_ULL(16) +#define MIO_EMM_DMA_FIFO_CFG_INT_LVL GENMASK_ULL(12, 8) +#define MIO_EMM_DMA_FIFO_CFG_COUNT GENMASK_ULL(4, 0) + +#define MIO_EMM_DMA_FIFO_CMD_RW BIT_ULL(62) +#define MIO_EMM_DMA_FIFO_CMD_INTDIS BIT_ULL(60) +#define MIO_EMM_DMA_FIFO_CMD_SWAP32 BIT_ULL(59) +#define MIO_EMM_DMA_FIFO_CMD_SWAP16 BIT_ULL(58) +#define MIO_EMM_DMA_FIFO_CMD_SWAP8 BIT_ULL(57) +#define MIO_EMM_DMA_FIFO_CMD_ENDIAN BIT_ULL(56) +#define MIO_EMM_DMA_FIFO_CMD_SIZE GENMASK_ULL(55, 36) + +#define 
MIO_EMM_CMD_SKIP_BUSY BIT_ULL(62) +#define MIO_EMM_CMD_BUS_ID GENMASK_ULL(61, 60) +#define MIO_EMM_CMD_VAL BIT_ULL(59) +#define MIO_EMM_CMD_DBUF BIT_ULL(55) +#define MIO_EMM_CMD_OFFSET GENMASK_ULL(54, 49) +#define MIO_EMM_CMD_CTYPE_XOR GENMASK_ULL(42, 41) +#define MIO_EMM_CMD_RTYPE_XOR GENMASK_ULL(40, 38) +#define MIO_EMM_CMD_IDX GENMASK_ULL(37, 32) +#define MIO_EMM_CMD_ARG GENMASK_ULL(31, 0) + +#define MIO_EMM_DMA_SKIP_BUSY BIT_ULL(62) +#define MIO_EMM_DMA_BUS_ID GENMASK_ULL(61, 60) +#define MIO_EMM_DMA_VAL BIT_ULL(59) +#define MIO_EMM_DMA_SECTOR BIT_ULL(58) +#define MIO_EMM_DMA_DAT_NULL BIT_ULL(57) +#define MIO_EMM_DMA_THRES GENMASK_ULL(56, 51) +#define MIO_EMM_DMA_REL_WR BIT_ULL(50) +#define MIO_EMM_DMA_RW BIT_ULL(49) +#define MIO_EMM_DMA_MULTI BIT_ULL(48) +#define MIO_EMM_DMA_BLOCK_CNT GENMASK_ULL(47, 32) +#define MIO_EMM_DMA_CARD_ADDR GENMASK_ULL(31, 0) + +#define MIO_EMM_DMA_CFG_EN BIT_ULL(63) +#define MIO_EMM_DMA_CFG_RW BIT_ULL(62) +#define MIO_EMM_DMA_CFG_CLR BIT_ULL(61) +#define MIO_EMM_DMA_CFG_SWAP32 BIT_ULL(59) +#define MIO_EMM_DMA_CFG_SWAP16 BIT_ULL(58) +#define MIO_EMM_DMA_CFG_SWAP8 BIT_ULL(57) +#define MIO_EMM_DMA_CFG_ENDIAN BIT_ULL(56) +#define MIO_EMM_DMA_CFG_SIZE GENMASK_ULL(55, 36) +#define MIO_EMM_DMA_CFG_ADR GENMASK_ULL(35, 0) + +#define MIO_EMM_INT_SWITCH_ERR BIT_ULL(6) +#define MIO_EMM_INT_SWITCH_DONE BIT_ULL(5) +#define MIO_EMM_INT_DMA_ERR BIT_ULL(4) +#define MIO_EMM_INT_CMD_ERR BIT_ULL(3) +#define MIO_EMM_INT_DMA_DONE BIT_ULL(2) +#define MIO_EMM_INT_CMD_DONE BIT_ULL(1) +#define MIO_EMM_INT_BUF_DONE BIT_ULL(0) + +#define MIO_EMM_RSP_STS_BUS_ID GENMASK_ULL(61, 60) +#define MIO_EMM_RSP_STS_CMD_VAL BIT_ULL(59) +#define MIO_EMM_RSP_STS_SWITCH_VAL BIT_ULL(58) +#define MIO_EMM_RSP_STS_DMA_VAL BIT_ULL(57) +#define MIO_EMM_RSP_STS_DMA_PEND BIT_ULL(56) +#define MIO_EMM_RSP_STS_DBUF_ERR BIT_ULL(28) +#define MIO_EMM_RSP_STS_DBUF BIT_ULL(23) +#define MIO_EMM_RSP_STS_BLK_TIMEOUT BIT_ULL(22) +#define MIO_EMM_RSP_STS_BLK_CRC_ERR BIT_ULL(21) +#define MIO_EMM_RSP_STS_RSP_BUSYBIT BIT_ULL(20) +#define MIO_EMM_RSP_STS_STP_TIMEOUT BIT_ULL(19) +#define MIO_EMM_RSP_STS_STP_CRC_ERR BIT_ULL(18) +#define MIO_EMM_RSP_STS_STP_BAD_STS BIT_ULL(17) +#define MIO_EMM_RSP_STS_STP_VAL BIT_ULL(16) +#define MIO_EMM_RSP_STS_RSP_TIMEOUT BIT_ULL(15) +#define MIO_EMM_RSP_STS_RSP_CRC_ERR BIT_ULL(14) +#define MIO_EMM_RSP_STS_RSP_BAD_STS BIT_ULL(13) +#define MIO_EMM_RSP_STS_RSP_VAL BIT_ULL(12) +#define MIO_EMM_RSP_STS_RSP_TYPE GENMASK_ULL(11, 9) +#define MIO_EMM_RSP_STS_CMD_TYPE GENMASK_ULL(8, 7) +#define MIO_EMM_RSP_STS_CMD_IDX GENMASK_ULL(6, 1) +#define MIO_EMM_RSP_STS_CMD_DONE BIT_ULL(0) + +#define MIO_EMM_SAMPLE_CMD_CNT GENMASK_ULL(25, 16) +#define MIO_EMM_SAMPLE_DAT_CNT GENMASK_ULL(9, 0) + +#define MIO_EMM_SWITCH_BUS_ID GENMASK_ULL(61, 60) +#define MIO_EMM_SWITCH_EXE BIT_ULL(59) +#define MIO_EMM_SWITCH_ERR0 BIT_ULL(58) +#define MIO_EMM_SWITCH_ERR1 BIT_ULL(57) +#define MIO_EMM_SWITCH_ERR2 BIT_ULL(56) +#define MIO_EMM_SWITCH_HS_TIMING BIT_ULL(48) +#define MIO_EMM_SWITCH_BUS_WIDTH GENMASK_ULL(42, 40) +#define MIO_EMM_SWITCH_POWER_CLASS GENMASK_ULL(35, 32) +#define MIO_EMM_SWITCH_CLK_HI GENMASK_ULL(31, 16) +#define MIO_EMM_SWITCH_CLK_LO GENMASK_ULL(15, 0) + +/* Prototypes */ +irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id); +int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host); +int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot); +extern const char *cvm_mmc_irq_names[]; + +#endif diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c new file mode 100644 index 
000000000..e84ed84ea --- /dev/null +++ b/drivers/mmc/host/cb710-mmc.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cb710/mmc.c + * + * Copyright by Michał Mirosław, 2008-2009 + */ +#include +#include +#include +#include +#include "cb710-mmc.h" + +#define CB710_MMC_REQ_TIMEOUT_MS 2000 + +static const u8 cb710_clock_divider_log2[8] = { +/* 1, 2, 4, 8, 16, 32, 128, 512 */ + 0, 1, 2, 3, 4, 5, 7, 9 +}; +#define CB710_MAX_DIVIDER_IDX \ + (ARRAY_SIZE(cb710_clock_divider_log2) - 1) + +static const u8 cb710_src_freq_mhz[16] = { + 33, 10, 20, 25, 30, 35, 40, 45, + 50, 55, 60, 65, 70, 75, 80, 85 +}; + +static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz) +{ + struct cb710_slot *slot = cb710_mmc_to_slot(mmc); + struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev; + u32 src_freq_idx; + u32 divider_idx; + int src_hz; + + /* on CB710 in HP nx9500: + * src_freq_idx == 0 + * indexes 1-7 work as written in the table + * indexes 0,8-15 give no clock output + */ + pci_read_config_dword(pdev, 0x48, &src_freq_idx); + src_freq_idx = (src_freq_idx >> 16) & 0xF; + src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000; + + for (divider_idx = 0; divider_idx < CB710_MAX_DIVIDER_IDX; ++divider_idx) { + if (hz >= src_hz >> cb710_clock_divider_log2[divider_idx]) + break; + } + + if (src_freq_idx) + divider_idx |= 0x8; + else if (divider_idx == 0) + divider_idx = 1; + + cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28); + + dev_dbg(cb710_slot_dev(slot), + "clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n", + src_hz >> cb710_clock_divider_log2[divider_idx & 7], + hz, src_freq_idx, divider_idx & 7, divider_idx & 8); +} + +static void __cb710_mmc_enable_irq(struct cb710_slot *slot, + unsigned short enable, unsigned short mask) +{ + /* clear global IE + * - it gets set later if any interrupt sources are enabled */ + mask |= CB710_MMC_IE_IRQ_ENABLE; + + /* look like interrupt is fired whenever + * WORD[0x0C] & WORD[0x10] != 0; + * -> bit 15 port 0x0C seems to be global interrupt enable + */ + + enable = (cb710_read_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT) + & ~mask) | enable; + + if (enable) + enable |= CB710_MMC_IE_IRQ_ENABLE; + + cb710_write_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT, enable); +} + +static void cb710_mmc_enable_irq(struct cb710_slot *slot, + unsigned short enable, unsigned short mask) +{ + struct cb710_mmc_reader *reader = mmc_priv(cb710_slot_to_mmc(slot)); + unsigned long flags; + + spin_lock_irqsave(&reader->irq_lock, flags); + /* this is the only thing irq_lock protects */ + __cb710_mmc_enable_irq(slot, enable, mask); + spin_unlock_irqrestore(&reader->irq_lock, flags); +} + +static void cb710_mmc_reset_events(struct cb710_slot *slot) +{ + cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, 0xFF); + cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, 0xFF); + cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF); +} + +static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable) +{ + if (enable) + cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, + CB710_MMC_C1_4BIT_DATA_BUS, 0); + else + cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, + 0, CB710_MMC_C1_4BIT_DATA_BUS); +} + +static int cb710_check_event(struct cb710_slot *slot, u8 what) +{ + u16 status; + + status = cb710_read_port_16(slot, CB710_MMC_STATUS_PORT); + + if (status & CB710_MMC_S0_FIFO_UNDERFLOW) { + /* it is just a guess, so log it */ + dev_dbg(cb710_slot_dev(slot), + "CHECK : ignoring bit 6 in status %04X\n", status); + 
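To make cb710_mmc_select_clock_divider() above concrete, a standalone walk-through, assuming source-frequency index 3 (25 MHz per the table) and a requested 2 MHz clock:

	#include <assert.h>
	#include <stdint.h>

	static const uint8_t div_log2[8] = { 0, 1, 2, 3, 4, 5, 7, 9 };

	int main(void)
	{
		int src_hz = 25000000, hz = 2000000, src_freq_idx = 3;
		uint32_t idx;

		/* first divider that brings the output down to at most hz */
		for (idx = 0; idx < 7; ++idx)
			if (hz >= src_hz >> div_log2[idx])
				break;

		assert(idx == 4);	/* divide by 16 -> 1.5625 MHz */
		assert((src_hz >> div_log2[idx]) == 1562500);

		if (src_freq_idx)	/* non-zero source index sets bit 3 */
			idx |= 0x8;
		assert(idx == 0xC);	/* programmed into config bits 31:28 */
		return 0;
	}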
cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, + CB710_MMC_S0_FIFO_UNDERFLOW); + status &= ~CB710_MMC_S0_FIFO_UNDERFLOW; + } + + if (status & CB710_MMC_STATUS_ERROR_EVENTS) { + dev_dbg(cb710_slot_dev(slot), + "CHECK : returning EIO on status %04X\n", status); + cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, status & 0xFF); + cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, + CB710_MMC_S1_RESET); + return -EIO; + } + + /* 'what' is a bit in MMC_STATUS1 */ + if ((status >> 8) & what) { + cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, what); + return 1; + } + + return 0; +} + +static int cb710_wait_for_event(struct cb710_slot *slot, u8 what) +{ + int err = 0; + unsigned limit = 2000000; /* FIXME: real timeout */ + +#ifdef CONFIG_CB710_DEBUG + u32 e, x; + e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); +#endif + + while (!(err = cb710_check_event(slot, what))) { + if (!--limit) { + cb710_dump_regs(cb710_slot_to_chip(slot), + CB710_DUMP_REGS_MMC); + err = -ETIMEDOUT; + break; + } + udelay(1); + } + +#ifdef CONFIG_CB710_DEBUG + x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); + + limit = 2000000 - limit; + if (limit > 100) + dev_dbg(cb710_slot_dev(slot), + "WAIT10: waited %d loops, what %d, entry val %08X, exit val %08X\n", + limit, what, e, x); +#endif + return err < 0 ? err : 0; +} + + +static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask) +{ + unsigned limit = 500000; /* FIXME: real timeout */ + int err = 0; + +#ifdef CONFIG_CB710_DEBUG + u32 e, x; + e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); +#endif + + while (cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & mask) { + if (!--limit) { + cb710_dump_regs(cb710_slot_to_chip(slot), + CB710_DUMP_REGS_MMC); + err = -ETIMEDOUT; + break; + } + udelay(1); + } + +#ifdef CONFIG_CB710_DEBUG + x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); + + limit = 500000 - limit; + if (limit > 100) + dev_dbg(cb710_slot_dev(slot), + "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n", + limit, mask, e, x); +#endif + return err; +} + +static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, + size_t count, size_t blocksize) +{ + cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + cb710_write_port_32(slot, CB710_MMC_TRANSFER_SIZE_PORT, + ((count - 1) << 16)|(blocksize - 1)); + + dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n", + count, count == 1 ? "" : "s", blocksize); +} + +static void cb710_mmc_fifo_hack(struct cb710_slot *slot) +{ + /* without this, received data is prepended with 8-bytes of zeroes */ + u32 r1, r2; + int ok = 0; + + r1 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT); + r2 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT); + if (cb710_read_port_8(slot, CB710_MMC_STATUS0_PORT) + & CB710_MMC_S0_FIFO_UNDERFLOW) { + cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, + CB710_MMC_S0_FIFO_UNDERFLOW); + ok = 1; + } + + dev_dbg(cb710_slot_dev(slot), + "FIFO-read-hack: expected STATUS0 bit was %s\n", + ok ? "set." : "NOT SET!"); + dev_dbg(cb710_slot_dev(slot), + "FIFO-read-hack: dwords ignored: %08X %08X - %s\n", + r1, r2, (r1|r2) ? "BAD (NOT ZERO)!" 
: "ok"); +} + +static int cb710_mmc_receive_pio(struct cb710_slot *slot, + struct sg_mapping_iter *miter, size_t dw_count) +{ + if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_READY)) { + int err = cb710_wait_for_event(slot, + CB710_MMC_S1_PIO_TRANSFER_DONE); + if (err) + return err; + } + + cb710_sg_dwiter_write_from_io(miter, + slot->iobase + CB710_MMC_DATA_PORT, dw_count); + + return 0; +} + +static bool cb710_is_transfer_size_supported(struct mmc_data *data) +{ + return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)); +} + +static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data) +{ + struct sg_mapping_iter miter; + size_t len, blocks = data->blocks; + int err = 0; + + /* TODO: I don't know how/if the hardware handles non-16B-boundary blocks + * except single 8B block */ + if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8))) + return -EINVAL; + + sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG); + + cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, + 15, CB710_MMC_C2_READ_PIO_SIZE_MASK); + + cb710_mmc_fifo_hack(slot); + + while (blocks-- > 0) { + len = data->blksz; + + while (len >= 16) { + err = cb710_mmc_receive_pio(slot, &miter, 4); + if (err) + goto out; + len -= 16; + } + + if (!len) + continue; + + cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, + len - 1, CB710_MMC_C2_READ_PIO_SIZE_MASK); + + len = (len >= 8) ? 4 : 2; + err = cb710_mmc_receive_pio(slot, &miter, len); + if (err) + goto out; + } +out: + sg_miter_stop(&miter); + return err; +} + +static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data) +{ + struct sg_mapping_iter miter; + size_t len, blocks = data->blocks; + int err = 0; + + /* TODO: I don't know how/if the hardware handles multiple + * non-16B-boundary blocks */ + if (unlikely(data->blocks > 1 && data->blksz & 15)) + return -EINVAL; + + sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG); + + cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, + 0, CB710_MMC_C2_READ_PIO_SIZE_MASK); + + while (blocks-- > 0) { + len = (data->blksz + 15) >> 4; + do { + if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) + & CB710_MMC_S2_FIFO_EMPTY)) { + err = cb710_wait_for_event(slot, + CB710_MMC_S1_PIO_TRANSFER_DONE); + if (err) + goto out; + } + cb710_sg_dwiter_read_to_io(&miter, + slot->iobase + CB710_MMC_DATA_PORT, 4); + } while (--len); + } +out: + sg_miter_stop(&miter); + return err; +} + +static u16 cb710_encode_cmd_flags(struct cb710_mmc_reader *reader, + struct mmc_command *cmd) +{ + unsigned int flags = cmd->flags; + u16 cb_flags = 0; + + /* Windows driver returned 0 for commands for which no response + * is expected. It happened that there were only two such commands + * used: MMC_GO_IDLE_STATE and MMC_GO_INACTIVE_STATE so it might + * as well be a bug in that driver. + * + * Original driver set bit 14 for MMC/SD application + * commands. There's no difference 'on the wire' and + * it apparently works without it anyway. 
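	 *
	 * Worked example of the encoding done below: CMD17
	 * (READ_SINGLE_BLOCK: ADTC, R1 response, data read) becomes
	 * CB710_MMC_CMD_ADTC | (17 << CB710_MMC_CMD_CODE_SHIFT) |
	 * CB710_MMC_RSP_PRESENT | CB710_MMC_DATA_READ == 0x11E8.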
+ */ + + switch (flags & MMC_CMD_MASK) { + case MMC_CMD_AC: cb_flags = CB710_MMC_CMD_AC; break; + case MMC_CMD_ADTC: cb_flags = CB710_MMC_CMD_ADTC; break; + case MMC_CMD_BC: cb_flags = CB710_MMC_CMD_BC; break; + case MMC_CMD_BCR: cb_flags = CB710_MMC_CMD_BCR; break; + } + + if (flags & MMC_RSP_BUSY) + cb_flags |= CB710_MMC_RSP_BUSY; + + cb_flags |= cmd->opcode << CB710_MMC_CMD_CODE_SHIFT; + + if (cmd->data && (cmd->data->flags & MMC_DATA_READ)) + cb_flags |= CB710_MMC_DATA_READ; + + if (flags & MMC_RSP_PRESENT) { + /* Windows driver set 01 at bits 4,3 except for + * MMC_SET_BLOCKLEN where it set 10. Maybe the + * hardware can do something special about this + * command? The original driver looks buggy/incomplete + * anyway so we ignore this for now. + * + * I assume that 00 here means no response is expected. + */ + cb_flags |= CB710_MMC_RSP_PRESENT; + + if (flags & MMC_RSP_136) + cb_flags |= CB710_MMC_RSP_136; + if (!(flags & MMC_RSP_CRC)) + cb_flags |= CB710_MMC_RSP_NO_CRC; + } + + return cb_flags; +} + +static void cb710_receive_response(struct cb710_slot *slot, + struct mmc_command *cmd) +{ + unsigned rsp_opcode, wanted_opcode; + + /* Looks like final byte with CRC is always stripped (same as SDHCI) */ + if (cmd->flags & MMC_RSP_136) { + u32 resp[4]; + + resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE3_PORT); + resp[1] = cb710_read_port_32(slot, CB710_MMC_RESPONSE2_PORT); + resp[2] = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT); + resp[3] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT); + rsp_opcode = resp[0] >> 24; + + cmd->resp[0] = (resp[0] << 8)|(resp[1] >> 24); + cmd->resp[1] = (resp[1] << 8)|(resp[2] >> 24); + cmd->resp[2] = (resp[2] << 8)|(resp[3] >> 24); + cmd->resp[3] = (resp[3] << 8); + } else { + rsp_opcode = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT) & 0x3F; + cmd->resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT); + } + + wanted_opcode = (cmd->flags & MMC_RSP_OPCODE) ? 
cmd->opcode : 0x3F; + if (rsp_opcode != wanted_opcode) + cmd->error = -EILSEQ; +} + +static int cb710_mmc_transfer_data(struct cb710_slot *slot, + struct mmc_data *data) +{ + int error, to; + + if (data->flags & MMC_DATA_READ) + error = cb710_mmc_receive(slot, data); + else + error = cb710_mmc_send(slot, data); + + to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE); + if (!error) + error = to; + + if (!error) + data->bytes_xfered = data->blksz * data->blocks; + return error; +} + +static int cb710_mmc_command(struct mmc_host *mmc, struct mmc_command *cmd) +{ + struct cb710_slot *slot = cb710_mmc_to_slot(mmc); + struct cb710_mmc_reader *reader = mmc_priv(mmc); + struct mmc_data *data = cmd->data; + + u16 cb_cmd = cb710_encode_cmd_flags(reader, cmd); + dev_dbg(cb710_slot_dev(slot), "cmd request: 0x%04X\n", cb_cmd); + + if (data) { + if (!cb710_is_transfer_size_supported(data)) { + data->error = -EINVAL; + return -1; + } + cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz); + } + + cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20|CB710_MMC_S2_BUSY_10); + cb710_write_port_16(slot, CB710_MMC_CMD_TYPE_PORT, cb_cmd); + cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + cb710_write_port_32(slot, CB710_MMC_CMD_PARAM_PORT, cmd->arg); + cb710_mmc_reset_events(slot); + cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x01, 0); + + cmd->error = cb710_wait_for_event(slot, CB710_MMC_S1_COMMAND_SENT); + if (cmd->error) + return -1; + + if (cmd->flags & MMC_RSP_PRESENT) { + cb710_receive_response(slot, cmd); + if (cmd->error) + return -1; + } + + if (data) + data->error = cb710_mmc_transfer_data(slot, data); + return 0; +} + +static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct cb710_slot *slot = cb710_mmc_to_slot(mmc); + struct cb710_mmc_reader *reader = mmc_priv(mmc); + + WARN_ON(reader->mrq != NULL); + + reader->mrq = mrq; + cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0); + + if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop) + cb710_mmc_command(mmc, mrq->stop); + + tasklet_schedule(&reader->finish_req_tasklet); +} + +static int cb710_mmc_powerup(struct cb710_slot *slot) +{ +#ifdef CONFIG_CB710_DEBUG + struct cb710_chip *chip = cb710_slot_to_chip(slot); +#endif + int err; + + /* a lot of magic for now */ + dev_dbg(cb710_slot_dev(slot), "bus powerup\n"); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + if (unlikely(err)) + return err; + cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x80, 0); + cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x80, 0); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + mdelay(1); + dev_dbg(cb710_slot_dev(slot), "after delay 1\n"); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + if (unlikely(err)) + return err; + cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x09, 0); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + mdelay(1); + dev_dbg(cb710_slot_dev(slot), "after delay 2\n"); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + if (unlikely(err)) + return err; + cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x08); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + mdelay(2); + dev_dbg(cb710_slot_dev(slot), "after delay 3\n"); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0); + cb710_modify_port_8(slot, 
CB710_MMC_CONFIG1_PORT, 0x70, 0); + cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0x80, 0); + cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x03, 0); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); + if (unlikely(err)) + return err; + /* This port behaves weird: quick byte reads of 0x08,0x09 return + * 0xFF,0x00 after writing 0xFFFF to 0x08; it works correctly when + * read/written from userspace... What am I missing here? + * (it doesn't depend on write-to-read delay) */ + cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0xFFFF); + cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + dev_dbg(cb710_slot_dev(slot), "bus powerup finished\n"); + + return cb710_check_event(slot, 0); +} + +static void cb710_mmc_powerdown(struct cb710_slot *slot) +{ + cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x81); + cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0, 0x80); +} + +static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct cb710_slot *slot = cb710_mmc_to_slot(mmc); + struct cb710_mmc_reader *reader = mmc_priv(mmc); + int err; + + cb710_mmc_select_clock_divider(mmc, ios->clock); + + if (ios->power_mode != reader->last_power_mode) { + switch (ios->power_mode) { + case MMC_POWER_ON: + err = cb710_mmc_powerup(slot); + if (err) { + dev_warn(cb710_slot_dev(slot), + "powerup failed (%d)- retrying\n", err); + cb710_mmc_powerdown(slot); + udelay(1); + err = cb710_mmc_powerup(slot); + if (err) + dev_warn(cb710_slot_dev(slot), + "powerup retry failed (%d) - expect errors\n", + err); + } + reader->last_power_mode = MMC_POWER_ON; + break; + case MMC_POWER_OFF: + cb710_mmc_powerdown(slot); + reader->last_power_mode = MMC_POWER_OFF; + break; + case MMC_POWER_UP: + default: + /* ignore */ + break; + } + } + + cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1); + + cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0); +} + +static int cb710_mmc_get_ro(struct mmc_host *mmc) +{ + struct cb710_slot *slot = cb710_mmc_to_slot(mmc); + + return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT) + & CB710_MMC_S3_WRITE_PROTECTED; +} + +static int cb710_mmc_get_cd(struct mmc_host *mmc) +{ + struct cb710_slot *slot = cb710_mmc_to_slot(mmc); + + return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT) + & CB710_MMC_S3_CARD_DETECTED; +} + +static int cb710_mmc_irq_handler(struct cb710_slot *slot) +{ + struct mmc_host *mmc = cb710_slot_to_mmc(slot); + struct cb710_mmc_reader *reader = mmc_priv(mmc); + u32 status, config1, config2, irqen; + + status = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); + irqen = cb710_read_port_32(slot, CB710_MMC_IRQ_ENABLE_PORT); + config2 = cb710_read_port_32(slot, CB710_MMC_CONFIGB_PORT); + config1 = cb710_read_port_32(slot, CB710_MMC_CONFIG_PORT); + + dev_dbg(cb710_slot_dev(slot), "interrupt; status: %08X, " + "ie: %08X, c2: %08X, c1: %08X\n", + status, irqen, config2, config1); + + if (status & (CB710_MMC_S1_CARD_CHANGED << 8)) { + /* ack the event */ + cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, + CB710_MMC_S1_CARD_CHANGED); + if ((irqen & CB710_MMC_IE_CISTATUS_MASK) + == CB710_MMC_IE_CISTATUS_MASK) + mmc_detect_change(mmc, HZ/5); + } else { + dev_dbg(cb710_slot_dev(slot), "unknown interrupt (test)\n"); + spin_lock(&reader->irq_lock); + __cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_TEST_MASK); + spin_unlock(&reader->irq_lock); + } + + return 1; +} + +static void cb710_mmc_finish_request_tasklet(unsigned long data) +{ 
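+	/* Runs in softirq context: cb710_mmc_request() schedules this
+	 * tasklet once the command (and any stop command) has finished,
+	 * so mmc_request_done() is not called from the request path
+	 * itself. */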
+ struct mmc_host *mmc = (void *)data; + struct cb710_mmc_reader *reader = mmc_priv(mmc); + struct mmc_request *mrq = reader->mrq; + + reader->mrq = NULL; + mmc_request_done(mmc, mrq); +} + +static const struct mmc_host_ops cb710_mmc_host = { + .request = cb710_mmc_request, + .set_ios = cb710_mmc_set_ios, + .get_ro = cb710_mmc_get_ro, + .get_cd = cb710_mmc_get_cd, +}; + +#ifdef CONFIG_PM + +static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct cb710_slot *slot = cb710_pdev_to_slot(pdev); + + cb710_mmc_enable_irq(slot, 0, ~0); + return 0; +} + +static int cb710_mmc_resume(struct platform_device *pdev) +{ + struct cb710_slot *slot = cb710_pdev_to_slot(pdev); + + cb710_mmc_enable_irq(slot, 0, ~0); + return 0; +} + +#endif /* CONFIG_PM */ + +static int cb710_mmc_init(struct platform_device *pdev) +{ + struct cb710_slot *slot = cb710_pdev_to_slot(pdev); + struct cb710_chip *chip = cb710_slot_to_chip(slot); + struct mmc_host *mmc; + struct cb710_mmc_reader *reader; + int err; + u32 val; + + mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot)); + if (!mmc) + return -ENOMEM; + + platform_set_drvdata(pdev, mmc); + + /* harmless (maybe) magic */ + pci_read_config_dword(chip->pdev, 0x48, &val); + val = cb710_src_freq_mhz[(val >> 16) & 0xF]; + dev_dbg(cb710_slot_dev(slot), "source frequency: %dMHz\n", val); + val *= 1000000; + + mmc->ops = &cb710_mmc_host; + mmc->f_max = val; + mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX]; + mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + /* + * In cb710_wait_for_event() we use a fixed timeout of ~2s, hence let's + * inform the core about it. A future improvement should instead make + * use of the cmd->busy_timeout. + */ + mmc->max_busy_timeout = CB710_MMC_REQ_TIMEOUT_MS; + + reader = mmc_priv(mmc); + + tasklet_init(&reader->finish_req_tasklet, + cb710_mmc_finish_request_tasklet, (unsigned long)mmc); + spin_lock_init(&reader->irq_lock); + cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); + + cb710_mmc_enable_irq(slot, 0, ~0); + cb710_set_irq_handler(slot, cb710_mmc_irq_handler); + + err = mmc_add_host(mmc); + if (unlikely(err)) + goto err_free_mmc; + + dev_dbg(cb710_slot_dev(slot), "mmc_hostname is %s\n", + mmc_hostname(mmc)); + + cb710_mmc_enable_irq(slot, CB710_MMC_IE_CARD_INSERTION_STATUS, 0); + + return 0; + +err_free_mmc: + dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err); + + cb710_set_irq_handler(slot, NULL); + mmc_free_host(mmc); + return err; +} + +static int cb710_mmc_exit(struct platform_device *pdev) +{ + struct cb710_slot *slot = cb710_pdev_to_slot(pdev); + struct mmc_host *mmc = cb710_slot_to_mmc(slot); + struct cb710_mmc_reader *reader = mmc_priv(mmc); + + cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_CARD_INSERTION_STATUS); + + mmc_remove_host(mmc); + + /* IRQs should be disabled now, but let's stay on the safe side */ + cb710_mmc_enable_irq(slot, 0, ~0); + cb710_set_irq_handler(slot, NULL); + + /* clear config ports - just in case */ + cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0); + cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0); + + tasklet_kill(&reader->finish_req_tasklet); + + mmc_free_host(mmc); + return 0; +} + +static struct platform_driver cb710_mmc_driver = { + .driver.name = "cb710-mmc", + .probe = cb710_mmc_init, + .remove = cb710_mmc_exit, +#ifdef CONFIG_PM + .suspend = cb710_mmc_suspend, + .resume = cb710_mmc_resume, +#endif +}; + +module_platform_driver(cb710_mmc_driver); + +MODULE_AUTHOR("Michał Mirosław "); 
+MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cb710-mmc");
diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h
new file mode 100644
index 000000000..5e053077d
--- /dev/null
+++ b/drivers/mmc/host/cb710-mmc.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *	cb710/cb710-mmc.h
+ *
+ *	Copyright by Michał Mirosław, 2008-2009
+ */
+#ifndef LINUX_CB710_MMC_H
+#define LINUX_CB710_MMC_H
+
+#include <linux/cb710.h>
+
+/* per-MMC-reader structure */
+struct cb710_mmc_reader {
+	struct tasklet_struct finish_req_tasklet;
+	struct mmc_request *mrq;
+	spinlock_t irq_lock;
+	unsigned char last_power_mode;
+};
+
+/* some device struct walking */
+
+static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot)
+{
+	return platform_get_drvdata(&slot->pdev);
+}
+
+static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(mmc));
+	return cb710_pdev_to_slot(pdev);
+}
+
+/* registers (this might be all wrong ;) */
+
+#define CB710_MMC_DATA_PORT		0x00
+
+#define CB710_MMC_CONFIG_PORT		0x04
+#define CB710_MMC_CONFIG0_PORT		0x04
+#define CB710_MMC_CONFIG1_PORT		0x05
+#define   CB710_MMC_C1_4BIT_DATA_BUS		0x40
+#define CB710_MMC_CONFIG2_PORT		0x06
+#define   CB710_MMC_C2_READ_PIO_SIZE_MASK	0x0F	/* N-1 */
+#define CB710_MMC_CONFIG3_PORT		0x07
+
+#define CB710_MMC_CONFIGB_PORT		0x08
+
+#define CB710_MMC_IRQ_ENABLE_PORT	0x0C
+#define   CB710_MMC_IE_TEST_MASK		0x00BF
+#define   CB710_MMC_IE_CARD_INSERTION_STATUS	0x1000
+#define   CB710_MMC_IE_IRQ_ENABLE		0x8000
+#define   CB710_MMC_IE_CISTATUS_MASK		\
+		(CB710_MMC_IE_CARD_INSERTION_STATUS|CB710_MMC_IE_IRQ_ENABLE)
+
+#define CB710_MMC_STATUS_PORT		0x10
+#define   CB710_MMC_STATUS_ERROR_EVENTS		0x60FF
+#define CB710_MMC_STATUS0_PORT		0x10
+#define   CB710_MMC_S0_FIFO_UNDERFLOW		0x40
+#define CB710_MMC_STATUS1_PORT		0x11
+#define   CB710_MMC_S1_COMMAND_SENT		0x01
+#define   CB710_MMC_S1_DATA_TRANSFER_DONE	0x02
+#define   CB710_MMC_S1_PIO_TRANSFER_DONE	0x04
+#define   CB710_MMC_S1_CARD_CHANGED		0x10
+#define   CB710_MMC_S1_RESET			0x20
+#define CB710_MMC_STATUS2_PORT		0x12
+#define   CB710_MMC_S2_FIFO_READY		0x01
+#define   CB710_MMC_S2_FIFO_EMPTY		0x02
+#define   CB710_MMC_S2_BUSY_10			0x10
+#define   CB710_MMC_S2_BUSY_20			0x20
+#define CB710_MMC_STATUS3_PORT		0x13
+#define   CB710_MMC_S3_CARD_DETECTED		0x02
+#define   CB710_MMC_S3_WRITE_PROTECTED		0x04
+
+#define CB710_MMC_CMD_TYPE_PORT		0x14
+#define   CB710_MMC_RSP_TYPE_MASK		0x0007
+#define     CB710_MMC_RSP_R1			(0)
+#define     CB710_MMC_RSP_136			(5)
+#define     CB710_MMC_RSP_NO_CRC		(2)
+#define   CB710_MMC_RSP_PRESENT_MASK		0x0018
+#define     CB710_MMC_RSP_NONE			(0 << 3)
+#define     CB710_MMC_RSP_PRESENT		(1 << 3)
+#define     CB710_MMC_RSP_PRESENT_X		(2 << 3)
+#define   CB710_MMC_CMD_TYPE_MASK		0x0060
+#define     CB710_MMC_CMD_BC			(0 << 5)
+#define     CB710_MMC_CMD_BCR			(1 << 5)
+#define     CB710_MMC_CMD_AC			(2 << 5)
+#define     CB710_MMC_CMD_ADTC			(3 << 5)
+#define   CB710_MMC_DATA_READ			0x0080
+#define   CB710_MMC_CMD_CODE_MASK		0x3F00
+#define   CB710_MMC_CMD_CODE_SHIFT		8
+#define   CB710_MMC_IS_APP_CMD			0x4000
+#define   CB710_MMC_RSP_BUSY			0x8000
+
+#define CB710_MMC_CMD_PARAM_PORT	0x18
+#define CB710_MMC_TRANSFER_SIZE_PORT	0x1C
+#define CB710_MMC_RESPONSE0_PORT	0x20
+#define CB710_MMC_RESPONSE1_PORT	0x24
+#define CB710_MMC_RESPONSE2_PORT	0x28
+#define CB710_MMC_RESPONSE3_PORT	0x2C
+
+#endif /* LINUX_CB710_MMC_H */
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
new file mode 100644
index 000000000..23cf7912c
--- /dev/null
+++
b/drivers/mmc/host/cqhci.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include "cqhci.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+struct cqhci_slot {
+	struct mmc_request *mrq;
+	unsigned int flags;
+#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
+#define CQHCI_COMPLETED		BIT(1)
+#define CQHCI_HOST_CRC		BIT(2)
+#define CQHCI_HOST_TIMEOUT	BIT(3)
+#define CQHCI_HOST_OTHER	BIT(4)
+};
+
+static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
+{
+	return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
+{
+	u8 *desc = get_desc(cq_host, tag);
+
+	return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
+{
+	return cq_host->trans_desc_dma_base +
+		(cq_host->mmc->max_segs * tag *
+		 cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+	return cq_host->trans_desc_base +
+		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+	u8 *link_temp;
+	dma_addr_t trans_temp;
+
+	link_temp = get_link_desc(cq_host, tag);
+	trans_temp = get_trans_desc_dma(cq_host, tag);
+
+	memset(link_temp, 0, cq_host->link_desc_len);
+	if (cq_host->link_desc_len > 8)
+		*(link_temp + 8) = 0;
+
+	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
+		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
+		return;
+	}
+
+	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
+
+	if (cq_host->dma64) {
+		__le64 *data_addr = (__le64 __force *)(link_temp + 4);
+
+		data_addr[0] = cpu_to_le64(trans_temp);
+	} else {
+		__le32 *data_addr = (__le32 __force *)(link_temp + 4);
+
+		data_addr[0] = cpu_to_le32(trans_temp);
+	}
+}
+
+static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
+{
+	cqhci_writel(cq_host, set, CQHCI_ISTE);
+	cqhci_writel(cq_host, set, CQHCI_ISGE);
+}
+
+#define DRV_NAME "cqhci"
+
+#define CQHCI_DUMP(f, x...)	\
+	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
+
+static void cqhci_dumpregs(struct cqhci_host *cq_host)
+{
+	struct mmc_host *mmc = cq_host->mmc;
+
+	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
+
+	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_CAP),
+		   cqhci_readl(cq_host, CQHCI_VER));
+	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_CFG),
+		   cqhci_readl(cq_host, CQHCI_CTL));
+	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_IS),
+		   cqhci_readl(cq_host, CQHCI_ISTE));
+	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_ISGE),
+		   cqhci_readl(cq_host, CQHCI_IC));
+	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_TDLBA),
+		   cqhci_readl(cq_host, CQHCI_TDLBAU));
+	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_TDBR),
+		   cqhci_readl(cq_host, CQHCI_TCN));
+	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_DQS),
+		   cqhci_readl(cq_host, CQHCI_DPT));
+	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_TCLR),
+		   cqhci_readl(cq_host, CQHCI_SSC1));
+	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_SSC2),
+		   cqhci_readl(cq_host, CQHCI_CRDCT));
+	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_RMEM),
+		   cqhci_readl(cq_host, CQHCI_TERRI));
+	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
+		   cqhci_readl(cq_host, CQHCI_CRI),
+		   cqhci_readl(cq_host, CQHCI_CRA));
+
+	if (cq_host->ops->dumpregs)
+		cq_host->ops->dumpregs(mmc);
+	else
+		CQHCI_DUMP(": ===========================================\n");
+}
+
+/*
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc |  |->|----------|
+ * |----------|  |  |trans desc|
+ * |link desc-|->|  |----------|
+ * |----------|  .
+ *      .        .
+ *  no. of slots      max-segs
+ *      .           |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark & point the
+ * link desc to the transfer desc table on a per slot basis.
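+ *
+ * Worked example, assuming 64-bit DMA, 128-bit task descriptors and a
+ * host reporting max_segs == 128: slot_sz = 16 + 16 = 32 bytes,
+ * desc_size = 32 slots * 32 = 1024 bytes, and the transfer descriptor
+ * area is 16 bytes * 128 segments * 32 queue slots = 64 KiB
+ * (cqe_qdepth == 32 when no DCMD slot is reserved).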
+ */ +static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) +{ + int i = 0; + + /* task descriptor can be 64/128 bit irrespective of arch */ + if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) { + cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) | + CQHCI_TASK_DESC_SZ, CQHCI_CFG); + cq_host->task_desc_len = 16; + } else { + cq_host->task_desc_len = 8; + } + + /* + * 96 bits length of transfer desc instead of 128 bits which means + * ADMA would expect next valid descriptor at the 96th bit + * or 128th bit + */ + if (cq_host->dma64) { + if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ) + cq_host->trans_desc_len = 12; + else + cq_host->trans_desc_len = 16; + cq_host->link_desc_len = 16; + } else { + cq_host->trans_desc_len = 8; + cq_host->link_desc_len = 8; + } + + /* total size of a slot: 1 task & 1 transfer (link) */ + cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len; + + cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; + + cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * + cq_host->mmc->cqe_qdepth; + + pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", + mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, + cq_host->slot_sz); + + /* + * allocate a dma-mapped chunk of memory for the descriptors + * allocate a dma-mapped chunk of memory for link descriptors + * setup each link-desc memory offset per slot-number to + * the descriptor table. + */ + cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), + cq_host->desc_size, + &cq_host->desc_dma_base, + GFP_KERNEL); + if (!cq_host->desc_base) + return -ENOMEM; + + cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), + cq_host->data_size, + &cq_host->trans_desc_dma_base, + GFP_KERNEL); + if (!cq_host->trans_desc_base) { + dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + cq_host->desc_base = NULL; + cq_host->desc_dma_base = 0; + return -ENOMEM; + } + + pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", + mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, + (unsigned long long)cq_host->desc_dma_base, + (unsigned long long)cq_host->trans_desc_dma_base); + + for (; i < (cq_host->num_slots); i++) + setup_trans_desc(cq_host, i); + + return 0; +} + +static void __cqhci_enable(struct cqhci_host *cq_host) +{ + struct mmc_host *mmc = cq_host->mmc; + u32 cqcfg; + + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + + /* Configuration must not be changed while enabled */ + if (cqcfg & CQHCI_ENABLE) { + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + } + + cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ); + + if (mmc->caps2 & MMC_CAP2_CQE_DCMD) + cqcfg |= CQHCI_DCMD; + + if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) + cqcfg |= CQHCI_TASK_DESC_SZ; + + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), + CQHCI_TDLBA); + cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), + CQHCI_TDLBAU); + + cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2); + + cqhci_set_irqs(cq_host, 0); + + cqcfg |= CQHCI_ENABLE; + + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) + cqhci_writel(cq_host, 0, CQHCI_CTL); + + mmc->cqe_on = true; + + if (cq_host->ops->enable) + cq_host->ops->enable(mmc); + + /* Ensure all writes are done before interrupts are enabled */ + wmb(); + + cqhci_set_irqs(cq_host, CQHCI_IS_MASK); + + 
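+	/* 'activated' tracks the controller state (descriptor list
+	 * programmed, interrupts unmasked) and is what cqhci_deactivate()
+	 * drops on suspend; 'enabled' (set in cqhci_enable()) tracks the
+	 * descriptor allocation, so the first request after resume only
+	 * re-runs __cqhci_enable(). */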
cq_host->activated = true; +} + +static void __cqhci_disable(struct cqhci_host *cq_host) +{ + u32 cqcfg; + + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cq_host->mmc->cqe_on = false; + + cq_host->activated = false; +} + +int cqhci_deactivate(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + if (cq_host->enabled && cq_host->activated) + __cqhci_disable(cq_host); + + return 0; +} +EXPORT_SYMBOL(cqhci_deactivate); + +int cqhci_resume(struct mmc_host *mmc) +{ + /* Re-enable is done upon first request */ + return 0; +} +EXPORT_SYMBOL(cqhci_resume); + +static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + int err; + + if (!card->ext_csd.cmdq_en) + return -EINVAL; + + if (cq_host->enabled) + return 0; + + cq_host->rca = card->rca; + + err = cqhci_host_alloc_tdl(cq_host); + if (err) { + pr_err("%s: Failed to enable CQE, error %d\n", + mmc_hostname(mmc), err); + return err; + } + + __cqhci_enable(cq_host); + + cq_host->enabled = true; + +#ifdef DEBUG + cqhci_dumpregs(cq_host); +#endif + return 0; +} + +/* CQHCI is idle and should halt immediately, so set a small timeout */ +#define CQHCI_OFF_TIMEOUT 100 + +static u32 cqhci_read_ctl(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL); +} + +static void cqhci_off(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 reg; + int err; + + if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) + return; + + if (cq_host->ops->disable) + cq_host->ops->disable(mmc, false); + + cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); + + err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg, + reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT); + if (err < 0) + pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); + else + pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); + + if (cq_host->ops->post_disable) + cq_host->ops->post_disable(mmc); + + mmc->cqe_on = false; +} + +static void cqhci_disable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + if (!cq_host->enabled) + return; + + cqhci_off(mmc); + + __cqhci_disable(cq_host); + + dmam_free_coherent(mmc_dev(mmc), cq_host->data_size, + cq_host->trans_desc_base, + cq_host->trans_desc_dma_base); + + dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + + cq_host->trans_desc_base = NULL; + cq_host->desc_base = NULL; + + cq_host->enabled = false; +} + +static void cqhci_prep_task_desc(struct mmc_request *mrq, + u64 *data, bool intr) +{ + u32 req_flags = mrq->data->flags; + + *data = CQHCI_VALID(1) | + CQHCI_END(1) | + CQHCI_INT(intr) | + CQHCI_ACT(0x5) | + CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) | + CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) | + CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) | + CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) | + CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) | + CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) | + CQHCI_BLK_COUNT(mrq->data->blocks) | + CQHCI_BLK_ADDR((u64)mrq->data->blk_addr); + + pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n", + mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data); +} + +static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq) +{ + int sg_count; + struct mmc_data *data = mrq->data; + + if (!data) + return -EINVAL; + + sg_count = dma_map_sg(mmc_dev(host), data->sg, + data->sg_len, + (data->flags & MMC_DATA_WRITE) ? 
+ DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!sg_count) { + pr_err("%s: sg-len: %d\n", __func__, data->sg_len); + return -ENOMEM; + } + + return sg_count; +} + +static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end, + bool dma64) +{ + __le32 *attr = (__le32 __force *)desc; + + *attr = (CQHCI_VALID(1) | + CQHCI_END(end ? 1 : 0) | + CQHCI_INT(0) | + CQHCI_ACT(0x4) | + CQHCI_DAT_LENGTH(len)); + + if (dma64) { + __le64 *dataddr = (__le64 __force *)(desc + 4); + + dataddr[0] = cpu_to_le64(addr); + } else { + __le32 *dataddr = (__le32 __force *)(desc + 4); + + dataddr[0] = cpu_to_le32(addr); + } +} + +static int cqhci_prep_tran_desc(struct mmc_request *mrq, + struct cqhci_host *cq_host, int tag) +{ + struct mmc_data *data = mrq->data; + int i, sg_count, len; + bool end = false; + bool dma64 = cq_host->dma64; + dma_addr_t addr; + u8 *desc; + struct scatterlist *sg; + + sg_count = cqhci_dma_map(mrq->host, mrq); + if (sg_count < 0) { + pr_err("%s: %s: unable to map sg lists, %d\n", + mmc_hostname(mrq->host), __func__, sg_count); + return sg_count; + } + + desc = get_trans_desc(cq_host, tag); + + for_each_sg(data->sg, sg, sg_count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + + if ((i+1) == sg_count) + end = true; + cqhci_set_tran_desc(desc, addr, len, end, dma64); + desc += cq_host->trans_desc_len; + } + + return 0; +} + +static void cqhci_prep_dcmd_desc(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + u64 *task_desc = NULL; + u64 data = 0; + u8 resp_type; + u8 *desc; + __le64 *dataddr; + struct cqhci_host *cq_host = mmc->cqe_private; + u8 timing; + + if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) { + resp_type = 0x0; + timing = 0x1; + } else { + if (mrq->cmd->flags & MMC_RSP_R1B) { + resp_type = 0x3; + timing = 0x0; + } else { + resp_type = 0x2; + timing = 0x1; + } + } + + task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot); + memset(task_desc, 0, cq_host->task_desc_len); + data |= (CQHCI_VALID(1) | + CQHCI_END(1) | + CQHCI_INT(1) | + CQHCI_QBAR(1) | + CQHCI_ACT(0x5) | + CQHCI_CMD_INDEX(mrq->cmd->opcode) | + CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type)); + if (cq_host->ops->update_dcmd_desc) + cq_host->ops->update_dcmd_desc(mmc, mrq, &data); + *task_desc |= data; + desc = (u8 *)task_desc; + pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n", + mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type); + dataddr = (__le64 __force *)(desc + 4); + dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg); + +} + +static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data) { + dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len, + (data->flags & MMC_DATA_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE); + } +} + +static inline int cqhci_tag(struct mmc_request *mrq) +{ + return mrq->cmd ? 
DCMD_SLOT : mrq->tag; +} + +static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + int err = 0; + u64 data = 0; + u64 *task_desc = NULL; + int tag = cqhci_tag(mrq); + struct cqhci_host *cq_host = mmc->cqe_private; + unsigned long flags; + + if (!cq_host->enabled) { + pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc)); + return -EINVAL; + } + + /* First request after resume has to re-enable */ + if (!cq_host->activated) + __cqhci_enable(cq_host); + + if (!mmc->cqe_on) { + if (cq_host->ops->pre_enable) + cq_host->ops->pre_enable(mmc); + + cqhci_writel(cq_host, 0, CQHCI_CTL); + mmc->cqe_on = true; + pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc)); + if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) { + pr_err("%s: cqhci: CQE failed to exit halt state\n", + mmc_hostname(mmc)); + } + if (cq_host->ops->enable) + cq_host->ops->enable(mmc); + } + + if (mrq->data) { + task_desc = (__le64 __force *)get_desc(cq_host, tag); + cqhci_prep_task_desc(mrq, &data, 1); + *task_desc = cpu_to_le64(data); + err = cqhci_prep_tran_desc(mrq, cq_host, tag); + if (err) { + pr_err("%s: cqhci: failed to setup tx desc: %d\n", + mmc_hostname(mmc), err); + return err; + } + } else { + cqhci_prep_dcmd_desc(mmc, mrq); + } + + spin_lock_irqsave(&cq_host->lock, flags); + + if (cq_host->recovery_halt) { + err = -EBUSY; + goto out_unlock; + } + + cq_host->slot[tag].mrq = mrq; + cq_host->slot[tag].flags = 0; + + cq_host->qcnt += 1; + /* Make sure descriptors are ready before ringing the doorbell */ + wmb(); + cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR); + if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag))) + pr_debug("%s: cqhci: doorbell not set for tag %d\n", + mmc_hostname(mmc), tag); +out_unlock: + spin_unlock_irqrestore(&cq_host->lock, flags); + + if (err) + cqhci_post_req(mmc, mrq); + + return err; +} + +static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq, + bool notify) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + if (!cq_host->recovery_halt) { + cq_host->recovery_halt = true; + pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc)); + wake_up(&cq_host->wait_queue); + if (notify && mrq->recovery_notifier) + mrq->recovery_notifier(mrq); + } +} + +static unsigned int cqhci_error_flags(int error1, int error2) +{ + int error = error1 ? error1 : error2; + + switch (error) { + case -EILSEQ: + return CQHCI_HOST_CRC; + case -ETIMEDOUT: + return CQHCI_HOST_TIMEOUT; + default: + return CQHCI_HOST_OTHER; + } +} + +static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error, + int data_error) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + struct cqhci_slot *slot; + u32 terri; + int tag; + + spin_lock(&cq_host->lock); + + terri = cqhci_readl(cq_host, CQHCI_TERRI); + + pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", + mmc_hostname(mmc), status, cmd_error, data_error, terri); + + /* Forget about errors when recovery has already been triggered */ + if (cq_host->recovery_halt) + goto out_unlock; + + if (!cq_host->qcnt) { + WARN_ONCE(1, "%s: cqhci: error when idle. 
IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n", + mmc_hostname(mmc), status, cmd_error, data_error, + terri); + goto out_unlock; + } + + if (CQHCI_TERRI_C_VALID(terri)) { + tag = CQHCI_TERRI_C_TASK(terri); + slot = &cq_host->slot[tag]; + if (slot->mrq) { + slot->flags = cqhci_error_flags(cmd_error, data_error); + cqhci_recovery_needed(mmc, slot->mrq, true); + } + } + + if (CQHCI_TERRI_D_VALID(terri)) { + tag = CQHCI_TERRI_D_TASK(terri); + slot = &cq_host->slot[tag]; + if (slot->mrq) { + slot->flags = cqhci_error_flags(data_error, cmd_error); + cqhci_recovery_needed(mmc, slot->mrq, true); + } + } + + if (!cq_host->recovery_halt) { + /* + * The only way to guarantee forward progress is to mark at + * least one task in error, so if none is indicated, pick one. + */ + for (tag = 0; tag < NUM_SLOTS; tag++) { + slot = &cq_host->slot[tag]; + if (!slot->mrq) + continue; + slot->flags = cqhci_error_flags(data_error, cmd_error); + cqhci_recovery_needed(mmc, slot->mrq, true); + break; + } + } + +out_unlock: + spin_unlock(&cq_host->lock); +} + +static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + struct cqhci_slot *slot = &cq_host->slot[tag]; + struct mmc_request *mrq = slot->mrq; + struct mmc_data *data; + + if (!mrq) { + WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n", + mmc_hostname(mmc), tag); + return; + } + + /* No completions allowed during recovery */ + if (cq_host->recovery_halt) { + slot->flags |= CQHCI_COMPLETED; + return; + } + + slot->mrq = NULL; + + cq_host->qcnt -= 1; + + data = mrq->data; + if (data) { + if (data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blksz * data->blocks; + } + + mmc_cqe_request_done(mmc, mrq); +} + +irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, + int data_error) +{ + u32 status; + unsigned long tag = 0, comp_status; + struct cqhci_host *cq_host = mmc->cqe_private; + + status = cqhci_readl(cq_host, CQHCI_IS); + cqhci_writel(cq_host, status, CQHCI_IS); + + pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status); + + if ((status & CQHCI_IS_RED) || cmd_error || data_error) + cqhci_error_irq(mmc, status, cmd_error, data_error); + + if (status & CQHCI_IS_TCC) { + /* read TCN and complete the request */ + comp_status = cqhci_readl(cq_host, CQHCI_TCN); + cqhci_writel(cq_host, comp_status, CQHCI_TCN); + pr_debug("%s: cqhci: TCN: 0x%08lx\n", + mmc_hostname(mmc), comp_status); + + spin_lock(&cq_host->lock); + + for_each_set_bit(tag, &comp_status, cq_host->num_slots) { + /* complete the corresponding mrq */ + pr_debug("%s: cqhci: completing tag %lu\n", + mmc_hostname(mmc), tag); + cqhci_finish_mrq(mmc, tag); + } + + if (cq_host->waiting_for_idle && !cq_host->qcnt) { + cq_host->waiting_for_idle = false; + wake_up(&cq_host->wait_queue); + } + + spin_unlock(&cq_host->lock); + } + + if (status & CQHCI_IS_TCL) + wake_up(&cq_host->wait_queue); + + if (status & CQHCI_IS_HAC) + wake_up(&cq_host->wait_queue); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(cqhci_irq); + +static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret) +{ + unsigned long flags; + bool is_idle; + + spin_lock_irqsave(&cq_host->lock, flags); + is_idle = !cq_host->qcnt || cq_host->recovery_halt; + *ret = cq_host->recovery_halt ? 
-EBUSY : 0; + cq_host->waiting_for_idle = !is_idle; + spin_unlock_irqrestore(&cq_host->lock, flags); + + return is_idle; +} + +static int cqhci_wait_for_idle(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + int ret; + + wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret)); + + return ret; +} + +static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq, + bool *recovery_needed) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + int tag = cqhci_tag(mrq); + struct cqhci_slot *slot = &cq_host->slot[tag]; + unsigned long flags; + bool timed_out; + + spin_lock_irqsave(&cq_host->lock, flags); + timed_out = slot->mrq == mrq; + if (timed_out) { + slot->flags |= CQHCI_EXTERNAL_TIMEOUT; + cqhci_recovery_needed(mmc, mrq, false); + *recovery_needed = cq_host->recovery_halt; + } + spin_unlock_irqrestore(&cq_host->lock, flags); + + if (timed_out) { + pr_err("%s: cqhci: timeout for tag %d\n", + mmc_hostname(mmc), tag); + cqhci_dumpregs(cq_host); + } + + return timed_out; +} + +static bool cqhci_tasks_cleared(struct cqhci_host *cq_host) +{ + return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS); +} + +static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + bool ret; + u32 ctl; + + cqhci_set_irqs(cq_host, CQHCI_IS_TCL); + + ctl = cqhci_readl(cq_host, CQHCI_CTL); + ctl |= CQHCI_CLEAR_ALL_TASKS; + cqhci_writel(cq_host, ctl, CQHCI_CTL); + + wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host), + msecs_to_jiffies(timeout) + 1); + + cqhci_set_irqs(cq_host, 0); + + ret = cqhci_tasks_cleared(cq_host); + + if (!ret) + pr_warn("%s: cqhci: Failed to clear tasks\n", + mmc_hostname(mmc)); + + return ret; +} + +static bool cqhci_halted(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT; +} + +static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + bool ret; + u32 ctl; + + if (cqhci_halted(cq_host)) + return true; + + cqhci_set_irqs(cq_host, CQHCI_IS_HAC); + + ctl = cqhci_readl(cq_host, CQHCI_CTL); + ctl |= CQHCI_HALT; + cqhci_writel(cq_host, ctl, CQHCI_CTL); + + wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host), + msecs_to_jiffies(timeout) + 1); + + cqhci_set_irqs(cq_host, 0); + + ret = cqhci_halted(cq_host); + + if (!ret) + pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); + + return ret; +} + +/* + * After halting we expect to be able to use the command line. We interpret the + * failure to halt to mean the data lines might still be in use (and the upper + * layers will need to send a STOP command), however failing to halt complicates + * the recovery, so set a timeout that would reasonably allow I/O to complete. 
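+ *
+ * (The timeout below is in milliseconds; cqhci_halt() converts it with
+ * msecs_to_jiffies().)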
+ */ +#define CQHCI_START_HALT_TIMEOUT 500 + +static void cqhci_recovery_start(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + + pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); + + WARN_ON(!cq_host->recovery_halt); + + cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT); + + if (cq_host->ops->disable) + cq_host->ops->disable(mmc, true); + + mmc->cqe_on = false; +} + +static int cqhci_error_from_flags(unsigned int flags) +{ + if (!flags) + return 0; + + /* CRC errors might indicate re-tuning so prefer to report that */ + if (flags & CQHCI_HOST_CRC) + return -EILSEQ; + + if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT)) + return -ETIMEDOUT; + + return -EIO; +} + +static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag) +{ + struct cqhci_slot *slot = &cq_host->slot[tag]; + struct mmc_request *mrq = slot->mrq; + struct mmc_data *data; + + if (!mrq) + return; + + slot->mrq = NULL; + + cq_host->qcnt -= 1; + + data = mrq->data; + if (data) { + data->bytes_xfered = 0; + data->error = cqhci_error_from_flags(slot->flags); + } else { + mrq->cmd->error = cqhci_error_from_flags(slot->flags); + } + + mmc_cqe_request_done(cq_host->mmc, mrq); +} + +static void cqhci_recover_mrqs(struct cqhci_host *cq_host) +{ + int i; + + for (i = 0; i < cq_host->num_slots; i++) + cqhci_recover_mrq(cq_host, i); +} + +/* + * By now the command and data lines should be unused so there is no reason for + * CQHCI to take a long time to halt, but if it doesn't halt there could be + * problems clearing tasks, so be generous. + */ +#define CQHCI_FINISH_HALT_TIMEOUT 20 + +/* CQHCI could be expected to clear it's internal state pretty quickly */ +#define CQHCI_CLEAR_TIMEOUT 20 + +static void cqhci_recovery_finish(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + unsigned long flags; + u32 cqcfg; + bool ok; + + pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__); + + WARN_ON(!cq_host->recovery_halt); + + ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); + + /* + * The specification contradicts itself, by saying that tasks cannot be + * cleared if CQHCI does not halt, but if CQHCI does not halt, it should + * be disabled/re-enabled, but not to disable before clearing tasks. + * Have a go anyway. 
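+	 *
+	 * So the order below is: try to clear the tasks, toggle
+	 * CQHCI_ENABLE to force the internal state clean anyway, halt
+	 * again, and retry the clear once if the first attempt failed.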
+ */ + if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) + ok = false; + + /* Disable to make sure tasks really are cleared */ + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + cqcfg |= CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + + cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); + + if (!ok) + cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT); + + cqhci_recover_mrqs(cq_host); + + WARN_ON(cq_host->qcnt); + + spin_lock_irqsave(&cq_host->lock, flags); + cq_host->qcnt = 0; + cq_host->recovery_halt = false; + mmc->cqe_on = false; + spin_unlock_irqrestore(&cq_host->lock, flags); + + /* Ensure all writes are done before interrupts are re-enabled */ + wmb(); + + cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS); + + cqhci_set_irqs(cq_host, CQHCI_IS_MASK); + + pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc)); +} + +static const struct mmc_cqe_ops cqhci_cqe_ops = { + .cqe_enable = cqhci_enable, + .cqe_disable = cqhci_disable, + .cqe_request = cqhci_request, + .cqe_post_req = cqhci_post_req, + .cqe_off = cqhci_off, + .cqe_wait_for_idle = cqhci_wait_for_idle, + .cqe_timeout = cqhci_timeout, + .cqe_recovery_start = cqhci_recovery_start, + .cqe_recovery_finish = cqhci_recovery_finish, +}; + +struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev) +{ + struct cqhci_host *cq_host; + struct resource *cqhci_memres = NULL; + + /* check and setup CMDQ interface */ + cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "cqhci"); + if (!cqhci_memres) { + dev_dbg(&pdev->dev, "CMDQ not supported\n"); + return ERR_PTR(-EINVAL); + } + + cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) + return ERR_PTR(-ENOMEM); + cq_host->mmio = devm_ioremap(&pdev->dev, + cqhci_memres->start, + resource_size(cqhci_memres)); + if (!cq_host->mmio) { + dev_err(&pdev->dev, "failed to remap cqhci regs\n"); + return ERR_PTR(-EBUSY); + } + dev_dbg(&pdev->dev, "CMDQ ioremap: done\n"); + + return cq_host; +} +EXPORT_SYMBOL(cqhci_pltfm_init); + +static unsigned int cqhci_ver_major(struct cqhci_host *cq_host) +{ + return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER)); +} + +static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host) +{ + u32 ver = cqhci_readl(cq_host, CQHCI_VER); + + return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver); +} + +int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, + bool dma64) +{ + int err; + + cq_host->dma64 = dma64; + cq_host->mmc = mmc; + cq_host->mmc->cqe_private = cq_host; + + cq_host->num_slots = NUM_SLOTS; + cq_host->dcmd_slot = DCMD_SLOT; + + mmc->cqe_ops = &cqhci_cqe_ops; + + mmc->cqe_qdepth = NUM_SLOTS; + if (mmc->caps2 & MMC_CAP2_CQE_DCMD) + mmc->cqe_qdepth -= 1; + + cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots, + sizeof(*cq_host->slot), GFP_KERNEL); + if (!cq_host->slot) { + err = -ENOMEM; + goto out_err; + } + + spin_lock_init(&cq_host->lock); + + init_completion(&cq_host->halt_comp); + init_waitqueue_head(&cq_host->wait_queue); + + pr_info("%s: CQHCI version %u.%02u\n", + mmc_hostname(mmc), cqhci_ver_major(cq_host), + cqhci_ver_minor(cq_host)); + + return 0; + +out_err: + pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n", + mmc_hostname(mmc), cqhci_ver_major(cq_host), + cqhci_ver_minor(cq_host), err); + return err; +} +EXPORT_SYMBOL(cqhci_init); + +MODULE_AUTHOR("Venkat Gopalakrishnan "); +MODULE_DESCRIPTION("Command Queue Host Controller 
Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
new file mode 100644
index 000000000..89bf6adbc
--- /dev/null
+++ b/drivers/mmc/host/cqhci.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+#ifndef LINUX_MMC_CQHCI_H
+#define LINUX_MMC_CQHCI_H
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <asm/io.h>
+
+/* registers */
+/* version */
+#define CQHCI_VER			0x00
+#define CQHCI_VER_MAJOR(x)		(((x) & GENMASK(11, 8)) >> 8)
+#define CQHCI_VER_MINOR1(x)		(((x) & GENMASK(7, 4)) >> 4)
+#define CQHCI_VER_MINOR2(x)		((x) & GENMASK(3, 0))
+
+/* capabilities */
+#define CQHCI_CAP			0x04
+/* configuration */
+#define CQHCI_CFG			0x08
+#define CQHCI_DCMD			0x00001000
+#define CQHCI_TASK_DESC_SZ		0x00000100
+#define CQHCI_ENABLE			0x00000001
+
+/* control */
+#define CQHCI_CTL			0x0C
+#define CQHCI_CLEAR_ALL_TASKS		0x00000100
+#define CQHCI_HALT			0x00000001
+
+/* interrupt status */
+#define CQHCI_IS			0x10
+#define CQHCI_IS_HAC			BIT(0)
+#define CQHCI_IS_TCC			BIT(1)
+#define CQHCI_IS_RED			BIT(2)
+#define CQHCI_IS_TCL			BIT(3)
+
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+
+/* interrupt status enable */
+#define CQHCI_ISTE			0x14
+
+/* interrupt signal enable */
+#define CQHCI_ISGE			0x18
+
+/* interrupt coalescing */
+#define CQHCI_IC			0x1C
+#define CQHCI_IC_ENABLE			BIT(31)
+#define CQHCI_IC_RESET			BIT(16)
+#define CQHCI_IC_ICCTHWEN		BIT(15)
+#define CQHCI_IC_ICCTH(x)		(((x) & 0x1F) << 8)
+#define CQHCI_IC_ICTOVALWEN		BIT(7)
+#define CQHCI_IC_ICTOVAL(x)		((x) & 0x7F)
+
+/* task list base address */
+#define CQHCI_TDLBA			0x20
+
+/* task list base address upper */
+#define CQHCI_TDLBAU			0x24
+
+/* door-bell */
+#define CQHCI_TDBR			0x28
+
+/* task completion notification */
+#define CQHCI_TCN			0x2C
+
+/* device queue status */
+#define CQHCI_DQS			0x30
+
+/* device pending tasks */
+#define CQHCI_DPT			0x34
+
+/* task clear */
+#define CQHCI_TCLR			0x38
+
+/* send status config 1 */
+#define CQHCI_SSC1			0x40
+#define CQHCI_SSC1_CBC_MASK		GENMASK(19, 16)
+
+/* send status config 2 */
+#define CQHCI_SSC2			0x44
+
+/* response for dcmd */
+#define CQHCI_CRDCT			0x48
+
+/* response mode error mask */
+#define CQHCI_RMEM			0x50
+
+/* task error info */
+#define CQHCI_TERRI			0x54
+
+#define CQHCI_TERRI_C_INDEX(x)		((x) & GENMASK(5, 0))
+#define CQHCI_TERRI_C_TASK(x)		(((x) & GENMASK(12, 8)) >> 8)
+#define CQHCI_TERRI_C_VALID(x)		((x) & BIT(15))
+#define CQHCI_TERRI_D_INDEX(x)		(((x) & GENMASK(21, 16)) >> 16)
+#define CQHCI_TERRI_D_TASK(x)		(((x) & GENMASK(28, 24)) >> 24)
+#define CQHCI_TERRI_D_VALID(x)		((x) & BIT(31))
+
+/* command response index */
+#define CQHCI_CRI			0x58
+
+/* command response argument */
+#define CQHCI_CRA			0x5C
+
+#define CQHCI_INT_ALL			0xF
+#define CQHCI_IC_DEFAULT_ICCTH		31
+#define CQHCI_IC_DEFAULT_ICTOVAL	1
+
+/* attribute fields */
+#define CQHCI_VALID(x)			(((x) & 1) << 0)
+#define CQHCI_END(x)			(((x) & 1) << 1)
+#define CQHCI_INT(x)			(((x) & 1) << 2)
+#define CQHCI_ACT(x)			(((x) & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define CQHCI_FORCED_PROG(x)		(((x) & 1) << 6)
+#define CQHCI_CONTEXT(x)		(((x) & 0xF) << 7)
+#define CQHCI_DATA_TAG(x)		(((x) & 1) << 11)
+#define CQHCI_DATA_DIR(x)		(((x) & 1) << 12)
+#define CQHCI_PRIORITY(x)		(((x) & 1) << 13)
+#define CQHCI_QBAR(x)			(((x) & 1) << 14)
+#define CQHCI_REL_WRITE(x)		(((x) & 1) << 15)
+#define CQHCI_BLK_COUNT(x)		(((x) & 0xFFFF) << 16)
+#define CQHCI_BLK_ADDR(x)		(((x) & 0xFFFFFFFF) << 32)
+
+/* direct command task
descriptor fields */ +#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16) +#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22) +#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23) + +/* transfer descriptor fields */ +#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16) +#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32) +#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0) + +struct cqhci_host_ops; +struct mmc_host; +struct mmc_request; +struct cqhci_slot; + +struct cqhci_host { + const struct cqhci_host_ops *ops; + void __iomem *mmio; + struct mmc_host *mmc; + + spinlock_t lock; + + /* relative card address of device */ + unsigned int rca; + + /* 64 bit DMA */ + bool dma64; + int num_slots; + int qcnt; + + u32 dcmd_slot; + u32 caps; +#define CQHCI_TASK_DESC_SZ_128 0x1 + + u32 quirks; +#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1 + + bool enabled; + bool halted; + bool init_done; + bool activated; + bool waiting_for_idle; + bool recovery_halt; + + size_t desc_size; + size_t data_size; + + u8 *desc_base; + + /* total descriptor size */ + u8 slot_sz; + + /* 64/128 bit depends on CQHCI_CFG */ + u8 task_desc_len; + + /* 64 bit on 32-bit arch, 128 bit on 64-bit */ + u8 link_desc_len; + + u8 *trans_desc_base; + /* same length as transfer descriptor */ + u8 trans_desc_len; + + dma_addr_t desc_dma_base; + dma_addr_t trans_desc_dma_base; + + struct completion halt_comp; + wait_queue_head_t wait_queue; + struct cqhci_slot *slot; +}; + +struct cqhci_host_ops { + void (*dumpregs)(struct mmc_host *mmc); + void (*write_l)(struct cqhci_host *host, u32 val, int reg); + u32 (*read_l)(struct cqhci_host *host, int reg); + void (*enable)(struct mmc_host *mmc); + void (*disable)(struct mmc_host *mmc, bool recovery); + void (*update_dcmd_desc)(struct mmc_host *mmc, struct mmc_request *mrq, + u64 *data); + void (*pre_enable)(struct mmc_host *mmc); + void (*post_disable)(struct mmc_host *mmc); +}; + +static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg) +{ + if (unlikely(host->ops->write_l)) + host->ops->write_l(host, val, reg); + else + writel_relaxed(val, host->mmio + reg); +} + +static inline u32 cqhci_readl(struct cqhci_host *host, int reg) +{ + if (unlikely(host->ops->read_l)) + return host->ops->read_l(host, reg); + else + return readl_relaxed(host->mmio + reg); +} + +struct platform_device; + +irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, + int data_error); +int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64); +struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev); +int cqhci_deactivate(struct mmc_host *mmc); +static inline int cqhci_suspend(struct mmc_host *mmc) +{ + return cqhci_deactivate(mmc); +} +int cqhci_resume(struct mmc_host *mmc); + +#endif diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c new file mode 100644 index 000000000..647928ab0 --- /dev/null +++ b/drivers/mmc/host/davinci_mmc.c @@ -0,0 +1,1417 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver + * + * Copyright (C) 2006 Texas Instruments. 
+ * Original author: Purushotam Kumar
+ * Copyright (C) 2009 David Brownell
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/mmc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_data/mmc-davinci.h>
+
+/*
+ * Register Definitions
+ */
+#define DAVINCI_MMCCTL       0x00 /* Control Register                  */
+#define DAVINCI_MMCCLK       0x04 /* Memory Clock Control Register     */
+#define DAVINCI_MMCST0       0x08 /* Status Register 0                 */
+#define DAVINCI_MMCST1       0x0C /* Status Register 1                 */
+#define DAVINCI_MMCIM        0x10 /* Interrupt Mask Register           */
+#define DAVINCI_MMCTOR       0x14 /* Response Time-Out Register        */
+#define DAVINCI_MMCTOD       0x18 /* Data Read Time-Out Register       */
+#define DAVINCI_MMCBLEN      0x1C /* Block Length Register             */
+#define DAVINCI_MMCNBLK      0x20 /* Number of Blocks Register         */
+#define DAVINCI_MMCNBLC      0x24 /* Number of Blocks Counter Register */
+#define DAVINCI_MMCDRR       0x28 /* Data Receive Register             */
+#define DAVINCI_MMCDXR       0x2C /* Data Transmit Register            */
+#define DAVINCI_MMCCMD       0x30 /* Command Register                  */
+#define DAVINCI_MMCARGHL     0x34 /* Argument Register                 */
+#define DAVINCI_MMCRSP01     0x38 /* Response Register 0 and 1         */
+#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3         */
+#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5         */
+#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7         */
+#define DAVINCI_MMCDRSP      0x48 /* Data Response Register            */
+#define DAVINCI_MMCETOK      0x4C
+#define DAVINCI_MMCCIDX      0x50 /* Command Index Register            */
+#define DAVINCI_MMCCKC       0x54
+#define DAVINCI_MMCTORC      0x58
+#define DAVINCI_MMCTODC      0x5C
+#define DAVINCI_MMCBLNC      0x60
+#define DAVINCI_SDIOCTL      0x64
+#define DAVINCI_SDIOST0      0x68
+#define DAVINCI_SDIOIEN      0x6C
+#define DAVINCI_SDIOIST      0x70
+#define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */
+
+/* DAVINCI_MMCCTL definitions */
+#define MMCCTL_DATRST         (1 << 0)
+#define MMCCTL_CMDRST         (1 << 1)
+#define MMCCTL_WIDTH_8_BIT    (1 << 8)
+#define MMCCTL_WIDTH_4_BIT    (1 << 2)
+#define MMCCTL_DATEG_DISABLED (0 << 6)
+#define MMCCTL_DATEG_RISING   (1 << 6)
+#define MMCCTL_DATEG_FALLING  (2 << 6)
+#define MMCCTL_DATEG_BOTH     (3 << 6)
+#define MMCCTL_PERMDR_LE      (0 << 9)
+#define MMCCTL_PERMDR_BE      (1 << 9)
+#define MMCCTL_PERMDX_LE      (0 << 10)
+#define MMCCTL_PERMDX_BE      (1 << 10)
+
+/* DAVINCI_MMCCLK definitions */
+#define MMCCLK_CLKEN          (1 << 8)
+#define MMCCLK_CLKRT_MASK     (0xFF << 0)
+
+/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
+#define MMCST0_DATDNE         BIT(0)	/* data done */
+#define MMCST0_BSYDNE         BIT(1)	/* busy done */
+#define MMCST0_RSPDNE         BIT(2)	/* command done */
+#define MMCST0_TOUTRD         BIT(3)	/* data read timeout */
+#define MMCST0_TOUTRS         BIT(4)	/* command response timeout */
+#define MMCST0_CRCWR          BIT(5)	/* data write CRC error */
+#define MMCST0_CRCRD          BIT(6)	/* data read CRC error */
+#define MMCST0_CRCRS          BIT(7)	/* command response CRC error */
+#define MMCST0_DXRDY          BIT(9)	/* data transmit ready (fifo empty) */
+#define MMCST0_DRRDY          BIT(10)	/* data receive ready (data in fifo) */
+#define MMCST0_DATED          BIT(11)	/* DAT3 edge detect */
+#define MMCST0_TRNDNE         BIT(12)	/* transfer done */
+
+/* DAVINCI_MMCST1 definitions */
+#define MMCST1_BUSY           (1 << 0)
+
+/* DAVINCI_MMCCMD definitions */
+#define MMCCMD_CMD_MASK       (0x3F << 0)
+#define MMCCMD_PPLEN          (1 << 7)
+#define MMCCMD_BSYEXP         (1 << 8)
+#define MMCCMD_RSPFMT_MASK    (3 << 9)
+#define MMCCMD_RSPFMT_NONE    (0 << 9)
+#define MMCCMD_RSPFMT_R1456   (1 << 9)
+#define MMCCMD_RSPFMT_R2      (2 << 9)
+#define MMCCMD_RSPFMT_R3      (3 << 9)
+#define MMCCMD_DTRW           (1 << 11)
+#define
MMCCMD_STRMTP (1 << 12) +#define MMCCMD_WDATX (1 << 13) +#define MMCCMD_INITCK (1 << 14) +#define MMCCMD_DCLR (1 << 15) +#define MMCCMD_DMATRIG (1 << 16) + +/* DAVINCI_MMCFIFOCTL definitions */ +#define MMCFIFOCTL_FIFORST (1 << 0) +#define MMCFIFOCTL_FIFODIR_WR (1 << 1) +#define MMCFIFOCTL_FIFODIR_RD (0 << 1) +#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ +#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ +#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ +#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ +#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ + +/* DAVINCI_SDIOST0 definitions */ +#define SDIOST0_DAT1_HI BIT(0) + +/* DAVINCI_SDIOIEN definitions */ +#define SDIOIEN_IOINTEN BIT(0) + +/* DAVINCI_SDIOIST definitions */ +#define SDIOIST_IOINT BIT(0) + +/* MMCSD Init clock in Hz in opendrain mode */ +#define MMCSD_INIT_CLOCK 200000 + +/* + * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, + * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only + * for drivers with max_segs == 1, making the segments bigger (64KB) + * than the page or two that's otherwise typical. nr_sg (passed from + * platform data) == 16 gives at least the same throughput boost, using + * EDMA transfer linkage instead of spending CPU time copying pages. + */ +#define MAX_CCNT ((1 << 16) - 1) + +#define MAX_NR_SG 16 + +static unsigned rw_threshold = 32; +module_param(rw_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(rw_threshold, + "Read/Write threshold. Default = 32"); + +static unsigned poll_threshold = 128; +module_param(poll_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(poll_threshold, + "Polling transaction size threshold. Default = 128"); + +static unsigned poll_loopcount = 32; +module_param(poll_loopcount, uint, S_IRUGO); +MODULE_PARM_DESC(poll_loopcount, + "Maximum polling loop count. Default = 32"); + +static unsigned use_dma = 1; +module_param(use_dma, uint, 0); +MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); + +struct mmc_davinci_host { + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_host *mmc; + struct clk *clk; + unsigned int mmc_input_clk; + void __iomem *base; + struct resource *mem_res; + int mmc_irq, sdio_irq; + unsigned char bus_mode; + +#define DAVINCI_MMC_DATADIR_NONE 0 +#define DAVINCI_MMC_DATADIR_READ 1 +#define DAVINCI_MMC_DATADIR_WRITE 2 + unsigned char data_dir; + + /* buffer is used during PIO of one scatterlist segment, and + * is updated along with buffer_bytes_left. bytes_left applies + * to all N blocks of the PIO transfer. + */ + u8 *buffer; + u32 buffer_bytes_left; + u32 bytes_left; + + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; + bool use_dma; + bool do_dma; + bool sdio_int; + bool active_request; + + /* For PIO we walk scatterlists one segment at a time. 
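+ *
+ * A minimal sketch of that walk (illustration only, not the exact
+ * driver code):
+ *
+ *     for (sg = data->sg; sg && bytes_left; sg = sg_next(sg))
+ *         bytes_left -= min_t(u32, sg_dma_len(sg), bytes_left);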
*/ + unsigned int sg_len; + struct scatterlist *sg; + + /* Version of the MMC/SD controller */ + u8 version; + /* for ns in one cycle calculation */ + unsigned ns_in_one_cycle; + /* Number of sg segments */ + u8 nr_sg; +#ifdef CONFIG_CPU_FREQ + struct notifier_block freq_transition; +#endif +}; + +static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); + +/* PIO only */ +static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) +{ + host->buffer_bytes_left = sg_dma_len(host->sg); + host->buffer = sg_virt(host->sg); + if (host->buffer_bytes_left > host->bytes_left) + host->buffer_bytes_left = host->bytes_left; +} + +static void davinci_fifo_data_trans(struct mmc_davinci_host *host, + unsigned int n) +{ + u8 *p; + unsigned int i; + + if (host->buffer_bytes_left == 0) { + host->sg = sg_next(host->data->sg); + mmc_davinci_sg_to_buf(host); + } + + p = host->buffer; + if (n > host->buffer_bytes_left) + n = host->buffer_bytes_left; + host->buffer_bytes_left -= n; + host->bytes_left -= n; + + /* NOTE: we never transfer more than rw_threshold bytes + * to/from the fifo here; there's no I/O overlap. + * This also assumes that access width( i.e. ACCWD) is 4 bytes + */ + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + for (i = 0; i < (n >> 2); i++) { + writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); + p = p + 4; + } + if (n & 3) { + iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); + p = p + (n & 3); + } + } else { + for (i = 0; i < (n >> 2); i++) { + *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); + p = p + 4; + } + if (n & 3) { + ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); + p = p + (n & 3); + } + } + host->buffer = p; +} + +static void mmc_davinci_start_command(struct mmc_davinci_host *host, + struct mmc_command *cmd) +{ + u32 cmd_reg = 0; + u32 im_val; + + dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", + cmd->opcode, cmd->arg, + ({ char *s; + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + s = ", R1/R5/R6/R7 response"; + break; + case MMC_RSP_R1B: + s = ", R1b response"; + break; + case MMC_RSP_R2: + s = ", R2 response"; + break; + case MMC_RSP_R3: + s = ", R3/R4 response"; + break; + default: + s = ", (R? response)"; + break; + }; s; })); + host->cmd = cmd; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1B: + /* There's some spec confusion about when R1B is + * allowed, but if the card doesn't issue a BUSY + * then it's harmless for us to allow it. 
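+ *
+ * For reference, the classic R1b case is a stop command; a
+ * core-layer caller would set it up roughly as (sketch, not this
+ * driver's code):
+ *
+ *     cmd.opcode = MMC_STOP_TRANSMISSION;
+ *     cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ *
+ * which the switch below maps to MMCCMD_BSYEXP plus RSPFMT_R1456.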
+ */ + cmd_reg |= MMCCMD_BSYEXP; + fallthrough; + case MMC_RSP_R1: /* 48 bits, CRC */ + cmd_reg |= MMCCMD_RSPFMT_R1456; + break; + case MMC_RSP_R2: /* 136 bits, CRC */ + cmd_reg |= MMCCMD_RSPFMT_R2; + break; + case MMC_RSP_R3: /* 48 bits, no CRC */ + cmd_reg |= MMCCMD_RSPFMT_R3; + break; + default: + cmd_reg |= MMCCMD_RSPFMT_NONE; + dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", + mmc_resp_type(cmd)); + break; + } + + /* Set command index */ + cmd_reg |= cmd->opcode; + + /* Enable EDMA transfer triggers */ + if (host->do_dma) + cmd_reg |= MMCCMD_DMATRIG; + + if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && + host->data_dir == DAVINCI_MMC_DATADIR_READ) + cmd_reg |= MMCCMD_DMATRIG; + + /* Setting whether command involves data transfer or not */ + if (cmd->data) + cmd_reg |= MMCCMD_WDATX; + + /* Setting whether data read or write */ + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) + cmd_reg |= MMCCMD_DTRW; + + if (host->bus_mode == MMC_BUSMODE_PUSHPULL) + cmd_reg |= MMCCMD_PPLEN; + + /* set Command timeout */ + writel(0x1FFF, host->base + DAVINCI_MMCTOR); + + /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ + im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + im_val |= MMCST0_DATDNE | MMCST0_CRCWR; + + if (!host->do_dma) + im_val |= MMCST0_DXRDY; + } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { + im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; + + if (!host->do_dma) + im_val |= MMCST0_DRRDY; + } + + /* + * Before non-DMA WRITE commands the controller needs priming: + * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size + */ + if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) + davinci_fifo_data_trans(host, rw_threshold); + + writel(cmd->arg, host->base + DAVINCI_MMCARGHL); + writel(cmd_reg, host->base + DAVINCI_MMCCMD); + + host->active_request = true; + + if (!host->do_dma && host->bytes_left <= poll_threshold) { + u32 count = poll_loopcount; + + while (host->active_request && count--) { + mmc_davinci_irq(0, host); + cpu_relax(); + } + } + + if (host->active_request) + writel(im_val, host->base + DAVINCI_MMCIM); +} + +/*----------------------------------------------------------------------*/ + +/* DMA infrastructure */ + +static void davinci_abort_dma(struct mmc_davinci_host *host) +{ + struct dma_chan *sync_dev; + + if (host->data_dir == DAVINCI_MMC_DATADIR_READ) + sync_dev = host->dma_rx; + else + sync_dev = host->dma_tx; + + dmaengine_terminate_all(sync_dev); +} + +static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, + struct mmc_data *data) +{ + struct dma_chan *chan; + struct dma_async_tx_descriptor *desc; + int ret = 0; + + if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { + struct dma_slave_config dma_tx_conf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_maxburst = + rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + chan = host->dma_tx; + dmaengine_slave_config(host->dma_tx, &dma_tx_conf); + + desc = dmaengine_prep_slave_sg(host->dma_tx, + data->sg, + host->sg_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_dbg(mmc_dev(host->mmc), + "failed to allocate DMA TX descriptor"); + ret = -1; + goto out; + } + } else { + struct dma_slave_config dma_rx_conf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = host->mem_res->start + DAVINCI_MMCDRR, + .src_addr_width = 
DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = + rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + chan = host->dma_rx; + dmaengine_slave_config(host->dma_rx, &dma_rx_conf); + + desc = dmaengine_prep_slave_sg(host->dma_rx, + data->sg, + host->sg_len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_dbg(mmc_dev(host->mmc), + "failed to allocate DMA RX descriptor"); + ret = -1; + goto out; + } + } + + dmaengine_submit(desc); + dma_async_issue_pending(chan); + +out: + return ret; +} + +static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, + struct mmc_data *data) +{ + int i; + int mask = rw_threshold - 1; + int ret = 0; + + host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + /* no individual DMA segment should need a partial FIFO */ + for (i = 0; i < host->sg_len; i++) { + if (sg_dma_len(data->sg + i) & mask) { + dma_unmap_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + return -1; + } + } + + host->do_dma = 1; + ret = mmc_davinci_send_dma_request(host, data); + + return ret; +} + +static void davinci_release_dma_channels(struct mmc_davinci_host *host) +{ + if (!host->use_dma) + return; + + dma_release_channel(host->dma_tx); + dma_release_channel(host->dma_rx); +} + +static int davinci_acquire_dma_channels(struct mmc_davinci_host *host) +{ + host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); + if (IS_ERR(host->dma_tx)) { + dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); + return PTR_ERR(host->dma_tx); + } + + host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); + if (IS_ERR(host->dma_rx)) { + dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); + dma_release_channel(host->dma_tx); + return PTR_ERR(host->dma_rx); + } + + return 0; +} + +/*----------------------------------------------------------------------*/ + +static void +mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) +{ + int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; + int timeout; + struct mmc_data *data = req->data; + + if (host->version == MMC_CTLR_VERSION_2) + fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; + + host->data = data; + if (data == NULL) { + host->data_dir = DAVINCI_MMC_DATADIR_NONE; + writel(0, host->base + DAVINCI_MMCBLEN); + writel(0, host->base + DAVINCI_MMCNBLK); + return; + } + + dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n", + (data->flags & MMC_DATA_WRITE) ? 
"write" : "read", + data->blocks, data->blksz); + dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", + data->timeout_clks, data->timeout_ns); + timeout = data->timeout_clks + + (data->timeout_ns / host->ns_in_one_cycle); + if (timeout > 0xffff) + timeout = 0xffff; + + writel(timeout, host->base + DAVINCI_MMCTOD); + writel(data->blocks, host->base + DAVINCI_MMCNBLK); + writel(data->blksz, host->base + DAVINCI_MMCBLEN); + + /* Configure the FIFO */ + if (data->flags & MMC_DATA_WRITE) { + host->data_dir = DAVINCI_MMC_DATADIR_WRITE; + writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, + host->base + DAVINCI_MMCFIFOCTL); + writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, + host->base + DAVINCI_MMCFIFOCTL); + } else { + host->data_dir = DAVINCI_MMC_DATADIR_READ; + writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, + host->base + DAVINCI_MMCFIFOCTL); + writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, + host->base + DAVINCI_MMCFIFOCTL); + } + + host->buffer = NULL; + host->bytes_left = data->blocks * data->blksz; + + /* For now we try to use DMA whenever we won't need partial FIFO + * reads or writes, either for the whole transfer (as tested here) + * or for any individual scatterlist segment (tested when we call + * start_dma_transfer). + * + * While we *could* change that, unusual block sizes are rarely + * used. The occasional fallback to PIO should't hurt. + */ + if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 + && mmc_davinci_start_dma_transfer(host, data) == 0) { + /* zero this to ensure we take no PIO paths */ + host->bytes_left = 0; + } else { + /* Revert to CPU Copy */ + host->sg_len = data->sg_len; + host->sg = host->data->sg; + mmc_davinci_sg_to_buf(host); + } +} + +static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct mmc_davinci_host *host = mmc_priv(mmc); + unsigned long timeout = jiffies + msecs_to_jiffies(900); + u32 mmcst1 = 0; + + /* Card may still be sending BUSY after a previous operation, + * typically some kind of write. If so, we can't proceed yet. + */ + while (time_before(jiffies, timeout)) { + mmcst1 = readl(host->base + DAVINCI_MMCST1); + if (!(mmcst1 & MMCST1_BUSY)) + break; + cpu_relax(); + } + if (mmcst1 & MMCST1_BUSY) { + dev_err(mmc_dev(host->mmc), "still BUSY? bad ... 
\n"); + req->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, req); + return; + } + + host->do_dma = 0; + mmc_davinci_prepare_data(host, req); + mmc_davinci_start_command(host, req->cmd); +} + +static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, + unsigned int mmc_req_freq) +{ + unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; + + mmc_pclk = host->mmc_input_clk; + if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) + mmc_push_pull_divisor = ((unsigned int)mmc_pclk + / (2 * mmc_req_freq)) - 1; + else + mmc_push_pull_divisor = 0; + + mmc_freq = (unsigned int)mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)); + + if (mmc_freq > mmc_req_freq) + mmc_push_pull_divisor = mmc_push_pull_divisor + 1; + /* Convert ns to clock cycles */ + if (mmc_req_freq <= 400000) + host->ns_in_one_cycle = (1000000) / (((mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)))/1000)); + else + host->ns_in_one_cycle = (1000000) / (((mmc_pclk + / (2 * (mmc_push_pull_divisor + 1)))/1000000)); + + return mmc_push_pull_divisor; +} + +static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) +{ + unsigned int open_drain_freq = 0, mmc_pclk = 0; + unsigned int mmc_push_pull_freq = 0; + struct mmc_davinci_host *host = mmc_priv(mmc); + + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { + u32 temp; + + /* Ignoring the init clock value passed for fixing the inter + * operability with different cards. + */ + open_drain_freq = ((unsigned int)mmc_pclk + / (2 * MMCSD_INIT_CLOCK)) - 1; + + if (open_drain_freq > 0xFF) + open_drain_freq = 0xFF; + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; + temp |= open_drain_freq; + writel(temp, host->base + DAVINCI_MMCCLK); + + /* Convert ns to clock cycles */ + host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); + } else { + u32 temp; + mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); + + if (mmc_push_pull_freq > 0xFF) + mmc_push_pull_freq = 0xFF; + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; + writel(temp, host->base + DAVINCI_MMCCLK); + + udelay(10); + + temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; + temp |= mmc_push_pull_freq; + writel(temp, host->base + DAVINCI_MMCCLK); + + writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); + + udelay(10); + } +} + +static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mmc_davinci_host *host = mmc_priv(mmc); + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + dev_dbg(mmc_dev(host->mmc), + "clock %dHz busmode %d powermode %d Vdd %04x\n", + ios->clock, ios->bus_mode, ios->power_mode, + ios->vdd); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + if (config && config->set_power) + config->set_power(pdev->id, false); + break; + case MMC_POWER_UP: + if (config && config->set_power) + config->set_power(pdev->id, true); + break; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); + writel((readl(host->base + DAVINCI_MMCCTL) & + ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, + host->base + DAVINCI_MMCCTL); + break; + case MMC_BUS_WIDTH_4: + dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); + if (host->version == MMC_CTLR_VERSION_2) + writel((readl(host->base + DAVINCI_MMCCTL) & + ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); + else + writel(readl(host->base + DAVINCI_MMCCTL) | + MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); 
+ break; + case MMC_BUS_WIDTH_1: + dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); + if (host->version == MMC_CTLR_VERSION_2) + writel(readl(host->base + DAVINCI_MMCCTL) & + ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), + host->base + DAVINCI_MMCCTL); + else + writel(readl(host->base + DAVINCI_MMCCTL) & + ~MMCCTL_WIDTH_4_BIT, + host->base + DAVINCI_MMCCTL); + break; + } + + calculate_clk_divider(mmc, ios); + + host->bus_mode = ios->bus_mode; + if (ios->power_mode == MMC_POWER_UP) { + unsigned long timeout = jiffies + msecs_to_jiffies(50); + bool lose = true; + + /* Send clock cycles, poll completion */ + writel(0, host->base + DAVINCI_MMCARGHL); + writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); + while (time_before(jiffies, timeout)) { + u32 tmp = readl(host->base + DAVINCI_MMCST0); + + if (tmp & MMCST0_RSPDNE) { + lose = false; + break; + } + cpu_relax(); + } + if (lose) + dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); + } + + /* FIXME on power OFF, reset things ... */ +} + +static void +mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) +{ + host->data = NULL; + + if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { + /* + * SDIO Interrupt Detection work-around as suggested by + * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata + * 2.1.6): Signal SDIO interrupt only if it is enabled by core + */ + if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & + SDIOST0_DAT1_HI)) { + writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); + mmc_signal_sdio_irq(host->mmc); + } + } + + if (host->do_dma) { + davinci_abort_dma(host); + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + host->do_dma = false; + } + host->data_dir = DAVINCI_MMC_DATADIR_NONE; + + if (!data->stop || (host->cmd && host->cmd->error)) { + mmc_request_done(host->mmc, data->mrq); + writel(0, host->base + DAVINCI_MMCIM); + host->active_request = false; + } else + mmc_davinci_start_command(host, data->stop); +} + +static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, + struct mmc_command *cmd) +{ + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); + cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); + cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); + cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); + } + } + + if (host->data == NULL || cmd->error) { + if (cmd->error == -ETIMEDOUT) + cmd->mrq->cmd->retries = 0; + mmc_request_done(host->mmc, cmd->mrq); + writel(0, host->base + DAVINCI_MMCIM); + host->active_request = false; + } +} + +static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, + int val) +{ + u32 temp; + + temp = readl(host->base + DAVINCI_MMCCTL); + if (val) /* reset */ + temp |= MMCCTL_CMDRST | MMCCTL_DATRST; + else /* enable */ + temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); + + writel(temp, host->base + DAVINCI_MMCCTL); + udelay(10); +} + +static void +davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +{ + mmc_davinci_reset_ctrl(host, 1); + mmc_davinci_reset_ctrl(host, 0); +} + +static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) +{ + struct mmc_davinci_host *host = dev_id; + unsigned int status; + + status = readl(host->base + DAVINCI_SDIOIST); + if (status & SDIOIST_IOINT) { + dev_dbg(mmc_dev(host->mmc), + "SDIO interrupt status %x\n", status); + 
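+ /*
+ * SDIOIST appears to be write-one-to-clear: writing IOINT back
+ * acks the interrupt in hardware before signalling the core.
+ */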
writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
+ unsigned int status, qstatus;
+ int end_command = 0;
+ int end_transfer = 0;
+ struct mmc_data *data = host->data;
+
+ if (host->cmd == NULL && host->data == NULL) {
+ status = readl(host->base + DAVINCI_MMCST0);
+ dev_dbg(mmc_dev(host->mmc),
+ "Spurious interrupt 0x%04x\n", status);
+ /* Disable the interrupt from mmcsd */
+ writel(0, host->base + DAVINCI_MMCIM);
+ return IRQ_NONE;
+ }
+
+ status = readl(host->base + DAVINCI_MMCST0);
+ qstatus = status;
+
+ /* Handle the FIFO first when using PIO for data.
+ * bytes_left decreases to zero as I/O progresses, and status reads
+ * zero on later iterations because the controller status register
+ * (MMCST0) reports each status bit only once and is cleared by the
+ * read. So this is not an unbounded loop, even in the non-DMA case.
+ */
+ if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
+ unsigned long im_val;
+
+ /*
+ * If interrupts fire during the following loop, they will be
+ * handled by the handler, but the PIC will still buffer these.
+ * As a result, the handler will be called again to serve these
+ * needlessly. In order to avoid these spurious interrupts,
+ * keep interrupts masked during the loop.
+ */
+ im_val = readl(host->base + DAVINCI_MMCIM);
+ writel(0, host->base + DAVINCI_MMCIM);
+
+ do {
+ davinci_fifo_data_trans(host, rw_threshold);
+ status = readl(host->base + DAVINCI_MMCST0);
+ qstatus |= status;
+ } while (host->bytes_left &&
+ (status & (MMCST0_DXRDY | MMCST0_DRRDY)));
+
+ /*
+ * If an interrupt is pending, it is assumed it will fire when
+ * it is unmasked. This assumption is also taken when the MMCIM
+ * is first set. Otherwise, writing to MMCIM after reading the
+ * status is race-prone.
+ */
+ writel(im_val, host->base + DAVINCI_MMCIM);
+ }
+
+ if (qstatus & MMCST0_DATDNE) {
+ /* All blocks sent/received, and CRC checks passed */
+ if (data != NULL) {
+ if ((host->do_dma == 0) && (host->bytes_left > 0)) {
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ }
+ end_transfer = 1;
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "DATDNE with no host->data\n");
+ }
+ }
+
+ if (qstatus & MMCST0_TOUTRD) {
+ /* Read data timeout */
+ data->error = -ETIMEDOUT;
+ end_transfer = 1;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "read data timeout, status %x\n",
+ qstatus);
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
+ /* Data CRC error */
+ data->error = -EILSEQ;
+ end_transfer = 1;
+
+ /* NOTE: this controller uses CRCWR to report both CRC
+ * errors and timeouts (on writes). MMCDRSP values are
+ * only weakly documented, but 0x9f was clearly a timeout
+ * case and the two three-bit patterns in various SD specs
+ * (101, 010) aren't part of it ...
+ */
+ if (qstatus & MMCST0_CRCWR) {
+ u32 temp = readb(host->base + DAVINCI_MMCDRSP);
+
+ if (temp == 0x9f)
+ data->error = -ETIMEDOUT;
+ }
+ dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
+ (qstatus & MMCST0_CRCWR) ? "write" : "read",
+ (data->error == -ETIMEDOUT) ?
"timeout" : "CRC"); + + davinci_abort_data(host, data); + } + + if (qstatus & MMCST0_TOUTRS) { + /* Command timeout */ + if (host->cmd) { + dev_dbg(mmc_dev(host->mmc), + "CMD%d timeout, status %x\n", + host->cmd->opcode, qstatus); + host->cmd->error = -ETIMEDOUT; + if (data) { + end_transfer = 1; + davinci_abort_data(host, data); + } else + end_command = 1; + } + } + + if (qstatus & MMCST0_CRCRS) { + /* Command CRC error */ + dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); + if (host->cmd) { + host->cmd->error = -EILSEQ; + end_command = 1; + } + } + + if (qstatus & MMCST0_RSPDNE) { + /* End of command phase */ + end_command = host->cmd ? 1 : 0; + } + + if (end_command) + mmc_davinci_cmd_done(host, host->cmd); + if (end_transfer) + mmc_davinci_xfer_done(host, data); + return IRQ_HANDLED; +} + +static int mmc_davinci_get_cd(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + if (config && config->get_cd) + return config->get_cd(pdev->id); + + return mmc_gpio_get_cd(mmc); +} + +static int mmc_davinci_get_ro(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *config = pdev->dev.platform_data; + + if (config && config->get_ro) + return config->get_ro(pdev->id); + + return mmc_gpio_get_ro(mmc); +} + +static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mmc_davinci_host *host = mmc_priv(mmc); + + if (enable) { + if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { + writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); + mmc_signal_sdio_irq(host->mmc); + } else { + host->sdio_int = true; + writel(readl(host->base + DAVINCI_SDIOIEN) | + SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); + } + } else { + host->sdio_int = false; + writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, + host->base + DAVINCI_SDIOIEN); + } +} + +static const struct mmc_host_ops mmc_davinci_ops = { + .request = mmc_davinci_request, + .set_ios = mmc_davinci_set_ios, + .get_cd = mmc_davinci_get_cd, + .get_ro = mmc_davinci_get_ro, + .enable_sdio_irq = mmc_davinci_enable_sdio_irq, +}; + +/*----------------------------------------------------------------------*/ + +#ifdef CONFIG_CPU_FREQ +static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct mmc_davinci_host *host; + unsigned int mmc_pclk; + struct mmc_host *mmc; + unsigned long flags; + + host = container_of(nb, struct mmc_davinci_host, freq_transition); + mmc = host->mmc; + mmc_pclk = clk_get_rate(host->clk); + + if (val == CPUFREQ_POSTCHANGE) { + spin_lock_irqsave(&mmc->lock, flags); + host->mmc_input_clk = mmc_pclk; + calculate_clk_divider(mmc, &mmc->ios); + spin_unlock_irqrestore(&mmc->lock, flags); + } + + return 0; +} + +static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) +{ + host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; + + return cpufreq_register_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) +{ + cpufreq_unregister_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} +#else +static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) +{ + return 0; +} + +static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) +{ +} +#endif +static void init_mmcsd_host(struct 
mmc_davinci_host *host) +{ + + mmc_davinci_reset_ctrl(host, 1); + + writel(0, host->base + DAVINCI_MMCCLK); + writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); + + writel(0x1FFF, host->base + DAVINCI_MMCTOR); + writel(0xFFFF, host->base + DAVINCI_MMCTOD); + + mmc_davinci_reset_ctrl(host, 0); +} + +static const struct platform_device_id davinci_mmc_devtype[] = { + { + .name = "dm6441-mmc", + .driver_data = MMC_CTLR_VERSION_1, + }, { + .name = "da830-mmc", + .driver_data = MMC_CTLR_VERSION_2, + }, + {}, +}; +MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); + +static const struct of_device_id davinci_mmc_dt_ids[] = { + { + .compatible = "ti,dm6441-mmc", + .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], + }, + { + .compatible = "ti,da830-mmc", + .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], + }, + {}, +}; +MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); + +static int mmc_davinci_parse_pdata(struct mmc_host *mmc) +{ + struct platform_device *pdev = to_platform_device(mmc->parent); + struct davinci_mmc_config *pdata = pdev->dev.platform_data; + struct mmc_davinci_host *host; + int ret; + + if (!pdata) + return -EINVAL; + + host = mmc_priv(mmc); + if (!host) + return -EINVAL; + + if (pdata && pdata->nr_sg) + host->nr_sg = pdata->nr_sg - 1; + + if (pdata && (pdata->wires == 4 || pdata->wires == 0)) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + if (pdata && (pdata->wires == 8)) + mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); + + mmc->f_min = 312500; + mmc->f_max = 25000000; + if (pdata && pdata->max_freq) + mmc->f_max = pdata->max_freq; + if (pdata && pdata->caps) + mmc->caps |= pdata->caps; + + /* Register a cd gpio, if there is not one, enable polling */ + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); + if (ret == -EPROBE_DEFER) + return ret; + else if (ret) + mmc->caps |= MMC_CAP_NEEDS_POLL; + + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); + if (ret == -EPROBE_DEFER) + return ret; + + return 0; +} + +static int davinci_mmcsd_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct mmc_davinci_host *host = NULL; + struct mmc_host *mmc = NULL; + struct resource *r, *mem = NULL; + int ret, irq; + size_t mem_size; + const struct platform_device_id *id_entry; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mem_size = resource_size(r); + mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, + pdev->name); + if (!mem) + return -EBUSY; + + mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->mmc = mmc; /* Important */ + + host->mem_res = mem; + host->base = devm_ioremap(&pdev->dev, mem->start, mem_size); + if (!host->base) { + ret = -ENOMEM; + goto ioremap_fail; + } + + host->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto clk_get_fail; + } + ret = clk_prepare_enable(host->clk); + if (ret) + goto clk_prepare_enable_fail; + + host->mmc_input_clk = clk_get_rate(host->clk); + + match = of_match_device(davinci_mmc_dt_ids, &pdev->dev); + if (match) { + pdev->id_entry = match->data; + ret = mmc_of_parse(mmc); + if (ret) { + dev_err_probe(&pdev->dev, ret, + "could not parse of data\n"); + goto parse_fail; + } + } else { + ret = mmc_davinci_parse_pdata(mmc); + if (ret) { + dev_err(&pdev->dev, + "could not parse platform data: %d\n", ret); + goto parse_fail; + } } + + if (host->nr_sg > MAX_NR_SG || !host->nr_sg) + host->nr_sg = 
MAX_NR_SG; + + init_mmcsd_host(host); + + host->use_dma = use_dma; + host->mmc_irq = irq; + host->sdio_irq = platform_get_irq(pdev, 1); + + if (host->use_dma) { + ret = davinci_acquire_dma_channels(host); + if (ret == -EPROBE_DEFER) + goto dma_probe_defer; + else if (ret) + host->use_dma = 0; + } + + mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; + + id_entry = platform_get_device_id(pdev); + if (id_entry) + host->version = id_entry->driver_data; + + mmc->ops = &mmc_davinci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + /* With no iommu coalescing pages, each phys_seg is a hw_seg. + * Each hw_seg uses one EDMA parameter RAM slot, always one + * channel and then usually some linked slots. + */ + mmc->max_segs = MAX_NR_SG; + + /* EDMA limit per hw segment (one or two MBytes) */ + mmc->max_seg_size = MAX_CCNT * rw_threshold; + + /* MMC/SD controller limits for multiblock requests */ + mmc->max_blk_size = 4095; /* BLEN is 12 bits */ + mmc->max_blk_count = 65535; /* NBLK is 16 bits */ + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); + dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); + dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); + dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); + + platform_set_drvdata(pdev, host); + + ret = mmc_davinci_cpufreq_register(host); + if (ret) { + dev_err(&pdev->dev, "failed to register cpufreq\n"); + goto cpu_freq_fail; + } + + ret = mmc_add_host(mmc); + if (ret < 0) + goto mmc_add_host_fail; + + ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0, + mmc_hostname(mmc), host); + if (ret) + goto request_irq_fail; + + if (host->sdio_irq >= 0) { + ret = devm_request_irq(&pdev->dev, host->sdio_irq, + mmc_davinci_sdio_irq, 0, + mmc_hostname(mmc), host); + if (!ret) + mmc->caps |= MMC_CAP_SDIO_IRQ; + } + + rename_region(mem, mmc_hostname(mmc)); + + dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", + host->use_dma ? "DMA" : "PIO", + (mmc->caps & MMC_CAP_4_BIT_DATA) ? 
4 : 1); + + return 0; + +request_irq_fail: + mmc_remove_host(mmc); +mmc_add_host_fail: + mmc_davinci_cpufreq_deregister(host); +cpu_freq_fail: + davinci_release_dma_channels(host); +parse_fail: +dma_probe_defer: + clk_disable_unprepare(host->clk); +clk_prepare_enable_fail: +clk_get_fail: +ioremap_fail: + mmc_free_host(mmc); + + return ret; +} + +static int __exit davinci_mmcsd_remove(struct platform_device *pdev) +{ + struct mmc_davinci_host *host = platform_get_drvdata(pdev); + + mmc_remove_host(host->mmc); + mmc_davinci_cpufreq_deregister(host); + davinci_release_dma_channels(host); + clk_disable_unprepare(host->clk); + mmc_free_host(host->mmc); + + return 0; +} + +#ifdef CONFIG_PM +static int davinci_mmcsd_suspend(struct device *dev) +{ + struct mmc_davinci_host *host = dev_get_drvdata(dev); + + writel(0, host->base + DAVINCI_MMCIM); + mmc_davinci_reset_ctrl(host, 1); + clk_disable(host->clk); + + return 0; +} + +static int davinci_mmcsd_resume(struct device *dev) +{ + struct mmc_davinci_host *host = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(host->clk); + if (ret) + return ret; + + mmc_davinci_reset_ctrl(host, 0); + + return 0; +} + +static const struct dev_pm_ops davinci_mmcsd_pm = { + .suspend = davinci_mmcsd_suspend, + .resume = davinci_mmcsd_resume, +}; + +#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) +#else +#define davinci_mmcsd_pm_ops NULL +#endif + +static struct platform_driver davinci_mmcsd_driver = { + .driver = { + .name = "davinci_mmc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = davinci_mmcsd_pm_ops, + .of_match_table = davinci_mmc_dt_ids, + }, + .probe = davinci_mmcsd_probe, + .remove = __exit_p(davinci_mmcsd_remove), + .id_table = davinci_mmc_devtype, +}; + +module_platform_driver(davinci_mmcsd_driver); + +MODULE_AUTHOR("Texas Instruments India"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); +MODULE_ALIAS("platform:davinci_mmc"); + diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c new file mode 100644 index 000000000..10baf122b --- /dev/null +++ b/drivers/mmc/host/dw_mmc-bluefield.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Mellanox Technologies. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +#define UHS_REG_EXT_SAMPLE_MASK GENMASK(22, 16) +#define UHS_REG_EXT_DRIVE_MASK GENMASK(29, 23) +#define BLUEFIELD_UHS_REG_EXT_SAMPLE 2 +#define BLUEFIELD_UHS_REG_EXT_DRIVE 4 + +static void dw_mci_bluefield_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + u32 reg; + + /* Update the Drive and Sample fields in register UHS_REG_EXT. 
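+ *
+ * FIELD_PREP() shifts a value into a GENMASK()-defined field, e.g.
+ * FIELD_PREP(GENMASK(22, 16), 2) evaluates to 2 << 16, so the
+ * sequence below is a plain read-modify-write of the two fields.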
*/ + reg = mci_readl(host, UHS_REG_EXT); + reg &= ~UHS_REG_EXT_SAMPLE_MASK; + reg |= FIELD_PREP(UHS_REG_EXT_SAMPLE_MASK, + BLUEFIELD_UHS_REG_EXT_SAMPLE); + reg &= ~UHS_REG_EXT_DRIVE_MASK; + reg |= FIELD_PREP(UHS_REG_EXT_DRIVE_MASK, BLUEFIELD_UHS_REG_EXT_DRIVE); + mci_writel(host, UHS_REG_EXT, reg); +} + +static const struct dw_mci_drv_data bluefield_drv_data = { + .set_ios = dw_mci_bluefield_set_ios +}; + +static const struct of_device_id dw_mci_bluefield_match[] = { + { .compatible = "mellanox,bluefield-dw-mshc", + .data = &bluefield_drv_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_bluefield_match); + +static int dw_mci_bluefield_probe(struct platform_device *pdev) +{ + return dw_mci_pltfm_register(pdev, &bluefield_drv_data); +} + +static struct platform_driver dw_mci_bluefield_pltfm_driver = { + .probe = dw_mci_bluefield_probe, + .remove = dw_mci_pltfm_remove, + .driver = { + .name = "dwmmc_bluefield", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_bluefield_match, + .pm = &dw_mci_pltfm_pmops, + }, +}; + +module_platform_driver(dw_mci_bluefield_pltfm_driver); + +MODULE_DESCRIPTION("BlueField DW Multimedia Card driver"); +MODULE_AUTHOR("Mellanox Technologies"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c new file mode 100644 index 000000000..1f8a3c0dd --- /dev/null +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -0,0 +1,620 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver + * + * Copyright (C) 2012, Samsung Electronics Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" +#include "dw_mmc-exynos.h" + +/* Variations in Exynos specific dw-mshc controller */ +enum dw_mci_exynos_type { + DW_MCI_TYPE_EXYNOS4210, + DW_MCI_TYPE_EXYNOS4412, + DW_MCI_TYPE_EXYNOS5250, + DW_MCI_TYPE_EXYNOS5420, + DW_MCI_TYPE_EXYNOS5420_SMU, + DW_MCI_TYPE_EXYNOS7, + DW_MCI_TYPE_EXYNOS7_SMU, +}; + +/* Exynos implementation specific driver private data */ +struct dw_mci_exynos_priv_data { + enum dw_mci_exynos_type ctrl_type; + u8 ciu_div; + u32 sdr_timing; + u32 ddr_timing; + u32 hs400_timing; + u32 tuned_sample; + u32 cur_speed; + u32 dqs_delay; + u32 saved_dqs_en; + u32 saved_strobe_ctrl; +}; + +static struct dw_mci_exynos_compatible { + char *compatible; + enum dw_mci_exynos_type ctrl_type; +} exynos_compat[] = { + { + .compatible = "samsung,exynos4210-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS4210, + }, { + .compatible = "samsung,exynos4412-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS4412, + }, { + .compatible = "samsung,exynos5250-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS5250, + }, { + .compatible = "samsung,exynos5420-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS5420, + }, { + .compatible = "samsung,exynos5420-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU, + }, { + .compatible = "samsung,exynos7-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS7, + }, { + .compatible = "samsung,exynos7-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU, + }, +}; + +static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) + return EXYNOS4412_FIXED_CIU_CLK_DIV; + else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) + return EXYNOS4210_FIXED_CIU_CLK_DIV; + else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == 
DW_MCI_TYPE_EXYNOS7_SMU)
+ return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
+ else
+ return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
+}
+
+static void dw_mci_exynos_config_smu(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ /*
+ * If this Exynos variant provides a security management unit (SMU),
+ * configure it for non-encryption mode at this time.
+ */
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ mci_writel(host, MPSBEGIN0, 0);
+ mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
+ mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
+ SDMMC_MPSCTRL_NON_SECURE_READ_BIT |
+ SDMMC_MPSCTRL_VALID |
+ SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
+ }
+}
+
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ dw_mci_exynos_config_smu(host);
+
+ if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
+ priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
+ priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN);
+ priv->saved_dqs_en |= AXI_NON_BLOCKING_WR;
+ mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en);
+ if (!priv->dqs_delay)
+ priv->dqs_delay =
+ DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
+ }
+
+ host->bus_hz /= (priv->ciu_div + 1);
+
+ return 0;
+}
+
+static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+
+ clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+
+ /*
+ * Exynos4412 and Exynos5250 extend the use of the CMD register with
+ * bit 29 (which is reserved on standard MSHC controllers) for
+ * optionally bypassing the HOLD register for command and data. The
+ * HOLD register should be bypassed in case there is no phase shift
+ * applied on CMD/DATA that is sent to the card.
+ */
+ if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->slot)
+ set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
+}
+
+#ifdef CONFIG_PM
+static int dw_mci_exynos_runtime_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_mci_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ dw_mci_exynos_config_smu(host);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * dw_mci_exynos_suspend_noirq - Exynos-specific suspend code
+ * @dev: Device to suspend (this device)
+ *
+ * This ensures that the device will be in the runtime-active state in
+ * dw_mci_exynos_resume_noirq() after calling pm_runtime_force_resume().
+ */
+static int dw_mci_exynos_suspend_noirq(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+ return pm_runtime_force_suspend(dev);
+}
+
+/**
+ * dw_mci_exynos_resume_noirq - Exynos-specific resume code
+ * @dev: Device to resume (this device)
+ *
+ * On exynos5420 there is a silicon erratum that sometimes leaves the
+ * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to
+ * indicate that it fired, and we can clear it by writing a 1 back.
+ * Clear it to prevent interrupts from going off constantly.
+ *
+ * We run this code on all exynos variants because it doesn't hurt.
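+ *
+ * Return: 0 on success, or a negative errno if
+ * pm_runtime_force_resume() fails.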
+ */ +static int dw_mci_exynos_resume_noirq(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + struct dw_mci_exynos_priv_data *priv = host->priv; + u32 clksel; + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) + clksel = mci_readl(host, CLKSEL64); + else + clksel = mci_readl(host, CLKSEL); + + if (clksel & SDMMC_CLKSEL_WAKEUP_INT) { + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) + mci_writel(host, CLKSEL64, clksel); + else + mci_writel(host, CLKSEL, clksel); + } + + pm_runtime_put(dev); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + u32 dqs, strobe; + + /* + * Not supported to configure register + * related to HS400 + */ + if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) { + if (timing == MMC_TIMING_MMC_HS400) + dev_warn(host->dev, + "cannot configure HS400, unsupported chipset\n"); + return; + } + + dqs = priv->saved_dqs_en; + strobe = priv->saved_strobe_ctrl; + + if (timing == MMC_TIMING_MMC_HS400) { + dqs |= DATA_STROBE_EN; + strobe = DQS_CTRL_RD_DELAY(strobe, priv->dqs_delay); + } else if (timing == MMC_TIMING_UHS_SDR104) { + dqs &= 0xffffff00; + } else { + dqs &= ~DATA_STROBE_EN; + } + + mci_writel(host, HS400_DQS_EN, dqs); + mci_writel(host, HS400_DLINE_CTRL, strobe); +} + +static void dw_mci_exynos_adjust_clock(struct dw_mci *host, unsigned int wanted) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned long actual; + u8 div; + int ret; + /* + * Don't care if wanted clock is zero or + * ciu clock is unavailable + */ + if (!wanted || IS_ERR(host->ciu_clk)) + return; + + /* Guaranteed minimum frequency for cclkin */ + if (wanted < EXYNOS_CCLKIN_MIN) + wanted = EXYNOS_CCLKIN_MIN; + + if (wanted == priv->cur_speed) + return; + + div = dw_mci_exynos_get_ciu_div(host); + ret = clk_set_rate(host->ciu_clk, wanted * div); + if (ret) + dev_warn(host->dev, + "failed to set clk-rate %u error: %d\n", + wanted * div, ret); + actual = clk_get_rate(host->ciu_clk); + host->bus_hz = actual / div; + priv->cur_speed = wanted; + host->current_speed = 0; +} + +static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned int wanted = ios->clock; + u32 timing = ios->timing, clksel; + + switch (timing) { + case MMC_TIMING_MMC_HS400: + /* Update tuned sample timing */ + clksel = SDMMC_CLKSEL_UP_SAMPLE( + priv->hs400_timing, priv->tuned_sample); + wanted <<= 1; + break; + case MMC_TIMING_MMC_DDR52: + clksel = priv->ddr_timing; + /* Should be double rate for DDR mode */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + wanted <<= 1; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + clksel = (priv->sdr_timing & 0xfff8ffff) | + (priv->ciu_div << 16); + break; + case MMC_TIMING_UHS_DDR50: + clksel = (priv->ddr_timing & 0xfff8ffff) | + (priv->ciu_div << 16); + break; + default: + clksel = priv->sdr_timing; + } + + /* Set clock timing for the requested speed mode*/ + dw_mci_exynos_set_clksel_timing(host, clksel); + + /* Configure setting for HS400 */ + dw_mci_exynos_config_hs400(host, timing); + + /* Configure clock rate */ + dw_mci_exynos_adjust_clock(host, wanted); +} + +static int dw_mci_exynos_parse_dt(struct dw_mci *host) +{ + struct dw_mci_exynos_priv_data *priv; + struct device_node *np 
= host->dev->of_node;
+ u32 timing[2];
+ u32 div = 0;
+ int idx;
+ int ret;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
+ if (of_device_is_compatible(np, exynos_compat[idx].compatible))
+ priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ }
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
+ priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
+ priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1;
+ else {
+ of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
+ priv->ciu_div = div;
+ }
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-sdr-timing", timing, 2);
+ if (ret)
+ return ret;
+
+ priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-ddr-timing", timing, 2);
+ if (ret)
+ return ret;
+
+ priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-hs400-timing", timing, 2);
+ if (!ret && of_property_read_u32(np,
+ "samsung,read-strobe-delay", &priv->dqs_delay))
+ dev_dbg(host->dev,
+ "read-strobe-delay not found, assuming default value\n");
+
+ priv->hs400_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1],
+ HS400_FIXED_CIU_CLK_DIV);
+ host->priv = priv;
+ return 0;
+}
+
+static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
+ else
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
+}
+
+static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
+{
+ u32 clksel;
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+}
+
+static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel;
+ u8 sample;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+
+ sample = (clksel + 1) & 0x7;
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+
+ return sample;
+}
+
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
+{
+ const u8 iter = 8;
+ u8 __c;
+ s8 i, loc = -1;
+
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0xc7) == 0xc7) {
+ loc = i;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0x83) == 0x83) {
+ loc = i;
+ goto out;
+ }
+ }
+
+ /*
+ * If there is no candidate value, return -EIO. If there are
+ * candidate values but no best clock sample value is found,
+ * fall back to the first candidate clock sample value.
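+ *
+ * Worked example (hypothetical bitmap): for candidates = 0xc7
+ * (0b11000111), rotation 0 already passes the strict
+ * (__c & 0xc7) == 0xc7 test, so loc = 0; the looser 0x83 and 0x1
+ * scans only run when no such wide window exists.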
+ */
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0x1) == 0x1) {
+ loc = i;
+ goto out;
+ }
+ }
+out:
+ return loc;
+}
+
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ struct dw_mci *host = slot->host;
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ struct mmc_host *mmc = slot->mmc;
+ u8 start_smpl, smpl, candidates = 0;
+ s8 found;
+ int ret = 0;
+
+ start_smpl = dw_mci_exynos_get_clksmpl(host);
+
+ do {
+ mci_writel(host, TMOUT, ~0);
+ smpl = dw_mci_exynos_move_next_clksmpl(host);
+
+ if (!mmc_send_tuning(mmc, opcode, NULL))
+ candidates |= (1 << smpl);
+
+ } while (start_smpl != smpl);
+
+ found = dw_mci_exynos_get_best_clksmpl(candidates);
+ if (found >= 0) {
+ dw_mci_exynos_set_clksmpl(host, found);
+ priv->tuned_sample = found;
+ } else {
+ ret = -EIO;
+ dev_warn(&mmc->class_dev,
+ "There are no candidate values for clksmpl!\n");
+ }
+
+ return ret;
+}
+
+static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host,
+ struct mmc_ios *ios)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ dw_mci_exynos_set_clksel_timing(host, priv->hs400_timing);
+ dw_mci_exynos_adjust_clock(host, (ios->clock) << 1);
+
+ return 0;
+}
+
+/* Common capabilities of Exynos4/Exynos5 SoC */
+static unsigned long exynos_dwmmc_caps[4] = {
+ MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+};
+
+static const struct dw_mci_drv_data exynos_drv_data = {
+ .caps = exynos_dwmmc_caps,
+ .num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
+ .init = dw_mci_exynos_priv_init,
+ .set_ios = dw_mci_exynos_set_ios,
+ .parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
+ .prepare_hs400_tuning = dw_mci_exynos_prepare_hs400_tuning,
+};
+
+static const struct of_device_id dw_mci_exynos_match[] = {
+ { .compatible = "samsung,exynos4412-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5250-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7-dw-mshc-smu",
+ .data = &exynos_drv_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
+
+static int dw_mci_exynos_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+ int ret;
+
+ match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = dw_mci_pltfm_register(pdev, drv_data);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mci_exynos_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return dw_mci_pltfm_remove(pdev);
+}
+
+static const struct dev_pm_ops dw_mci_exynos_pmops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq,
+ dw_mci_exynos_resume_noirq)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_exynos_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver dw_mci_exynos_pltfm_driver = {
+ .probe = dw_mci_exynos_probe,
+ .remove = dw_mci_exynos_remove,
+ .driver
= { + .name = "dwmmc_exynos", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_exynos_match, + .pm = &dw_mci_exynos_pmops, + }, +}; + +module_platform_driver(dw_mci_exynos_pltfm_driver); + +MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); +MODULE_AUTHOR("Thomas Abraham > 16) & 0x7) +#define SDMMC_CLKSEL_GET_DIV(x) (((x) >> 24) & 0x7) +#define SDMMC_CLKSEL_UP_SAMPLE(x, y) (((x) & ~SDMMC_CLKSEL_CCLK_SAMPLE(7)) |\ + SDMMC_CLKSEL_CCLK_SAMPLE(y)) +#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ + SDMMC_CLKSEL_CCLK_DRIVE(y) | \ + SDMMC_CLKSEL_CCLK_DIVIDER(z)) +#define SDMMC_CLKSEL_TIMING_MASK SDMMC_CLKSEL_TIMING(0x7, 0x7, 0x7) +#define SDMMC_CLKSEL_WAKEUP_INT BIT(11) + +/* RCLK_EN register defines */ +#define DATA_STROBE_EN BIT(0) +#define AXI_NON_BLOCKING_WR BIT(7) + +/* DLINE_CTRL register defines */ +#define DQS_CTRL_RD_DELAY(x, y) (((x) & ~0x3FF) | ((y) & 0x3FF)) +#define DQS_CTRL_GET_RD_DELAY(x) ((x) & 0x3FF) + +/* Protector Register */ +#define SDMMC_EMMCP_BASE 0x1000 +#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010) +#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200) +#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204) +#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C) + +/* SMU control defines */ +#define SDMMC_MPSCTRL_SECURE_READ_BIT BIT(7) +#define SDMMC_MPSCTRL_SECURE_WRITE_BIT BIT(6) +#define SDMMC_MPSCTRL_NON_SECURE_READ_BIT BIT(5) +#define SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4) +#define SDMMC_MPSCTRL_USE_FUSE_KEY BIT(3) +#define SDMMC_MPSCTRL_ECB_MODE BIT(2) +#define SDMMC_MPSCTRL_ENCRYPTION BIT(1) +#define SDMMC_MPSCTRL_VALID BIT(0) + +/* Maximum number of Ending sector */ +#define SDMMC_ENDING_SEC_NR_MAX 0xFFFFFFFF + +/* Fixed clock divider */ +#define EXYNOS4210_FIXED_CIU_CLK_DIV 2 +#define EXYNOS4412_FIXED_CIU_CLK_DIV 4 +#define HS400_FIXED_CIU_CLK_DIV 1 + +/* Minimal required clock frequency for cclkin, unit: HZ */ +#define EXYNOS_CCLKIN_MIN 50000000 + +#endif /* _DW_MMC_EXYNOS_H_ */ diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c new file mode 100644 index 000000000..39794f938 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 HiSilicon Technologies Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +#define ALL_INT_CLR 0x1ffff + +struct hi3798cv200_priv { + struct clk *sample_clk; + struct clk *drive_clk; +}; + +static unsigned long dw_mci_hi3798cv200_caps[] = { + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23 +}; + +static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + struct hi3798cv200_priv *priv = host->priv; + u32 val; + + val = mci_readl(host, UHS_REG); + if (ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_DDR50) + val |= SDMMC_UHS_DDR; + else + val &= ~SDMMC_UHS_DDR; + mci_writel(host, UHS_REG, val); + + val = mci_readl(host, ENABLE_SHIFT); + if (ios->timing == MMC_TIMING_MMC_DDR52) + val |= SDMMC_ENABLE_PHASE; + else + val &= ~SDMMC_ENABLE_PHASE; + mci_writel(host, ENABLE_SHIFT, val); + + val = mci_readl(host, DDR_REG); + if (ios->timing == MMC_TIMING_MMC_HS400) + val |= SDMMC_DDR_HS400; + else + val &= ~SDMMC_DDR_HS400; + mci_writel(host, DDR_REG, val); + + if (ios->timing == MMC_TIMING_MMC_HS || + ios->timing == MMC_TIMING_LEGACY) + clk_set_phase(priv->drive_clk, 180); + else if (ios->timing == MMC_TIMING_MMC_HS200) + clk_set_phase(priv->drive_clk, 135); +} + +static int dw_mci_hi3798cv200_execute_tuning(struct dw_mci_slot *slot, + u32 opcode) +{ + static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 }; + struct dw_mci *host = slot->host; + struct hi3798cv200_priv *priv = host->priv; + int raise_point = -1, fall_point = -1; + int err, prev_err = -1; + int found = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(degrees); i++) { + clk_set_phase(priv->sample_clk, degrees[i]); + mci_writel(host, RINTSTS, ALL_INT_CLR); + + err = mmc_send_tuning(slot->mmc, opcode, NULL); + if (!err) + found = 1; + + if (i > 0) { + if (err && !prev_err) + fall_point = i - 1; + if (!err && prev_err) + raise_point = i; + } + + if (raise_point != -1 && fall_point != -1) + goto tuning_out; + + prev_err = err; + err = 0; + } + +tuning_out: + if (found) { + if (raise_point == -1) + raise_point = 0; + if (fall_point == -1) + fall_point = ARRAY_SIZE(degrees) - 1; + if (fall_point < raise_point) { + if ((raise_point + fall_point) > + (ARRAY_SIZE(degrees) - 1)) + i = fall_point / 2; + else + i = (raise_point + ARRAY_SIZE(degrees) - 1) / 2; + } else { + i = (raise_point + fall_point) / 2; + } + + clk_set_phase(priv->sample_clk, degrees[i]); + dev_dbg(host->dev, "Tuning clk_sample[%d, %d], set[%d]\n", + raise_point, fall_point, degrees[i]); + } else { + dev_err(host->dev, "No valid clk_sample shift! 
use default\n"); + err = -EINVAL; + } + + mci_writel(host, RINTSTS, ALL_INT_CLR); + return err; +} + +static int dw_mci_hi3798cv200_init(struct dw_mci *host) +{ + struct hi3798cv200_priv *priv; + int ret; + + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->sample_clk = devm_clk_get(host->dev, "ciu-sample"); + if (IS_ERR(priv->sample_clk)) { + dev_err(host->dev, "failed to get ciu-sample clock\n"); + return PTR_ERR(priv->sample_clk); + } + + priv->drive_clk = devm_clk_get(host->dev, "ciu-drive"); + if (IS_ERR(priv->drive_clk)) { + dev_err(host->dev, "failed to get ciu-drive clock\n"); + return PTR_ERR(priv->drive_clk); + } + + ret = clk_prepare_enable(priv->sample_clk); + if (ret) { + dev_err(host->dev, "failed to enable ciu-sample clock\n"); + return ret; + } + + ret = clk_prepare_enable(priv->drive_clk); + if (ret) { + dev_err(host->dev, "failed to enable ciu-drive clock\n"); + goto disable_sample_clk; + } + + host->priv = priv; + return 0; + +disable_sample_clk: + clk_disable_unprepare(priv->sample_clk); + return ret; +} + +static const struct dw_mci_drv_data hi3798cv200_data = { + .caps = dw_mci_hi3798cv200_caps, + .num_caps = ARRAY_SIZE(dw_mci_hi3798cv200_caps), + .init = dw_mci_hi3798cv200_init, + .set_ios = dw_mci_hi3798cv200_set_ios, + .execute_tuning = dw_mci_hi3798cv200_execute_tuning, +}; + +static int dw_mci_hi3798cv200_probe(struct platform_device *pdev) +{ + return dw_mci_pltfm_register(pdev, &hi3798cv200_data); +} + +static int dw_mci_hi3798cv200_remove(struct platform_device *pdev) +{ + struct dw_mci *host = platform_get_drvdata(pdev); + struct hi3798cv200_priv *priv = host->priv; + + clk_disable_unprepare(priv->drive_clk); + clk_disable_unprepare(priv->sample_clk); + + return dw_mci_pltfm_remove(pdev); +} + +static const struct of_device_id dw_mci_hi3798cv200_match[] = { + { .compatible = "hisilicon,hi3798cv200-dw-mshc", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dw_mci_hi3798cv200_match); +static struct platform_driver dw_mci_hi3798cv200_driver = { + .probe = dw_mci_hi3798cv200_probe, + .remove = dw_mci_hi3798cv200_remove, + .driver = { + .name = "dwmmc_hi3798cv200", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_hi3798cv200_match, + }, +}; +module_platform_driver(dw_mci_hi3798cv200_driver); + +MODULE_DESCRIPTION("HiSilicon Hi3798CV200 Specific DW-MSHC Driver Extension"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dwmmc_hi3798cv200"); diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c new file mode 100644 index 000000000..29d2494eb --- /dev/null +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2013 Linaro Ltd. + * Copyright (c) 2013 Hisilicon Limited. 
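+ * + * Platform glue for HiSilicon K3/Hi4511, Hi6220 and Hi3660 SoCs; the + * per-SoC dw_mci_drv_data variants are selected via dw_mci_k3_match[] + * at the bottom of this file.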
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +/* + * hi6220 sd only support io voltage 1.8v and 3v + * Also need config AO_SCTRL_SEL18 accordingly + */ +#define AO_SCTRL_SEL18 BIT(10) +#define AO_SCTRL_CTRL3 0x40C + +#define DWMMC_SDIO_ID 2 + +#define SOC_SCTRL_SCPERCTRL5 (0x314) +#define SDCARD_IO_SEL18 BIT(2) + +#define SDCARD_RD_THRESHOLD (512) + +#define GENCLK_DIV (7) + +#define GPIO_CLK_ENABLE BIT(16) +#define GPIO_CLK_DIV_MASK GENMASK(11, 8) +#define GPIO_USE_SAMPLE_DLY_MASK GENMASK(13, 13) +#define UHS_REG_EXT_SAMPLE_PHASE_MASK GENMASK(20, 16) +#define UHS_REG_EXT_SAMPLE_DRVPHASE_MASK GENMASK(25, 21) +#define UHS_REG_EXT_SAMPLE_DLY_MASK GENMASK(30, 26) + +#define TIMING_MODE 3 +#define TIMING_CFG_NUM 10 + +#define NUM_PHASES (40) + +#define ENABLE_SHIFT_MIN_SMPL (4) +#define ENABLE_SHIFT_MAX_SMPL (12) +#define USE_DLY_MIN_SMPL (11) +#define USE_DLY_MAX_SMPL (14) + +struct k3_priv { + int ctrl_id; + u32 cur_speed; + struct regmap *reg; +}; + +static unsigned long dw_mci_hi6220_caps[] = { + MMC_CAP_CMD23, + MMC_CAP_CMD23, + 0 +}; + +struct hs_timing { + u32 drv_phase; + u32 smpl_dly; + u32 smpl_phase_max; + u32 smpl_phase_min; +}; + +static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = { + { /* reserved */ }, + { /* SD */ + {7, 0, 15, 15,}, /* 0: LEGACY 400k */ + {6, 0, 4, 4,}, /* 1: MMC_HS */ + {6, 0, 3, 3,}, /* 2: SD_HS */ + {6, 0, 15, 15,}, /* 3: SDR12 */ + {6, 0, 2, 2,}, /* 4: SDR25 */ + {4, 0, 11, 0,}, /* 5: SDR50 */ + {6, 4, 15, 0,}, /* 6: SDR104 */ + {0}, /* 7: DDR50 */ + {0}, /* 8: DDR52 */ + {0}, /* 9: HS200 */ + }, + { /* SDIO */ + {7, 0, 15, 15,}, /* 0: LEGACY 400k */ + {0}, /* 1: MMC_HS */ + {6, 0, 15, 15,}, /* 2: SD_HS */ + {6, 0, 15, 15,}, /* 3: SDR12 */ + {6, 0, 0, 0,}, /* 4: SDR25 */ + {4, 0, 12, 0,}, /* 5: SDR50 */ + {5, 4, 15, 0,}, /* 6: SDR104 */ + {0}, /* 7: DDR50 */ + {0}, /* 8: DDR52 */ + {0}, /* 9: HS200 */ + } +}; + +static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + + ret = clk_set_rate(host->ciu_clk, ios->clock); + if (ret) + dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock); + + host->bus_hz = clk_get_rate(host->ciu_clk); +} + +static const struct dw_mci_drv_data k3_drv_data = { + .set_ios = dw_mci_k3_set_ios, +}; + +static int dw_mci_hi6220_parse_dt(struct dw_mci *host) +{ + struct k3_priv *priv; + + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->reg = syscon_regmap_lookup_by_phandle(host->dev->of_node, + "hisilicon,peripheral-syscon"); + if (IS_ERR(priv->reg)) + priv->reg = NULL; + + priv->ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (priv->ctrl_id < 0) + priv->ctrl_id = 0; + + if (priv->ctrl_id >= TIMING_MODE) + return -EINVAL; + + host->priv = priv; + return 0; +} + +static int dw_mci_hi6220_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct k3_priv *priv; + struct dw_mci *host; + int min_uv, max_uv; + int ret; + + host = slot->host; + priv = host->priv; + + if (!priv || !priv->reg) + return 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + ret = regmap_update_bits(priv->reg, AO_SCTRL_CTRL3, + AO_SCTRL_SEL18, 0); + min_uv = 3000000; + max_uv = 3000000; + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = regmap_update_bits(priv->reg, AO_SCTRL_CTRL3, + AO_SCTRL_SEL18, AO_SCTRL_SEL18); + min_uv = 1800000; + max_uv 
= 1800000; + } else { + dev_dbg(host->dev, "voltage not supported\n"); + return -EINVAL; + } + + if (ret) { + dev_dbg(host->dev, "switch voltage failed\n"); + return ret; + } + + if (IS_ERR_OR_NULL(mmc->supply.vqmmc)) + return 0; + + ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv); + if (ret) { + dev_dbg(host->dev, "Regulator set error %d: %d - %d\n", + ret, min_uv, max_uv); + return ret; + } + + return 0; +} + +static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + unsigned int clock; + + clock = (ios->clock <= 25000000) ? 25000000 : ios->clock; + + ret = clk_set_rate(host->biu_clk, clock); + if (ret) + dev_warn(host->dev, "failed to set rate %uHz\n", clock); + + host->bus_hz = clk_get_rate(host->biu_clk); +} + +static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + return 0; +} + +static const struct dw_mci_drv_data hi6220_data = { + .caps = dw_mci_hi6220_caps, + .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps), + .switch_voltage = dw_mci_hi6220_switch_voltage, + .set_ios = dw_mci_hi6220_set_ios, + .parse_dt = dw_mci_hi6220_parse_dt, + .execute_tuning = dw_mci_hi6220_execute_tuning, +}; + +static void dw_mci_hs_set_timing(struct dw_mci *host, int timing, + int smpl_phase) +{ + u32 drv_phase; + u32 smpl_dly; + u32 use_smpl_dly = 0; + u32 enable_shift = 0; + u32 reg_value; + int ctrl_id; + struct k3_priv *priv; + + priv = host->priv; + ctrl_id = priv->ctrl_id; + + drv_phase = hs_timing_cfg[ctrl_id][timing].drv_phase; + smpl_dly = hs_timing_cfg[ctrl_id][timing].smpl_dly; + if (smpl_phase == -1) + smpl_phase = (hs_timing_cfg[ctrl_id][timing].smpl_phase_max + + hs_timing_cfg[ctrl_id][timing].smpl_phase_min) / 2; + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + if (smpl_phase >= USE_DLY_MIN_SMPL && + smpl_phase <= USE_DLY_MAX_SMPL) + use_smpl_dly = 1; + fallthrough; + case MMC_TIMING_UHS_SDR50: + if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL && + smpl_phase <= ENABLE_SHIFT_MAX_SMPL) + enable_shift = 1; + break; + } + + mci_writel(host, GPIO, 0x0); + usleep_range(5, 10); + + reg_value = FIELD_PREP(UHS_REG_EXT_SAMPLE_PHASE_MASK, smpl_phase) | + FIELD_PREP(UHS_REG_EXT_SAMPLE_DLY_MASK, smpl_dly) | + FIELD_PREP(UHS_REG_EXT_SAMPLE_DRVPHASE_MASK, drv_phase); + mci_writel(host, UHS_REG_EXT, reg_value); + + mci_writel(host, ENABLE_SHIFT, enable_shift); + + reg_value = FIELD_PREP(GPIO_CLK_DIV_MASK, GENCLK_DIV) | + FIELD_PREP(GPIO_USE_SAMPLE_DLY_MASK, use_smpl_dly); + mci_writel(host, GPIO, (unsigned int)reg_value | GPIO_CLK_ENABLE); + + /* We should delay 1ms wait for timing setting finished. */ + usleep_range(1000, 2000); +} + +static int dw_mci_hi3660_init(struct dw_mci *host) +{ + mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(SDCARD_RD_THRESHOLD, + SDMMC_CARD_RD_THR_EN)); + + dw_mci_hs_set_timing(host, MMC_TIMING_LEGACY, -1); + host->bus_hz /= (GENCLK_DIV + 1); + + return 0; +} + +static int dw_mci_set_sel18(struct dw_mci *host, bool set) +{ + int ret; + unsigned int val; + struct k3_priv *priv; + + priv = host->priv; + + val = set ? 
SDCARD_IO_SEL18 : 0; + ret = regmap_update_bits(priv->reg, SOC_SCTRL_SCPERCTRL5, + SDCARD_IO_SEL18, val); + if (ret) { + dev_err(host->dev, "sel18 %u error\n", val); + return ret; + } + + return 0; +} + +static void dw_mci_hi3660_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + unsigned long wanted; + unsigned long actual; + struct k3_priv *priv = host->priv; + + if (!ios->clock || ios->clock == priv->cur_speed) + return; + + wanted = ios->clock * (GENCLK_DIV + 1); + ret = clk_set_rate(host->ciu_clk, wanted); + if (ret) { + dev_err(host->dev, "failed to set rate %luHz\n", wanted); + return; + } + actual = clk_get_rate(host->ciu_clk); + + dw_mci_hs_set_timing(host, ios->timing, -1); + host->bus_hz = actual / (GENCLK_DIV + 1); + host->current_speed = 0; + priv->cur_speed = host->bus_hz; +} + +static int dw_mci_get_best_clksmpl(unsigned int sample_flag) +{ + int i; + int interval; + unsigned int v; + unsigned int len; + unsigned int range_start = 0; + unsigned int range_length = 0; + unsigned int middle_range = 0; + + if (!sample_flag) + return -EIO; + + if (~sample_flag == 0) + return 0; + + i = ffs(sample_flag) - 1; + + /* + * A clock cycle is divided into 32 phases, + * each of which is represented by a bit, + * finding the optimal phase. + */ + while (i < 32) { + v = ror32(sample_flag, i); + len = ffs(~v) - 1; + + if (len > range_length) { + range_length = len; + range_start = i; + } + + interval = ffs(v >> len) - 1; + if (interval < 0) + break; + + i += len + interval; + } + + middle_range = range_start + range_length / 2; + if (middle_range >= 32) + middle_range %= 32; + + return middle_range; +} + +static int dw_mci_hi3660_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + int i = 0; + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + int smpl_phase = 0; + u32 tuning_sample_flag = 0; + int best_clksmpl = 0; + + for (i = 0; i < NUM_PHASES; ++i, ++smpl_phase) { + smpl_phase %= 32; + + mci_writel(host, TMOUT, ~0); + dw_mci_hs_set_timing(host, mmc->ios.timing, smpl_phase); + + if (!mmc_send_tuning(mmc, opcode, NULL)) + tuning_sample_flag |= (1 << smpl_phase); + else + tuning_sample_flag &= ~(1 << smpl_phase); + } + + best_clksmpl = dw_mci_get_best_clksmpl(tuning_sample_flag); + if (best_clksmpl < 0) { + dev_err(host->dev, "All phases bad!\n"); + return -EIO; + } + + dw_mci_hs_set_timing(host, mmc->ios.timing, best_clksmpl); + + dev_info(host->dev, "tuning ok best_clksmpl %u tuning_sample_flag %x\n", + best_clksmpl, tuning_sample_flag); + return 0; +} + +static int dw_mci_hi3660_switch_voltage(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + int ret = 0; + struct dw_mci_slot *slot = mmc_priv(mmc); + struct k3_priv *priv; + struct dw_mci *host; + + host = slot->host; + priv = host->priv; + + if (!priv || !priv->reg) + return 0; + + if (priv->ctrl_id == DWMMC_SDIO_ID) + return 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + ret = dw_mci_set_sel18(host, 0); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + ret = dw_mci_set_sel18(host, 1); + if (ret) + return ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) { + dev_err(host->dev, "Regulator set error %d\n", ret); + return ret; + } + } + + return 0; +} + +static const struct dw_mci_drv_data hi3660_data = { + .init = dw_mci_hi3660_init, + .set_ios = dw_mci_hi3660_set_ios, + .parse_dt = dw_mci_hi6220_parse_dt, + .execute_tuning = dw_mci_hi3660_execute_tuning, + .switch_voltage = dw_mci_hi3660_switch_voltage, +}; + +static const 
struct of_device_id dw_mci_k3_match[] = { + { .compatible = "hisilicon,hi3660-dw-mshc", .data = &hi3660_data, }, + { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, }, + { .compatible = "hisilicon,hi6220-dw-mshc", .data = &hi6220_data, }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_k3_match); + +static int dw_mci_k3_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data; + const struct of_device_id *match; + + match = of_match_node(dw_mci_k3_match, pdev->dev.of_node); + drv_data = match->data; + + return dw_mci_pltfm_register(pdev, drv_data); +} + +static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend, + dw_mci_runtime_resume, + NULL) +}; + +static struct platform_driver dw_mci_k3_pltfm_driver = { + .probe = dw_mci_k3_probe, + .remove = dw_mci_pltfm_remove, + .driver = { + .name = "dwmmc_k3", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_k3_match, + .pm = &dw_mci_k3_dev_pm_ops, + }, +}; + +module_platform_driver(dw_mci_k3_pltfm_driver); + +MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dwmmc_k3"); diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c new file mode 100644 index 000000000..e7ab699f4 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pci.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Synopsys DesignWare Multimedia Card PCI Interface driver + * + * Copyright (C) 2012 Vayavya Labs Pvt. Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dw_mmc.h" + +#define PCI_BAR_NO 2 +#define SYNOPSYS_DW_MCI_VENDOR_ID 0x700 +#define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 +/* Defining the Capabilities */ +#define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\ + MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\ + MMC_CAP_SDIO_IRQ) + +static struct dw_mci_board pci_board_data = { + .caps = DW_MCI_CAPABILITIES, + .bus_hz = 33 * 1000 * 1000, + .detect_delay_ms = 200, + .fifo_depth = 32, +}; + +static int dw_mci_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *entries) +{ + struct dw_mci *host; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->irq = pdev->irq; + host->irq_flags = IRQF_SHARED; + host->dev = &pdev->dev; + host->pdata = &pci_board_data; + + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); + if (ret) + return ret; + + host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO]; + + pci_set_master(pdev); + + ret = dw_mci_probe(host); + if (ret) + return ret; + + pci_set_drvdata(pdev, host); + + return 0; +} + +static void dw_mci_pci_remove(struct pci_dev *pdev) +{ + struct dw_mci *host = pci_get_drvdata(pdev); + + dw_mci_remove(host); +} + +static const struct dev_pm_ops dw_mci_pci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend, + dw_mci_runtime_resume, + NULL) +}; + +static const struct pci_device_id dw_mci_pci_id[] = { + { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, + {} +}; +MODULE_DEVICE_TABLE(pci, dw_mci_pci_id); + +static struct pci_driver dw_mci_pci_driver = { + .name = "dw_mmc_pci", + .id_table = dw_mci_pci_id, + .probe = dw_mci_pci_probe, + .remove = 
dw_mci_pci_remove, + .driver = { + .pm = &dw_mci_pci_dev_pm_ops, + }, +}; + +module_pci_driver(dw_mci_pci_driver); + +MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); +MODULE_AUTHOR("Shashidhar Hiremath "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c new file mode 100644 index 000000000..73731cd3b --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Synopsys DesignWare Multimedia Card Interface driver + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +int dw_mci_pltfm_register(struct platform_device *pdev, + const struct dw_mci_drv_data *drv_data) +{ + struct dw_mci *host; + struct resource *regs; + + host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) + return host->irq; + + host->drv_data = drv_data; + host->dev = &pdev->dev; + host->irq_flags = 0; + host->pdata = pdev->dev.platform_data; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(host->regs)) + return PTR_ERR(host->regs); + + /* Get registers' physical base address */ + host->phy_regs = regs->start; + + platform_set_drvdata(pdev, host); + return dw_mci_probe(host); +} +EXPORT_SYMBOL_GPL(dw_mci_pltfm_register); + +const struct dev_pm_ops dw_mci_pltfm_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend, + dw_mci_runtime_resume, + NULL) +}; +EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops); + +static const struct of_device_id dw_mci_pltfm_match[] = { + { .compatible = "snps,dw-mshc", }, + { .compatible = "altr,socfpga-dw-mshc", }, + { .compatible = "img,pistachio-dw-mshc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); + +static int dw_mci_pltfm_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data = NULL; + const struct of_device_id *match; + + if (pdev->dev.of_node) { + match = of_match_node(dw_mci_pltfm_match, pdev->dev.of_node); + drv_data = match->data; + } + + return dw_mci_pltfm_register(pdev, drv_data); +} + +int dw_mci_pltfm_remove(struct platform_device *pdev) +{ + struct dw_mci *host = platform_get_drvdata(pdev); + + dw_mci_remove(host); + return 0; +} +EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove); + +static struct platform_driver dw_mci_pltfm_driver = { + .probe = dw_mci_pltfm_probe, + .remove = dw_mci_pltfm_remove, + .driver = { + .name = "dw_mmc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_pltfm_match, + .pm = &dw_mci_pltfm_pmops, + }, +}; + +module_platform_driver(dw_mci_pltfm_driver); + +MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); +MODULE_AUTHOR("NXP Semiconductor VietNam"); +MODULE_AUTHOR("Imagination Technologies Ltd"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h new file mode 100644 index 000000000..2d50d7da2 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Synopsys DesignWare Multimedia Card Interface Platform driver + * + * Copyright (C) 2012, Samsung Electronics Co., Ltd. 
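+ * + * Declares the shared register/remove helpers and dev_pm_ops that the + * per-SoC dw_mmc glue drivers in this directory build on.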
+ */ + +#ifndef _DW_MMC_PLTFM_H_ +#define _DW_MMC_PLTFM_H_ + +extern int dw_mci_pltfm_register(struct platform_device *pdev, + const struct dw_mci_drv_data *drv_data); +extern int dw_mci_pltfm_remove(struct platform_device *pdev); +extern const struct dev_pm_ops dw_mci_pltfm_pmops; + +#endif /* _DW_MMC_PLTFM_H_ */ diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c new file mode 100644 index 000000000..753502ce3 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +#define RK3288_CLKGEN_DIV 2 + +struct dw_mci_rockchip_priv_data { + struct clk *drv_clk; + struct clk *sample_clk; + int default_sample_phase; + int num_phases; +}; + +static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + struct dw_mci_rockchip_priv_data *priv = host->priv; + int ret; + unsigned int cclkin; + u32 bus_hz; + + if (ios->clock == 0) + return; + + /* + * cclkin: source clock of mmc controller + * bus_hz: card interface clock generated by CLKGEN + * bus_hz = cclkin / RK3288_CLKGEN_DIV + * ios->clock = (div == 0) ? bus_hz : (bus_hz / (2 * div)) + * + * Note: div can only be 0 or 1, but div must be set to 1 for eMMC + * DDR52 8-bit mode. + */ + if (ios->bus_width == MMC_BUS_WIDTH_8 && + ios->timing == MMC_TIMING_MMC_DDR52) + cclkin = 2 * ios->clock * RK3288_CLKGEN_DIV; + else + cclkin = ios->clock * RK3288_CLKGEN_DIV; + + ret = clk_set_rate(host->ciu_clk, cclkin); + if (ret) + dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock); + + bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV; + if (bus_hz != host->bus_hz) { + host->bus_hz = bus_hz; + /* force dw_mci_setup_bus() */ + host->current_speed = 0; + } + + /* Make sure we use phases which we can enumerate with */ + if (!IS_ERR(priv->sample_clk)) + clk_set_phase(priv->sample_clk, priv->default_sample_phase); + + /* + * Set the drive phase offset based on speed mode to achieve hold times. + * + * NOTE: this is _not_ a value that is dynamically tuned and is also + * _not_ a value that will vary from board to board. It is a value + * that could vary between different SoC models if they had massively + * different output clock delays inside their dw_mmc IP block (delay_o), + * but since it's OK to overshoot a little we don't need to do complex + * calculations and can pick values that will just work for everyone. + * + * When picking values we'll stick with picking 0/90/180/270 since + * those can be made very accurately on all known Rockchip SoCs. + * + * Note that these values match values from the DesignWare Databook + * tables for the most part except for SDR12 and "ID mode". For those + * two modes the databook calculations assume a clock in of 50MHz. As + * seen above, we always use a clock in rate that is exactly the + * card's input clock (times RK3288_CLKGEN_DIV, but that gets divided + * back out before the controller sees it). + * + * From measurement of a single device, it appears that delay_o is + * about .5 ns. Since we try to leave a bit of margin, it's expected + * that numbers here will be fine even with much larger delay_o + * (the 1.4 ns assumed by the DesignWare Databook would result in the + * same results, for instance). 
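+ * + * As a rule of thumb, a phase offset corresponds to a delay of + * (phase / 360) * (1 / bus_hz): e.g. 90 degrees at 50 MHz is 5 ns, and + * at 150 MHz about 1.67 ns, which is where the numbers below come from.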
+ */ + if (!IS_ERR(priv->drv_clk)) { + int phase; + + /* + * In almost all cases a 90 degree phase offset will provide + * sufficient hold times across all valid input clock rates + * assuming delay_o is not absurd for a given SoC. We'll use + * that as a default. + */ + phase = 90; + + switch (ios->timing) { + case MMC_TIMING_MMC_DDR52: + /* + * Since clock in rate with MMC_DDR52 is doubled when + * bus width is 8 we need to double the phase offset + * to get the same timings. + */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + phase = 180; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* + * In the case of 150 MHz clock (typical max for + * Rockchip SoCs), 90 degree offset will add a delay + * of 1.67 ns. That will meet min hold time of .8 ns + * as long as clock output delay is < .87 ns. On + * SoCs measured this seems to be OK, but it doesn't + * hurt to give margin here, so we use 180. + */ + phase = 180; + break; + } + + clk_set_phase(priv->drv_clk, phase); + } +} + +#define TUNING_ITERATION_TO_PHASE(i, num_phases) \ + (DIV_ROUND_UP((i) * 360, num_phases)) + +static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + struct dw_mci *host = slot->host; + struct dw_mci_rockchip_priv_data *priv = host->priv; + struct mmc_host *mmc = slot->mmc; + int ret = 0; + int i; + bool v, prev_v = 0, first_v; + struct range_t { + int start; + int end; /* inclusive */ + }; + struct range_t *ranges; + unsigned int range_count = 0; + int longest_range_len = -1; + int longest_range = -1; + int middle_phase; + + if (IS_ERR(priv->sample_clk)) { + dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n"); + return -EIO; + } + + ranges = kmalloc_array(priv->num_phases / 2 + 1, + sizeof(*ranges), GFP_KERNEL); + if (!ranges) + return -ENOMEM; + + /* Try each phase and extract good ranges */ + for (i = 0; i < priv->num_phases; ) { + clk_set_phase(priv->sample_clk, + TUNING_ITERATION_TO_PHASE(i, priv->num_phases)); + + v = !mmc_send_tuning(mmc, opcode, NULL); + + if (i == 0) + first_v = v; + + if ((!prev_v) && v) { + range_count++; + ranges[range_count-1].start = i; + } + if (v) { + ranges[range_count-1].end = i; + i++; + } else if (i == priv->num_phases - 1) { + /* No extra skipping rules if we're at the end */ + i++; + } else { + /* + * No need to check too close to an invalid + * one since testing bad phases is slow. Skip + * 20 degrees. 
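+ * + * With the default 360 phases this skip is DIV_ROUND_UP(20 * 360, 360), + * i.e. 20 iterations per bad phase.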
+ */ + i += DIV_ROUND_UP(20 * priv->num_phases, 360); + + /* Always test the last one */ + if (i >= priv->num_phases) + i = priv->num_phases - 1; + } + + prev_v = v; + } + + if (range_count == 0) { + dev_warn(host->dev, "All phases bad!"); + ret = -EIO; + goto free; + } + + /* wrap around case, merge the end points */ + if ((range_count > 1) && first_v && v) { + ranges[0].start = ranges[range_count-1].start; + range_count--; + } + + if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) { + clk_set_phase(priv->sample_clk, priv->default_sample_phase); + dev_info(host->dev, "All phases work, using default phase %d.", + priv->default_sample_phase); + goto free; + } + + /* Find the longest range */ + for (i = 0; i < range_count; i++) { + int len = (ranges[i].end - ranges[i].start + 1); + + if (len < 0) + len += priv->num_phases; + + if (longest_range_len < len) { + longest_range_len = len; + longest_range = i; + } + + dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n", + TUNING_ITERATION_TO_PHASE(ranges[i].start, + priv->num_phases), + TUNING_ITERATION_TO_PHASE(ranges[i].end, + priv->num_phases), + len + ); + } + + dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n", + TUNING_ITERATION_TO_PHASE(ranges[longest_range].start, + priv->num_phases), + TUNING_ITERATION_TO_PHASE(ranges[longest_range].end, + priv->num_phases), + longest_range_len + ); + + middle_phase = ranges[longest_range].start + longest_range_len / 2; + middle_phase %= priv->num_phases; + dev_info(host->dev, "Successfully tuned phase to %d\n", + TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases)); + + clk_set_phase(priv->sample_clk, + TUNING_ITERATION_TO_PHASE(middle_phase, + priv->num_phases)); + +free: + kfree(ranges); + return ret; +} + +static int dw_mci_rk3288_parse_dt(struct dw_mci *host) +{ + struct device_node *np = host->dev->of_node; + struct dw_mci_rockchip_priv_data *priv; + + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (of_property_read_u32(np, "rockchip,desired-num-phases", + &priv->num_phases)) + priv->num_phases = 360; + + if (of_property_read_u32(np, "rockchip,default-sample-phase", + &priv->default_sample_phase)) + priv->default_sample_phase = 0; + + priv->drv_clk = devm_clk_get(host->dev, "ciu-drive"); + if (IS_ERR(priv->drv_clk)) + dev_dbg(host->dev, "ciu-drive not available\n"); + + priv->sample_clk = devm_clk_get(host->dev, "ciu-sample"); + if (IS_ERR(priv->sample_clk)) + dev_dbg(host->dev, "ciu-sample not available\n"); + + host->priv = priv; + + return 0; +} + +static int dw_mci_rockchip_init(struct dw_mci *host) +{ + /* It is slot 8 on Rockchip SoCs */ + host->sdio_id0 = 8; + + if (of_device_is_compatible(host->dev->of_node, + "rockchip,rk3288-dw-mshc")) + host->bus_hz /= RK3288_CLKGEN_DIV; + + return 0; +} + +/* Common capabilities of RK3288 SoC */ +static unsigned long dw_mci_rk3288_dwmmc_caps[4] = { + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, +}; + +static const struct dw_mci_drv_data rk2928_drv_data = { + .init = dw_mci_rockchip_init, +}; + +static const struct dw_mci_drv_data rk3288_drv_data = { + .caps = dw_mci_rk3288_dwmmc_caps, + .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps), + .set_ios = dw_mci_rk3288_set_ios, + .execute_tuning = dw_mci_rk3288_execute_tuning, + .parse_dt = dw_mci_rk3288_parse_dt, + .init = dw_mci_rockchip_init, +}; + +static const struct of_device_id dw_mci_rockchip_match[] = { + { .compatible = "rockchip,rk2928-dw-mshc", + .data = &rk2928_drv_data }, + { .compatible = 
"rockchip,rk3288-dw-mshc", + .data = &rk3288_drv_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_rockchip_match); + +static int dw_mci_rockchip_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data; + const struct of_device_id *match; + int ret; + + if (!pdev->dev.of_node) + return -ENODEV; + + match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node); + drv_data = match->data; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + + ret = dw_mci_pltfm_register(pdev, drv_data); + if (ret) { + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + return ret; + } + + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; +} + +static int dw_mci_rockchip_remove(struct platform_device *pdev) +{ + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + return dw_mci_pltfm_remove(pdev); +} + +static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend, + dw_mci_runtime_resume, + NULL) +}; + +static struct platform_driver dw_mci_rockchip_pltfm_driver = { + .probe = dw_mci_rockchip_probe, + .remove = dw_mci_rockchip_remove, + .driver = { + .name = "dwmmc_rockchip", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_rockchip_match, + .pm = &dw_mci_rockchip_dev_pm_ops, + }, +}; + +module_platform_driver(dw_mci_rockchip_pltfm_driver); + +MODULE_AUTHOR("Addy Ke "); +MODULE_DESCRIPTION("Rockchip Specific DW-MSHC Driver Extension"); +MODULE_ALIAS("platform:dwmmc_rockchip"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c new file mode 100644 index 000000000..51bcc6332 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-zx.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ZX Specific Extensions for Synopsys DW Multimedia Card Interface driver + * + * Copyright (C) 2016, Linaro Ltd. + * Copyright (C) 2016, ZTE Corp. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" +#include "dw_mmc-zx.h" + +struct dw_mci_zx_priv_data { + struct regmap *sysc_base; +}; + +enum delay_type { + DELAY_TYPE_READ, /* read dqs delay */ + DELAY_TYPE_CLK, /* clk sample delay */ +}; + +static int dw_mci_zx_emmc_set_delay(struct dw_mci *host, unsigned int delay, + enum delay_type dflag) +{ + struct dw_mci_zx_priv_data *priv = host->priv; + struct regmap *sysc_base = priv->sysc_base; + unsigned int clksel; + unsigned int loop = 1000; + int ret; + + if (!sysc_base) + return -EINVAL; + + ret = regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0, + PARA_HALF_CLK_MODE | PARA_DLL_BYPASS_MODE | + PARA_PHASE_DET_SEL_MASK | + PARA_DLL_LOCK_NUM_MASK | + DLL_REG_SET | PARA_DLL_START_MASK, + PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4)); + if (ret) + return ret; + + ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG1, &clksel); + if (ret) + return ret; + + if (dflag == DELAY_TYPE_CLK) { + clksel &= ~CLK_SAMP_DELAY_MASK; + clksel |= CLK_SAMP_DELAY(delay); + } else { + clksel &= ~READ_DQS_DELAY_MASK; + clksel |= READ_DQS_DELAY(delay); + } + + regmap_write(sysc_base, LB_AON_EMMC_CFG_REG1, clksel); + regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0, + PARA_DLL_START_MASK | PARA_DLL_LOCK_NUM_MASK | + DLL_REG_SET, + PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4) | + DLL_REG_SET); + + do { + ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG2, &clksel); + if (ret) + return ret; + + } while (--loop && !(clksel & ZX_DLL_LOCKED)); + + if (!loop) { + dev_err(host->dev, "Error: %s dll lock fail\n", __func__); + return -EIO; + } + + return 0; +} + +static int dw_mci_zx_emmc_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + int ret, len = 0, start = 0, end = 0, delay, best = 0; + + for (delay = 1; delay < 128; delay++) { + ret = dw_mci_zx_emmc_set_delay(host, delay, DELAY_TYPE_CLK); + if (!ret && mmc_send_tuning(mmc, opcode, NULL)) { + if (start >= 0) { + end = delay - 1; + /* check and update longest good range */ + if ((end - start) > len) { + best = (start + end) >> 1; + len = end - start; + } + } + start = -1; + end = 0; + continue; + } + if (start < 0) + start = delay; + } + + if (start >= 0) { + end = delay - 1; + if ((end - start) > len) { + best = (start + end) >> 1; + len = end - start; + } + } + if (best < 0) + return -EIO; + + dev_info(host->dev, "%s best range: start %d end %d\n", __func__, + start, end); + return dw_mci_zx_emmc_set_delay(host, best, DELAY_TYPE_CLK); +} + +static int dw_mci_zx_prepare_hs400_tuning(struct dw_mci *host, + struct mmc_ios *ios) +{ + int ret; + + /* config phase shift as 90 degree */ + ret = dw_mci_zx_emmc_set_delay(host, 32, DELAY_TYPE_READ); + if (ret < 0) + return -EIO; + + return 0; +} + +static int dw_mci_zx_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + struct dw_mci *host = slot->host; + + if (host->verid == 0x290a) /* only for emmc */ + return dw_mci_zx_emmc_execute_tuning(slot, opcode); + /* TODO: Add 0x210a dedicated tuning for sd/sdio */ + + return 0; +} + +static int dw_mci_zx_parse_dt(struct dw_mci *host) +{ + struct device_node *np = host->dev->of_node; + struct device_node *node; + struct dw_mci_zx_priv_data *priv; + struct regmap *sysc_base; + + /* syscon is needed only by emmc */ + node = of_parse_phandle(np, "zte,aon-syscon", 0); + if (node) { + sysc_base = syscon_node_to_regmap(node); + of_node_put(node); + + if 
(IS_ERR(sysc_base)) + return dev_err_probe(host->dev, PTR_ERR(sysc_base), + "Can't get syscon\n"); + } else { + return 0; + } + + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->sysc_base = sysc_base; + host->priv = priv; + + return 0; +} + +static unsigned long zx_dwmmc_caps[3] = { + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, +}; + +static const struct dw_mci_drv_data zx_drv_data = { + .caps = zx_dwmmc_caps, + .num_caps = ARRAY_SIZE(zx_dwmmc_caps), + .execute_tuning = dw_mci_zx_execute_tuning, + .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, + .parse_dt = dw_mci_zx_parse_dt, +}; + +static const struct of_device_id dw_mci_zx_match[] = { + { .compatible = "zte,zx296718-dw-mshc", .data = &zx_drv_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_zx_match); + +static int dw_mci_zx_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data; + const struct of_device_id *match; + + match = of_match_node(dw_mci_zx_match, pdev->dev.of_node); + drv_data = match->data; + + return dw_mci_pltfm_register(pdev, drv_data); +} + +static const struct dev_pm_ops dw_mci_zx_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend, + dw_mci_runtime_resume, + NULL) +}; + +static struct platform_driver dw_mci_zx_pltfm_driver = { + .probe = dw_mci_zx_probe, + .remove = dw_mci_pltfm_remove, + .driver = { + .name = "dwmmc_zx", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = dw_mci_zx_match, + .pm = &dw_mci_zx_dev_pm_ops, + }, +}; + +module_platform_driver(dw_mci_zx_pltfm_driver); + +MODULE_DESCRIPTION("ZTE emmc/sd driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h new file mode 100644 index 000000000..09ac52766 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-zx.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DW_MMC_ZX_H_ +#define _DW_MMC_ZX_H_ + +/* ZX296718 SoC specific DLL register offset. */ +#define LB_AON_EMMC_CFG_REG0 0x1B0 +#define LB_AON_EMMC_CFG_REG1 0x1B4 +#define LB_AON_EMMC_CFG_REG2 0x1B8 + +/* LB_AON_EMMC_CFG_REG0 register defines */ +#define PARA_DLL_START(x) ((x) & 0xFF) +#define PARA_DLL_START_MASK 0xFF +#define DLL_REG_SET BIT(8) +#define PARA_DLL_LOCK_NUM(x) (((x) & 7) << 16) +#define PARA_DLL_LOCK_NUM_MASK (7 << 16) +#define PARA_PHASE_DET_SEL(x) (((x) & 7) << 20) +#define PARA_PHASE_DET_SEL_MASK (7 << 20) +#define PARA_DLL_BYPASS_MODE BIT(23) +#define PARA_HALF_CLK_MODE BIT(24) + +/* LB_AON_EMMC_CFG_REG1 register defines */ +#define READ_DQS_DELAY(x) ((x) & 0x7F) +#define READ_DQS_DELAY_MASK (0x7F) +#define READ_DQS_BYPASS_MODE BIT(7) +#define CLK_SAMP_DELAY(x) (((x) & 0x7F) << 8) +#define CLK_SAMP_DELAY_MASK (0x7F << 8) +#define CLK_SAMP_BYPASS_MODE BIT(15) + +/* LB_AON_EMMC_CFG_REG2 register defines */ +#define ZX_DLL_LOCKED BIT(2) + +#endif /* _DW_MMC_ZX_H_ */ diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c new file mode 100644 index 000000000..a6170f80b --- /dev/null +++ b/drivers/mmc/host/dw_mmc.c @@ -0,0 +1,3493 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Synopsys DesignWare Multimedia Card Interface driver + * (Based on NXP driver for lpc 31xx) + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 
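+ * + * This is the core controller driver; bus glue lives in dw_mmc-pltfm.c, + * dw_mmc-pci.c and the per-SoC dw_mmc-*.c files alongside it.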
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dw_mmc.h" + +/* Common flag combinations */ +#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ + SDMMC_INT_HTO | SDMMC_INT_SBE | \ + SDMMC_INT_EBE | SDMMC_INT_HLE) +#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ + SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) +#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ + DW_MCI_CMD_ERROR_FLAGS) +#define DW_MCI_SEND_STATUS 1 +#define DW_MCI_RECV_STATUS 2 +#define DW_MCI_DMA_THRESHOLD 16 + +#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ +#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ + +#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ + SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ + SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ + SDMMC_IDMAC_INT_TI) + +#define DESC_RING_BUF_SZ PAGE_SIZE + +struct idmac_desc_64addr { + u32 des0; /* Control Descriptor */ +#define IDMAC_OWN_CLR64(x) \ + !((x) & cpu_to_le32(IDMAC_DES0_OWN)) + + u32 des1; /* Reserved */ + + u32 des2; /*Buffer sizes */ +#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ + ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ + ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) + + u32 des3; /* Reserved */ + + u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ + u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ + + u32 des6; /* Lower 32-bits of Next Descriptor Address */ + u32 des7; /* Upper 32-bits of Next Descriptor Address */ +}; + +struct idmac_desc { + __le32 des0; /* Control Descriptor */ +#define IDMAC_DES0_DIC BIT(1) +#define IDMAC_DES0_LD BIT(2) +#define IDMAC_DES0_FD BIT(3) +#define IDMAC_DES0_CH BIT(4) +#define IDMAC_DES0_ER BIT(5) +#define IDMAC_DES0_CES BIT(30) +#define IDMAC_DES0_OWN BIT(31) + + __le32 des1; /* Buffer sizes */ +#define IDMAC_SET_BUFFER1_SIZE(d, s) \ + ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) + + __le32 des2; /* buffer 1 physical address */ + + __le32 des3; /* buffer 2 physical address */ +}; + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +#define DW_MCI_DESC_DATA_LENGTH 0x1000 + +#if defined(CONFIG_DEBUG_FS) +static int dw_mci_req_show(struct seq_file *s, void *v) +{ + struct dw_mci_slot *slot = s->private; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_command *stop; + struct mmc_data *data; + + /* Make sure we get a consistent snapshot */ + spin_lock_bh(&slot->host->lock); + mrq = slot->mrq; + + if (mrq) { + cmd = mrq->cmd; + data = mrq->data; + stop = mrq->stop; + + if (cmd) + seq_printf(s, + "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", + cmd->opcode, cmd->arg, cmd->flags, + cmd->resp[0], cmd->resp[1], cmd->resp[2], + cmd->resp[2], cmd->error); + if (data) + seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", + data->bytes_xfered, data->blocks, + data->blksz, data->flags, data->error); + if (stop) + seq_printf(s, + "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", + stop->opcode, stop->arg, stop->flags, + stop->resp[0], stop->resp[1], stop->resp[2], + stop->resp[2], stop->error); + } + + spin_unlock_bh(&slot->host->lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dw_mci_req); + +static int dw_mci_regs_show(struct seq_file *s, void *v) +{ + struct dw_mci *host = s->private; + + pm_runtime_get_sync(host->dev); + + seq_printf(s, 
"STATUS:\t0x%08x\n", mci_readl(host, STATUS)); + seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); + seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); + seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); + seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); + seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); + + pm_runtime_put_autosuspend(host->dev); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dw_mci_regs); + +static void dw_mci_init_debugfs(struct dw_mci_slot *slot) +{ + struct mmc_host *mmc = slot->mmc; + struct dw_mci *host = slot->host; + struct dentry *root; + + root = mmc->debugfs_root; + if (!root) + return; + + debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops); + debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops); + debugfs_create_u32("state", S_IRUSR, root, &host->state); + debugfs_create_xul("pending_events", S_IRUSR, root, + &host->pending_events); + debugfs_create_xul("completed_events", S_IRUSR, root, + &host->completed_events); +} +#endif /* defined(CONFIG_DEBUG_FS) */ + +static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) +{ + u32 ctrl; + + ctrl = mci_readl(host, CTRL); + ctrl |= reset; + mci_writel(host, CTRL, ctrl); + + /* wait till resets clear */ + if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl, + !(ctrl & reset), + 1, 500 * USEC_PER_MSEC)) { + dev_err(host->dev, + "Timeout resetting block (ctrl reset %#x)\n", + ctrl & reset); + return false; + } + + return true; +} + +static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) +{ + u32 status; + + /* + * Databook says that before issuing a new data transfer command + * we need to check to see if the card is busy. Data transfer commands + * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. + * + * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is + * expected. 
+ */ + if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && + !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { + if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, + status, + !(status & SDMMC_STATUS_BUSY), + 10, 500 * USEC_PER_MSEC)) + dev_err(host->dev, "Busy; trying anyway\n"); + } +} + +static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) +{ + struct dw_mci *host = slot->host; + unsigned int cmd_status = 0; + + mci_writel(host, CMDARG, arg); + wmb(); /* drain writebuffer */ + dw_mci_wait_while_busy(host, cmd); + mci_writel(host, CMD, SDMMC_CMD_START | cmd); + + if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status, + !(cmd_status & SDMMC_CMD_START), + 1, 500 * USEC_PER_MSEC)) + dev_err(&slot->mmc->class_dev, + "Timeout sending command (cmd %#x arg %#x status %#x)\n", + cmd, arg, cmd_status); +} + +static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + u32 cmdr; + + cmd->error = -EINPROGRESS; + cmdr = cmd->opcode; + + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_GO_IDLE_STATE || + cmd->opcode == MMC_GO_INACTIVE_STATE || + (cmd->opcode == SD_IO_RW_DIRECT && + ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) + cmdr |= SDMMC_CMD_STOP; + else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) + cmdr |= SDMMC_CMD_PRV_DAT_WAIT; + + if (cmd->opcode == SD_SWITCH_VOLTAGE) { + u32 clk_en_a; + + /* Special bit makes CMD11 not die */ + cmdr |= SDMMC_CMD_VOLT_SWITCH; + + /* Change state to continue to handle CMD11 weirdness */ + WARN_ON(slot->host->state != STATE_SENDING_CMD); + slot->host->state = STATE_SENDING_CMD11; + + /* + * We need to disable low power mode (automatic clock stop) + * while doing voltage switch so we don't confuse the card, + * since stopping the clock is a specific part of the UHS + * voltage change dance. + * + * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be + * unconditionally turned back on in dw_mci_setup_bus() if it's + * ever called with a non-zero clock. That shouldn't happen + * until the voltage change is all done. 
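+ * + * Concretely, we clear SDMMC_CLKEN_LOW_PWR for just this slot and push + * the new value with an update-clock command, so the card clock keeps + * running throughout the CMD11 sequence.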
+ */ + clk_en_a = mci_readl(host, CLKENA); + clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); + mci_writel(host, CLKENA, clk_en_a); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } + + if (cmd->flags & MMC_RSP_PRESENT) { + /* We expect a response, so set this bit */ + cmdr |= SDMMC_CMD_RESP_EXP; + if (cmd->flags & MMC_RSP_136) + cmdr |= SDMMC_CMD_RESP_LONG; + } + + if (cmd->flags & MMC_RSP_CRC) + cmdr |= SDMMC_CMD_RESP_CRC; + + if (cmd->data) { + cmdr |= SDMMC_CMD_DAT_EXP; + if (cmd->data->flags & MMC_DATA_WRITE) + cmdr |= SDMMC_CMD_DAT_WR; + } + + if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) + cmdr |= SDMMC_CMD_USE_HOLD_REG; + + return cmdr; +} + +static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) +{ + struct mmc_command *stop; + u32 cmdr; + + if (!cmd->data) + return 0; + + stop = &host->stop_abort; + cmdr = cmd->opcode; + memset(stop, 0, sizeof(struct mmc_command)); + + if (cmdr == MMC_READ_SINGLE_BLOCK || + cmdr == MMC_READ_MULTIPLE_BLOCK || + cmdr == MMC_WRITE_BLOCK || + cmdr == MMC_WRITE_MULTIPLE_BLOCK || + cmdr == MMC_SEND_TUNING_BLOCK || + cmdr == MMC_SEND_TUNING_BLOCK_HS200) { + stop->opcode = MMC_STOP_TRANSMISSION; + stop->arg = 0; + stop->flags = MMC_RSP_R1B | MMC_CMD_AC; + } else if (cmdr == SD_IO_RW_EXTENDED) { + stop->opcode = SD_IO_RW_DIRECT; + stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | + ((cmd->arg >> 28) & 0x7); + stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; + } else { + return 0; + } + + cmdr = stop->opcode | SDMMC_CMD_STOP | + SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; + + if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags)) + cmdr |= SDMMC_CMD_USE_HOLD_REG; + + return cmdr; +} + +static inline void dw_mci_set_cto(struct dw_mci *host) +{ + unsigned int cto_clks; + unsigned int cto_div; + unsigned int cto_ms; + unsigned long irqflags; + + cto_clks = mci_readl(host, TMOUT) & 0xff; + cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; + if (cto_div == 0) + cto_div = 1; + + cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, + host->bus_hz); + + /* add a bit spare time */ + cto_ms += 10; + + /* + * The durations we're working with are fairly short so we have to be + * extra careful about synchronization here. Specifically in hardware a + * command timeout is _at most_ 5.1 ms, so that means we expect an + * interrupt (either command done or timeout) to come rather quickly + * after the mci_writel. ...but just in case we have a long interrupt + * latency let's add a bit of paranoia. + * + * In general we'll assume that at least an interrupt will be asserted + * in hardware by the time the cto_timer runs. ...and if it hasn't + * been asserted in hardware by that time then we'll assume it'll never + * come. 
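+ * + * For scale: 255 timeout clocks with a divider of 2 at the 100 kHz + * minimum bus clock is the 5.1 ms worst case noted above, which + * DIV_ROUND_UP_ULL rounds to 6 ms and the spare time pads to 16 ms.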
+ */ + spin_lock_irqsave(&host->irq_lock, irqflags); + if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) + mod_timer(&host->cto_timer, + jiffies + msecs_to_jiffies(cto_ms) + 1); + spin_unlock_irqrestore(&host->irq_lock, irqflags); +} + +static void dw_mci_start_command(struct dw_mci *host, + struct mmc_command *cmd, u32 cmd_flags) +{ + host->cmd = cmd; + dev_vdbg(host->dev, + "start command: ARGR=0x%08x CMDR=0x%08x\n", + cmd->arg, cmd_flags); + + mci_writel(host, CMDARG, cmd->arg); + wmb(); /* drain writebuffer */ + dw_mci_wait_while_busy(host, cmd_flags); + + mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); + + /* response expected command only */ + if (cmd_flags & SDMMC_CMD_RESP_EXP) + dw_mci_set_cto(host); +} + +static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) +{ + struct mmc_command *stop = &host->stop_abort; + + dw_mci_start_command(host, stop, host->stop_cmdr); +} + +/* DMA interface functions */ +static void dw_mci_stop_dma(struct dw_mci *host) +{ + if (host->using_dma) { + host->dma_ops->stop(host); + host->dma_ops->cleanup(host); + } + + /* Data transfer was stopped by the interrupt handler */ + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static void dw_mci_dma_cleanup(struct dw_mci *host) +{ + struct mmc_data *data = host->data; + + if (data && data->host_cookie == COOKIE_MAPPED) { + dma_unmap_sg(host->dev, + data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + data->host_cookie = COOKIE_UNMAPPED; + } +} + +static void dw_mci_idmac_reset(struct dw_mci *host) +{ + u32 bmod = mci_readl(host, BMOD); + /* Software reset of DMA */ + bmod |= SDMMC_IDMAC_SWRESET; + mci_writel(host, BMOD, bmod); +} + +static void dw_mci_idmac_stop_dma(struct dw_mci *host) +{ + u32 temp; + + /* Disable and reset the IDMAC interface */ + temp = mci_readl(host, CTRL); + temp &= ~SDMMC_CTRL_USE_IDMAC; + temp |= SDMMC_CTRL_DMA_RESET; + mci_writel(host, CTRL, temp); + + /* Stop the IDMAC running */ + temp = mci_readl(host, BMOD); + temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); + temp |= SDMMC_IDMAC_SWRESET; + mci_writel(host, BMOD, temp); +} + +static void dw_mci_dmac_complete_dma(void *arg) +{ + struct dw_mci *host = arg; + struct mmc_data *data = host->data; + + dev_vdbg(host->dev, "DMA complete\n"); + + if ((host->use_dma == TRANS_MODE_EDMAC) && + data && (data->flags & MMC_DATA_READ)) + /* Invalidate cache after read */ + dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), + data->sg, + data->sg_len, + DMA_FROM_DEVICE); + + host->dma_ops->cleanup(host); + + /* + * If the card was removed, data will be NULL. No point in trying to + * send the stop command or waiting for NBUSY in this case. 
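+ * + * Put differently, EVENT_XFER_COMPLETE is only set and the tasklet only + * scheduled while the request's data is still live.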
+ */ + if (data) { + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); + tasklet_schedule(&host->tasklet); + } +} + +static int dw_mci_idmac_init(struct dw_mci *host) +{ + int i; + + if (host->dma_64bit_address == 1) { + struct idmac_desc_64addr *p; + /* Number of descriptors in the ring buffer */ + host->ring_size = + DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); + + /* Forward link the descriptor list */ + for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; + i++, p++) { + p->des6 = (host->sg_dma + + (sizeof(struct idmac_desc_64addr) * + (i + 1))) & 0xffffffff; + + p->des7 = (u64)(host->sg_dma + + (sizeof(struct idmac_desc_64addr) * + (i + 1))) >> 32; + /* Initialize reserved and buffer size fields to "0" */ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + } + + /* Set the last descriptor as the end-of-ring descriptor */ + p->des6 = host->sg_dma & 0xffffffff; + p->des7 = (u64)host->sg_dma >> 32; + p->des0 = IDMAC_DES0_ER; + + } else { + struct idmac_desc *p; + /* Number of descriptors in the ring buffer */ + host->ring_size = + DESC_RING_BUF_SZ / sizeof(struct idmac_desc); + + /* Forward link the descriptor list */ + for (i = 0, p = host->sg_cpu; + i < host->ring_size - 1; + i++, p++) { + p->des3 = cpu_to_le32(host->sg_dma + + (sizeof(struct idmac_desc) * (i + 1))); + p->des0 = 0; + p->des1 = 0; + } + + /* Set the last descriptor as the end-of-ring descriptor */ + p->des3 = cpu_to_le32(host->sg_dma); + p->des0 = cpu_to_le32(IDMAC_DES0_ER); + } + + dw_mci_idmac_reset(host); + + if (host->dma_64bit_address == 1) { + /* Mask out interrupts - get Tx & Rx complete only */ + mci_writel(host, IDSTS64, IDMAC_INT_CLR); + mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | + SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); + + /* Set the descriptor base address */ + mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); + mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); + + } else { + /* Mask out interrupts - get Tx & Rx complete only */ + mci_writel(host, IDSTS, IDMAC_INT_CLR); + mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | + SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); + + /* Set the descriptor base address */ + mci_writel(host, DBADDR, host->sg_dma); + } + + return 0; +} + +static inline int dw_mci_prepare_desc64(struct dw_mci *host, + struct mmc_data *data, + unsigned int sg_len) +{ + unsigned int desc_len; + struct idmac_desc_64addr *desc_first, *desc_last, *desc; + u32 val; + int i; + + desc_first = desc_last = desc = host->sg_cpu; + + for (i = 0; i < sg_len; i++) { + unsigned int length = sg_dma_len(&data->sg[i]); + + u64 mem_addr = sg_dma_address(&data->sg[i]); + + for ( ; length ; desc++) { + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? + length : DW_MCI_DESC_DATA_LENGTH; + + length -= desc_len; + + /* + * Wait for the former clear OWN bit operation + * of IDMAC to make sure that this descriptor + * isn't still owned by IDMAC as IDMAC's write + * ops and CPU's read ops are asynchronous. 
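+ * + * The poll below is bounded at 100 ms (10 us interval); if the OWN bit + * never clears, the ring is treated as wedged and the err_own_bit path + * wipes and reinitializes the whole descriptor chain.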
+ */ + if (readl_poll_timeout_atomic(&desc->des0, val, + !(val & IDMAC_DES0_OWN), + 10, 100 * USEC_PER_MSEC)) + goto err_own_bit; + + /* + * Set the OWN bit and disable interrupts + * for this descriptor + */ + desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | + IDMAC_DES0_CH; + + /* Buffer length */ + IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); + + /* Physical address to DMA to/from */ + desc->des4 = mem_addr & 0xffffffff; + desc->des5 = mem_addr >> 32; + + /* Update physical address for the next desc */ + mem_addr += desc_len; + + /* Save pointer to the last descriptor */ + desc_last = desc; + } + } + + /* Set first descriptor */ + desc_first->des0 |= IDMAC_DES0_FD; + + /* Set last descriptor */ + desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); + desc_last->des0 |= IDMAC_DES0_LD; + + return 0; +err_own_bit: + /* restore the descriptor chain as it's polluted */ + dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); + memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); + dw_mci_idmac_init(host); + return -EINVAL; +} + + +static inline int dw_mci_prepare_desc32(struct dw_mci *host, + struct mmc_data *data, + unsigned int sg_len) +{ + unsigned int desc_len; + struct idmac_desc *desc_first, *desc_last, *desc; + u32 val; + int i; + + desc_first = desc_last = desc = host->sg_cpu; + + for (i = 0; i < sg_len; i++) { + unsigned int length = sg_dma_len(&data->sg[i]); + + u32 mem_addr = sg_dma_address(&data->sg[i]); + + for ( ; length ; desc++) { + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? + length : DW_MCI_DESC_DATA_LENGTH; + + length -= desc_len; + + /* + * Wait for the former clear OWN bit operation + * of IDMAC to make sure that this descriptor + * isn't still owned by IDMAC as IDMAC's write + * ops and CPU's read ops are asynchronous. + */ + if (readl_poll_timeout_atomic(&desc->des0, val, + IDMAC_OWN_CLR64(val), + 10, + 100 * USEC_PER_MSEC)) + goto err_own_bit; + + /* + * Set the OWN bit and disable interrupts + * for this descriptor + */ + desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | + IDMAC_DES0_DIC | + IDMAC_DES0_CH); + + /* Buffer length */ + IDMAC_SET_BUFFER1_SIZE(desc, desc_len); + + /* Physical address to DMA to/from */ + desc->des2 = cpu_to_le32(mem_addr); + + /* Update physical address for the next desc */ + mem_addr += desc_len; + + /* Save pointer to the last descriptor */ + desc_last = desc; + } + } + + /* Set first descriptor */ + desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); + + /* Set last descriptor */ + desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | + IDMAC_DES0_DIC)); + desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); + + return 0; +err_own_bit: + /* restore the descriptor chain as it's polluted */ + dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); + memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); + dw_mci_idmac_init(host); + return -EINVAL; +} + +static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) +{ + u32 temp; + int ret; + + if (host->dma_64bit_address == 1) + ret = dw_mci_prepare_desc64(host, host->data, sg_len); + else + ret = dw_mci_prepare_desc32(host, host->data, sg_len); + + if (ret) + goto out; + + /* drain writebuffer */ + wmb(); + + /* Make sure to reset DMA in case we did PIO before this */ + dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); + dw_mci_idmac_reset(host); + + /* Select IDMAC interface */ + temp = mci_readl(host, CTRL); + temp |= SDMMC_CTRL_USE_IDMAC; + mci_writel(host, CTRL, temp); + + /* drain writebuffer */ + wmb(); + + /* Enable the IDMAC */ + temp = mci_readl(host, BMOD); + temp |= SDMMC_IDMAC_ENABLE | 
SDMMC_IDMAC_FB; + mci_writel(host, BMOD, temp); + + /* Start it running */ + mci_writel(host, PLDMND, 1); + +out: + return ret; +} + +static const struct dw_mci_dma_ops dw_mci_idmac_ops = { + .init = dw_mci_idmac_init, + .start = dw_mci_idmac_start_dma, + .stop = dw_mci_idmac_stop_dma, + .complete = dw_mci_dmac_complete_dma, + .cleanup = dw_mci_dma_cleanup, +}; + +static void dw_mci_edmac_stop_dma(struct dw_mci *host) +{ + dmaengine_terminate_async(host->dms->ch); +} + +static int dw_mci_edmac_start_dma(struct dw_mci *host, + unsigned int sg_len) +{ + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *desc = NULL; + struct scatterlist *sgl = host->data->sg; + static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + u32 sg_elems = host->data->sg_len; + u32 fifoth_val; + u32 fifo_offset = host->fifo_reg - host->regs; + int ret = 0; + + /* Set external dma config: burst size, burst width */ + memset(&cfg, 0, sizeof(cfg)); + cfg.dst_addr = host->phy_regs + fifo_offset; + cfg.src_addr = cfg.dst_addr; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + + /* Match burst msize with external dma config */ + fifoth_val = mci_readl(host, FIFOTH); + cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; + cfg.src_maxburst = cfg.dst_maxburst; + + if (host->data->flags & MMC_DATA_WRITE) + cfg.direction = DMA_MEM_TO_DEV; + else + cfg.direction = DMA_DEV_TO_MEM; + + ret = dmaengine_slave_config(host->dms->ch, &cfg); + if (ret) { + dev_err(host->dev, "Failed to config edmac.\n"); + return -EBUSY; + } + + desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, + sg_len, cfg.direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(host->dev, "Can't prepare slave sg.\n"); + return -EBUSY; + } + + /* Set dw_mci_dmac_complete_dma as callback */ + desc->callback = dw_mci_dmac_complete_dma; + desc->callback_param = (void *)host; + dmaengine_submit(desc); + + /* Flush cache before write */ + if (host->data->flags & MMC_DATA_WRITE) + dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl, + sg_elems, DMA_TO_DEVICE); + + dma_async_issue_pending(host->dms->ch); + + return 0; +} + +static int dw_mci_edmac_init(struct dw_mci *host) +{ + /* Request external dma channel */ + host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); + if (!host->dms) + return -ENOMEM; + + host->dms->ch = dma_request_chan(host->dev, "rx-tx"); + if (IS_ERR(host->dms->ch)) { + int ret = PTR_ERR(host->dms->ch); + + dev_err(host->dev, "Failed to get external DMA channel.\n"); + kfree(host->dms); + host->dms = NULL; + return ret; + } + + return 0; +} + +static void dw_mci_edmac_exit(struct dw_mci *host) +{ + if (host->dms) { + if (host->dms->ch) { + dma_release_channel(host->dms->ch); + host->dms->ch = NULL; + } + kfree(host->dms); + host->dms = NULL; + } +} + +static const struct dw_mci_dma_ops dw_mci_edmac_ops = { + .init = dw_mci_edmac_init, + .exit = dw_mci_edmac_exit, + .start = dw_mci_edmac_start_dma, + .stop = dw_mci_edmac_stop_dma, + .complete = dw_mci_dmac_complete_dma, + .cleanup = dw_mci_dma_cleanup, +}; + +static int dw_mci_pre_dma_transfer(struct dw_mci *host, + struct mmc_data *data, + int cookie) +{ + struct scatterlist *sg; + unsigned int i, sg_len; + + if (data->host_cookie == COOKIE_PRE_MAPPED) + return data->sg_len; + + /* + * We don't do DMA on "complex" transfers, i.e. with + * non-word-aligned buffers or lengths. Also, we don't bother + * with all the DMA setup overhead for short transfers. 
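+ * Concretely, the checks below reject transfers shorter than + * DW_MCI_DMA_THRESHOLD bytes, block sizes that are not a multiple + * of 4, and scatterlist entries whose offset or length is not + * word-aligned; all of those are handled by PIO instead.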
+ */ + if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) + return -EINVAL; + + if (data->blksz & 3) + return -EINVAL; + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 3 || sg->length & 3) + return -EINVAL; + } + + sg_len = dma_map_sg(host->dev, + data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + if (sg_len == 0) + return -EINVAL; + + data->host_cookie = cookie; + + return sg_len; +} + +static void dw_mci_pre_req(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!slot->host->use_dma || !data) + return; + + /* This data might be unmapped at this time */ + data->host_cookie = COOKIE_UNMAPPED; + + if (dw_mci_pre_dma_transfer(slot->host, mrq->data, + COOKIE_PRE_MAPPED) < 0) + data->host_cookie = COOKIE_UNMAPPED; +} + +static void dw_mci_post_req(struct mmc_host *mmc, + struct mmc_request *mrq, + int err) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!slot->host->use_dma || !data) + return; + + if (data->host_cookie != COOKIE_UNMAPPED) + dma_unmap_sg(slot->host->dev, + data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + data->host_cookie = COOKIE_UNMAPPED; +} + +static int dw_mci_get_cd(struct mmc_host *mmc) +{ + int present; + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + int gpio_cd = mmc_gpio_get_cd(mmc); + + /* Use platform get_cd function, else try onboard card detect */ + if (((mmc->caps & MMC_CAP_NEEDS_POLL) + || !mmc_card_is_removable(mmc))) { + present = 1; + + if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { + if (mmc->caps & MMC_CAP_NEEDS_POLL) { + dev_info(&mmc->class_dev, + "card is polling.\n"); + } else { + dev_info(&mmc->class_dev, + "card is non-removable.\n"); + } + set_bit(DW_MMC_CARD_PRESENT, &slot->flags); + } + + return present; + } else if (gpio_cd >= 0) + present = gpio_cd; + else + present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) + == 0 ? 
1 : 0; + + spin_lock_bh(&host->lock); + if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags)) + dev_dbg(&mmc->class_dev, "card is present\n"); + else if (!present && + !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags)) + dev_dbg(&mmc->class_dev, "card is not present\n"); + spin_unlock_bh(&host->lock); + + return present; +} + +static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) +{ + unsigned int blksz = data->blksz; + static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + u32 fifo_width = 1 << host->data_shift; + u32 blksz_depth = blksz / fifo_width, fifoth_val; + u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; + int idx = ARRAY_SIZE(mszs) - 1; + + /* PIO should skip this scenario */ + if (!host->use_dma) + return; + + tx_wmark = (host->fifo_depth) / 2; + tx_wmark_invers = host->fifo_depth - tx_wmark; + + /* + * MSIZE is '1', + * if blksz is not a multiple of the FIFO width + */ + if (blksz % fifo_width) + goto done; + + do { + if (!((blksz_depth % mszs[idx]) || + (tx_wmark_invers % mszs[idx]))) { + msize = idx; + rx_wmark = mszs[idx] - 1; + break; + } + } while (--idx > 0); + /* + * If idx is '0', it won't be tried + * Thus, the initial values are used + */ +done: + fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); + mci_writel(host, FIFOTH, fifoth_val); +} + +static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) +{ + unsigned int blksz = data->blksz; + u32 blksz_depth, fifo_depth; + u16 thld_size; + u8 enable; + + /* + * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is + * in the FIFO region, so we really shouldn't access it). + */ + if (host->verid < DW_MMC_240A || + (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) + return; + + /* + * The card write threshold was introduced in 2.80a. + * It's used when HS400 mode is enabled. + */ + if (data->flags & MMC_DATA_WRITE && + host->timing != MMC_TIMING_MMC_HS400) + goto disable; + + if (data->flags & MMC_DATA_WRITE) + enable = SDMMC_CARD_WR_THR_EN; + else + enable = SDMMC_CARD_RD_THR_EN; + + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104 && + host->timing != MMC_TIMING_MMC_HS400) + goto disable; + + blksz_depth = blksz / (1 << host->data_shift); + fifo_depth = host->fifo_depth; + + if (blksz_depth > fifo_depth) + goto disable; + + /* + * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' + * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz + * Currently just choose blksz. + */ + thld_size = blksz; + mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); + return; + +disable: + mci_writel(host, CDTHRCTL, 0); +} + +static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) +{ + unsigned long irqflags; + int sg_len; + u32 temp; + + host->using_dma = 0; + + /* If we don't have a channel, we can't do DMA */ + if (!host->use_dma) + return -ENODEV; + + sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); + if (sg_len < 0) { + host->dma_ops->stop(host); + return sg_len; + } + + host->using_dma = 1; + + if (host->use_dma == TRANS_MODE_IDMAC) + dev_vdbg(host->dev, + "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", + (unsigned long)host->sg_cpu, + (unsigned long)host->sg_dma, + sg_len); + + /* + * Decide the MSIZE and RX/TX Watermark. + * If the current block size is the same as the previous size, + * there is no need to update FIFOTH.
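+ * (prev_blksz is cached at the end of dw_mci_submit_data(), so a + * run of same-sized blocks only programs FIFOTH once.)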
+ */ + if (host->prev_blksz != data->blksz) + dw_mci_adjust_fifoth(host, data); + + /* Enable the DMA interface */ + temp = mci_readl(host, CTRL); + temp |= SDMMC_CTRL_DMA_ENABLE; + mci_writel(host, CTRL, temp); + + /* Disable RX/TX IRQs, let DMA handle it */ + spin_lock_irqsave(&host->irq_lock, irqflags); + temp = mci_readl(host, INTMASK); + temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); + mci_writel(host, INTMASK, temp); + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + if (host->dma_ops->start(host, sg_len)) { + host->dma_ops->stop(host); + /* We can't do DMA, try PIO for this one */ + dev_dbg(host->dev, + "%s: fall back to PIO mode for current transfer\n", + __func__); + return -ENODEV; + } + + return 0; +} + +static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) +{ + unsigned long irqflags; + int flags = SG_MITER_ATOMIC; + u32 temp; + + data->error = -EINPROGRESS; + + WARN_ON(host->data); + host->sg = NULL; + host->data = data; + + if (data->flags & MMC_DATA_READ) + host->dir_status = DW_MCI_RECV_STATUS; + else + host->dir_status = DW_MCI_SEND_STATUS; + + dw_mci_ctrl_thld(host, data); + + if (dw_mci_submit_data_dma(host, data)) { + if (host->data->flags & MMC_DATA_READ) + flags |= SG_MITER_TO_SG; + else + flags |= SG_MITER_FROM_SG; + + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); + host->sg = data->sg; + host->part_buf_start = 0; + host->part_buf_count = 0; + + mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); + + spin_lock_irqsave(&host->irq_lock, irqflags); + temp = mci_readl(host, INTMASK); + temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; + mci_writel(host, INTMASK, temp); + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + temp = mci_readl(host, CTRL); + temp &= ~SDMMC_CTRL_DMA_ENABLE; + mci_writel(host, CTRL, temp); + + /* + * Use the initial fifoth_val for PIO mode. If wm_aligned + * is set, we set the watermark to match the data size. + * If the next data transfer may be done in DMA mode, + * prev_blksz should be invalidated. + */ + if (host->wm_aligned) + dw_mci_adjust_fifoth(host, data); + else + mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + } else { + /* + * Keep the current block size. + * It will be used to decide whether to update the + * FIFOTH register next time. + */ + host->prev_blksz = data->blksz; + } +} + +static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) +{ + struct dw_mci *host = slot->host; + unsigned int clock = slot->clock; + u32 div; + u32 clk_en_a; + u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; + + /* We must continue to set bit 28 in CMD until the change is complete */ + if (host->state == STATE_WAITING_CMD11_DONE) + sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; + + slot->mmc->actual_clock = 0; + + if (!clock) { + mci_writel(host, CLKENA, 0); + mci_send_cmd(slot, sdmmc_cmd_bits, 0); + } else if (clock != host->current_speed || force_clkinit) { + div = host->bus_hz / clock; + if (host->bus_hz % clock && host->bus_hz > clock) + /* + * move the + 1 after the divide to prevent + * over-clocking the card. + */ + div += 1; + + div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; + + if ((clock != slot->__clk_old && + !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || + force_clkinit) { + /* Silence the verbose log if calling from PM context */ + if (!force_clkinit) + dev_info(&slot->mmc->class_dev, + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", + slot->id, host->bus_hz, clock, + div ?
((host->bus_hz / div) >> 1) : + host->bus_hz, div); + + /* + * If the card is polling, display the message only + * once at boot time. + */ + if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && + slot->mmc->f_min == clock) + set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); + } + + /* disable clock */ + mci_writel(host, CLKENA, 0); + mci_writel(host, CLKSRC, 0); + + /* inform CIU */ + mci_send_cmd(slot, sdmmc_cmd_bits, 0); + + /* set clock to desired speed */ + mci_writel(host, CLKDIV, div); + + /* inform CIU */ + mci_send_cmd(slot, sdmmc_cmd_bits, 0); + + /* enable clock; only low power if no SDIO */ + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; + if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; + mci_writel(host, CLKENA, clk_en_a); + + /* inform CIU */ + mci_send_cmd(slot, sdmmc_cmd_bits, 0); + + /* keep the last clock value that was requested from core */ + slot->__clk_old = clock; + slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) : + host->bus_hz; + } + + host->current_speed = clock; + + /* Set the current slot bus width */ + mci_writel(host, CTYPE, (slot->ctype << slot->id)); +} + +static void __dw_mci_start_request(struct dw_mci *host, + struct dw_mci_slot *slot, + struct mmc_command *cmd) +{ + struct mmc_request *mrq; + struct mmc_data *data; + u32 cmdflags; + + mrq = slot->mrq; + + host->mrq = mrq; + + host->pending_events = 0; + host->completed_events = 0; + host->cmd_status = 0; + host->data_status = 0; + host->dir_status = 0; + + data = cmd->data; + if (data) { + mci_writel(host, TMOUT, 0xFFFFFFFF); + mci_writel(host, BYTCNT, data->blksz*data->blocks); + mci_writel(host, BLKSIZ, data->blksz); + } + + cmdflags = dw_mci_prepare_command(slot->mmc, cmd); + + /* this is the first command, send the initialization clock */ + if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) + cmdflags |= SDMMC_CMD_INIT; + + if (data) { + dw_mci_submit_data(host, data); + wmb(); /* drain writebuffer */ + } + + dw_mci_start_command(host, cmd, cmdflags); + + if (cmd->opcode == SD_SWITCH_VOLTAGE) { + unsigned long irqflags; + + /* + * Databook says to fail after 2ms w/ no response, but evidence + * shows that sometimes the cmd11 interrupt takes over 130ms. + * We'll set to 500ms, plus an extra jiffy just in case jiffies + * is just about to roll over. + * + * We do this whole thing under spinlock and only if the + * command hasn't already completed (indicating that the irq + * already ran so we don't want the timeout). + */ + spin_lock_irqsave(&host->irq_lock, irqflags); + if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) + mod_timer(&host->cmd11_timer, + jiffies + msecs_to_jiffies(500) + 1); + spin_unlock_irqrestore(&host->irq_lock, irqflags); + } + + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); +} + +static void dw_mci_start_request(struct dw_mci *host, + struct dw_mci_slot *slot) +{ + struct mmc_request *mrq = slot->mrq; + struct mmc_command *cmd; + + cmd = mrq->sbc ?
mrq->sbc : mrq->cmd; + __dw_mci_start_request(host, slot, cmd); +} + +/* must be called with host->lock held */ +static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, + struct mmc_request *mrq) +{ + dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", + host->state); + + slot->mrq = mrq; + + if (host->state == STATE_WAITING_CMD11_DONE) { + dev_warn(&slot->mmc->class_dev, + "Voltage change didn't complete\n"); + /* + * this case isn't expected to happen, so we can + * either crash here or just try to continue on + * in the closest possible state + */ + host->state = STATE_IDLE; + } + + if (host->state == STATE_IDLE) { + host->state = STATE_SENDING_CMD; + dw_mci_start_request(host, slot); + } else { + list_add_tail(&slot->queue_node, &host->queue); + } +} + +static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + + WARN_ON(slot->mrq); + + /* + * The check for card presence and queueing of the request must be + * atomic, otherwise the card could be removed in between and the + * request wouldn't fail until another card was inserted. + */ + + if (!dw_mci_get_cd(mmc)) { + mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + return; + } + + spin_lock_bh(&host->lock); + + dw_mci_queue_request(host, slot, mrq); + + spin_unlock_bh(&host->lock); +} + +static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + const struct dw_mci_drv_data *drv_data = slot->host->drv_data; + u32 regs; + int ret; + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_4: + slot->ctype = SDMMC_CTYPE_4BIT; + break; + case MMC_BUS_WIDTH_8: + slot->ctype = SDMMC_CTYPE_8BIT; + break; + default: + /* set default 1 bit mode */ + slot->ctype = SDMMC_CTYPE_1BIT; + } + + regs = mci_readl(slot->host, UHS_REG); + + /* DDR mode set */ + if (ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_MMC_HS400) + regs |= ((0x1 << slot->id) << 16); + else + regs &= ~((0x1 << slot->id) << 16); + + mci_writel(slot->host, UHS_REG, regs); + slot->host->timing = ios->timing; + + /* + * Use mirror of ios->clock to prevent race with mmc + * core ios update when finding the minimum. 
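+ * (The cached value is only pushed to the controller later, in + * dw_mci_setup_bus(), which compares it against host->current_speed + * before reprogramming CLKDIV.)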
+ */ + slot->clock = ios->clock; + + if (drv_data && drv_data->set_ios) + drv_data->set_ios(slot->host, ios); + + switch (ios->power_mode) { + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, + ios->vdd); + if (ret) { + dev_err(slot->host->dev, + "failed to enable vmmc regulator\n"); + /*return, if failed turn on vmmc*/ + return; + } + } + set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); + regs = mci_readl(slot->host, PWREN); + regs |= (1 << slot->id); + mci_writel(slot->host, PWREN, regs); + break; + case MMC_POWER_ON: + if (!slot->host->vqmmc_enabled) { + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret < 0) + dev_err(slot->host->dev, + "failed to enable vqmmc\n"); + else + slot->host->vqmmc_enabled = true; + + } else { + /* Keep track so we don't reset again */ + slot->host->vqmmc_enabled = true; + } + + /* Reset our state machine after powering on */ + dw_mci_ctrl_reset(slot->host, + SDMMC_CTRL_ALL_RESET_FLAGS); + } + + /* Adjust clock / bus width after power is up */ + dw_mci_setup_bus(slot, false); + + break; + case MMC_POWER_OFF: + /* Turn clock off before power goes down */ + dw_mci_setup_bus(slot, false); + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) + regulator_disable(mmc->supply.vqmmc); + slot->host->vqmmc_enabled = false; + + regs = mci_readl(slot->host, PWREN); + regs &= ~(1 << slot->id); + mci_writel(slot->host, PWREN, regs); + break; + default: + break; + } + + if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) + slot->host->state = STATE_IDLE; +} + +static int dw_mci_card_busy(struct mmc_host *mmc) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + u32 status; + + /* + * Check the busy bit which is low when DAT[3:0] + * (the data lines) are 0000 + */ + status = mci_readl(slot->host, STATUS); + + return !!(status & SDMMC_STATUS_BUSY); +} + +static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + u32 uhs; + u32 v18 = SDMMC_UHS_18V << slot->id; + int ret; + + if (drv_data && drv_data->switch_voltage) + return drv_data->switch_voltage(mmc, ios); + + /* + * Program the voltage. Note that some instances of dw_mmc may use + * the UHS_REG for this. For other instances (like exynos) the UHS_REG + * does no harm but you need to set the regulator directly. Try both. + */ + uhs = mci_readl(host, UHS_REG); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + uhs &= ~v18; + else + uhs |= v18; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) { + dev_dbg(&mmc->class_dev, + "Regulator set error %d - %s V\n", + ret, uhs & v18 ? "1.8" : "3.3"); + return ret; + } + } + mci_writel(host, UHS_REG, uhs); + + return 0; +} + +static int dw_mci_get_ro(struct mmc_host *mmc) +{ + int read_only; + struct dw_mci_slot *slot = mmc_priv(mmc); + int gpio_ro = mmc_gpio_get_ro(mmc); + + /* Use platform get_ro function, else try on board write protect */ + if (gpio_ro >= 0) + read_only = gpio_ro; + else + read_only = + mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; + + dev_dbg(&mmc->class_dev, "card is %s\n", + read_only ? 
"read-only" : "read-write"); + + return read_only; +} + +static void dw_mci_hw_reset(struct mmc_host *mmc) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + int reset; + + if (host->use_dma == TRANS_MODE_IDMAC) + dw_mci_idmac_reset(host); + + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | + SDMMC_CTRL_FIFO_RESET)) + return; + + /* + * According to eMMC spec, card reset procedure: + * tRstW >= 1us: RST_n pulse width + * tRSCA >= 200us: RST_n to Command time + * tRSTH >= 1us: RST_n high period + */ + reset = mci_readl(host, RST_N); + reset &= ~(SDMMC_RST_HWACTIVE << slot->id); + mci_writel(host, RST_N, reset); + usleep_range(1, 2); + reset |= SDMMC_RST_HWACTIVE << slot->id; + mci_writel(host, RST_N, reset); + usleep_range(200, 300); +} + +static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + + /* + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + */ + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + u32 clk_en_a_old; + u32 clk_en_a; + + clk_en_a_old = mci_readl(host, CLKENA); + + if (card->type == MMC_TYPE_SDIO || + card->type == MMC_TYPE_SD_COMBO) { + set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); + clk_en_a = clk_en_a_old & ~clken_low_pwr; + } else { + clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); + clk_en_a = clk_en_a_old | clken_low_pwr; + } + + if (clk_en_a != clk_en_a_old) { + mci_writel(host, CLKENA, clk_en_a); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } + } +} + +static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb) +{ + struct dw_mci *host = slot->host; + unsigned long irqflags; + u32 int_mask; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + /* Enable/disable Slot Specific SDIO interrupt */ + int_mask = mci_readl(host, INTMASK); + if (enb) + int_mask |= SDMMC_INT_SDIO(slot->sdio_id); + else + int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); + mci_writel(host, INTMASK, int_mask); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); +} + +static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + + __dw_mci_enable_sdio_irq(slot, enb); + + /* Avoid runtime suspending the device when SDIO IRQ is enabled */ + if (enb) + pm_runtime_get_noresume(host->dev); + else + pm_runtime_put_noidle(host->dev); +} + +static void dw_mci_ack_sdio_irq(struct mmc_host *mmc) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + + __dw_mci_enable_sdio_irq(slot, 1); +} + +static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + int err = -EINVAL; + + if (drv_data && drv_data->execute_tuning) + err = drv_data->execute_tuning(slot, opcode); + return err; +} + +static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + + if (drv_data && drv_data->prepare_hs400_tuning) + return drv_data->prepare_hs400_tuning(host, ios); + + return 0; +} + +static bool dw_mci_reset(struct dw_mci *host) +{ + u32 flags = SDMMC_CTRL_RESET 
| SDMMC_CTRL_FIFO_RESET; + bool ret = false; + u32 status = 0; + + /* + * Resetting generates a block interrupt, hence setting + * the scatter-gather pointer to NULL. + */ + if (host->sg) { + sg_miter_stop(&host->sg_miter); + host->sg = NULL; + } + + if (host->use_dma) + flags |= SDMMC_CTRL_DMA_RESET; + + if (dw_mci_ctrl_reset(host, flags)) { + /* + * In all cases we clear the RAWINTS + * register to clear any interrupts. + */ + mci_writel(host, RINTSTS, 0xFFFFFFFF); + + if (!host->use_dma) { + ret = true; + goto ciu_out; + } + + /* Wait for dma_req to be cleared */ + if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, + status, + !(status & SDMMC_STATUS_DMA_REQ), + 1, 500 * USEC_PER_MSEC)) { + dev_err(host->dev, + "%s: Timeout waiting for dma_req to be cleared\n", + __func__); + goto ciu_out; + } + + /* when using DMA next we reset the fifo again */ + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) + goto ciu_out; + } else { + /* if the controller reset bit did clear, then set clock regs */ + if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { + dev_err(host->dev, + "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", + __func__); + goto ciu_out; + } + } + + if (host->use_dma == TRANS_MODE_IDMAC) + /* It is also required that we reinit idmac */ + dw_mci_idmac_init(host); + + ret = true; + +ciu_out: + /* After a CTRL reset we need to have CIU set clock registers */ + mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0); + + return ret; +} + +static const struct mmc_host_ops dw_mci_ops = { + .request = dw_mci_request, + .pre_req = dw_mci_pre_req, + .post_req = dw_mci_post_req, + .set_ios = dw_mci_set_ios, + .get_ro = dw_mci_get_ro, + .get_cd = dw_mci_get_cd, + .hw_reset = dw_mci_hw_reset, + .enable_sdio_irq = dw_mci_enable_sdio_irq, + .ack_sdio_irq = dw_mci_ack_sdio_irq, + .execute_tuning = dw_mci_execute_tuning, + .card_busy = dw_mci_card_busy, + .start_signal_voltage_switch = dw_mci_switch_voltage, + .init_card = dw_mci_init_card, + .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, +}; + +static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) + __releases(&host->lock) + __acquires(&host->lock) +{ + struct dw_mci_slot *slot; + struct mmc_host *prev_mmc = host->slot->mmc; + + WARN_ON(host->cmd || host->data); + + host->slot->mrq = NULL; + host->mrq = NULL; + if (!list_empty(&host->queue)) { + slot = list_entry(host->queue.next, + struct dw_mci_slot, queue_node); + list_del(&slot->queue_node); + dev_vdbg(host->dev, "list not empty: %s is next\n", + mmc_hostname(slot->mmc)); + host->state = STATE_SENDING_CMD; + dw_mci_start_request(host, slot); + } else { + dev_vdbg(host->dev, "list empty\n"); + + if (host->state == STATE_SENDING_CMD11) + host->state = STATE_WAITING_CMD11_DONE; + else + host->state = STATE_IDLE; + } + + spin_unlock(&host->lock); + mmc_request_done(prev_mmc, mrq); + spin_lock(&host->lock); +} + +static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) +{ + u32 status = host->cmd_status; + + host->cmd_status = 0; + + /* Read the response from the card (up to 16 bytes) */ + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + cmd->resp[3] = mci_readl(host, RESP0); + cmd->resp[2] = mci_readl(host, RESP1); + cmd->resp[1] = mci_readl(host, RESP2); + cmd->resp[0] = mci_readl(host, RESP3); + } else { + cmd->resp[0] = mci_readl(host, RESP0); + cmd->resp[1] = 0; + cmd->resp[2] = 0; + cmd->resp[3] = 0; + } + } + + if (status & SDMMC_INT_RTO) + cmd->error = -ETIMEDOUT; + else if ((cmd->flags 
& MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) + cmd->error = -EILSEQ; + else if (status & SDMMC_INT_RESP_ERR) + cmd->error = -EIO; + else + cmd->error = 0; + + return cmd->error; +} + +static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) +{ + u32 status = host->data_status; + + if (status & DW_MCI_DATA_ERROR_FLAGS) { + if (status & SDMMC_INT_DRTO) { + data->error = -ETIMEDOUT; + } else if (status & SDMMC_INT_DCRC) { + data->error = -EILSEQ; + } else if (status & SDMMC_INT_EBE) { + if (host->dir_status == + DW_MCI_SEND_STATUS) { + /* + * No data CRC status was returned. + * The number of bytes transferred + * will be exaggerated in PIO mode. + */ + data->bytes_xfered = 0; + data->error = -ETIMEDOUT; + } else if (host->dir_status == + DW_MCI_RECV_STATUS) { + data->error = -EILSEQ; + } + } else { + /* SDMMC_INT_SBE is included */ + data->error = -EILSEQ; + } + + dev_dbg(host->dev, "data error, status 0x%08x\n", status); + + /* + * After an error, there may be data lingering + * in the FIFO + */ + dw_mci_reset(host); + } else { + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; + } + + return data->error; +} + +static void dw_mci_set_drto(struct dw_mci *host) +{ + unsigned int drto_clks; + unsigned int drto_div; + unsigned int drto_ms; + unsigned long irqflags; + + drto_clks = mci_readl(host, TMOUT) >> 8; + drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; + if (drto_div == 0) + drto_div = 1; + + drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, + host->bus_hz); + + /* add a bit of spare time */ + drto_ms += 10; + + spin_lock_irqsave(&host->irq_lock, irqflags); + if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) + mod_timer(&host->dto_timer, + jiffies + msecs_to_jiffies(drto_ms)); + spin_unlock_irqrestore(&host->irq_lock, irqflags); +} + +static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) +{ + if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) + return false; + + /* + * Really be certain that the timer has stopped. This is a bit of + * paranoia and could only really happen if we had really bad + * interrupt latency and the interrupt routine and timeout were + * running concurrently so that the del_timer() in the interrupt + * handler couldn't run.
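+ * del_timer_sync() returns nonzero only when it deactivated a timer + * that was still pending, so the WARN_ON() below fires in exactly + * that pathological case.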
+ */ + WARN_ON(del_timer_sync(&host->cto_timer)); + clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); + + return true; +} + +static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) +{ + if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) + return false; + + /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ + WARN_ON(del_timer_sync(&host->dto_timer)); + clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); + + return true; +} + +static void dw_mci_tasklet_func(unsigned long priv) +{ + struct dw_mci *host = (struct dw_mci *)priv; + struct mmc_data *data; + struct mmc_command *cmd; + struct mmc_request *mrq; + enum dw_mci_state state; + enum dw_mci_state prev_state; + unsigned int err; + + spin_lock(&host->lock); + + state = host->state; + data = host->data; + mrq = host->mrq; + + do { + prev_state = state; + + switch (state) { + case STATE_IDLE: + case STATE_WAITING_CMD11_DONE: + break; + + case STATE_SENDING_CMD11: + case STATE_SENDING_CMD: + if (!dw_mci_clear_pending_cmd_complete(host)) + break; + + cmd = host->cmd; + host->cmd = NULL; + set_bit(EVENT_CMD_COMPLETE, &host->completed_events); + err = dw_mci_command_complete(host, cmd); + if (cmd == mrq->sbc && !err) { + __dw_mci_start_request(host, host->slot, + mrq->cmd); + goto unlock; + } + + if (cmd->data && err) { + /* + * During UHS tuning sequence, sending the stop + * command after the response CRC error would + * throw the system into a confused state + * causing all future tuning phases to report + * failure. + * + * In such case controller will move into a data + * transfer state after a response error or + * response CRC error. Let's let that finish + * before trying to send a stop, so we'll go to + * STATE_SENDING_DATA. + * + * Although letting the data transfer take place + * will waste a bit of time (we already know + * the command was bad), it can't cause any + * errors since it's possible it would have + * taken place anyway if this tasklet got + * delayed. Allowing the transfer to take place + * avoids races and keeps things simple. + */ + if (err != -ETIMEDOUT && + host->dir_status == DW_MCI_RECV_STATUS) { + state = STATE_SENDING_DATA; + continue; + } + + send_stop_abort(host, data); + dw_mci_stop_dma(host); + state = STATE_SENDING_STOP; + break; + } + + if (!cmd->data || err) { + dw_mci_request_end(host, mrq); + goto unlock; + } + + prev_state = state = STATE_SENDING_DATA; + fallthrough; + + case STATE_SENDING_DATA: + /* + * We could get a data error and never a transfer + * complete so we'd better check for it here. + * + * Note that we don't really care if we also got a + * transfer complete; stopping the DMA and sending an + * abort won't hurt. + */ + if (test_and_clear_bit(EVENT_DATA_ERROR, + &host->pending_events)) { + if (!(host->data_status & (SDMMC_INT_DRTO | + SDMMC_INT_EBE))) + send_stop_abort(host, data); + dw_mci_stop_dma(host); + state = STATE_DATA_ERROR; + break; + } + + if (!test_and_clear_bit(EVENT_XFER_COMPLETE, + &host->pending_events)) { + /* + * If all data-related interrupts don't come + * within the given time in reading data state. + */ + if (host->dir_status == DW_MCI_RECV_STATUS) + dw_mci_set_drto(host); + break; + } + + set_bit(EVENT_XFER_COMPLETE, &host->completed_events); + + /* + * Handle an EVENT_DATA_ERROR that might have shown up + * before the transfer completed. This might not have + * been caught by the check above because the interrupt + * could have gone off between the previous check and + * the check for transfer complete. 
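+ * (The interrupt handler sets EVENT_DATA_ERROR without taking + * host->lock, so it really can race with this tasklet.)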
+ * + * Technically this ought not be needed assuming we + * get a DATA_COMPLETE eventually (we'll notice the + * error and end the request), but it shouldn't hurt. + * + * This has the advantage of sending the stop command. + */ + if (test_and_clear_bit(EVENT_DATA_ERROR, + &host->pending_events)) { + if (!(host->data_status & (SDMMC_INT_DRTO | + SDMMC_INT_EBE))) + send_stop_abort(host, data); + dw_mci_stop_dma(host); + state = STATE_DATA_ERROR; + break; + } + prev_state = state = STATE_DATA_BUSY; + + fallthrough; + + case STATE_DATA_BUSY: + if (!dw_mci_clear_pending_data_complete(host)) { + /* + * If the data error interrupt comes but the data over + * interrupt doesn't come within the given time + * in reading data state. + */ + if (host->dir_status == DW_MCI_RECV_STATUS) + dw_mci_set_drto(host); + break; + } + + host->data = NULL; + set_bit(EVENT_DATA_COMPLETE, &host->completed_events); + err = dw_mci_data_complete(host, data); + + if (!err) { + if (!data->stop || mrq->sbc) { + if (mrq->sbc && data->stop) + data->stop->error = 0; + dw_mci_request_end(host, mrq); + goto unlock; + } + + /* stop command for an open-ended transfer */ + if (data->stop) + send_stop_abort(host, data); + } else { + /* + * If we don't have a command complete now we'll + * never get one since we just reset everything; + * better end the request. + * + * If we do have a command complete we'll fall + * through to the SENDING_STOP command and + * everything will be peachy keen. + */ + if (!test_bit(EVENT_CMD_COMPLETE, + &host->pending_events)) { + host->cmd = NULL; + dw_mci_request_end(host, mrq); + goto unlock; + } + } + + /* + * If err is non-zero, + * the stop-abort command has already been issued. + */ + prev_state = state = STATE_SENDING_STOP; + + fallthrough; + + case STATE_SENDING_STOP: + if (!dw_mci_clear_pending_cmd_complete(host)) + break; + + /* CMD error in data command */ + if (mrq->cmd->error && mrq->data) + dw_mci_reset(host); + + host->cmd = NULL; + host->data = NULL; + + if (!mrq->sbc && mrq->stop) + dw_mci_command_complete(host, mrq->stop); + else + host->cmd_status = 0; + + dw_mci_request_end(host, mrq); + goto unlock; + + case STATE_DATA_ERROR: + if (!test_and_clear_bit(EVENT_XFER_COMPLETE, + &host->pending_events)) + break; + + state = STATE_DATA_BUSY; + break; + } + } while (state != prev_state); + + host->state = state; +unlock: + spin_unlock(&host->lock); + +} + +/* push final bytes to part_buf, only use during push */ +static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) +{ + memcpy((void *)&host->part_buf, buf, cnt); + host->part_buf_count = cnt; +} + +/* append bytes to part_buf, only use during push */ +static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) +{ + cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); + memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); + host->part_buf_count += cnt; + return cnt; +} + +/* pull first bytes from part_buf, only use during pull */ +static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) +{ + cnt = min_t(int, cnt, host->part_buf_count); + if (cnt) { + memcpy(buf, (void *)&host->part_buf + host->part_buf_start, + cnt); + host->part_buf_count -= cnt; + host->part_buf_start += cnt; + } + return cnt; +} + +/* pull final bytes from the part_buf, assuming it's just been filled */ +static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) +{ + memcpy(buf, &host->part_buf, cnt); + host->part_buf_start = cnt; + host->part_buf_count = (1 << host->data_shift) - cnt;
+} + +static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) +{ + struct mmc_data *data = host->data; + int init_cnt = cnt; + + /* try and push anything in the part_buf */ + if (unlikely(host->part_buf_count)) { + int len = dw_mci_push_part_bytes(host, buf, cnt); + + buf += len; + cnt -= len; + if (host->part_buf_count == 2) { + mci_fifo_writew(host->fifo_reg, host->part_buf16); + host->part_buf_count = 0; + } + } +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x1)) { + while (cnt >= 2) { + u16 aligned_buf[64]; + int len = min(cnt & -2, (int)sizeof(aligned_buf)); + int items = len >> 1; + int i; + /* memcpy from input buffer into aligned buffer */ + memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + /* push data from aligned buffer into fifo */ + for (i = 0; i < items; ++i) + mci_fifo_writew(host->fifo_reg, aligned_buf[i]); + } + } else +#endif + { + u16 *pdata = buf; + + for (; cnt >= 2; cnt -= 2) + mci_fifo_writew(host->fifo_reg, *pdata++); + buf = pdata; + } + /* put anything remaining in the part_buf */ + if (cnt) { + dw_mci_set_part_bytes(host, buf, cnt); + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) + mci_fifo_writew(host->fifo_reg, host->part_buf16); + } +} + +static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x1)) { + while (cnt >= 2) { + /* pull data from fifo into aligned buffer */ + u16 aligned_buf[64]; + int len = min(cnt & -2, (int)sizeof(aligned_buf)); + int items = len >> 1; + int i; + + for (i = 0; i < items; ++i) + aligned_buf[i] = mci_fifo_readw(host->fifo_reg); + /* memcpy from aligned buffer into output buffer */ + memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } else +#endif + { + u16 *pdata = buf; + + for (; cnt >= 2; cnt -= 2) + *pdata++ = mci_fifo_readw(host->fifo_reg); + buf = pdata; + } + if (cnt) { + host->part_buf16 = mci_fifo_readw(host->fifo_reg); + dw_mci_pull_final_bytes(host, buf, cnt); + } +} + +static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) +{ + struct mmc_data *data = host->data; + int init_cnt = cnt; + + /* try and push anything in the part_buf */ + if (unlikely(host->part_buf_count)) { + int len = dw_mci_push_part_bytes(host, buf, cnt); + + buf += len; + cnt -= len; + if (host->part_buf_count == 4) { + mci_fifo_writel(host->fifo_reg, host->part_buf32); + host->part_buf_count = 0; + } + } +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x3)) { + while (cnt >= 4) { + u32 aligned_buf[32]; + int len = min(cnt & -4, (int)sizeof(aligned_buf)); + int items = len >> 2; + int i; + /* memcpy from input buffer into aligned buffer */ + memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + /* push data from aligned buffer into fifo */ + for (i = 0; i < items; ++i) + mci_fifo_writel(host->fifo_reg, aligned_buf[i]); + } + } else +#endif + { + u32 *pdata = buf; + + for (; cnt >= 4; cnt -= 4) + mci_fifo_writel(host->fifo_reg, *pdata++); + buf = pdata; + } + /* put anything remaining in the part_buf */ + if (cnt) { + dw_mci_set_part_bytes(host, buf, cnt); + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) + mci_fifo_writel(host->fifo_reg, host->part_buf32); + } +} + +static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) +{ 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x3)) { + while (cnt >= 4) { + /* pull data from fifo into aligned buffer */ + u32 aligned_buf[32]; + int len = min(cnt & -4, (int)sizeof(aligned_buf)); + int items = len >> 2; + int i; + + for (i = 0; i < items; ++i) + aligned_buf[i] = mci_fifo_readl(host->fifo_reg); + /* memcpy from aligned buffer into output buffer */ + memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } else +#endif + { + u32 *pdata = buf; + + for (; cnt >= 4; cnt -= 4) + *pdata++ = mci_fifo_readl(host->fifo_reg); + buf = pdata; + } + if (cnt) { + host->part_buf32 = mci_fifo_readl(host->fifo_reg); + dw_mci_pull_final_bytes(host, buf, cnt); + } +} + +static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) +{ + struct mmc_data *data = host->data; + int init_cnt = cnt; + + /* try and push anything in the part_buf */ + if (unlikely(host->part_buf_count)) { + int len = dw_mci_push_part_bytes(host, buf, cnt); + + buf += len; + cnt -= len; + + if (host->part_buf_count == 8) { + mci_fifo_writeq(host->fifo_reg, host->part_buf); + host->part_buf_count = 0; + } + } +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x7)) { + while (cnt >= 8) { + u64 aligned_buf[16]; + int len = min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + int i; + /* memcpy from input buffer into aligned buffer */ + memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + /* push data from aligned buffer into fifo */ + for (i = 0; i < items; ++i) + mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); + } + } else +#endif + { + u64 *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + mci_fifo_writeq(host->fifo_reg, *pdata++); + buf = pdata; + } + /* put anything remaining in the part_buf */ + if (cnt) { + dw_mci_set_part_bytes(host, buf, cnt); + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) + mci_fifo_writeq(host->fifo_reg, host->part_buf); + } +} + +static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x7)) { + while (cnt >= 8) { + /* pull data from fifo into aligned buffer */ + u64 aligned_buf[16]; + int len = min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + int i; + + for (i = 0; i < items; ++i) + aligned_buf[i] = mci_fifo_readq(host->fifo_reg); + + /* memcpy from aligned buffer into output buffer */ + memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } else +#endif + { + u64 *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + *pdata++ = mci_fifo_readq(host->fifo_reg); + buf = pdata; + } + if (cnt) { + host->part_buf = mci_fifo_readq(host->fifo_reg); + dw_mci_pull_final_bytes(host, buf, cnt); + } +} + +static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) +{ + int len; + + /* get remaining partial bytes */ + len = dw_mci_pull_part_bytes(host, buf, cnt); + if (unlikely(len == cnt)) + return; + buf += len; + cnt -= len; + + /* get the rest of the data */ + host->pull_data(host, buf, cnt); +} + +static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) +{ + struct sg_mapping_iter *sg_miter = &host->sg_miter; + void *buf; + unsigned int offset; + struct mmc_data *data = host->data; + int shift = host->data_shift; + u32 status; + unsigned int len; + unsigned int remain, fcnt; + + do { + if (!sg_miter_next(sg_miter)) + goto done; + + host->sg = sg_miter->piter.sg; + 
buf = sg_miter->addr; + remain = sg_miter->length; + offset = 0; + + do { + fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) + << shift) + host->part_buf_count; + len = min(remain, fcnt); + if (!len) + break; + dw_mci_pull_data(host, (void *)(buf + offset), len); + data->bytes_xfered += len; + offset += len; + remain -= len; + } while (remain); + + sg_miter->consumed = offset; + status = mci_readl(host, MINTSTS); + mci_writel(host, RINTSTS, SDMMC_INT_RXDR); + /* if the RXDR is ready read again */ + } while ((status & SDMMC_INT_RXDR) || + (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); + + if (!remain) { + if (!sg_miter_next(sg_miter)) + goto done; + sg_miter->consumed = 0; + } + sg_miter_stop(sg_miter); + return; + +done: + sg_miter_stop(sg_miter); + host->sg = NULL; + smp_wmb(); /* drain writebuffer */ + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static void dw_mci_write_data_pio(struct dw_mci *host) +{ + struct sg_mapping_iter *sg_miter = &host->sg_miter; + void *buf; + unsigned int offset; + struct mmc_data *data = host->data; + int shift = host->data_shift; + u32 status; + unsigned int len; + unsigned int fifo_depth = host->fifo_depth; + unsigned int remain, fcnt; + + do { + if (!sg_miter_next(sg_miter)) + goto done; + + host->sg = sg_miter->piter.sg; + buf = sg_miter->addr; + remain = sg_miter->length; + offset = 0; + + do { + fcnt = ((fifo_depth - + SDMMC_GET_FCNT(mci_readl(host, STATUS))) + << shift) - host->part_buf_count; + len = min(remain, fcnt); + if (!len) + break; + host->push_data(host, (void *)(buf + offset), len); + data->bytes_xfered += len; + offset += len; + remain -= len; + } while (remain); + + sg_miter->consumed = offset; + status = mci_readl(host, MINTSTS); + mci_writel(host, RINTSTS, SDMMC_INT_TXDR); + } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ + + if (!remain) { + if (!sg_miter_next(sg_miter)) + goto done; + sg_miter->consumed = 0; + } + sg_miter_stop(sg_miter); + return; + +done: + sg_miter_stop(sg_miter); + host->sg = NULL; + smp_wmb(); /* drain writebuffer */ + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) +{ + del_timer(&host->cto_timer); + + if (!host->cmd_status) + host->cmd_status = status; + + smp_wmb(); /* drain writebuffer */ + + set_bit(EVENT_CMD_COMPLETE, &host->pending_events); + tasklet_schedule(&host->tasklet); +} + +static void dw_mci_handle_cd(struct dw_mci *host) +{ + struct dw_mci_slot *slot = host->slot; + + if (slot->mmc->ops->card_event) + slot->mmc->ops->card_event(slot->mmc); + mmc_detect_change(slot->mmc, + msecs_to_jiffies(host->pdata->detect_delay_ms)); +} + +static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) +{ + struct dw_mci *host = dev_id; + u32 pending; + struct dw_mci_slot *slot = host->slot; + unsigned long irqflags; + + pending = mci_readl(host, MINTSTS); /* read-only mask reg */ + + if (pending) { + /* Check volt switch first, since it can look like an error */ + if ((host->state == STATE_SENDING_CMD11) && + (pending & SDMMC_INT_VOLT_SWITCH)) { + mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); + pending &= ~SDMMC_INT_VOLT_SWITCH; + + /* + * Hold the lock; we know cmd11_timer can't be kicked + * off after the lock is released, so safe to delete. 
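+ * (cmd11_timer is armed in __dw_mci_start_request() under this + * same irq_lock, which is what makes that guarantee hold.)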
+ */ + spin_lock_irqsave(&host->irq_lock, irqflags); + dw_mci_cmd_interrupt(host, pending); + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + del_timer(&host->cmd11_timer); + } + + if (pending & DW_MCI_CMD_ERROR_FLAGS) { + spin_lock_irqsave(&host->irq_lock, irqflags); + + del_timer(&host->cto_timer); + mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); + host->cmd_status = pending; + smp_wmb(); /* drain writebuffer */ + set_bit(EVENT_CMD_COMPLETE, &host->pending_events); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + } + + if (pending & DW_MCI_DATA_ERROR_FLAGS) { + /* if there is an error report DATA_ERROR */ + mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); + host->data_status = pending; + smp_wmb(); /* drain writebuffer */ + set_bit(EVENT_DATA_ERROR, &host->pending_events); + tasklet_schedule(&host->tasklet); + } + + if (pending & SDMMC_INT_DATA_OVER) { + spin_lock_irqsave(&host->irq_lock, irqflags); + + del_timer(&host->dto_timer); + + mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); + if (!host->data_status) + host->data_status = pending; + smp_wmb(); /* drain writebuffer */ + if (host->dir_status == DW_MCI_RECV_STATUS) { + if (host->sg != NULL) + dw_mci_read_data_pio(host, true); + } + set_bit(EVENT_DATA_COMPLETE, &host->pending_events); + tasklet_schedule(&host->tasklet); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + } + + if (pending & SDMMC_INT_RXDR) { + mci_writel(host, RINTSTS, SDMMC_INT_RXDR); + if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) + dw_mci_read_data_pio(host, false); + } + + if (pending & SDMMC_INT_TXDR) { + mci_writel(host, RINTSTS, SDMMC_INT_TXDR); + if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) + dw_mci_write_data_pio(host); + } + + if (pending & SDMMC_INT_CMD_DONE) { + spin_lock_irqsave(&host->irq_lock, irqflags); + + mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); + dw_mci_cmd_interrupt(host, pending); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + } + + if (pending & SDMMC_INT_CD) { + mci_writel(host, RINTSTS, SDMMC_INT_CD); + dw_mci_handle_cd(host); + } + + if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { + mci_writel(host, RINTSTS, + SDMMC_INT_SDIO(slot->sdio_id)); + __dw_mci_enable_sdio_irq(slot, 0); + sdio_signal_irq(slot->mmc); + } + + } + + if (host->use_dma != TRANS_MODE_IDMAC) + return IRQ_HANDLED; + + /* Handle IDMA interrupts */ + if (host->dma_64bit_address == 1) { + pending = mci_readl(host, IDSTS64); + if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { + mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | + SDMMC_IDMAC_INT_RI); + mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); + if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) + host->dma_ops->complete((void *)host); + } + } else { + pending = mci_readl(host, IDSTS); + if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { + mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | + SDMMC_IDMAC_INT_RI); + mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); + if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) + host->dma_ops->complete((void *)host); + } + } + + return IRQ_HANDLED; +} + +static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + struct mmc_host *mmc = slot->mmc; + int ctrl_id; + + if (host->pdata->caps) + mmc->caps = host->pdata->caps; + + if (host->pdata->pm_caps) + mmc->pm_caps = host->pdata->pm_caps; + + if (host->dev->of_node) { + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (ctrl_id < 0) + ctrl_id = 0; + } else 
{ + ctrl_id = to_platform_device(host->dev)->id; + } + + if (drv_data && drv_data->caps) { + if (ctrl_id >= drv_data->num_caps) { + dev_err(host->dev, "invalid controller id %d\n", + ctrl_id); + return -EINVAL; + } + mmc->caps |= drv_data->caps[ctrl_id]; + } + + if (host->pdata->caps2) + mmc->caps2 = host->pdata->caps2; + + mmc->f_min = DW_MCI_FREQ_MIN; + if (!mmc->f_max) + mmc->f_max = DW_MCI_FREQ_MAX; + + /* Process SDIO IRQs through the sdio_irq_work. */ + if (mmc->caps & MMC_CAP_SDIO_IRQ) + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + return 0; +} + +static int dw_mci_init_slot(struct dw_mci *host) +{ + struct mmc_host *mmc; + struct dw_mci_slot *slot; + int ret; + + mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); + if (!mmc) + return -ENOMEM; + + slot = mmc_priv(mmc); + slot->id = 0; + slot->sdio_id = host->sdio_id0 + slot->id; + slot->mmc = mmc; + slot->host = host; + host->slot = slot; + + mmc->ops = &dw_mci_ops; + + /* if there are external regulators, get them */ + ret = mmc_regulator_get_supply(mmc); + if (ret) + goto err_host_allocated; + + if (!mmc->ocr_avail) + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + ret = mmc_of_parse(mmc); + if (ret) + goto err_host_allocated; + + ret = dw_mci_init_slot_caps(slot); + if (ret) + goto err_host_allocated; + + /* Useful defaults if platform data is unset. */ + if (host->use_dma == TRANS_MODE_IDMAC) { + mmc->max_segs = host->ring_size; + mmc->max_blk_size = 65535; + mmc->max_seg_size = 0x1000; + mmc->max_req_size = mmc->max_seg_size * host->ring_size; + mmc->max_blk_count = mmc->max_req_size / 512; + } else if (host->use_dma == TRANS_MODE_EDMAC) { + mmc->max_segs = 64; + mmc->max_blk_size = 65535; + mmc->max_blk_count = 65535; + mmc->max_req_size = + mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + } else { + /* TRANS_MODE_PIO */ + mmc->max_segs = 64; + mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ + mmc->max_blk_count = 512; + mmc->max_req_size = mmc->max_blk_size * + mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + } + + dw_mci_get_cd(mmc); + + ret = mmc_add_host(mmc); + if (ret) + goto err_host_allocated; + +#if defined(CONFIG_DEBUG_FS) + dw_mci_init_debugfs(slot); +#endif + + return 0; + +err_host_allocated: + mmc_free_host(mmc); + return ret; +} + +static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) +{ + /* Debugfs stuff is cleaned up by mmc core */ + mmc_remove_host(slot->mmc); + slot->host->slot = NULL; + mmc_free_host(slot->mmc); +} + +static void dw_mci_init_dma(struct dw_mci *host) +{ + int addr_config; + struct device *dev = host->dev; + + /* + * Check the transfer mode from HCON[17:16]. + * This clears up the ambiguous description in the dw_mmc databook: + * 2b'00: No DMA Interface -> Actually means using Internal DMA block + * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block + * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block + * 2b'11: Non DW DMA Interface -> PIO only + * Compared to the DesignWare DMA Interface, the Generic DMA Interface + * has a simpler request/acknowledge handshake mechanism, and both are + * regarded as external DMA masters by dw_mmc.
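+ * So, as decoded below: 2b'00 selects dw_mci_idmac_ops, 2b'01 and + * 2b'10 both select dw_mci_edmac_ops, and anything else falls + * through to PIO.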
+ */ + host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); + if (host->use_dma == DMA_INTERFACE_IDMA) { + host->use_dma = TRANS_MODE_IDMAC; + } else if (host->use_dma == DMA_INTERFACE_DWDMA || + host->use_dma == DMA_INTERFACE_GDMA) { + host->use_dma = TRANS_MODE_EDMAC; + } else { + goto no_dma; + } + + /* Determine which DMA interface to use */ + if (host->use_dma == TRANS_MODE_IDMAC) { + /* + * Check ADDR_CONFIG bit in HCON to find + * IDMAC address bus width + */ + addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); + + if (addr_config == 1) { + /* host supports IDMAC in 64-bit address mode */ + host->dma_64bit_address = 1; + dev_info(host->dev, + "IDMAC supports 64-bit address mode.\n"); + if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) + dma_set_coherent_mask(host->dev, + DMA_BIT_MASK(64)); + } else { + /* host supports IDMAC in 32-bit address mode */ + host->dma_64bit_address = 0; + dev_info(host->dev, + "IDMAC supports 32-bit address mode.\n"); + } + + /* Alloc memory for sg translation */ + host->sg_cpu = dmam_alloc_coherent(host->dev, + DESC_RING_BUF_SZ, + &host->sg_dma, GFP_KERNEL); + if (!host->sg_cpu) { + dev_err(host->dev, + "%s: could not alloc DMA memory\n", + __func__); + goto no_dma; + } + + host->dma_ops = &dw_mci_idmac_ops; + dev_info(host->dev, "Using internal DMA controller.\n"); + } else { + /* TRANS_MODE_EDMAC: check dma bindings again */ + if ((device_property_read_string_array(dev, "dma-names", + NULL, 0) < 0) || + !device_property_present(dev, "dmas")) { + goto no_dma; + } + host->dma_ops = &dw_mci_edmac_ops; + dev_info(host->dev, "Using external DMA controller.\n"); + } + + if (host->dma_ops->init && host->dma_ops->start && + host->dma_ops->stop && host->dma_ops->cleanup) { + if (host->dma_ops->init(host)) { + dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", + __func__); + goto no_dma; + } + } else { + dev_err(host->dev, "DMA initialization not found.\n"); + goto no_dma; + } + + return; + +no_dma: + dev_info(host->dev, "Using PIO mode.\n"); + host->use_dma = TRANS_MODE_PIO; +} + +static void dw_mci_cmd11_timer(struct timer_list *t) +{ + struct dw_mci *host = from_timer(host, t, cmd11_timer); + + if (host->state != STATE_SENDING_CMD11) { + dev_warn(host->dev, "Unexpected CMD11 timeout\n"); + return; + } + + host->cmd_status = SDMMC_INT_RTO; + set_bit(EVENT_CMD_COMPLETE, &host->pending_events); + tasklet_schedule(&host->tasklet); +} + +static void dw_mci_cto_timer(struct timer_list *t) +{ + struct dw_mci *host = from_timer(host, t, cto_timer); + unsigned long irqflags; + u32 pending; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + /* + * If somehow we have very bad interrupt latency it's remotely possible + * that the timer could fire while the interrupt is still pending or + * while the interrupt is midway through running. Let's be paranoid + * and detect those two cases. Note that this paranoia is somewhat + * justified because in this function we don't actually cancel the + * pending command in the controller--we just assume it will never come.
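+ * Both guards below therefore only warn and bail out: if MINTSTS + * still shows the command interrupt pending, or EVENT_CMD_COMPLETE + * is already set, the normal completion path will run (or already has).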
+
+static void dw_mci_cmd11_timer(struct timer_list *t)
+{
+	struct dw_mci *host = from_timer(host, t, cmd11_timer);
+
+	if (host->state != STATE_SENDING_CMD11) {
+		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
+		return;
+	}
+
+	host->cmd_status = SDMMC_INT_RTO;
+	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+	tasklet_schedule(&host->tasklet);
+}
+
+static void dw_mci_cto_timer(struct timer_list *t)
+{
+	struct dw_mci *host = from_timer(host, t, cto_timer);
+	unsigned long irqflags;
+	u32 pending;
+
+	spin_lock_irqsave(&host->irq_lock, irqflags);
+
+	/*
+	 * If somehow we have very bad interrupt latency it's remotely possible
+	 * that the timer could fire while the interrupt is still pending or
+	 * while the interrupt is midway through running. Let's be paranoid
+	 * and detect those two cases. Note that this paranoia is somewhat
+	 * justified because in this function we don't actually cancel the
+	 * pending command in the controller; we just assume it will never come.
+	 */
+	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
+		/* The interrupt should fire; no need to act but we can warn */
+		dev_warn(host->dev, "Unexpected interrupt latency\n");
+		goto exit;
+	}
+	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
+		/* Presumably the interrupt handler couldn't delete the timer */
+		dev_warn(host->dev, "CTO timeout when already completed\n");
+		goto exit;
+	}
+
+	/*
+	 * Continued paranoia to make sure we're in the state we expect.
+	 * This paranoia isn't really justified but it seems good to be safe.
+	 */
+	switch (host->state) {
+	case STATE_SENDING_CMD11:
+	case STATE_SENDING_CMD:
+	case STATE_SENDING_STOP:
+		/*
+		 * If the CMD_DONE interrupt does NOT come in a command-sending
+		 * state, we should notify the driver to terminate the current
+		 * transfer and report a command timeout to the core.
+		 */
+		host->cmd_status = SDMMC_INT_RTO;
+		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+		break;
+	default:
+		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
+			 host->state);
+		break;
+	}
+
+exit:
+	spin_unlock_irqrestore(&host->irq_lock, irqflags);
+}
+
+static void dw_mci_dto_timer(struct timer_list *t)
+{
+	struct dw_mci *host = from_timer(host, t, dto_timer);
+	unsigned long irqflags;
+	u32 pending;
+
+	spin_lock_irqsave(&host->irq_lock, irqflags);
+
+	/*
+	 * The DTO timer is much longer than the CTO timer, so it's even less
+	 * likely that we'll hit these cases, but it pays to be paranoid.
+	 */
+	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+	if (pending & SDMMC_INT_DATA_OVER) {
+		/* The interrupt should fire; no need to act but we can warn */
+		dev_warn(host->dev, "Unexpected data interrupt latency\n");
+		goto exit;
+	}
+	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+		/* Presumably the interrupt handler couldn't delete the timer */
+		dev_warn(host->dev, "DTO timeout when already completed\n");
+		goto exit;
+	}
+
+	/*
+	 * Continued paranoia to make sure we're in the state we expect.
+	 * This paranoia isn't really justified but it seems good to be safe.
+	 */
+	switch (host->state) {
+	case STATE_SENDING_DATA:
+	case STATE_DATA_BUSY:
+		/*
+		 * If the DTO interrupt does NOT come in the data-sending state,
+		 * we should notify the driver to terminate the current transfer
+		 * and report a data timeout to the core.
+		 */
+		host->data_status = SDMMC_INT_DRTO;
+		set_bit(EVENT_DATA_ERROR, &host->pending_events);
+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+		break;
+	default:
+		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+			 host->state);
+		break;
+	}
+
+exit:
+	spin_unlock_irqrestore(&host->irq_lock, irqflags);
+}
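Both timers follow the same defensive pattern: take the IRQ lock, re-read the masked interrupt status, and only fabricate a timeout if neither the interrupt nor the tasklet has already won the race. A condensed, standalone sketch of that decision; the names here are illustrative, not the driver's:

    #include <stdio.h>

    enum timeout_verdict { IRQ_PENDING, ALREADY_DONE, REAL_TIMEOUT };

    /* mintsts stands in for the MINTSTS readout, done_mask for the
     * completion interrupt bits, event_complete for the pending-events
     * bit the tasklet would consume. */
    static enum timeout_verdict check_timeout(unsigned int mintsts,
    					      unsigned int done_mask,
    					      int event_complete)
    {
    	if (mintsts & done_mask)
    		return IRQ_PENDING;	/* IRQ latency; let the ISR win */
    	if (event_complete)
    		return ALREADY_DONE;	/* ISR ran but couldn't del_timer */
    	return REAL_TIMEOUT;		/* safe to fake a timeout status */
    }

    int main(void)
    {
    	/* no completion bit pending, no event set: a genuine timeout */
    	printf("verdict: %d\n", check_timeout(0x0, 0x4, 0));
    	return 0;
    }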
+
+#ifdef CONFIG_OF
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+	struct dw_mci_board *pdata;
+	struct device *dev = host->dev;
+	const struct dw_mci_drv_data *drv_data = host->drv_data;
+	int ret;
+	u32 clock_frequency;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	/* find the optional reset controller, if one exists */
+	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
+	if (IS_ERR(pdata->rstc)) {
+		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
+			return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
+		dev_info(dev,
+			 "fifo-depth property not found, using value of FIFOTH register as default\n");
+
+	device_property_read_u32(dev, "card-detect-delay",
+				 &pdata->detect_delay_ms);
+
+	device_property_read_u32(dev, "data-addr", &host->data_addr_override);
+
+	if (device_property_present(dev, "fifo-watermark-aligned"))
+		host->wm_aligned = true;
+
+	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
+		pdata->bus_hz = clock_frequency;
+
+	if (drv_data && drv_data->parse_dt) {
+		ret = drv_data->parse_dt(host);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	return pdata;
+}
+
+#else /* CONFIG_OF */
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+	return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_OF */
+
+static void dw_mci_enable_cd(struct dw_mci *host)
+{
+	unsigned long irqflags;
+	u32 temp;
+
+	/*
+	 * No need for the CD interrupt if the slot uses polling because of
+	 * broken card detection, or if it has a usable card-detect GPIO.
+ */ + if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) + return; + + if (mmc_gpio_get_cd(host->slot->mmc) < 0) { + spin_lock_irqsave(&host->irq_lock, irqflags); + temp = mci_readl(host, INTMASK); + temp |= SDMMC_INT_CD; + mci_writel(host, INTMASK, temp); + spin_unlock_irqrestore(&host->irq_lock, irqflags); + } +} + +int dw_mci_probe(struct dw_mci *host) +{ + const struct dw_mci_drv_data *drv_data = host->drv_data; + int width, i, ret = 0; + u32 fifo_size; + + if (!host->pdata) { + host->pdata = dw_mci_parse_dt(host); + if (IS_ERR(host->pdata)) + return dev_err_probe(host->dev, PTR_ERR(host->pdata), + "platform data not available\n"); + } + + host->biu_clk = devm_clk_get(host->dev, "biu"); + if (IS_ERR(host->biu_clk)) { + dev_dbg(host->dev, "biu clock not available\n"); + } else { + ret = clk_prepare_enable(host->biu_clk); + if (ret) { + dev_err(host->dev, "failed to enable biu clock\n"); + return ret; + } + } + + host->ciu_clk = devm_clk_get(host->dev, "ciu"); + if (IS_ERR(host->ciu_clk)) { + dev_dbg(host->dev, "ciu clock not available\n"); + host->bus_hz = host->pdata->bus_hz; + } else { + ret = clk_prepare_enable(host->ciu_clk); + if (ret) { + dev_err(host->dev, "failed to enable ciu clock\n"); + goto err_clk_biu; + } + + if (host->pdata->bus_hz) { + ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); + if (ret) + dev_warn(host->dev, + "Unable to set bus rate to %uHz\n", + host->pdata->bus_hz); + } + host->bus_hz = clk_get_rate(host->ciu_clk); + } + + if (!host->bus_hz) { + dev_err(host->dev, + "Platform data must supply bus speed\n"); + ret = -ENODEV; + goto err_clk_ciu; + } + + if (!IS_ERR(host->pdata->rstc)) { + reset_control_assert(host->pdata->rstc); + usleep_range(10, 50); + reset_control_deassert(host->pdata->rstc); + } + + if (drv_data && drv_data->init) { + ret = drv_data->init(host); + if (ret) { + dev_err(host->dev, + "implementation specific init failed\n"); + goto err_clk_ciu; + } + } + + timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); + timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); + timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); + + spin_lock_init(&host->lock); + spin_lock_init(&host->irq_lock); + INIT_LIST_HEAD(&host->queue); + + /* + * Get the host data width - this assumes that HCON has been set with + * the correct values. 
+ */ + i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); + if (!i) { + host->push_data = dw_mci_push_data16; + host->pull_data = dw_mci_pull_data16; + width = 16; + host->data_shift = 1; + } else if (i == 2) { + host->push_data = dw_mci_push_data64; + host->pull_data = dw_mci_pull_data64; + width = 64; + host->data_shift = 3; + } else { + /* Check for a reserved value, and warn if it is */ + WARN((i != 1), + "HCON reports a reserved host data width!\n" + "Defaulting to 32-bit access.\n"); + host->push_data = dw_mci_push_data32; + host->pull_data = dw_mci_pull_data32; + width = 32; + host->data_shift = 2; + } + + /* Reset all blocks */ + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { + ret = -ENODEV; + goto err_clk_ciu; + } + + host->dma_ops = host->pdata->dma_ops; + dw_mci_init_dma(host); + + /* Clear the interrupts for the host controller */ + mci_writel(host, RINTSTS, 0xFFFFFFFF); + mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ + + /* Put in max timeout */ + mci_writel(host, TMOUT, 0xFFFFFFFF); + + /* + * FIFO threshold settings RxMark = fifo_size / 2 - 1, + * Tx Mark = fifo_size / 2 DMA Size = 8 + */ + if (!host->pdata->fifo_depth) { + /* + * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may + * have been overwritten by the bootloader, just like we're + * about to do, so if you know the value for your hardware, you + * should put it in the platform data. + */ + fifo_size = mci_readl(host, FIFOTH); + fifo_size = 1 + ((fifo_size >> 16) & 0xfff); + } else { + fifo_size = host->pdata->fifo_depth; + } + host->fifo_depth = fifo_size; + host->fifoth_val = + SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); + mci_writel(host, FIFOTH, host->fifoth_val); + + /* disable clock to CIU */ + mci_writel(host, CLKENA, 0); + mci_writel(host, CLKSRC, 0); + + /* + * In 2.40a spec, Data offset is changed. + * Need to check the version-id and set data-offset for DATA register. 
+ */ + host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); + dev_info(host->dev, "Version ID is %04x\n", host->verid); + + if (host->data_addr_override) + host->fifo_reg = host->regs + host->data_addr_override; + else if (host->verid < DW_MMC_240A) + host->fifo_reg = host->regs + DATA_OFFSET; + else + host->fifo_reg = host->regs + DATA_240A_OFFSET; + + tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); + ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, + host->irq_flags, "dw-mci", host); + if (ret) + goto err_dmaunmap; + + /* + * Enable interrupts for command done, data over, data empty, + * receive ready and error such as transmit, receive timeout, crc error + */ + mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | + SDMMC_INT_TXDR | SDMMC_INT_RXDR | + DW_MCI_ERROR_FLAGS); + /* Enable mci interrupt */ + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); + + dev_info(host->dev, + "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", + host->irq, width, fifo_size); + + /* We need at least one slot to succeed */ + ret = dw_mci_init_slot(host); + if (ret) { + dev_dbg(host->dev, "slot %d init failed\n", i); + goto err_dmaunmap; + } + + /* Now that slots are all setup, we can enable card detect */ + dw_mci_enable_cd(host); + + return 0; + +err_dmaunmap: + if (host->use_dma && host->dma_ops->exit) + host->dma_ops->exit(host); + + if (!IS_ERR(host->pdata->rstc)) + reset_control_assert(host->pdata->rstc); + +err_clk_ciu: + clk_disable_unprepare(host->ciu_clk); + +err_clk_biu: + clk_disable_unprepare(host->biu_clk); + + return ret; +} +EXPORT_SYMBOL(dw_mci_probe); + +void dw_mci_remove(struct dw_mci *host) +{ + dev_dbg(host->dev, "remove slot\n"); + if (host->slot) + dw_mci_cleanup_slot(host->slot); + + mci_writel(host, RINTSTS, 0xFFFFFFFF); + mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ + + /* disable clock to CIU */ + mci_writel(host, CLKENA, 0); + mci_writel(host, CLKSRC, 0); + + if (host->use_dma && host->dma_ops->exit) + host->dma_ops->exit(host); + + if (!IS_ERR(host->pdata->rstc)) + reset_control_assert(host->pdata->rstc); + + clk_disable_unprepare(host->ciu_clk); + clk_disable_unprepare(host->biu_clk); +} +EXPORT_SYMBOL(dw_mci_remove); + + + +#ifdef CONFIG_PM +int dw_mci_runtime_suspend(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + if (host->use_dma && host->dma_ops->exit) + host->dma_ops->exit(host); + + clk_disable_unprepare(host->ciu_clk); + + if (host->slot && + (mmc_can_gpio_cd(host->slot->mmc) || + !mmc_card_is_removable(host->slot->mmc))) + clk_disable_unprepare(host->biu_clk); + + return 0; +} +EXPORT_SYMBOL(dw_mci_runtime_suspend); + +int dw_mci_runtime_resume(struct device *dev) +{ + int ret = 0; + struct dw_mci *host = dev_get_drvdata(dev); + + if (host->slot && + (mmc_can_gpio_cd(host->slot->mmc) || + !mmc_card_is_removable(host->slot->mmc))) { + ret = clk_prepare_enable(host->biu_clk); + if (ret) + return ret; + } + + ret = clk_prepare_enable(host->ciu_clk); + if (ret) + goto err; + + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { + clk_disable_unprepare(host->ciu_clk); + ret = -ENODEV; + goto err; + } + + if (host->use_dma && host->dma_ops->init) + host->dma_ops->init(host); + + /* + * Restore the initial value at FIFOTH register + * And Invalidate the prev_blksz with zero + */ + mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + + /* Put in max timeout */ + mci_writel(host, TMOUT, 0xFFFFFFFF); + + mci_writel(host, RINTSTS, 
0xFFFFFFFF);
+	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+		   DW_MCI_ERROR_FLAGS);
+	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
+
+	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
+
+	/* Force setup bus to guarantee available clock output */
+	dw_mci_setup_bus(host->slot, true);
+
+	/* Re-enable SDIO interrupts. */
+	if (sdio_irq_claimed(host->slot->mmc))
+		__dw_mci_enable_sdio_irq(host->slot, 1);
+
+	/* Now that slots are all setup, we can enable card detect */
+	dw_mci_enable_cd(host);
+
+	return 0;
+
+err:
+	if (host->slot &&
+	    (mmc_can_gpio_cd(host->slot->mmc) ||
+	     !mmc_card_is_removable(host->slot->mmc)))
+		clk_disable_unprepare(host->biu_clk);
+
+	return ret;
+}
+EXPORT_SYMBOL(dw_mci_runtime_resume);
+#endif /* CONFIG_PM */
+
+static int __init dw_mci_init(void)
+{
+	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
+	return 0;
+}
+
+static void __exit dw_mci_exit(void)
+{
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
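dw_mci_probe() and dw_mci_runtime_resume() rely on the same FIFOTH packing: the DMA multiple-transaction size in bits 30:28, the RX watermark in 27:16, and the TX watermark in 11:0. Here is a standalone sketch of that arithmetic, including the fifo-depth autodetection formula used when platform data leaves fifo_depth at zero; the 64-entry FIFO is an assumed example:

    #include <stdio.h>

    #define SET_FIFOTH(m, r, t)	((((m) & 0x7) << 28) | \
    				 (((r) & 0xFFF) << 16) | \
    				 ((t) & 0xFFF))

    static unsigned int fifoth_from_depth(unsigned int depth)
    {
    	/* MSIZE of 8 transfers (0x2), RX mark depth/2 - 1, TX mark depth/2 */
    	return SET_FIFOTH(0x2, depth / 2 - 1, depth / 2);
    }

    static unsigned int depth_from_fifoth(unsigned int fifoth)
    {
    	/* power-on RX_WMark is depth - 1, hence the + 1 */
    	return 1 + ((fifoth >> 16) & 0xfff);
    }

    int main(void)
    {
    	/* reset value of FIFOTH for a hypothetical 64-entry FIFO */
    	unsigned int poweron = SET_FIFOTH(0, 64 - 1, 0);

    	printf("autodetected depth: %u\n", depth_from_fifoth(poweron));
    	printf("programmed FIFOTH:  %#010x\n", fifoth_from_depth(64));
    	return 0;
    }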
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000..da5923a92
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#include <linux/scatterlist.h>
+#include <linux/mmc/core.h>
+#include <linux/dmaengine.h>
+#include <linux/reset.h>
+#include <linux/interrupt.h>
+
+enum dw_mci_state {
+	STATE_IDLE = 0,
+	STATE_SENDING_CMD,
+	STATE_SENDING_DATA,
+	STATE_DATA_BUSY,
+	STATE_SENDING_STOP,
+	STATE_DATA_ERROR,
+	STATE_SENDING_CMD11,
+	STATE_WAITING_CMD11_DONE,
+};
+
+enum {
+	EVENT_CMD_COMPLETE = 0,
+	EVENT_XFER_COMPLETE,
+	EVENT_DATA_COMPLETE,
+	EVENT_DATA_ERROR,
+};
+
+enum dw_mci_cookie {
+	COOKIE_UNMAPPED,
+	COOKIE_PRE_MAPPED,	/* mapped by pre_req() of dwmmc */
+	COOKIE_MAPPED,		/* mapped by prepare_data() of dwmmc */
+};
+
+struct mmc_data;
+
+enum {
+	TRANS_MODE_PIO = 0,
+	TRANS_MODE_IDMAC,
+	TRANS_MODE_EDMAC
+};
+
+struct dw_mci_dma_slave {
+	struct dma_chan *ch;
+	enum dma_transfer_direction direction;
+};
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @irq_lock: Spinlock protecting the INTMASK setting.
+ * @regs: Pointer to MMIO registers.
+ * @fifo_reg: Pointer to MMIO registers for the data FIFO.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @sg_miter: PIO mapping scatterlist iterator.
+ * @mrq: The request currently being processed on @slot,
+ *	or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ *	transfer is in progress.
+ * @stop_abort: The command currently prepared for stopping the transfer.
+ * @prev_blksz: Block size of the previous transfer.
+ * @timing: Record of current ios timing.
+ * @use_dma: Which DMA channel is in use for the current transfer, zero
+ *	denotes PIO mode.
+ * @using_dma: Whether DMA is in use for the current transfer.
+ * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @ring_size: Buffer size for idma descriptors.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ *	command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @dms: structure of slave-dma private data.
+ * @phy_regs: physical address of controller's register map
+ * @data_status: Snapshot of SR taken upon completion of the current
+ *	data transfer. Only valid when EVENT_DATA_COMPLETE or
+ *	EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ *	to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ *	to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ *	processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ *	rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @fifoth_val: The value of FIFOTH register.
+ * @verid: Controller version ID.
+ * @dev: Device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @drv_data: Driver specific data for identified variant of the controller
+ * @priv: Implementation defined private data.
+ * @biu_clk: Pointer to bus interface unit clock instance.
+ * @ciu_clk: Pointer to card interface unit clock instance.
+ * @slot: Slots sharing this MMC controller.
+ * @fifo_depth: depth of FIFO.
+ * @data_addr_override: override fifo reg offset with this value.
+ * @wm_aligned: force fifo watermark equal with data length in PIO mode.
+ *	Set as true if alignment is needed.
+ * @data_shift: log2 of FIFO item size.
+ * @part_buf_start: Start index in part_buf.
+ * @part_buf_count: Bytes of partial data in part_buf.
+ * @part_buf: Simple buffer for partial fifo reads/writes.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @vqmmc_enabled: Status of vqmmc, should be true or false.
+ * @irq_flags: The flags to be passed to request_irq.
+ * @irq: The irq value to be passed to request_irq.
+ * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
+ * @cto_timer: Timer for broken command transfer over scheme.
+ * @dto_timer: Timer for broken data transfer over scheme.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @irq_lock is an irq-safe spinlock protecting the INTMASK register
+ * to allow the interrupt handler to modify it directly. It is held only
+ * long enough to read-modify-write INTMASK, and no other locks are
+ * grabbed when holding this one.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
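The ordering contract spelled out above (publish the status snapshot, then set the event bit, with a barrier in between) can be illustrated outside the kernel. This sketch uses C11 atomics in place of the kernel's set_bit()/test_bit(); the function names and the status value are invented for illustration:

    #include <stdatomic.h>
    #include <stdio.h>

    #define EVENT_CMD_COMPLETE 0	/* bit 0, as in the driver's enum */

    static _Atomic unsigned long pending_events;
    static unsigned int cmd_status;	/* snapshot of the status register */

    static void irq_handler(unsigned int status)
    {
    	cmd_status = status;	/* 1: publish the snapshot ... */
    	/* ... 2: then set the event bit; release ordering plays the
    	 * role of the barrier the comment above requires. */
    	atomic_fetch_or_explicit(&pending_events,
    				 1UL << EVENT_CMD_COMPLETE,
    				 memory_order_release);
    }

    static void tasklet(void)
    {
    	unsigned long ev = atomic_load_explicit(&pending_events,
    						memory_order_acquire);

    	if (ev & (1UL << EVENT_CMD_COMPLETE))
    		printf("command done, status %#x\n", cmd_status);
    }

    int main(void)
    {
    	irq_handler(0x4);	/* pretend a command-done interrupt fired */
    	tasklet();
    	return 0;
    }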
+struct dw_mci {
+	spinlock_t lock;
+	spinlock_t irq_lock;
+	void __iomem *regs;
+	void __iomem *fifo_reg;
+	u32 data_addr_override;
+	bool wm_aligned;
+
+	struct scatterlist *sg;
+	struct sg_mapping_iter sg_miter;
+
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_data *data;
+	struct mmc_command stop_abort;
+	unsigned int prev_blksz;
+	unsigned char timing;
+
+	/* DMA interface members */
+	int use_dma;
+	int using_dma;
+	int dma_64bit_address;
+
+	dma_addr_t sg_dma;
+	void *sg_cpu;
+	const struct dw_mci_dma_ops *dma_ops;
+	/* For idmac */
+	unsigned int ring_size;
+
+	/* For edmac */
+	struct dw_mci_dma_slave *dms;
+	/* Registers' physical base address */
+	resource_size_t phy_regs;
+
+	u32 cmd_status;
+	u32 data_status;
+	u32 stop_cmdr;
+	u32 dir_status;
+	struct tasklet_struct tasklet;
+	unsigned long pending_events;
+	unsigned long completed_events;
+	enum dw_mci_state state;
+	struct list_head queue;
+
+	u32 bus_hz;
+	u32 current_speed;
+	u32 fifoth_val;
+	u16 verid;
+	struct device *dev;
+	struct dw_mci_board *pdata;
+	const struct dw_mci_drv_data *drv_data;
+	void *priv;
+	struct clk *biu_clk;
+	struct clk *ciu_clk;
+	struct dw_mci_slot *slot;
+
+	/* FIFO push and pull */
+	int fifo_depth;
+	int data_shift;
+	u8 part_buf_start;
+	u8 part_buf_count;
+	union {
+		u16 part_buf16;
+		u32 part_buf32;
+		u64 part_buf;
+	};
+	void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+	void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+	bool vqmmc_enabled;
+	unsigned long irq_flags; /* IRQ flags */
+	int irq;
+
+	int sdio_id0;
+
+	struct timer_list cmd11_timer;
+	struct timer_list cto_timer;
+	struct timer_list dto_timer;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+	/* DMA Ops */
+	int (*init)(struct dw_mci *host);
+	int (*start)(struct dw_mci *host, unsigned int sg_len);
+	void (*complete)(void *host);
+	void (*stop)(struct dw_mci *host);
+	void (*cleanup)(struct dw_mci *host);
+	void (*exit)(struct dw_mci *host);
+};
+
+struct dma_pdata;
+
+/* Board platform data */
+struct dw_mci_board {
+	unsigned int bus_hz; /* Clock speed at the cclk_in pad */
+
+	u32 caps;	/* Capabilities */
+	u32 caps2;	/* More capabilities */
+	u32 pm_caps;	/* PM capabilities */
+	/*
+	 * Override fifo depth. If 0, autodetect it from the FIFOTH register,
+	 * but note that this may not be reliable after a bootloader has used
+	 * it.
+	 */
+	unsigned int fifo_depth;
+
+	/* delay in ms before detecting cards after interrupt */
+	u32 detect_delay_ms;
+
+	struct reset_control *rstc;
+	struct dw_mci_dma_ops *dma_ops;
+	struct dma_pdata *data;
+};
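The version-dependent data register offset handled in dw_mci_probe() maps directly onto the DW_MMC_240A, DATA_OFFSET and DATA_240A_OFFSET constants defined just below. A minimal standalone sketch of that selection; the 0x230a version value is an invented example:

    #include <stdio.h>

    #define DW_MMC_240A		0x240a
    #define DATA_OFFSET		0x100	/* controllers older than 2.40a */
    #define DATA_240A_OFFSET	0x200	/* 2.40a and newer */

    static unsigned int fifo_offset(unsigned int verid)
    {
    	return verid < DW_MMC_240A ? DATA_OFFSET : DATA_240A_OFFSET;
    }

    int main(void)
    {
    	printf("2.30a fifo at %#x, 2.40a fifo at %#x\n",
    	       fifo_offset(0x230a), fifo_offset(DW_MMC_240A));
    	return 0;
    }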
+
+#define DW_MMC_240A		0x240a
+#define DW_MMC_280A		0x280a
+
+#define SDMMC_CTRL		0x000
+#define SDMMC_PWREN		0x004
+#define SDMMC_CLKDIV		0x008
+#define SDMMC_CLKSRC		0x00c
+#define SDMMC_CLKENA		0x010
+#define SDMMC_TMOUT		0x014
+#define SDMMC_CTYPE		0x018
+#define SDMMC_BLKSIZ		0x01c
+#define SDMMC_BYTCNT		0x020
+#define SDMMC_INTMASK		0x024
+#define SDMMC_CMDARG		0x028
+#define SDMMC_CMD		0x02c
+#define SDMMC_RESP0		0x030
+#define SDMMC_RESP1		0x034
+#define SDMMC_RESP2		0x038
+#define SDMMC_RESP3		0x03c
+#define SDMMC_MINTSTS		0x040
+#define SDMMC_RINTSTS		0x044
+#define SDMMC_STATUS		0x048
+#define SDMMC_FIFOTH		0x04c
+#define SDMMC_CDETECT		0x050
+#define SDMMC_WRTPRT		0x054
+#define SDMMC_GPIO		0x058
+#define SDMMC_TCBCNT		0x05c
+#define SDMMC_TBBCNT		0x060
+#define SDMMC_DEBNCE		0x064
+#define SDMMC_USRID		0x068
+#define SDMMC_VERID		0x06c
+#define SDMMC_HCON		0x070
+#define SDMMC_UHS_REG		0x074
+#define SDMMC_RST_N		0x078
+#define SDMMC_BMOD		0x080
+#define SDMMC_PLDMND		0x084
+#define SDMMC_DBADDR		0x088
+#define SDMMC_IDSTS		0x08c
+#define SDMMC_IDINTEN		0x090
+#define SDMMC_DSCADDR		0x094
+#define SDMMC_BUFADDR		0x098
+#define SDMMC_CDTHRCTL		0x100
+#define SDMMC_UHS_REG_EXT	0x108
+#define SDMMC_DDR_REG		0x10c
+#define SDMMC_ENABLE_SHIFT	0x110
+#define SDMMC_DATA(x)		(x)
+/*
+ * Registers to support idmac 64-bit address mode
+ */
+#define SDMMC_DBADDRL		0x088
+#define SDMMC_DBADDRU		0x08c
+#define SDMMC_IDSTS64		0x090
+#define SDMMC_IDINTEN64		0x094
+#define SDMMC_DSCADDRL		0x098
+#define SDMMC_DSCADDRU		0x09c
+#define SDMMC_BUFADDRL		0x0A0
+#define SDMMC_BUFADDRU		0x0A4
+
+/*
+ * The data register offset differs according to the version:
+ * lower than 2.40a, the data register offset is 0x100.
+ */
+#define DATA_OFFSET		0x100
+#define DATA_240A_OFFSET	0x200
+
+/* shift bit field */
+#define _SBF(f, v)		((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC		BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD		BIT(10)
+#define SDMMC_CTRL_SEND_CCSD		BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7)
+#define SDMMC_CTRL_READ_WAIT		BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE		BIT(5)
+#define SDMMC_CTRL_INT_ENABLE		BIT(4)
+#define SDMMC_CTRL_DMA_RESET		BIT(2)
+#define SDMMC_CTRL_FIFO_RESET		BIT(1)
+#define SDMMC_CTRL_RESET		BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR	BIT(16)
+#define SDMMC_CLKEN_ENABLE	BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n)	_SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK	0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n)	((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK	0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT	BIT(16)
+#define SDMMC_CTYPE_4BIT	BIT(0)
+#define SDMMC_CTYPE_1BIT	0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO(n)	BIT(16 + (n))
+#define SDMMC_INT_EBE		BIT(15)
+#define SDMMC_INT_ACD		BIT(14)
+#define SDMMC_INT_SBE		BIT(13)
+#define SDMMC_INT_HLE		BIT(12)
+#define SDMMC_INT_FRUN		BIT(11)
+#define SDMMC_INT_HTO		BIT(10)
+#define SDMMC_INT_VOLT_SWITCH	BIT(10) /* overloads bit 10!
*/ +#define SDMMC_INT_DRTO BIT(9) +#define SDMMC_INT_RTO BIT(8) +#define SDMMC_INT_DCRC BIT(7) +#define SDMMC_INT_RCRC BIT(6) +#define SDMMC_INT_RXDR BIT(5) +#define SDMMC_INT_TXDR BIT(4) +#define SDMMC_INT_DATA_OVER BIT(3) +#define SDMMC_INT_CMD_DONE BIT(2) +#define SDMMC_INT_RESP_ERR BIT(1) +#define SDMMC_INT_CD BIT(0) +#define SDMMC_INT_ERROR 0xbfc2 +/* Command register defines */ +#define SDMMC_CMD_START BIT(31) +#define SDMMC_CMD_USE_HOLD_REG BIT(29) +#define SDMMC_CMD_VOLT_SWITCH BIT(28) +#define SDMMC_CMD_CCS_EXP BIT(23) +#define SDMMC_CMD_CEATA_RD BIT(22) +#define SDMMC_CMD_UPD_CLK BIT(21) +#define SDMMC_CMD_INIT BIT(15) +#define SDMMC_CMD_STOP BIT(14) +#define SDMMC_CMD_PRV_DAT_WAIT BIT(13) +#define SDMMC_CMD_SEND_STOP BIT(12) +#define SDMMC_CMD_STRM_MODE BIT(11) +#define SDMMC_CMD_DAT_WR BIT(10) +#define SDMMC_CMD_DAT_EXP BIT(9) +#define SDMMC_CMD_RESP_CRC BIT(8) +#define SDMMC_CMD_RESP_LONG BIT(7) +#define SDMMC_CMD_RESP_EXP BIT(6) +#define SDMMC_CMD_INDX(n) ((n) & 0x1F) +/* Status register defines */ +#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) +#define SDMMC_STATUS_DMA_REQ BIT(31) +#define SDMMC_STATUS_BUSY BIT(9) +/* FIFOTH register defines */ +#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | \ + ((t) & 0xFFF)) +/* HCON register defines */ +#define DMA_INTERFACE_IDMA (0x0) +#define DMA_INTERFACE_DWDMA (0x1) +#define DMA_INTERFACE_GDMA (0x2) +#define DMA_INTERFACE_NODMA (0x3) +#define SDMMC_GET_TRANS_MODE(x) (((x)>>16) & 0x3) +#define SDMMC_GET_SLOT_NUM(x) ((((x)>>1) & 0x1F) + 1) +#define SDMMC_GET_HDATA_WIDTH(x) (((x)>>7) & 0x7) +#define SDMMC_GET_ADDR_CONFIG(x) (((x)>>27) & 0x1) +/* Internal DMAC interrupt defines */ +#define SDMMC_IDMAC_INT_AI BIT(9) +#define SDMMC_IDMAC_INT_NI BIT(8) +#define SDMMC_IDMAC_INT_CES BIT(5) +#define SDMMC_IDMAC_INT_DU BIT(4) +#define SDMMC_IDMAC_INT_FBE BIT(2) +#define SDMMC_IDMAC_INT_RI BIT(1) +#define SDMMC_IDMAC_INT_TI BIT(0) +/* Internal DMAC bus mode bits */ +#define SDMMC_IDMAC_ENABLE BIT(7) +#define SDMMC_IDMAC_FB BIT(1) +#define SDMMC_IDMAC_SWRESET BIT(0) +/* H/W reset */ +#define SDMMC_RST_HWACTIVE 0x1 +/* Version ID register define */ +#define SDMMC_GET_VERID(x) ((x) & 0xFFFF) +/* Card read threshold */ +#define SDMMC_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define SDMMC_CARD_WR_THR_EN BIT(2) +#define SDMMC_CARD_RD_THR_EN BIT(0) +/* UHS-1 register defines */ +#define SDMMC_UHS_DDR BIT(16) +#define SDMMC_UHS_18V BIT(0) +/* DDR register defines */ +#define SDMMC_DDR_HS400 BIT(31) +/* Enable shift register defines */ +#define SDMMC_ENABLE_PHASE BIT(0) +/* All ctrl reset bits */ +#define SDMMC_CTRL_ALL_RESET_FLAGS \ + (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET) + +/* FIFO register access macros. 
These should not change the data endian-ness + * as they are written to memory to be dealt with by the upper layers + */ +#define mci_fifo_readw(__reg) __raw_readw(__reg) +#define mci_fifo_readl(__reg) __raw_readl(__reg) +#define mci_fifo_readq(__reg) __raw_readq(__reg) + +#define mci_fifo_writew(__value, __reg) __raw_writew(__reg, __value) +#define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value) +#define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value) + +/* Register access macros */ +#define mci_readl(dev, reg) \ + readl_relaxed((dev)->regs + SDMMC_##reg) +#define mci_writel(dev, reg, value) \ + writel_relaxed((value), (dev)->regs + SDMMC_##reg) + +/* 16-bit FIFO access macros */ +#define mci_readw(dev, reg) \ + readw_relaxed((dev)->regs + SDMMC_##reg) +#define mci_writew(dev, reg, value) \ + writew_relaxed((value), (dev)->regs + SDMMC_##reg) + +/* 64-bit FIFO access macros */ +#ifdef readq +#define mci_readq(dev, reg) \ + readq_relaxed((dev)->regs + SDMMC_##reg) +#define mci_writeq(dev, reg, value) \ + writeq_relaxed((value), (dev)->regs + SDMMC_##reg) +#else +/* + * Dummy readq implementation for architectures that don't define it. + * + * We would assume that none of these architectures would configure + * the IP block with a 64bit FIFO width, so this code will never be + * executed on those machines. Defining these macros here keeps the + * rest of the code free from ifdefs. + */ +#define mci_readq(dev, reg) \ + (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg)) +#define mci_writeq(dev, reg, value) \ + (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) + +#define __raw_writeq(__value, __reg) \ + (*(volatile u64 __force *)(__reg) = (__value)) +#define __raw_readq(__reg) (*(volatile u64 __force *)(__reg)) +#endif + +extern int dw_mci_probe(struct dw_mci *host); +extern void dw_mci_remove(struct dw_mci *host); +#ifdef CONFIG_PM +extern int dw_mci_runtime_suspend(struct device *device); +extern int dw_mci_runtime_resume(struct device *device); +#endif + +/** + * struct dw_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @ctype: Card type for this slot. + * @mrq: mmc_request currently being processed or waiting to be + * processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + * &struct dw_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @__clk_old: The last clock value that was requested from core. + * Keeping track of this helps us to avoid spamming the console. + * @flags: Random state bits associated with the slot. + * @id: Number of this slot. + * @sdio_id: Number of this slot in the SDIO interrupt registers. + */ +struct dw_mci_slot { + struct mmc_host *mmc; + struct dw_mci *host; + + u32 ctype; + + struct mmc_request *mrq; + struct list_head queue_node; + + unsigned int clock; + unsigned int __clk_old; + + unsigned long flags; +#define DW_MMC_CARD_PRESENT 0 +#define DW_MMC_CARD_NEED_INIT 1 +#define DW_MMC_CARD_NO_LOW_PWR 2 +#define DW_MMC_CARD_NO_USE_HOLD 3 +#define DW_MMC_CARD_NEEDS_POLL 4 + int id; + int sdio_id; +}; + +/** + * dw_mci driver data - dw-mshc implementation specific driver data. + * @caps: mmc subsystem specified capabilities of the controller(s). + * @num_caps: number of capabilities specified by @caps. + * @init: early implementation specific initialization. + * @set_ios: handle bus specific extensions. 
+ * @parse_dt: parse implementation specific device tree properties.
+ * @execute_tuning: implementation specific tuning procedure.
+ *
+ * Provide controller implementation specific extensions. The usage of this
+ * data structure is fully optional and usage of each member in this structure
+ * is optional as well.
+ */
+struct dw_mci_drv_data {
+	unsigned long *caps;
+	u32 num_caps;
+	int (*init)(struct dw_mci *host);
+	void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
+	int (*parse_dt)(struct dw_mci *host);
+	int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
+	int (*prepare_hs400_tuning)(struct dw_mci *host,
+				    struct mmc_ios *ios);
+	int (*switch_voltage)(struct mmc_host *mmc,
+			      struct mmc_ios *ios);
+};
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
new file mode 100644
index 000000000..62d00232f
--- /dev/null
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -0,0 +1,1167 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2013, Imagination Technologies
+ *
+ * JZ4740 SD/MMC controller driver
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#define JZ_REG_MMC_STRPCL	0x00
+#define JZ_REG_MMC_STATUS	0x04
+#define JZ_REG_MMC_CLKRT	0x08
+#define JZ_REG_MMC_CMDAT	0x0C
+#define JZ_REG_MMC_RESTO	0x10
+#define JZ_REG_MMC_RDTO		0x14
+#define JZ_REG_MMC_BLKLEN	0x18
+#define JZ_REG_MMC_NOB		0x1C
+#define JZ_REG_MMC_SNOB		0x20
+#define JZ_REG_MMC_IMASK	0x24
+#define JZ_REG_MMC_IREG		0x28
+#define JZ_REG_MMC_CMD		0x2C
+#define JZ_REG_MMC_ARG		0x30
+#define JZ_REG_MMC_RESP_FIFO	0x34
+#define JZ_REG_MMC_RXFIFO	0x38
+#define JZ_REG_MMC_TXFIFO	0x3C
+#define JZ_REG_MMC_LPM		0x40
+#define JZ_REG_MMC_DMAC		0x44
+
+#define JZ_MMC_STRPCL_EXIT_MULTIPLE	BIT(7)
+#define JZ_MMC_STRPCL_EXIT_TRANSFER	BIT(6)
+#define JZ_MMC_STRPCL_START_READWAIT	BIT(5)
+#define JZ_MMC_STRPCL_STOP_READWAIT	BIT(4)
+#define JZ_MMC_STRPCL_RESET		BIT(3)
+#define JZ_MMC_STRPCL_START_OP		BIT(2)
+#define JZ_MMC_STRPCL_CLOCK_CONTROL	(BIT(1) | BIT(0))
+#define JZ_MMC_STRPCL_CLOCK_STOP	BIT(0)
+#define JZ_MMC_STRPCL_CLOCK_START	BIT(1)
+
+
+#define JZ_MMC_STATUS_IS_RESETTING	BIT(15)
+#define JZ_MMC_STATUS_SDIO_INT_ACTIVE	BIT(14)
+#define JZ_MMC_STATUS_PRG_DONE		BIT(13)
+#define JZ_MMC_STATUS_DATA_TRAN_DONE	BIT(12)
+#define JZ_MMC_STATUS_END_CMD_RES	BIT(11)
+#define JZ_MMC_STATUS_DATA_FIFO_AFULL	BIT(10)
+#define JZ_MMC_STATUS_IS_READWAIT	BIT(9)
+#define JZ_MMC_STATUS_CLK_EN		BIT(8)
+#define JZ_MMC_STATUS_DATA_FIFO_FULL	BIT(7)
+#define JZ_MMC_STATUS_DATA_FIFO_EMPTY	BIT(6)
+#define JZ_MMC_STATUS_CRC_RES_ERR	BIT(5)
+#define JZ_MMC_STATUS_CRC_READ_ERROR	BIT(4)
+#define JZ_MMC_STATUS_TIMEOUT_WRITE	BIT(3)
+#define JZ_MMC_STATUS_CRC_WRITE_ERROR	BIT(2)
+#define JZ_MMC_STATUS_TIMEOUT_RES	BIT(1)
+#define JZ_MMC_STATUS_TIMEOUT_READ	BIT(0)
+
+#define JZ_MMC_STATUS_READ_ERROR_MASK	(BIT(4) | BIT(0))
+#define JZ_MMC_STATUS_WRITE_ERROR_MASK	(BIT(3) | BIT(2))
+
+
+#define JZ_MMC_CMDAT_IO_ABORT		BIT(11)
+#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT	BIT(10)
+#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT	(BIT(10) | BIT(9))
+#define JZ_MMC_CMDAT_BUS_WIDTH_MASK	(BIT(10) | BIT(9))
+#define JZ_MMC_CMDAT_DMA_EN		BIT(8)
+#define JZ_MMC_CMDAT_INIT		BIT(7)
+#define JZ_MMC_CMDAT_BUSY		BIT(6)
+#define JZ_MMC_CMDAT_STREAM		BIT(5)
+#define JZ_MMC_CMDAT_WRITE		BIT(4)
+#define JZ_MMC_CMDAT_DATA_EN		BIT(3)
+#define JZ_MMC_CMDAT_RESPONSE_FORMAT	(BIT(2) | BIT(1) | BIT(0))
+#define JZ_MMC_CMDAT_RSP_R1		1
+#define JZ_MMC_CMDAT_RSP_R2		2
+#define JZ_MMC_CMDAT_RSP_R3		3
+
+#define JZ_MMC_IRQ_SDIO			BIT(7)
+#define JZ_MMC_IRQ_TXFIFO_WR_REQ	BIT(6)
+#define JZ_MMC_IRQ_RXFIFO_RD_REQ	BIT(5)
+#define JZ_MMC_IRQ_END_CMD_RES		BIT(2)
+#define JZ_MMC_IRQ_PRG_DONE		BIT(1)
+#define JZ_MMC_IRQ_DATA_TRAN_DONE	BIT(0)
+
+#define JZ_MMC_DMAC_DMA_SEL		BIT(1)
+#define JZ_MMC_DMAC_DMA_EN		BIT(0)
+
+#define JZ_MMC_LPM_DRV_RISING		BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY	BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_1NS_DLY	BIT(30)
+#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY	BIT(29)
+#define JZ_MMC_LPM_LOW_POWER_MODE_EN	BIT(0)
+
+#define JZ_MMC_CLK_RATE			24000000
+#define JZ_MMC_REQ_TIMEOUT_MS		5000
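The CLKRT register programmed through these defines holds a 3-bit power-of-two divider, selected later in jz4740_mmc_set_clock_rate() by repeatedly halving the controller clock. A standalone sketch of that selection; the 24 MHz source and 400 kHz target are example values:

    #include <stdio.h>

    /* Pick the smallest CLKRT divider (0..7) whose divided rate does
     * not exceed the target rate, mirroring the loop in
     * jz4740_mmc_set_clock_rate() further down. */
    static int jz_mmc_pick_div(int ctrl_rate, int target_rate, int *real_rate)
    {
    	int div = 0;

    	while (ctrl_rate > target_rate && div < 7) {
    		++div;
    		ctrl_rate >>= 1;
    	}
    	*real_rate = ctrl_rate;
    	return div;
    }

    int main(void)
    {
    	int real;
    	int div = jz_mmc_pick_div(24000000, 400000, &real);

    	printf("div=%d -> %d Hz\n", div, real);
    	return 0;
    }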
+
+enum jz4740_mmc_version {
+	JZ_MMC_JZ4740,
+	JZ_MMC_JZ4725B,
+	JZ_MMC_JZ4760,
+	JZ_MMC_JZ4780,
+	JZ_MMC_X1000,
+};
+
+enum jz4740_mmc_state {
+	JZ4740_MMC_STATE_READ_RESPONSE,
+	JZ4740_MMC_STATE_TRANSFER_DATA,
+	JZ4740_MMC_STATE_SEND_STOP,
+	JZ4740_MMC_STATE_DONE,
+};
+
+/*
+ * The MMC core allows a new mmc_request to be prepared while another
+ * mmc_request is in-flight. This is used via the pre_req/post_req hooks.
+ * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
+ * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
+ * flags to keep track of the mmc_request mapping state.
+ *
+ * COOKIE_UNMAPPED: the request is not mapped.
+ * COOKIE_PREMAPPED: the request was mapped in pre_req,
+ * and should be unmapped in post_req.
+ * COOKIE_MAPPED: the request was mapped in the irq handler,
+ * and should be unmapped before mmc_request_done is called.
+ */
+enum jz4780_cookie {
+	COOKIE_UNMAPPED = 0,
+	COOKIE_PREMAPPED,
+	COOKIE_MAPPED,
+};
+
+struct jz4740_mmc_host {
+	struct mmc_host *mmc;
+	struct platform_device *pdev;
+	struct clk *clk;
+
+	enum jz4740_mmc_version version;
+
+	int irq;
+	int card_detect_irq;
+
+	void __iomem *base;
+	struct resource *mem_res;
+	struct mmc_request *req;
+	struct mmc_command *cmd;
+
+	unsigned long waiting;
+
+	uint32_t cmdat;
+
+	uint32_t irq_mask;
+
+	spinlock_t lock;
+
+	struct timer_list timeout_timer;
+	struct sg_mapping_iter miter;
+	enum jz4740_mmc_state state;
+
+	/* DMA support */
+	struct dma_chan *dma_rx;
+	struct dma_chan *dma_tx;
+	bool use_dma;
+
+/* The DMA trigger level is 8 words, that is to say, the DMA read
+ * trigger is when the number of data words in MSC_RXFIFO is >= 8 and
+ * the DMA write trigger is when the number of data words in MSC_TXFIFO
+ * is < 8.
+ */ +#define JZ4740_MMC_FIFO_HALF_SIZE 8 +}; + +static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host, + uint32_t val) +{ + if (host->version >= JZ_MMC_JZ4725B) + return writel(val, host->base + JZ_REG_MMC_IMASK); + else + return writew(val, host->base + JZ_REG_MMC_IMASK); +} + +static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host, + uint32_t val) +{ + if (host->version >= JZ_MMC_JZ4780) + writel(val, host->base + JZ_REG_MMC_IREG); + else + writew(val, host->base + JZ_REG_MMC_IREG); +} + +static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host) +{ + if (host->version >= JZ_MMC_JZ4780) + return readl(host->base + JZ_REG_MMC_IREG); + else + return readw(host->base + JZ_REG_MMC_IREG); +} + +/*----------------------------------------------------------------------------*/ +/* DMA infrastructure */ + +static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host) +{ + if (!host->use_dma) + return; + + dma_release_channel(host->dma_tx); + dma_release_channel(host->dma_rx); +} + +static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host) +{ + host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); + if (IS_ERR(host->dma_tx)) { + dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n"); + return PTR_ERR(host->dma_tx); + } + + host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); + if (IS_ERR(host->dma_rx)) { + dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n"); + dma_release_channel(host->dma_tx); + return PTR_ERR(host->dma_rx); + } + + /* + * Limit the maximum segment size in any SG entry according to + * the parameters of the DMA engine device. + */ + if (host->dma_tx) { + struct device *dev = host->dma_tx->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } + + if (host->dma_rx) { + struct device *dev = host->dma_rx->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } + + return 0; +} + +static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx; +} + +static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); + enum dma_data_direction dir = mmc_get_dma_dir(data); + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + data->host_cookie = COOKIE_UNMAPPED; +} + +/* Prepares DMA data for current or next transfer. + * A request can be in-flight when this is called. 
+ */ +static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host, + struct mmc_data *data, + int cookie) +{ + struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); + enum dma_data_direction dir = mmc_get_dma_dir(data); + int sg_count; + + if (data->host_cookie == COOKIE_PREMAPPED) + return data->sg_count; + + sg_count = dma_map_sg(chan->device->dev, + data->sg, + data->sg_len, + dir); + + if (sg_count <= 0) { + dev_err(mmc_dev(host->mmc), + "Failed to map scatterlist for DMA operation\n"); + return -EINVAL; + } + + data->sg_count = sg_count; + data->host_cookie = cookie; + + return data->sg_count; +} + +static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config conf = { + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE, + .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE, + }; + int sg_count; + + if (data->flags & MMC_DATA_WRITE) { + conf.direction = DMA_MEM_TO_DEV; + conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO; + } else { + conf.direction = DMA_DEV_TO_MEM; + conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO; + } + + sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED); + if (sg_count < 0) + return sg_count; + + dmaengine_slave_config(chan, &conf); + desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count, + conf.direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(mmc_dev(host->mmc), + "Failed to allocate DMA %s descriptor", + conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX"); + goto dma_unmap; + } + + dmaengine_submit(desc); + dma_async_issue_pending(chan); + + return 0; + +dma_unmap: + if (data->host_cookie == COOKIE_MAPPED) + jz4740_mmc_dma_unmap(host, data); + return -ENOMEM; +} + +static void jz4740_mmc_pre_request(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!host->use_dma) + return; + + data->host_cookie = COOKIE_UNMAPPED; + if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0) + data->host_cookie = COOKIE_UNMAPPED; +} + +static void jz4740_mmc_post_request(struct mmc_host *mmc, + struct mmc_request *mrq, + int err) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (data && data->host_cookie != COOKIE_UNMAPPED) + jz4740_mmc_dma_unmap(host, data); + + if (err) { + struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); + + dmaengine_terminate_all(chan); + } +} + +/*----------------------------------------------------------------------------*/ + +static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, + unsigned int irq, bool enabled) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (enabled) + host->irq_mask &= ~irq; + else + host->irq_mask |= irq; + + jz4740_mmc_write_irq_mask(host, host->irq_mask); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, + bool start_transfer) +{ + uint16_t val = JZ_MMC_STRPCL_CLOCK_START; + + if (start_transfer) + val |= JZ_MMC_STRPCL_START_OP; + + writew(val, host->base + JZ_REG_MMC_STRPCL); +} + +static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host) +{ + uint32_t status; + unsigned int timeout = 1000; + + writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + 
JZ_REG_MMC_STRPCL); + do { + status = readl(host->base + JZ_REG_MMC_STATUS); + } while (status & JZ_MMC_STATUS_CLK_EN && --timeout); +} + +static void jz4740_mmc_reset(struct jz4740_mmc_host *host) +{ + uint32_t status; + unsigned int timeout = 1000; + + writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL); + udelay(10); + do { + status = readl(host->base + JZ_REG_MMC_STATUS); + } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout); +} + +static void jz4740_mmc_request_done(struct jz4740_mmc_host *host) +{ + struct mmc_request *req; + struct mmc_data *data; + + req = host->req; + data = req->data; + host->req = NULL; + + if (data && data->host_cookie == COOKIE_MAPPED) + jz4740_mmc_dma_unmap(host, data); + mmc_request_done(host->mmc, req); +} + +static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host, + unsigned int irq) +{ + unsigned int timeout = 0x800; + uint32_t status; + + do { + status = jz4740_mmc_read_irq_reg(host); + } while (!(status & irq) && --timeout); + + if (timeout == 0) { + set_bit(0, &host->waiting); + mod_timer(&host->timeout_timer, + jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS)); + jz4740_mmc_set_irq_enabled(host, irq, true); + return true; + } + + return false; +} + +static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + int status; + + status = readl(host->base + JZ_REG_MMC_STATUS); + if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) { + if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) { + host->req->cmd->error = -ETIMEDOUT; + data->error = -ETIMEDOUT; + } else { + host->req->cmd->error = -EIO; + data->error = -EIO; + } + } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) { + if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) { + host->req->cmd->error = -ETIMEDOUT; + data->error = -ETIMEDOUT; + } else { + host->req->cmd->error = -EIO; + data->error = -EIO; + } + } +} + +static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + struct sg_mapping_iter *miter = &host->miter; + void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO; + uint32_t *buf; + bool timeout; + size_t i, j; + + while (sg_miter_next(miter)) { + buf = miter->addr; + i = miter->length / 4; + j = i / 8; + i = i & 0x7; + while (j) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); + if (unlikely(timeout)) + goto poll_timeout; + + writel(buf[0], fifo_addr); + writel(buf[1], fifo_addr); + writel(buf[2], fifo_addr); + writel(buf[3], fifo_addr); + writel(buf[4], fifo_addr); + writel(buf[5], fifo_addr); + writel(buf[6], fifo_addr); + writel(buf[7], fifo_addr); + buf += 8; + --j; + } + if (unlikely(i)) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); + if (unlikely(timeout)) + goto poll_timeout; + + while (i) { + writel(*buf, fifo_addr); + ++buf; + --i; + } + } + data->bytes_xfered += miter->length; + } + sg_miter_stop(miter); + + return false; + +poll_timeout: + miter->consumed = (void *)buf - miter->addr; + data->bytes_xfered += miter->consumed; + sg_miter_stop(miter); + + return true; +} + +static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + struct sg_mapping_iter *miter = &host->miter; + void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO; + uint32_t *buf; + uint32_t d; + uint32_t status; + size_t i, j; + unsigned int timeout; + + while (sg_miter_next(miter)) { + buf = miter->addr; + i = miter->length; + j = i / 32; + i = i & 0x1f; + while (j) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); + if 
(unlikely(timeout))
+				goto poll_timeout;
+
+			buf[0] = readl(fifo_addr);
+			buf[1] = readl(fifo_addr);
+			buf[2] = readl(fifo_addr);
+			buf[3] = readl(fifo_addr);
+			buf[4] = readl(fifo_addr);
+			buf[5] = readl(fifo_addr);
+			buf[6] = readl(fifo_addr);
+			buf[7] = readl(fifo_addr);
+
+			buf += 8;
+			--j;
+		}
+
+		if (unlikely(i)) {
+			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
+			if (unlikely(timeout))
+				goto poll_timeout;
+
+			while (i >= 4) {
+				*buf++ = readl(fifo_addr);
+				i -= 4;
+			}
+			if (unlikely(i > 0)) {
+				d = readl(fifo_addr);
+				memcpy(buf, &d, i);
+			}
+		}
+		data->bytes_xfered += miter->length;
+
+		/* This can go away once MIPS implements
+		 * flush_kernel_dcache_page */
+		flush_dcache_page(miter->page);
+	}
+	sg_miter_stop(miter);
+
+	/* For whatever reason there is sometimes one word more in the FIFO
+	 * than requested */
+	timeout = 1000;
+	status = readl(host->base + JZ_REG_MMC_STATUS);
+	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
+		d = readl(fifo_addr);
+		status = readl(host->base + JZ_REG_MMC_STATUS);
+	}
+
+	return false;
+
+poll_timeout:
+	miter->consumed = (void *)buf - miter->addr;
+	data->bytes_xfered += miter->consumed;
+	sg_miter_stop(miter);
+
+	return true;
+}
+
+static void jz4740_mmc_timeout(struct timer_list *t)
+{
+	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
+
+	if (!test_and_clear_bit(0, &host->waiting))
+		return;
+
+	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
+
+	host->req->cmd->error = -ETIMEDOUT;
+	jz4740_mmc_request_done(host);
+}
+
+static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
+				     struct mmc_command *cmd)
+{
+	int i;
+	uint16_t tmp;
+	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
+
+	if (cmd->flags & MMC_RSP_136) {
+		tmp = readw(fifo_addr);
+		for (i = 0; i < 4; ++i) {
+			cmd->resp[i] = tmp << 24;
+			tmp = readw(fifo_addr);
+			cmd->resp[i] |= tmp << 8;
+			tmp = readw(fifo_addr);
+			cmd->resp[i] |= tmp >> 8;
+		}
+	} else {
+		cmd->resp[0] = readw(fifo_addr) << 24;
+		cmd->resp[0] |= readw(fifo_addr) << 8;
+		cmd->resp[0] |= readw(fifo_addr) & 0xff;
+	}
+}
+
+static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
+				    struct mmc_command *cmd)
+{
+	uint32_t cmdat = host->cmdat;
+
+	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
+	jz4740_mmc_clock_disable(host);
+
+	host->cmd = cmd;
+
+	if (cmd->flags & MMC_RSP_BUSY)
+		cmdat |= JZ_MMC_CMDAT_BUSY;
+
+	switch (mmc_resp_type(cmd)) {
+	case MMC_RSP_R1B:
+	case MMC_RSP_R1:
+		cmdat |= JZ_MMC_CMDAT_RSP_R1;
+		break;
+	case MMC_RSP_R2:
+		cmdat |= JZ_MMC_CMDAT_RSP_R2;
+		break;
+	case MMC_RSP_R3:
+		cmdat |= JZ_MMC_CMDAT_RSP_R3;
+		break;
+	default:
+		break;
+	}
+
+	if (cmd->data) {
+		cmdat |= JZ_MMC_CMDAT_DATA_EN;
+		if (cmd->data->flags & MMC_DATA_WRITE)
+			cmdat |= JZ_MMC_CMDAT_WRITE;
+		if (host->use_dma) {
+			/*
+			 * The 4780's MMC controller has integrated DMA ability
+			 * in addition to being able to use the external DMA
+			 * controller. It moves DMA control bits to a separate
+			 * register. The DMA_SEL bit chooses the external
+			 * controller over the integrated one. Earlier SoCs
+			 * can only use the external controller, and have a
+			 * single DMA enable bit in CMDAT.
+ */ + if (host->version >= JZ_MMC_JZ4780) { + writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL, + host->base + JZ_REG_MMC_DMAC); + } else { + cmdat |= JZ_MMC_CMDAT_DMA_EN; + } + } else if (host->version >= JZ_MMC_JZ4780) { + writel(0, host->base + JZ_REG_MMC_DMAC); + } + + writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN); + writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB); + } + + writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD); + writel(cmd->arg, host->base + JZ_REG_MMC_ARG); + writel(cmdat, host->base + JZ_REG_MMC_CMDAT); + + jz4740_mmc_clock_enable(host, 1); +} + +static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host) +{ + struct mmc_command *cmd = host->req->cmd; + struct mmc_data *data = cmd->data; + int direction; + + if (data->flags & MMC_DATA_READ) + direction = SG_MITER_TO_SG; + else + direction = SG_MITER_FROM_SG; + + sg_miter_start(&host->miter, data->sg, data->sg_len, direction); +} + + +static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) +{ + struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid; + struct mmc_command *cmd = host->req->cmd; + struct mmc_request *req = host->req; + struct mmc_data *data = cmd->data; + bool timeout = false; + + if (cmd->error) + host->state = JZ4740_MMC_STATE_DONE; + + switch (host->state) { + case JZ4740_MMC_STATE_READ_RESPONSE: + if (cmd->flags & MMC_RSP_PRESENT) + jz4740_mmc_read_response(host, cmd); + + if (!data) + break; + + jz_mmc_prepare_data_transfer(host); + fallthrough; + + case JZ4740_MMC_STATE_TRANSFER_DATA: + if (host->use_dma) { + /* Use DMA if enabled. + * Data transfer direction is defined later by + * relying on data flags in + * jz4740_mmc_prepare_dma_data() and + * jz4740_mmc_start_dma_transfer(). + */ + timeout = jz4740_mmc_start_dma_transfer(host, data); + data->bytes_xfered = data->blocks * data->blksz; + } else if (data->flags & MMC_DATA_READ) + /* Use PIO if DMA is not enabled. + * Data transfer direction was defined before + * by relying on data flags in + * jz_mmc_prepare_data_transfer(). 
+ */ + timeout = jz4740_mmc_read_data(host, data); + else + timeout = jz4740_mmc_write_data(host, data); + + if (unlikely(timeout)) { + host->state = JZ4740_MMC_STATE_TRANSFER_DATA; + break; + } + + jz4740_mmc_transfer_check_state(host, data); + + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE); + if (unlikely(timeout)) { + host->state = JZ4740_MMC_STATE_SEND_STOP; + break; + } + jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE); + fallthrough; + + case JZ4740_MMC_STATE_SEND_STOP: + if (!req->stop) + break; + + jz4740_mmc_send_command(host, req->stop); + + if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) { + timeout = jz4740_mmc_poll_irq(host, + JZ_MMC_IRQ_PRG_DONE); + if (timeout) { + host->state = JZ4740_MMC_STATE_DONE; + break; + } + } + case JZ4740_MMC_STATE_DONE: + break; + } + + if (!timeout) + jz4740_mmc_request_done(host); + + return IRQ_HANDLED; +} + +static irqreturn_t jz_mmc_irq(int irq, void *devid) +{ + struct jz4740_mmc_host *host = devid; + struct mmc_command *cmd = host->cmd; + uint32_t irq_reg, status, tmp; + + status = readl(host->base + JZ_REG_MMC_STATUS); + irq_reg = jz4740_mmc_read_irq_reg(host); + + tmp = irq_reg; + irq_reg &= ~host->irq_mask; + + tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ | + JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE); + + if (tmp != irq_reg) + jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg); + + if (irq_reg & JZ_MMC_IRQ_SDIO) { + jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO); + mmc_signal_sdio_irq(host->mmc); + irq_reg &= ~JZ_MMC_IRQ_SDIO; + } + + if (host->req && cmd && irq_reg) { + if (test_and_clear_bit(0, &host->waiting)) { + del_timer(&host->timeout_timer); + + if (status & JZ_MMC_STATUS_TIMEOUT_RES) { + cmd->error = -ETIMEDOUT; + } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) { + cmd->error = -EIO; + } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | + JZ_MMC_STATUS_CRC_WRITE_ERROR)) { + if (cmd->data) + cmd->data->error = -EIO; + cmd->error = -EIO; + } + + jz4740_mmc_set_irq_enabled(host, irq_reg, false); + jz4740_mmc_write_irq_reg(host, irq_reg); + + return IRQ_WAKE_THREAD; + } + } + + return IRQ_HANDLED; +} + +static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate) +{ + int div = 0; + int real_rate; + + jz4740_mmc_clock_disable(host); + clk_set_rate(host->clk, host->mmc->f_max); + + real_rate = clk_get_rate(host->clk); + + while (real_rate > rate && div < 7) { + ++div; + real_rate >>= 1; + } + + writew(div, host->base + JZ_REG_MMC_CLKRT); + + if (real_rate > 25000000) { + if (host->version >= JZ_MMC_X1000) { + writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY | + JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY | + JZ_MMC_LPM_LOW_POWER_MODE_EN, + host->base + JZ_REG_MMC_LPM); + } else if (host->version >= JZ_MMC_JZ4760) { + writel(JZ_MMC_LPM_DRV_RISING | + JZ_MMC_LPM_LOW_POWER_MODE_EN, + host->base + JZ_REG_MMC_LPM); + } else if (host->version >= JZ_MMC_JZ4725B) + writel(JZ_MMC_LPM_LOW_POWER_MODE_EN, + host->base + JZ_REG_MMC_LPM); + } + + return real_rate; +} + +static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + + host->req = req; + + jz4740_mmc_write_irq_reg(host, ~0); + jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true); + + host->state = JZ4740_MMC_STATE_READ_RESPONSE; + set_bit(0, &host->waiting); + mod_timer(&host->timeout_timer, + jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS)); + jz4740_mmc_send_command(host, req->cmd); +} + +static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct 
mmc_ios *ios) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + if (ios->clock) + jz4740_mmc_set_clock_rate(host, ios->clock); + + switch (ios->power_mode) { + case MMC_POWER_UP: + jz4740_mmc_reset(host); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + host->cmdat |= JZ_MMC_CMDAT_INIT; + clk_prepare_enable(host->clk); + break; + case MMC_POWER_ON: + break; + default: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + clk_disable_unprepare(host->clk); + break; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK; + break; + case MMC_BUS_WIDTH_4: + host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK; + host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT; + break; + case MMC_BUS_WIDTH_8: + host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK; + host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT; + break; + default: + break; + } +} + +static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable); +} + +static const struct mmc_host_ops jz4740_mmc_ops = { + .request = jz4740_mmc_request, + .pre_req = jz4740_mmc_pre_request, + .post_req = jz4740_mmc_post_request, + .set_ios = jz4740_mmc_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, + .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, +}; + +static const struct of_device_id jz4740_mmc_of_match[] = { + { .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 }, + { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B }, + { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 }, + { .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 }, + { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 }, + {}, +}; +MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match); + +static int jz4740_mmc_probe(struct platform_device* pdev) +{ + int ret; + struct mmc_host *mmc; + struct jz4740_mmc_host *host; + const struct of_device_id *match; + + mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "Failed to alloc mmc host structure\n"); + return -ENOMEM; + } + + host = mmc_priv(mmc); + + match = of_match_device(jz4740_mmc_of_match, &pdev->dev); + if (match) { + host->version = (enum jz4740_mmc_version)match->data; + } else { + /* JZ4740 should be the only one using legacy probe */ + host->version = JZ_MMC_JZ4740; + } + + ret = mmc_of_parse(mmc); + if (ret) { + dev_err_probe(&pdev->dev, ret, "could not parse device properties\n"); + goto err_free_host; + } + + mmc_regulator_get_supply(mmc); + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + ret = host->irq; + goto err_free_host; + } + + host->clk = devm_clk_get(&pdev->dev, "mmc"); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + dev_err(&pdev->dev, "Failed to get mmc clock\n"); + goto err_free_host; + } + + host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, host->mem_res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + dev_err(&pdev->dev, "Failed to ioremap base memory\n"); + goto err_free_host; + } + + mmc->ops = &jz4740_mmc_ops; + if (!mmc->f_max) + mmc->f_max = JZ_MMC_CLK_RATE; + + /* + * There seems to be a problem with this driver on the JZ4760 and + * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz), + * the communication fails with many SD cards. 
+ * Until this bug is sorted out, limit the maximum rate to 24 MHz. + */ + if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE) + mmc->f_max = JZ_MMC_CLK_RATE; + + mmc->f_min = mmc->f_max / 128; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + /* + * We use a fixed timeout of 5s, hence inform the core about it. A + * future improvement should instead respect the cmd->busy_timeout. + */ + mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS; + + mmc->max_blk_size = (1 << 10) - 1; + mmc->max_blk_count = (1 << 15) - 1; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + mmc->max_segs = 128; + mmc->max_seg_size = mmc->max_req_size; + + host->mmc = mmc; + host->pdev = pdev; + spin_lock_init(&host->lock); + host->irq_mask = ~0; + + jz4740_mmc_reset(host); + + ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, + dev_name(&pdev->dev), host); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); + goto err_free_host; + } + + jz4740_mmc_clock_disable(host); + timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0); + + ret = jz4740_mmc_acquire_dma_channels(host); + if (ret == -EPROBE_DEFER) + goto err_free_irq; + host->use_dma = !ret; + + platform_set_drvdata(pdev, host); + ret = mmc_add_host(mmc); + + if (ret) { + dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret); + goto err_release_dma; + } + dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n"); + + dev_info(&pdev->dev, "Using %s, %d-bit mode\n", + host->use_dma ? "DMA" : "PIO", + (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 : + ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1)); + + return 0; + +err_release_dma: + if (host->use_dma) + jz4740_mmc_release_dma_channels(host); +err_free_irq: + free_irq(host->irq, host); +err_free_host: + mmc_free_host(mmc); + + return ret; +} + +static int jz4740_mmc_remove(struct platform_device *pdev) +{ + struct jz4740_mmc_host *host = platform_get_drvdata(pdev); + + del_timer_sync(&host->timeout_timer); + jz4740_mmc_set_irq_enabled(host, 0xff, false); + jz4740_mmc_reset(host); + + mmc_remove_host(host->mmc); + + free_irq(host->irq, host); + + if (host->use_dma) + jz4740_mmc_release_dma_channels(host); + + mmc_free_host(host->mmc); + + return 0; +} + +static int __maybe_unused jz4740_mmc_suspend(struct device *dev) +{ + return pinctrl_pm_select_sleep_state(dev); +} + +static int __maybe_unused jz4740_mmc_resume(struct device *dev) +{ + return pinctrl_select_default_state(dev); +} + +static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, + jz4740_mmc_resume); + +static struct platform_driver jz4740_mmc_driver = { + .probe = jz4740_mmc_probe, + .remove = jz4740_mmc_remove, + .driver = { + .name = "jz4740-mmc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(jz4740_mmc_of_match), + .pm = pm_ptr(&jz4740_mmc_pm_ops), + }, +}; + +module_platform_driver(jz4740_mmc_driver); + +MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lars-Peter Clausen "); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c new file mode 100644 index 000000000..78c9a56b0 --- /dev/null +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -0,0 +1,1341 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Amlogic SD/eMMC driver for the GX/S905 family SoCs + * + * Copyright (c) 2016 BayLibre, SAS. 
+ * Author: Kevin Hilman + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "meson-gx-mmc" + +#define SD_EMMC_CLOCK 0x0 +#define CLK_DIV_MASK GENMASK(5, 0) +#define CLK_SRC_MASK GENMASK(7, 6) +#define CLK_CORE_PHASE_MASK GENMASK(9, 8) +#define CLK_TX_PHASE_MASK GENMASK(11, 10) +#define CLK_RX_PHASE_MASK GENMASK(13, 12) +#define CLK_PHASE_0 0 +#define CLK_PHASE_180 2 +#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16) +#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20) +#define CLK_V2_ALWAYS_ON BIT(24) + +#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16) +#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22) +#define CLK_V3_ALWAYS_ON BIT(28) + +#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask) +#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask) +#define CLK_ALWAYS_ON(h) (h->data->always_on) + +#define SD_EMMC_DELAY 0x4 +#define SD_EMMC_ADJUST 0x8 +#define ADJUST_ADJ_DELAY_MASK GENMASK(21, 16) +#define ADJUST_DS_EN BIT(15) +#define ADJUST_ADJ_EN BIT(13) + +#define SD_EMMC_DELAY1 0x4 +#define SD_EMMC_DELAY2 0x8 +#define SD_EMMC_V3_ADJUST 0xc + +#define SD_EMMC_CALOUT 0x10 +#define SD_EMMC_START 0x40 +#define START_DESC_INIT BIT(0) +#define START_DESC_BUSY BIT(1) +#define START_DESC_ADDR_MASK GENMASK(31, 2) + +#define SD_EMMC_CFG 0x44 +#define CFG_BUS_WIDTH_MASK GENMASK(1, 0) +#define CFG_BUS_WIDTH_1 0x0 +#define CFG_BUS_WIDTH_4 0x1 +#define CFG_BUS_WIDTH_8 0x2 +#define CFG_DDR BIT(2) +#define CFG_BLK_LEN_MASK GENMASK(7, 4) +#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8) +#define CFG_RC_CC_MASK GENMASK(15, 12) +#define CFG_STOP_CLOCK BIT(22) +#define CFG_CLK_ALWAYS_ON BIT(18) +#define CFG_CHK_DS BIT(20) +#define CFG_AUTO_CLK BIT(23) +#define CFG_ERR_ABORT BIT(27) + +#define SD_EMMC_STATUS 0x48 +#define STATUS_BUSY BIT(31) +#define STATUS_DESC_BUSY BIT(30) +#define STATUS_DATI GENMASK(23, 16) + +#define SD_EMMC_IRQ_EN 0x4c +#define IRQ_RXD_ERR_MASK GENMASK(7, 0) +#define IRQ_TXD_ERR BIT(8) +#define IRQ_DESC_ERR BIT(9) +#define IRQ_RESP_ERR BIT(10) +#define IRQ_CRC_ERR \ + (IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR) +#define IRQ_RESP_TIMEOUT BIT(11) +#define IRQ_DESC_TIMEOUT BIT(12) +#define IRQ_TIMEOUTS \ + (IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT) +#define IRQ_END_OF_CHAIN BIT(13) +#define IRQ_RESP_STATUS BIT(14) +#define IRQ_SDIO BIT(15) +#define IRQ_EN_MASK \ + (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\ + IRQ_SDIO) + +#define SD_EMMC_CMD_CFG 0x50 +#define SD_EMMC_CMD_ARG 0x54 +#define SD_EMMC_CMD_DAT 0x58 +#define SD_EMMC_CMD_RSP 0x5c +#define SD_EMMC_CMD_RSP1 0x60 +#define SD_EMMC_CMD_RSP2 0x64 +#define SD_EMMC_CMD_RSP3 0x68 + +#define SD_EMMC_RXD 0x94 +#define SD_EMMC_TXD 0x94 +#define SD_EMMC_LAST_REG SD_EMMC_TXD + +#define SD_EMMC_SRAM_DATA_BUF_LEN 1536 +#define SD_EMMC_SRAM_DATA_BUF_OFF 0x200 + +#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */ +#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */ +#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */ +#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */ +#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */ +#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE + +#define SD_EMMC_PRE_REQ_DONE BIT(0) +#define SD_EMMC_DESC_CHAIN_MODE BIT(1) + +#define MUX_CLK_NUM_PARENTS 2 + +struct meson_mmc_data { + unsigned int tx_delay_mask; + unsigned int rx_delay_mask; + unsigned int always_on; + unsigned int adjust; +}; + +struct 
sd_emmc_desc { + u32 cmd_cfg; + u32 cmd_arg; + u32 cmd_data; + u32 cmd_resp; +}; + +struct meson_host { + struct device *dev; + struct meson_mmc_data *data; + struct mmc_host *mmc; + struct mmc_command *cmd; + + void __iomem *regs; + struct clk *core_clk; + struct clk *mux_clk; + struct clk *mmc_clk; + unsigned long req_rate; + bool ddr; + + bool dram_access_quirk; + + struct pinctrl *pinctrl; + struct pinctrl_state *pins_clk_gate; + + unsigned int bounce_buf_size; + void *bounce_buf; + void __iomem *bounce_iomem_buf; + dma_addr_t bounce_dma_addr; + struct sd_emmc_desc *descs; + dma_addr_t descs_dma_addr; + + int irq; + + bool vqmmc_enabled; + bool needs_pre_post_req; + +}; + +#define CMD_CFG_LENGTH_MASK GENMASK(8, 0) +#define CMD_CFG_BLOCK_MODE BIT(9) +#define CMD_CFG_R1B BIT(10) +#define CMD_CFG_END_OF_CHAIN BIT(11) +#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12) +#define CMD_CFG_NO_RESP BIT(16) +#define CMD_CFG_NO_CMD BIT(17) +#define CMD_CFG_DATA_IO BIT(18) +#define CMD_CFG_DATA_WR BIT(19) +#define CMD_CFG_RESP_NOCRC BIT(20) +#define CMD_CFG_RESP_128 BIT(21) +#define CMD_CFG_RESP_NUM BIT(22) +#define CMD_CFG_DATA_NUM BIT(23) +#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24) +#define CMD_CFG_ERROR BIT(30) +#define CMD_CFG_OWNER BIT(31) + +#define CMD_DATA_MASK GENMASK(31, 2) +#define CMD_DATA_BIG_ENDIAN BIT(1) +#define CMD_DATA_SRAM BIT(0) +#define CMD_RESP_MASK GENMASK(31, 1) +#define CMD_RESP_SRAM BIT(0) + +static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data) +{ + unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC; + + if (!timeout) + return SD_EMMC_CMD_TIMEOUT_DATA; + + timeout = roundup_pow_of_two(timeout); + + return min(timeout, 32768U); /* max. 2^15 ms */ +} + +static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) + return cmd->mrq->cmd; + else if (mmc_op_multi(cmd->opcode) && + (!cmd->mrq->sbc || cmd->error || cmd->data->error)) + return cmd->mrq->stop; + else + return NULL; +} + +static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct meson_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + struct scatterlist *sg; + int i; + bool use_desc_chain_mode = true; + + /* + * When Controller DMA cannot directly access DDR memory, disable + * support for Chain Mode to directly use the internal SRAM using + * the bounce buffer mode. + */ + if (host->dram_access_quirk) + return; + + /* + * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been + * reported. For some strange reason this occurs in descriptor + * chain mode only. So let's fall back to bounce buffer mode + * for command SD_IO_RW_EXTENDED. 
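(Aside: the chain-mode decision this function implements, including the 8-byte alignment scan just below, can be modeled standalone as follows; the scatterlist is reduced to a plain array of offsets and the dram_access_quirk case is omitted. CMD53 is SD_IO_RW_EXTENDED.)

    #include <stdbool.h>

    static bool use_chain_mode(unsigned int opcode,
                               const unsigned int *sg_offsets, int nents)
    {
            int i;

            if (opcode == 53)               /* SD_IO_RW_EXTENDED: bounce buffer fallback */
                    return false;

            for (i = 0; i < nents; i++)
                    if (sg_offsets[i] & 7)  /* not 8-byte aligned */
                            return false;

            return true;
    }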
+ */ + if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) + return; + + for_each_sg(data->sg, sg, data->sg_len, i) + /* check for 8 byte alignment */ + if (sg->offset & 7) { + WARN_ONCE(1, "unaligned scatterlist buffer\n"); + use_desc_chain_mode = false; + break; + } + + if (use_desc_chain_mode) + data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE; +} + +static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data) +{ + return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE; +} + +static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data) +{ + return data && data->flags & MMC_DATA_READ && + !meson_mmc_desc_chain_mode(data); +} + +static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!data) + return; + + meson_mmc_get_transfer_mode(mmc, mrq); + data->host_cookie |= SD_EMMC_PRE_REQ_DONE; + + if (!meson_mmc_desc_chain_mode(data)) + return; + + data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (!data->sg_count) + dev_err(mmc_dev(mmc), "dma_map_sg failed"); +} + +static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct mmc_data *data = mrq->data; + + if (data && meson_mmc_desc_chain_mode(data) && data->sg_count) + dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); +} + +/* + * Gating the clock on this controller is tricky. It seems the mmc clock + * is also used by the controller. It may crash during some operation if the + * clock is stopped. The safest thing to do, whenever possible, is to keep the + * clock running and stop it at the pad using the pinmux. + */ +static void meson_mmc_clk_gate(struct meson_host *host) +{ + u32 cfg; + + if (host->pins_clk_gate) { + pinctrl_select_state(host->pinctrl, host->pins_clk_gate); + } else { + /* + * If the pinmux is not provided, default to the classic and + * unsafe method + */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg |= CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); + } +} + +static void meson_mmc_clk_ungate(struct meson_host *host) +{ + u32 cfg; + + if (host->pins_clk_gate) + pinctrl_select_default_state(host->dev); + + /* Make sure the clock is not stopped in the controller */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg &= ~CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); +} + +static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate, + bool ddr) +{ + struct mmc_host *mmc = host->mmc; + int ret; + u32 cfg; + + /* Same request - bail-out */ + if (host->ddr == ddr && host->req_rate == rate) + return 0; + + /* stop clock */ + meson_mmc_clk_gate(host); + host->req_rate = 0; + mmc->actual_clock = 0; + + /* return with clock being stopped */ + if (!rate) + return 0; + + /* Stop the clock during rate change to avoid glitches */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg |= CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); + + if (ddr) { + /* DDR modes require higher module clock */ + rate <<= 1; + cfg |= CFG_DDR; + } else { + cfg &= ~CFG_DDR; + } + writel(cfg, host->regs + SD_EMMC_CFG); + host->ddr = ddr; + + ret = clk_set_rate(host->mmc_clk, rate); + if (ret) { + dev_err(host->dev, "Unable to set cfg_div_clk to %lu. 
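(Aside: the two gating strategies described above, modeled as a pure function over a stand-in cfg value; the stop-clock bit position mirrors CFG_STOP_CLOCK from the register map earlier in this file. Illustrative only:)

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_CFG_STOP_CLOCK (1u << 22)

    static uint32_t gate_clock(uint32_t cfg, bool have_clk_gate_pinctrl)
    {
            if (have_clk_gate_pinctrl)
                    return cfg;     /* gate at the pad, leave the controller alone */

            return cfg | SKETCH_CFG_STOP_CLOCK;     /* classic, unsafe method */
    }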
ret=%d\n", + rate, ret); + return ret; + } + + host->req_rate = rate; + mmc->actual_clock = clk_get_rate(host->mmc_clk); + + /* We should report the real output frequency of the controller */ + if (ddr) { + host->req_rate >>= 1; + mmc->actual_clock >>= 1; + } + + dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock); + if (rate != mmc->actual_clock) + dev_dbg(host->dev, "requested rate was %lu\n", rate); + + /* (re)start clock */ + meson_mmc_clk_ungate(host); + + return 0; +} + +/* + * The SD/eMMC IP block has an internal mux and divider used for + * generating the MMC clock. Use the clock framework to create and + * manage these clocks. + */ +static int meson_mmc_clk_init(struct meson_host *host) +{ + struct clk_init_data init; + struct clk_mux *mux; + struct clk_divider *div; + char clk_name[32]; + int i, ret = 0; + const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; + const char *clk_parent[1]; + u32 clk_reg; + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + clk_reg = CLK_ALWAYS_ON(host); + clk_reg |= CLK_DIV_MASK; + clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180); + clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0); + clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0); + writel(clk_reg, host->regs + SD_EMMC_CLOCK); + + /* get the mux parents */ + for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { + struct clk *clk; + char name[16]; + + snprintf(name, sizeof(name), "clkin%d", i); + clk = devm_clk_get(host->dev, name); + if (IS_ERR(clk)) + return dev_err_probe(host->dev, PTR_ERR(clk), + "Missing clock %s\n", name); + + mux_parent_names[i] = __clk_get_name(clk); + } + + /* create the mux */ + mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); + init.name = clk_name; + init.ops = &clk_mux_ops; + init.flags = 0; + init.parent_names = mux_parent_names; + init.num_parents = MUX_CLK_NUM_PARENTS; + + mux->reg = host->regs + SD_EMMC_CLOCK; + mux->shift = __ffs(CLK_SRC_MASK); + mux->mask = CLK_SRC_MASK >> mux->shift; + mux->hw.init = &init; + + host->mux_clk = devm_clk_register(host->dev, &mux->hw); + if (WARN_ON(IS_ERR(host->mux_clk))) + return PTR_ERR(host->mux_clk); + + /* create the divider */ + div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL); + if (!div) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); + init.name = clk_name; + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + clk_parent[0] = __clk_get_name(host->mux_clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + div->reg = host->regs + SD_EMMC_CLOCK; + div->shift = __ffs(CLK_DIV_MASK); + div->width = __builtin_popcountl(CLK_DIV_MASK); + div->hw.init = &init; + div->flags = CLK_DIVIDER_ONE_BASED; + + host->mmc_clk = devm_clk_register(host->dev, &div->hw); + if (WARN_ON(IS_ERR(host->mmc_clk))) + return PTR_ERR(host->mmc_clk); + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000); + ret = clk_set_rate(host->mmc_clk, host->mmc->f_min); + if (ret) + return ret; + + return clk_prepare_enable(host->mmc_clk); +} + +static void meson_mmc_disable_resampling(struct meson_host *host) +{ + unsigned int val = readl(host->regs + host->data->adjust); + + val &= ~ADJUST_ADJ_EN; + writel(val, host->regs + host->data->adjust); +} + +static void meson_mmc_reset_resampling(struct meson_host *host) +{ + unsigned int val; + + meson_mmc_disable_resampling(host); + + val = 
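(Aside: the DDR handling above reduces to a doubling rule: the module clock runs at twice the bus clock in DDR modes, so a 52 MHz DDR52 request programs 104 MHz and the reported rate is halved again. A one-line sketch:)

    static unsigned long module_clock_rate(unsigned long bus_rate, int ddr)
    {
            return ddr ? bus_rate << 1 : bus_rate;  /* DDR needs 2x module clock */
    }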
readl(host->regs + host->data->adjust); + val &= ~ADJUST_ADJ_DELAY_MASK; + writel(val, host->regs + host->data->adjust); +} + +static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct meson_host *host = mmc_priv(mmc); + unsigned int val, dly, max_dly, i; + int ret; + + /* Resampling is done using the source clock */ + max_dly = DIV_ROUND_UP(clk_get_rate(host->mux_clk), + clk_get_rate(host->mmc_clk)); + + val = readl(host->regs + host->data->adjust); + val |= ADJUST_ADJ_EN; + writel(val, host->regs + host->data->adjust); + + if (mmc_doing_retune(mmc)) + dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1; + else + dly = 0; + + for (i = 0; i < max_dly; i++) { + val &= ~ADJUST_ADJ_DELAY_MASK; + val |= FIELD_PREP(ADJUST_ADJ_DELAY_MASK, (dly + i) % max_dly); + writel(val, host->regs + host->data->adjust); + + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) { + dev_dbg(mmc_dev(mmc), "resampling delay: %u\n", + (dly + i) % max_dly); + return 0; + } + } + + meson_mmc_reset_resampling(host); + return -EIO; +} + +static int meson_mmc_prepare_ios_clock(struct meson_host *host, + struct mmc_ios *ios) +{ + bool ddr; + + switch (ios->timing) { + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + ddr = true; + break; + + default: + ddr = false; + break; + } + + return meson_mmc_clk_set(host, ios->clock, ddr); +} + +static void meson_mmc_check_resampling(struct meson_host *host, + struct mmc_ios *ios) +{ + switch (ios->timing) { + case MMC_TIMING_LEGACY: + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + case MMC_TIMING_MMC_DDR52: + meson_mmc_disable_resampling(host); + break; + } +} + +static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct meson_host *host = mmc_priv(mmc); + u32 bus_width, val; + int err; + + /* + * GPIO regulator, only controls switching between 1v8 and + * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON. + */ + switch (ios->power_mode) { + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { + regulator_disable(mmc->supply.vqmmc); + host->vqmmc_enabled = false; + } + + break; + + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + + break; + + case MMC_POWER_ON: + if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { + int ret = regulator_enable(mmc->supply.vqmmc); + + if (ret < 0) + dev_err(host->dev, + "failed to enable vqmmc regulator\n"); + else + host->vqmmc_enabled = true; + } + + break; + } + + /* Bus width */ + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + bus_width = CFG_BUS_WIDTH_1; + break; + case MMC_BUS_WIDTH_4: + bus_width = CFG_BUS_WIDTH_4; + break; + case MMC_BUS_WIDTH_8: + bus_width = CFG_BUS_WIDTH_8; + break; + default: + dev_err(host->dev, "Invalid ios->bus_width: %u. 
Setting to 4.\n", + ios->bus_width); + bus_width = CFG_BUS_WIDTH_4; + } + + val = readl(host->regs + SD_EMMC_CFG); + val &= ~CFG_BUS_WIDTH_MASK; + val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); + writel(val, host->regs + SD_EMMC_CFG); + + meson_mmc_check_resampling(host, ios); + err = meson_mmc_prepare_ios_clock(host, ios); + if (err) + dev_err(host->dev, "Failed to set clock: %d\n", err); + + dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); +} + +static void meson_mmc_request_done(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct meson_host *host = mmc_priv(mmc); + + host->cmd = NULL; + if (host->needs_pre_post_req) + meson_mmc_post_req(mmc, mrq, 0); + mmc_request_done(host->mmc, mrq); +} + +static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz) +{ + struct meson_host *host = mmc_priv(mmc); + u32 cfg, blksz_old; + + cfg = readl(host->regs + SD_EMMC_CFG); + blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg); + + if (!is_power_of_2(blksz)) + dev_err(host->dev, "blksz %u is not a power of 2\n", blksz); + + blksz = ilog2(blksz); + + /* check if block-size matches, if not update */ + if (blksz == blksz_old) + return; + + dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__, + blksz_old, blksz); + + cfg &= ~CFG_BLK_LEN_MASK; + cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz); + writel(cfg, host->regs + SD_EMMC_CFG); +} + +static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg) +{ + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + *cmd_cfg |= CMD_CFG_RESP_128; + *cmd_cfg |= CMD_CFG_RESP_NUM; + + if (!(cmd->flags & MMC_RSP_CRC)) + *cmd_cfg |= CMD_CFG_RESP_NOCRC; + + if (cmd->flags & MMC_RSP_BUSY) + *cmd_cfg |= CMD_CFG_R1B; + } else { + *cmd_cfg |= CMD_CFG_NO_RESP; + } +} + +static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg) +{ + struct meson_host *host = mmc_priv(mmc); + struct sd_emmc_desc *desc = host->descs; + struct mmc_data *data = host->cmd->data; + struct scatterlist *sg; + u32 start; + int i; + + if (data->flags & MMC_DATA_WRITE) + cmd_cfg |= CMD_CFG_DATA_WR; + + if (data->blocks > 1) { + cmd_cfg |= CMD_CFG_BLOCK_MODE; + meson_mmc_set_blksz(mmc, data->blksz); + } + + for_each_sg(data->sg, sg, data->sg_count, i) { + unsigned int len = sg_dma_len(sg); + + if (data->blocks > 1) + len /= data->blksz; + + desc[i].cmd_cfg = cmd_cfg; + desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len); + if (i > 0) + desc[i].cmd_cfg |= CMD_CFG_NO_CMD; + desc[i].cmd_arg = host->cmd->arg; + desc[i].cmd_resp = 0; + desc[i].cmd_data = sg_dma_address(sg); + } + desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN; + + dma_wmb(); /* ensure descriptor is written before kicked */ + start = host->descs_dma_addr | START_DESC_BUSY; + writel(start, host->regs + SD_EMMC_START); +} + +/* local sg copy for dram_access_quirk */ +static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data, + size_t buflen, bool to_buffer) +{ + unsigned int sg_flags = SG_MITER_ATOMIC; + struct scatterlist *sgl = data->sg; + unsigned int nents = data->sg_len; + struct sg_mapping_iter miter; + unsigned int offset = 0; + + if (to_buffer) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + + sg_miter_start(&miter, sgl, nents, sg_flags); + + while ((offset < buflen) && sg_miter_next(&miter)) { + unsigned int buf_offset = 0; + unsigned int len, left; + u32 *buf = miter.addr; + + len = min(miter.length, buflen - offset); + left = len; + + if (to_buffer) { + do { + writel(*buf++, host->bounce_iomem_buf + 
offset + buf_offset); + + buf_offset += 4; + left -= 4; + } while (left); + } else { + do { + *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset); + + buf_offset += 4; + left -= 4; + } while (left); + } + + offset += len; + } + + sg_miter_stop(&miter); +} + +static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd) +{ + struct meson_host *host = mmc_priv(mmc); + struct mmc_data *data = cmd->data; + u32 cmd_cfg = 0, cmd_data = 0; + unsigned int xfer_bytes = 0; + + /* Setup descriptors */ + dma_rmb(); + + host->cmd = cmd; + + cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode); + cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */ + + meson_mmc_set_response_bits(cmd, &cmd_cfg); + + /* data? */ + if (data) { + data->bytes_xfered = 0; + cmd_cfg |= CMD_CFG_DATA_IO; + cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, + ilog2(meson_mmc_get_timeout_msecs(data))); + + if (meson_mmc_desc_chain_mode(data)) { + meson_mmc_desc_chain_transfer(mmc, cmd_cfg); + return; + } + + if (data->blocks > 1) { + cmd_cfg |= CMD_CFG_BLOCK_MODE; + cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, + data->blocks); + meson_mmc_set_blksz(mmc, data->blksz); + } else { + cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz); + } + + xfer_bytes = data->blksz * data->blocks; + if (data->flags & MMC_DATA_WRITE) { + cmd_cfg |= CMD_CFG_DATA_WR; + WARN_ON(xfer_bytes > host->bounce_buf_size); + if (host->dram_access_quirk) + meson_mmc_copy_buffer(host, data, xfer_bytes, true); + else + sg_copy_to_buffer(data->sg, data->sg_len, + host->bounce_buf, xfer_bytes); + dma_wmb(); + } + + cmd_data = host->bounce_dma_addr & CMD_DATA_MASK; + } else { + cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, + ilog2(SD_EMMC_CMD_TIMEOUT)); + } + + /* Last descriptor */ + cmd_cfg |= CMD_CFG_END_OF_CHAIN; + writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG); + writel(cmd_data, host->regs + SD_EMMC_CMD_DAT); + writel(0, host->regs + SD_EMMC_CMD_RSP); + wmb(); /* ensure descriptor is written before kicked */ + writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG); +} + +static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data) +{ + struct scatterlist *sg; + int i; + + /* Reject request if any element offset or size is not 32-bit aligned */ + for_each_sg(data->sg, sg, data->sg_len, i) { + if (!IS_ALIGNED(sg->offset, sizeof(u32)) || + !IS_ALIGNED(sg->length, sizeof(u32))) { + dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n", + sg->offset, sg->length); + return -EINVAL; + } + } + + return 0; +} + +static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct meson_host *host = mmc_priv(mmc); + host->needs_pre_post_req = mrq->data && + !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); + + /* + * The memory at the end of the controller used as a bounce buffer for + * the dram_access_quirk only accepts 32-bit read/write access, + * check the alignment and length of the data before starting the request. 
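(Aside: a standalone model of this check, with scatterlist entries reduced to offset/length pairs; any entry failing the 4-byte test gets the whole request rejected with -EINVAL in the driver. Illustrative only:)

    #include <stdbool.h>

    struct sketch_seg {
            unsigned int offset, length;
    };

    static bool dram_access_ok(const struct sketch_seg *segs, int nents)
    {
            int i;

            for (i = 0; i < nents; i++)
                    if ((segs[i].offset | segs[i].length) & 3)
                            return false;   /* not 32-bit aligned */

            return true;
    }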
+ */ + if (host->dram_access_quirk && mrq->data) { + mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data); + if (mrq->cmd->error) { + mmc_request_done(mmc, mrq); + return; + } + } + + if (host->needs_pre_post_req) { + meson_mmc_get_transfer_mode(mmc, mrq); + if (!meson_mmc_desc_chain_mode(mrq->data)) + host->needs_pre_post_req = false; + } + + if (host->needs_pre_post_req) + meson_mmc_pre_req(mmc, mrq); + + /* Stop execution */ + writel(0, host->regs + SD_EMMC_START); + + meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); +} + +static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) +{ + struct meson_host *host = mmc_priv(mmc); + + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3); + cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2); + cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1); + cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP); + } else if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP); + } +} + +static irqreturn_t meson_mmc_irq(int irq, void *dev_id) +{ + struct meson_host *host = dev_id; + struct mmc_command *cmd; + struct mmc_data *data; + u32 irq_en, status, raw_status; + irqreturn_t ret = IRQ_NONE; + + irq_en = readl(host->regs + SD_EMMC_IRQ_EN); + raw_status = readl(host->regs + SD_EMMC_STATUS); + status = raw_status & irq_en; + + if (!status) { + dev_dbg(host->dev, + "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n", + irq_en, raw_status); + return IRQ_NONE; + } + + if (WARN_ON(!host) || WARN_ON(!host->cmd)) + return IRQ_NONE; + + /* ack all raised interrupts */ + writel(status, host->regs + SD_EMMC_STATUS); + + cmd = host->cmd; + data = cmd->data; + cmd->error = 0; + if (status & IRQ_CRC_ERR) { + dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status); + cmd->error = -EILSEQ; + ret = IRQ_WAKE_THREAD; + goto out; + } + + if (status & IRQ_TIMEOUTS) { + dev_dbg(host->dev, "Timeout - status 0x%08x\n", status); + cmd->error = -ETIMEDOUT; + ret = IRQ_WAKE_THREAD; + goto out; + } + + meson_mmc_read_resp(host->mmc, cmd); + + if (status & IRQ_SDIO) { + dev_dbg(host->dev, "IRQ: SDIO TODO.\n"); + ret = IRQ_HANDLED; + } + + if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { + if (data && !cmd->error) + data->bytes_xfered = data->blksz * data->blocks; + + return IRQ_WAKE_THREAD; + } + +out: + if (cmd->error) { + /* Stop desc in case of errors */ + u32 start = readl(host->regs + SD_EMMC_START); + + start &= ~START_DESC_BUSY; + writel(start, host->regs + SD_EMMC_START); + } + + return ret; +} + +static int meson_mmc_wait_desc_stop(struct meson_host *host) +{ + u32 status; + + /* + * It may sometimes take a while for it to actually halt. Here, we + * are giving it 5ms to comply + * + * If we don't confirm the descriptor is stopped, it might raise new + * IRQs after we have called mmc_request_done() which is bad. 
+ */ + + return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status, + !(status & (STATUS_BUSY | STATUS_DESC_BUSY)), + 100, 5000); +} + +static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) +{ + struct meson_host *host = dev_id; + struct mmc_command *next_cmd, *cmd = host->cmd; + struct mmc_data *data; + unsigned int xfer_bytes; + + if (WARN_ON(!cmd)) + return IRQ_NONE; + + if (cmd->error) { + meson_mmc_wait_desc_stop(host); + meson_mmc_request_done(host->mmc, cmd->mrq); + + return IRQ_HANDLED; + } + + data = cmd->data; + if (meson_mmc_bounce_buf_read(data)) { + xfer_bytes = data->blksz * data->blocks; + WARN_ON(xfer_bytes > host->bounce_buf_size); + if (host->dram_access_quirk) + meson_mmc_copy_buffer(host, data, xfer_bytes, false); + else + sg_copy_from_buffer(data->sg, data->sg_len, + host->bounce_buf, xfer_bytes); + } + + next_cmd = meson_mmc_get_next_command(cmd); + if (next_cmd) + meson_mmc_start_cmd(host->mmc, next_cmd); + else + meson_mmc_request_done(host->mmc, cmd->mrq); + + return IRQ_HANDLED; +} + +/* + * NOTE: we only need this until the GPIO/pinctrl driver can handle + * interrupts. For now, the MMC core will use this for polling. + */ +static int meson_mmc_get_cd(struct mmc_host *mmc) +{ + int status = mmc_gpio_get_cd(mmc); + + if (status == -ENOSYS) + return 1; /* assume present */ + + return status; +} + +static void meson_mmc_cfg_init(struct meson_host *host) +{ + u32 cfg = 0; + + cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, + ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); + cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP)); + cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE)); + + /* abort chain on R/W errors */ + cfg |= CFG_ERR_ABORT; + + writel(cfg, host->regs + SD_EMMC_CFG); +} + +static int meson_mmc_card_busy(struct mmc_host *mmc) +{ + struct meson_host *host = mmc_priv(mmc); + u32 regval; + + regval = readl(host->regs + SD_EMMC_STATUS); + + /* We are only interested in lines 0 to 3, so mask the other ones */ + return !(FIELD_GET(STATUS_DATI, regval) & 0xf); +} + +static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + int ret; + + /* vqmmc regulator is available */ + if (!IS_ERR(mmc->supply.vqmmc)) { + /* + * The usual amlogic setup uses a GPIO to switch from one + * regulator to the other. While the voltage ramp up is + * pretty fast, care must be taken when switching from 3.3v + * to 1.8v. Please make sure the regulator framework is aware + * of your own regulator constraints. + */ + ret = mmc_regulator_set_vqmmc(mmc, ios); + return ret < 0 ? 
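(Aside: meson_mmc_card_busy() above distills to a bit test: STATUS_DATI is GENMASK(23, 16), and the card signals busy by driving DAT[3:0] low. A standalone sketch:)

    #include <stdint.h>

    static int card_busy_from_status(uint32_t status)
    {
            uint32_t dati = (status >> 16) & 0xff;  /* STATUS_DATI field */

            return !(dati & 0xf);   /* DAT[3:0] all low: busy */
    }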
ret : 0; + } + + /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + return 0; + + return -EINVAL; +} + +static const struct mmc_host_ops meson_mmc_ops = { + .request = meson_mmc_request, + .set_ios = meson_mmc_set_ios, + .get_cd = meson_mmc_get_cd, + .pre_req = meson_mmc_pre_req, + .post_req = meson_mmc_post_req, + .execute_tuning = meson_mmc_resampling_tuning, + .card_busy = meson_mmc_card_busy, + .start_signal_voltage_switch = meson_mmc_voltage_switch, +}; + +static int meson_mmc_probe(struct platform_device *pdev) +{ + struct resource *res; + struct meson_host *host; + struct mmc_host *mmc; + int ret; + + mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct meson_host)); + if (!mmc) + return -ENOMEM; + host = mmc_priv(mmc); + host->mmc = mmc; + host->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, host); + + /* The G12A SDIO Controller needs an SRAM bounce buffer */ + host->dram_access_quirk = device_property_read_bool(&pdev->dev, + "amlogic,dram-access-quirk"); + + /* Get regulators and the supported OCR mask */ + host->vqmmc_enabled = false; + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + + ret = mmc_of_parse(mmc); + if (ret) + return dev_err_probe(&pdev->dev, ret, "error parsing DT\n"); + + host->data = (struct meson_mmc_data *) + of_device_get_match_data(&pdev->dev); + if (!host->data) + return -EINVAL; + + ret = device_reset_optional(&pdev->dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, "device reset failed\n"); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->regs)) + return PTR_ERR(host->regs); + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) + return host->irq; + + host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(host->pinctrl)) + return PTR_ERR(host->pinctrl); + + host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, + "clk-gate"); + if (IS_ERR(host->pins_clk_gate)) { + dev_warn(&pdev->dev, + "can't get clk-gate pinctrl, using clk_stop bit\n"); + host->pins_clk_gate = NULL; + } + + host->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(host->core_clk)) + return PTR_ERR(host->core_clk); + + ret = clk_prepare_enable(host->core_clk); + if (ret) + return ret; + + ret = meson_mmc_clk_init(host); + if (ret) + goto err_core_clk; + + /* set config to sane default */ + meson_mmc_cfg_init(host); + + /* Stop execution */ + writel(0, host->regs + SD_EMMC_START); + + /* clear, ack and enable interrupts */ + writel(0, host->regs + SD_EMMC_IRQ_EN); + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_STATUS); + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_IRQ_EN); + + ret = request_threaded_irq(host->irq, meson_mmc_irq, + meson_mmc_irq_thread, IRQF_ONESHOT, + dev_name(&pdev->dev), host); + if (ret) + goto err_init_clk; + + mmc->caps |= MMC_CAP_CMD23; + if (host->dram_access_quirk) { + /* Limit segments to 1 due to low available sram memory */ + mmc->max_segs = 1; + /* Limit to the available sram memory */ + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN / + mmc->max_blk_size; + } else { + mmc->max_blk_count = CMD_CFG_LENGTH_MASK; + mmc->max_segs = SD_EMMC_DESC_BUF_LEN / + sizeof(struct sd_emmc_desc); + } + mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size; + mmc->max_seg_size = mmc->max_req_size; + + /* + * At the moment, we don't know how to reliably enable HS400. 
+ * From the different datasheets, it is not even clear if this mode + * is officially supported by any of the SoCs. + */ + mmc->caps2 &= ~MMC_CAP2_HS400; + + if (host->dram_access_quirk) { + /* + * The MMC Controller embeds 1.5 KiB of internal SRAM + * that can be used as a bounce buffer. + * In the case of the G12A SDIO controller, use this SRAM + * instead of the DDR memory + */ + host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN; + host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF; + host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF; + } else { + /* data bounce buffer */ + host->bounce_buf_size = mmc->max_req_size; + host->bounce_buf = + dma_alloc_coherent(host->dev, host->bounce_buf_size, + &host->bounce_dma_addr, GFP_KERNEL); + if (host->bounce_buf == NULL) { + dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n"); + ret = -ENOMEM; + goto err_free_irq; + } + } + + host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, + &host->descs_dma_addr, GFP_KERNEL); + if (!host->descs) { + dev_err(host->dev, "Allocating descriptor DMA buffer failed\n"); + ret = -ENOMEM; + goto err_bounce_buf; + } + + mmc->ops = &meson_mmc_ops; + ret = mmc_add_host(mmc); + if (ret) + goto err_free_irq; + + return 0; + +err_bounce_buf: + if (!host->dram_access_quirk) + dma_free_coherent(host->dev, host->bounce_buf_size, + host->bounce_buf, host->bounce_dma_addr); +err_free_irq: + free_irq(host->irq, host); +err_init_clk: + clk_disable_unprepare(host->mmc_clk); +err_core_clk: + clk_disable_unprepare(host->core_clk); + return ret; +} + +static int meson_mmc_remove(struct platform_device *pdev) +{ + struct meson_host *host = dev_get_drvdata(&pdev->dev); + + mmc_remove_host(host->mmc); + + /* disable interrupts */ + writel(0, host->regs + SD_EMMC_IRQ_EN); + free_irq(host->irq, host); + + dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, + host->descs, host->descs_dma_addr); + + if (!host->dram_access_quirk) + dma_free_coherent(host->dev, host->bounce_buf_size, + host->bounce_buf, host->bounce_dma_addr); + + clk_disable_unprepare(host->mmc_clk); + clk_disable_unprepare(host->core_clk); + + return 0; +} + +static const struct meson_mmc_data meson_gx_data = { + .tx_delay_mask = CLK_V2_TX_DELAY_MASK, + .rx_delay_mask = CLK_V2_RX_DELAY_MASK, + .always_on = CLK_V2_ALWAYS_ON, + .adjust = SD_EMMC_ADJUST, +}; + +static const struct meson_mmc_data meson_axg_data = { + .tx_delay_mask = CLK_V3_TX_DELAY_MASK, + .rx_delay_mask = CLK_V3_RX_DELAY_MASK, + .always_on = CLK_V3_ALWAYS_ON, + .adjust = SD_EMMC_V3_ADJUST, +}; + +static const struct of_device_id meson_mmc_of_match[] = { + { .compatible = "amlogic,meson-gx-mmc", .data = &meson_gx_data }, + { .compatible = "amlogic,meson-gxbb-mmc", .data = &meson_gx_data }, + { .compatible = "amlogic,meson-gxl-mmc", .data = &meson_gx_data }, + { .compatible = "amlogic,meson-gxm-mmc", .data = &meson_gx_data }, + { .compatible = "amlogic,meson-axg-mmc", .data = &meson_axg_data }, + {} +}; +MODULE_DEVICE_TABLE(of, meson_mmc_of_match); + +static struct platform_driver meson_mmc_driver = { + .probe = meson_mmc_probe, + .remove = meson_mmc_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(meson_mmc_of_match), + }, +}; + +module_platform_driver(meson_mmc_driver); + +MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver"); +MODULE_AUTHOR("Kevin Hilman "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c new 
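(Aside: the SRAM bounce-buffer sizing above is simple arithmetic: the 1536-byte window (SD_EMMC_SRAM_DATA_BUF_LEN) divided by the maximum block size caps a request at that many blocks in a single segment. Sketch:)

    static unsigned int sram_max_blk_count(unsigned int buf_len,
                                           unsigned int max_blk_size)
    {
            return buf_len / max_blk_size;  /* e.g. 1536 / 512 == 3 */
    }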
file mode 100644 index 000000000..e1f29b279 --- /dev/null +++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Amlogic Meson SDHC clock controller + * + * Copyright (C) 2020 Martin Blumenstingl + */ + +#include +#include +#include +#include + +#include "meson-mx-sdhc.h" + +#define MESON_SDHC_NUM_BUILTIN_CLKS 6 + +struct meson_mx_sdhc_clkc { + struct clk_mux src_sel; + struct clk_divider div; + struct clk_gate mod_clk_en; + struct clk_gate tx_clk_en; + struct clk_gate rx_clk_en; + struct clk_gate sd_clk_en; +}; + +static const struct clk_parent_data meson_mx_sdhc_src_sel_parents[4] = { + { .fw_name = "clkin0" }, + { .fw_name = "clkin1" }, + { .fw_name = "clkin2" }, + { .fw_name = "clkin3" }, +}; + +static const struct clk_div_table meson_mx_sdhc_div_table[] = { + { .div = 6, .val = 5, }, + { .div = 8, .val = 7, }, + { .div = 9, .val = 8, }, + { .div = 10, .val = 9, }, + { .div = 12, .val = 11, }, + { .div = 16, .val = 15, }, + { .div = 18, .val = 17, }, + { .div = 34, .val = 33, }, + { .div = 142, .val = 141, }, + { .div = 850, .val = 849, }, + { .div = 2126, .val = 2125, }, + { .div = 4096, .val = 4095, }, + { /* sentinel */ } +}; + +static int meson_mx_sdhc_clk_hw_register(struct device *dev, + const char *name_suffix, + const struct clk_parent_data *parents, + unsigned int num_parents, + const struct clk_ops *ops, + struct clk_hw *hw) +{ + struct clk_init_data init = { }; + char clk_name[32]; + + snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dev), + name_suffix); + + init.name = clk_name; + init.ops = ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_data = parents; + init.num_parents = num_parents; + + hw->init = &init; + + return devm_clk_hw_register(dev, hw); +} + +static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev, + const char *name_suffix, + struct clk_hw *parent, + struct clk_hw *hw) +{ + struct clk_parent_data parent_data = { .hw = parent }; + + return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1, + &clk_gate_ops, hw); +} + +int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base, + struct clk_bulk_data *clk_bulk_data) +{ + struct clk_parent_data div_parent = { }; + struct meson_mx_sdhc_clkc *clkc_data; + int ret; + + clkc_data = devm_kzalloc(dev, sizeof(*clkc_data), GFP_KERNEL); + if (!clkc_data) + return -ENOMEM; + + clkc_data->src_sel.reg = base + MESON_SDHC_CLKC; + clkc_data->src_sel.mask = 0x3; + clkc_data->src_sel.shift = 16; + ret = meson_mx_sdhc_clk_hw_register(dev, "src_sel", + meson_mx_sdhc_src_sel_parents, 4, + &clk_mux_ops, + &clkc_data->src_sel.hw); + if (ret) + return ret; + + clkc_data->div.reg = base + MESON_SDHC_CLKC; + clkc_data->div.shift = 0; + clkc_data->div.width = 12; + clkc_data->div.table = meson_mx_sdhc_div_table; + div_parent.hw = &clkc_data->src_sel.hw; + ret = meson_mx_sdhc_clk_hw_register(dev, "div", &div_parent, 1, + &clk_divider_ops, + &clkc_data->div.hw); + if (ret) + return ret; + + clkc_data->mod_clk_en.reg = base + MESON_SDHC_CLKC; + clkc_data->mod_clk_en.bit_idx = 15; + ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on", + &clkc_data->div.hw, + &clkc_data->mod_clk_en.hw); + if (ret) + return ret; + + clkc_data->tx_clk_en.reg = base + MESON_SDHC_CLKC; + clkc_data->tx_clk_en.bit_idx = 14; + ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on", + &clkc_data->div.hw, + &clkc_data->tx_clk_en.hw); + if (ret) + return ret; + + clkc_data->rx_clk_en.reg = base + MESON_SDHC_CLKC; + clkc_data->rx_clk_en.bit_idx = 13; + ret = 
meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on", + &clkc_data->div.hw, + &clkc_data->rx_clk_en.hw); + if (ret) + return ret; + + clkc_data->sd_clk_en.reg = base + MESON_SDHC_CLKC; + clkc_data->sd_clk_en.bit_idx = 12; + ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on", + &clkc_data->div.hw, + &clkc_data->sd_clk_en.hw); + if (ret) + return ret; + + /* + * TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is + * available. + */ + clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk; + clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk; + clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk; + clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk; + + return 0; +} diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c new file mode 100644 index 000000000..ba59061fe --- /dev/null +++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Amlogic Meson6/Meson8/Meson8b/Meson8m2 SDHC MMC host controller driver. + * + * Copyright (C) 2020 Martin Blumenstingl + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "meson-mx-sdhc.h" + +#define MESON_SDHC_NUM_BULK_CLKS 4 +#define MESON_SDHC_MAX_BLK_SIZE 512 +#define MESON_SDHC_NUM_TUNING_TRIES 10 + +#define MESON_SDHC_WAIT_CMD_READY_SLEEP_US 1 +#define MESON_SDHC_WAIT_CMD_READY_TIMEOUT_US 100000 +#define MESON_SDHC_WAIT_BEFORE_SEND_SLEEP_US 1 +#define MESON_SDHC_WAIT_BEFORE_SEND_TIMEOUT_US 200 + +struct meson_mx_sdhc_data { + void (*init_hw)(struct mmc_host *mmc); + void (*set_pdma)(struct mmc_host *mmc); + void (*wait_before_send)(struct mmc_host *mmc); + bool hardware_flush_all_cmds; +}; + +struct meson_mx_sdhc_host { + struct mmc_host *mmc; + + struct mmc_request *mrq; + struct mmc_command *cmd; + int error; + + struct regmap *regmap; + + struct clk *pclk; + struct clk *sd_clk; + struct clk_bulk_data bulk_clks[MESON_SDHC_NUM_BULK_CLKS]; + bool bulk_clks_enabled; + + const struct meson_mx_sdhc_data *platform; +}; + +static const struct regmap_config meson_mx_sdhc_regmap_config = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = MESON_SDHC_CLK2, +}; + +static void meson_mx_sdhc_hw_reset(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + + regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_MAIN_CTRL | + MESON_SDHC_SRST_RXFIFO | MESON_SDHC_SRST_TXFIFO | + MESON_SDHC_SRST_DPHY_RX | MESON_SDHC_SRST_DPHY_TX | + MESON_SDHC_SRST_DMA_IF); + usleep_range(10, 100); + + regmap_write(host->regmap, MESON_SDHC_SRST, 0); + usleep_range(10, 100); +} + +static void meson_mx_sdhc_clear_fifo(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + u32 stat; + + regmap_read(host->regmap, MESON_SDHC_STAT, &stat); + if (!FIELD_GET(MESON_SDHC_STAT_RXFIFO_CNT, stat) && + !FIELD_GET(MESON_SDHC_STAT_TXFIFO_CNT, stat)) + return; + + regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_RXFIFO | + MESON_SDHC_SRST_TXFIFO | MESON_SDHC_SRST_MAIN_CTRL); + udelay(5); + + regmap_read(host->regmap, MESON_SDHC_STAT, &stat); + if (FIELD_GET(MESON_SDHC_STAT_RXFIFO_CNT, stat) || + FIELD_GET(MESON_SDHC_STAT_TXFIFO_CNT, stat)) + dev_warn(mmc_dev(host->mmc), + "Failed to clear FIFOs, RX: %lu, TX: %lu\n", + FIELD_GET(MESON_SDHC_STAT_RXFIFO_CNT, stat), + FIELD_GET(MESON_SDHC_STAT_TXFIFO_CNT, stat)); +} + +static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc) +{ + struct 
meson_mx_sdhc_host *host = mmc_priv(mmc); + u32 stat, esta; + int ret; + + ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_STAT, stat, + !(stat & MESON_SDHC_STAT_CMD_BUSY), + MESON_SDHC_WAIT_CMD_READY_SLEEP_US, + MESON_SDHC_WAIT_CMD_READY_TIMEOUT_US); + if (ret) { + dev_warn(mmc_dev(mmc), + "Failed to poll for CMD_BUSY while processing CMD%d\n", + host->cmd->opcode); + meson_mx_sdhc_hw_reset(mmc); + } + + ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, esta, + !(esta & MESON_SDHC_ESTA_11_13), + MESON_SDHC_WAIT_CMD_READY_SLEEP_US, + MESON_SDHC_WAIT_CMD_READY_TIMEOUT_US); + if (ret) { + dev_warn(mmc_dev(mmc), + "Failed to poll for ESTA[13:11] while processing CMD%d\n", + host->cmd->opcode); + meson_mx_sdhc_hw_reset(mmc); + } +} + +static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + bool manual_stop = false; + u32 ictl, send; + int pack_len; + + host->cmd = cmd; + + ictl = MESON_SDHC_ICTL_DATA_TIMEOUT | MESON_SDHC_ICTL_DATA_ERR_CRC | + MESON_SDHC_ICTL_RXFIFO_FULL | MESON_SDHC_ICTL_TXFIFO_EMPTY | + MESON_SDHC_ICTL_RESP_TIMEOUT | MESON_SDHC_ICTL_RESP_ERR_CRC; + + send = FIELD_PREP(MESON_SDHC_SEND_CMD_INDEX, cmd->opcode); + + if (cmd->data) { + send |= MESON_SDHC_SEND_CMD_HAS_DATA; + send |= FIELD_PREP(MESON_SDHC_SEND_TOTAL_PACK, + cmd->data->blocks - 1); + + if (cmd->data->blksz < MESON_SDHC_MAX_BLK_SIZE) + pack_len = cmd->data->blksz; + else + pack_len = 0; + + if (cmd->data->flags & MMC_DATA_WRITE) + send |= MESON_SDHC_SEND_DATA_DIR; + + /* + * If command with no data, just wait response done + * interrupt(int[0]), and if command with data transfer, just + * wait dma done interrupt(int[11]), don't need care about + * dat0 busy or not. + */ + if (host->platform->hardware_flush_all_cmds || + cmd->data->flags & MMC_DATA_WRITE) + /* hardware flush: */ + ictl |= MESON_SDHC_ICTL_DMA_DONE; + else + /* software flush: */ + ictl |= MESON_SDHC_ICTL_DATA_XFER_OK; + + /* + * Mimic the logic from the vendor driver where (only) + * SD_IO_RW_EXTENDED commands with more than one block set the + * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware + * download in the brcmfmac driver for a BCM43362/1 card. + * Without this sdio_memcpy_toio() (with a size of 219557 + * bytes) times out if MESON_SDHC_MISC_MANUAL_STOP is not set. + */ + manual_stop = cmd->data->blocks > 1 && + cmd->opcode == SD_IO_RW_EXTENDED; + } else { + pack_len = 0; + + ictl |= MESON_SDHC_ICTL_RESP_OK; + } + + regmap_update_bits(host->regmap, MESON_SDHC_MISC, + MESON_SDHC_MISC_MANUAL_STOP, + manual_stop ? 
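(Aside: the vendor-driver manual-stop rule described above fits in one predicate; CMD53 is SD_IO_RW_EXTENDED. Illustrative only:)

    #include <stdbool.h>

    static bool needs_manual_stop(unsigned int opcode, unsigned int blocks)
    {
            return blocks > 1 && opcode == 53;      /* multi-block CMD53 only */
    }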
MESON_SDHC_MISC_MANUAL_STOP : 0); + + if (cmd->opcode == MMC_STOP_TRANSMISSION) + send |= MESON_SDHC_SEND_DATA_STOP; + + if (cmd->flags & MMC_RSP_PRESENT) + send |= MESON_SDHC_SEND_CMD_HAS_RESP; + + if (cmd->flags & MMC_RSP_136) { + send |= MESON_SDHC_SEND_RESP_LEN; + send |= MESON_SDHC_SEND_RESP_NO_CRC; + } + + if (!(cmd->flags & MMC_RSP_CRC)) + send |= MESON_SDHC_SEND_RESP_NO_CRC; + + if (cmd->flags & MMC_RSP_BUSY) + send |= MESON_SDHC_SEND_R1B; + + /* enable the new IRQs and mask all pending ones */ + regmap_write(host->regmap, MESON_SDHC_ICTL, ictl); + regmap_write(host->regmap, MESON_SDHC_ISTA, MESON_SDHC_ISTA_ALL_IRQS); + + regmap_write(host->regmap, MESON_SDHC_ARGU, cmd->arg); + + regmap_update_bits(host->regmap, MESON_SDHC_CTRL, + MESON_SDHC_CTRL_PACK_LEN, + FIELD_PREP(MESON_SDHC_CTRL_PACK_LEN, pack_len)); + + if (cmd->data) + regmap_write(host->regmap, MESON_SDHC_ADDR, + sg_dma_address(cmd->data->sg)); + + meson_mx_sdhc_wait_cmd_ready(mmc); + + if (cmd->data) + host->platform->set_pdma(mmc); + + if (host->platform->wait_before_send) + host->platform->wait_before_send(mmc); + + regmap_write(host->regmap, MESON_SDHC_SEND, send); +} + +static void meson_mx_sdhc_disable_clks(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + + if (!host->bulk_clks_enabled) + return; + + clk_bulk_disable_unprepare(MESON_SDHC_NUM_BULK_CLKS, host->bulk_clks); + + host->bulk_clks_enabled = false; +} + +static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + int ret; + + if (host->bulk_clks_enabled) + return 0; + + ret = clk_bulk_prepare_enable(MESON_SDHC_NUM_BULK_CLKS, + host->bulk_clks); + if (ret) + return ret; + + host->bulk_clks_enabled = true; + + return 0; +} + +static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + u32 val, rx_clk_phase; + int ret; + + meson_mx_sdhc_disable_clks(mmc); + + if (ios->clock) { + ret = clk_set_rate(host->sd_clk, ios->clock); + if (ret) { + dev_warn(mmc_dev(mmc), + "Failed to set MMC clock to %uHz: %d\n", + ios->clock, ret); + return ret; + } + + ret = meson_mx_sdhc_enable_clks(mmc); + if (ret) + return ret; + + mmc->actual_clock = clk_get_rate(host->sd_clk); + + /* + * Phase 90 should work in most cases. 
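(Aside: with MESON_SDHC_CLKC_CLK_DIV delay steps per clock period, a quarter of the divider value approximates a 90 degree RX phase, which is what the division by 4 just below implements. Sketch:)

    static unsigned int rx_phase_90(unsigned int clk_div)
    {
            return clk_div / 4;     /* 360 degrees == clk_div steps */
    }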
For data transmission, + * meson_mx_sdhc_execute_tuning() will find an accurate value + */ + regmap_read(host->regmap, MESON_SDHC_CLKC, &val); + rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4; + regmap_update_bits(host->regmap, MESON_SDHC_CLK2, + MESON_SDHC_CLK2_RX_CLK_PHASE, + FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE, + rx_clk_phase)); + } else { + mmc->actual_clock = 0; + } + + return 0; +} + +static void meson_mx_sdhc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + unsigned short vdd = ios->vdd; + + switch (ios->power_mode) { + case MMC_POWER_OFF: + vdd = 0; + fallthrough; + + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + host->error = mmc_regulator_set_ocr(mmc, + mmc->supply.vmmc, + vdd); + if (host->error) + return; + } + + break; + + case MMC_POWER_ON: + break; + } + + host->error = meson_mx_sdhc_set_clk(mmc, ios); + if (host->error) + return; + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + regmap_update_bits(host->regmap, MESON_SDHC_CTRL, + MESON_SDHC_CTRL_DAT_TYPE, + FIELD_PREP(MESON_SDHC_CTRL_DAT_TYPE, 0)); + break; + + case MMC_BUS_WIDTH_4: + regmap_update_bits(host->regmap, MESON_SDHC_CTRL, + MESON_SDHC_CTRL_DAT_TYPE, + FIELD_PREP(MESON_SDHC_CTRL_DAT_TYPE, 1)); + break; + + case MMC_BUS_WIDTH_8: + regmap_update_bits(host->regmap, MESON_SDHC_CTRL, + MESON_SDHC_CTRL_DAT_TYPE, + FIELD_PREP(MESON_SDHC_CTRL_DAT_TYPE, 2)); + break; + + default: + dev_err(mmc_dev(mmc), "unsupported bus width: %d\n", + ios->bus_width); + host->error = -EINVAL; + return; + } +} + +static int meson_mx_sdhc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + int dma_len; + + if (!data) + return 0; + + dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (dma_len <= 0) { + dev_err(mmc_dev(mmc), "dma_map_sg failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void meson_mx_sdhc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + + if (!host->error) + host->error = meson_mx_sdhc_map_dma(mmc, mrq); + + if (host->error) { + cmd->error = host->error; + mmc_request_done(mmc, mrq); + return; + } + + host->mrq = mrq; + + meson_mx_sdhc_start_cmd(mmc, mrq->cmd); +} + +static int meson_mx_sdhc_card_busy(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + u32 stat; + + regmap_read(host->regmap, MESON_SDHC_STAT, &stat); + return FIELD_GET(MESON_SDHC_STAT_DAT3_0, stat) == 0; +} + +static bool meson_mx_sdhc_tuning_point_matches(struct mmc_host *mmc, + u32 opcode) +{ + unsigned int i, num_matches = 0; + int ret; + + for (i = 0; i < MESON_SDHC_NUM_TUNING_TRIES; i++) { + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + num_matches++; + } + + return num_matches == MESON_SDHC_NUM_TUNING_TRIES; +} + +static int meson_mx_sdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + int div, start, len, best_start, best_len; + int curr_phase, old_phase, new_phase; + u32 val; + + len = 0; + start = 0; + best_len = 0; + + regmap_read(host->regmap, MESON_SDHC_CLK2, &val); + old_phase = FIELD_GET(MESON_SDHC_CLK2_RX_CLK_PHASE, val); + + regmap_read(host->regmap, MESON_SDHC_CLKC, &val); + div = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val); + + for (curr_phase = 0; curr_phase <= div; curr_phase++) { + regmap_update_bits(host->regmap, MESON_SDHC_CLK2, + MESON_SDHC_CLK2_RX_CLK_PHASE, + 
FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE, + curr_phase)); + + if (meson_mx_sdhc_tuning_point_matches(mmc, opcode)) { + if (!len) { + start = curr_phase; + + dev_dbg(mmc_dev(mmc), + "New RX phase window starts at %u\n", + start); + } + + len++; + } else { + if (len > best_len) { + best_start = start; + best_len = len; + + dev_dbg(mmc_dev(mmc), + "New best RX phase window: %u - %u\n", + best_start, best_start + best_len); + } + + /* reset the current window */ + len = 0; + } + } + + if (len > best_len) + /* the last window is the best (or possibly only) window */ + new_phase = start + (len / 2); + else if (best_len) + /* there was a better window than the last */ + new_phase = best_start + (best_len / 2); + else + /* no window was found at all, reset to the original phase */ + new_phase = old_phase; + + regmap_update_bits(host->regmap, MESON_SDHC_CLK2, + MESON_SDHC_CLK2_RX_CLK_PHASE, + FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE, + new_phase)); + + if (!len && !best_len) + return -EIO; + + dev_dbg(mmc_dev(mmc), "Tuned RX clock phase to %u\n", new_phase); + + return 0; +} + +static const struct mmc_host_ops meson_mx_sdhc_ops = { + .hw_reset = meson_mx_sdhc_hw_reset, + .request = meson_mx_sdhc_request, + .set_ios = meson_mx_sdhc_set_ios, + .card_busy = meson_mx_sdhc_card_busy, + .execute_tuning = meson_mx_sdhc_execute_tuning, + .get_cd = mmc_gpio_get_cd, + .get_ro = mmc_gpio_get_ro, +}; + +static void meson_mx_sdhc_request_done(struct meson_mx_sdhc_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_host *mmc = host->mmc; + + /* disable interrupts and mask all pending ones */ + regmap_update_bits(host->regmap, MESON_SDHC_ICTL, + MESON_SDHC_ICTL_ALL_IRQS, 0); + regmap_update_bits(host->regmap, MESON_SDHC_ISTA, + MESON_SDHC_ISTA_ALL_IRQS, MESON_SDHC_ISTA_ALL_IRQS); + + host->mrq = NULL; + host->cmd = NULL; + + mmc_request_done(mmc, mrq); +} + +static u32 meson_mx_sdhc_read_response(struct meson_mx_sdhc_host *host, u8 idx) +{ + u32 val; + + regmap_update_bits(host->regmap, MESON_SDHC_PDMA, + MESON_SDHC_PDMA_DMA_MODE, 0); + + regmap_update_bits(host->regmap, MESON_SDHC_PDMA, + MESON_SDHC_PDMA_PIO_RDRESP, + FIELD_PREP(MESON_SDHC_PDMA_PIO_RDRESP, idx)); + + regmap_read(host->regmap, MESON_SDHC_ARGU, &val); + + return val; +} + +static irqreturn_t meson_mx_sdhc_irq(int irq, void *data) +{ + struct meson_mx_sdhc_host *host = data; + struct mmc_command *cmd = host->cmd; + u32 ictl, ista; + + regmap_read(host->regmap, MESON_SDHC_ICTL, &ictl); + regmap_read(host->regmap, MESON_SDHC_ISTA, &ista); + + if (!(ictl & ista)) + return IRQ_NONE; + + if (ista & MESON_SDHC_ISTA_RXFIFO_FULL || + ista & MESON_SDHC_ISTA_TXFIFO_EMPTY) + cmd->error = -EIO; + else if (ista & MESON_SDHC_ISTA_RESP_ERR_CRC) + cmd->error = -EILSEQ; + else if (ista & MESON_SDHC_ISTA_RESP_TIMEOUT) + cmd->error = -ETIMEDOUT; + + if (cmd->data) { + if (ista & MESON_SDHC_ISTA_DATA_ERR_CRC) + cmd->data->error = -EILSEQ; + else if (ista & MESON_SDHC_ISTA_DATA_TIMEOUT) + cmd->data->error = -ETIMEDOUT; + } + + if (cmd->error || (cmd->data && cmd->data->error)) + dev_dbg(mmc_dev(host->mmc), "CMD%d error, ISTA: 0x%08x\n", + cmd->opcode, ista); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t meson_mx_sdhc_irq_thread(int irq, void *irq_data) +{ + struct meson_mx_sdhc_host *host = irq_data; + struct mmc_command *cmd; + u32 val; + + cmd = host->cmd; + if (WARN_ON(!cmd)) + return IRQ_HANDLED; + + if (cmd->data && !cmd->data->error) { + if (!host->platform->hardware_flush_all_cmds && + cmd->data->flags & MMC_DATA_READ) { + 
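(Aside: the tuning loop above reduces to a longest-run search over per-phase pass/fail results, programming the midpoint of the best window. A standalone model, assuming the pass/fail results have already been collected:)

    #include <stdbool.h>

    static int best_rx_phase(const bool *works, int nphases)
    {
            int start = 0, len = 0, best_start = 0, best_len = 0, i;

            for (i = 0; i < nphases; i++) {
                    if (works[i]) {
                            if (!len)
                                    start = i;
                            len++;
                            continue;
                    }
                    if (len > best_len) {
                            best_start = start;
                            best_len = len;
                    }
                    len = 0;
            }

            if (len > best_len) {   /* the last window is the best one */
                    best_start = start;
                    best_len = len;
            }

            return best_len ? best_start + best_len / 2 : -1;       /* -1: no window */
    }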
meson_mx_sdhc_wait_cmd_ready(host->mmc);
+
+			/*
+			 * If MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH was
+			 * previously 0x1 then it has to be set to 0x3. If it
+			 * was 0x0 before then it has to be set to 0x2.
+			 * Without this, reading SD cards sometimes transfers
+			 * garbage, which results in cards not being detected
+			 * due to:
+			 * unrecognised SCR structure version
+			 */
+			val = FIELD_PREP(MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH,
+					 2);
+			regmap_update_bits(host->regmap, MESON_SDHC_PDMA, val,
+					   val);
+		}
+
+		dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+			     cmd->data->sg_len, mmc_get_dma_dir(cmd->data));
+
+		cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+	}
+
+	meson_mx_sdhc_wait_cmd_ready(host->mmc);
+
+	if (cmd->flags & MMC_RSP_136) {
+		cmd->resp[0] = meson_mx_sdhc_read_response(host, 4);
+		cmd->resp[1] = meson_mx_sdhc_read_response(host, 3);
+		cmd->resp[2] = meson_mx_sdhc_read_response(host, 2);
+		cmd->resp[3] = meson_mx_sdhc_read_response(host, 1);
+	} else {
+		cmd->resp[0] = meson_mx_sdhc_read_response(host, 0);
+	}
+
+	if (cmd->error == -EIO || cmd->error == -ETIMEDOUT)
+		meson_mx_sdhc_hw_reset(host->mmc);
+	else if (cmd->data)
+		/*
+		 * Clear the FIFOs after completing data transfers to prevent
+		 * corrupting data on write access. It's not clear why this is
+		 * needed (for reads and writes), but it mimics what the BSP
+		 * kernel did.
+		 */
+		meson_mx_sdhc_clear_fifo(host->mmc);
+
+	meson_mx_sdhc_request_done(host);
+
+	return IRQ_HANDLED;
+}
+
+static void meson_mx_sdhc_init_hw_meson8(struct mmc_host *mmc)
+{
+	struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+	regmap_write(host->regmap, MESON_SDHC_MISC,
+		     FIELD_PREP(MESON_SDHC_MISC_TXSTART_THRES, 7) |
+		     FIELD_PREP(MESON_SDHC_MISC_WCRC_ERR_PATT, 5) |
+		     FIELD_PREP(MESON_SDHC_MISC_WCRC_OK_PATT, 2));
+
+	regmap_write(host->regmap, MESON_SDHC_ENHC,
+		     FIELD_PREP(MESON_SDHC_ENHC_RXFIFO_TH, 63) |
+		     MESON_SDHC_ENHC_MESON6_DMA_WR_RESP |
+		     FIELD_PREP(MESON_SDHC_ENHC_MESON6_RX_TIMEOUT, 255) |
+		     FIELD_PREP(MESON_SDHC_ENHC_SDIO_IRQ_PERIOD, 12));
+}
+
+static void meson_mx_sdhc_set_pdma_meson8(struct mmc_host *mmc)
+{
+	struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+
+	if (host->cmd->data->flags & MMC_DATA_WRITE)
+		regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+				   MESON_SDHC_PDMA_DMA_MODE |
+				   MESON_SDHC_PDMA_RD_BURST |
+				   MESON_SDHC_PDMA_TXFIFO_FILL,
+				   MESON_SDHC_PDMA_DMA_MODE |
+				   FIELD_PREP(MESON_SDHC_PDMA_RD_BURST, 31) |
+				   MESON_SDHC_PDMA_TXFIFO_FILL);
+	else
+		regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+				   MESON_SDHC_PDMA_DMA_MODE |
+				   MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH,
+				   MESON_SDHC_PDMA_DMA_MODE |
+				   FIELD_PREP(MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH,
+					      1));
+
+	if (host->cmd->data->flags & MMC_DATA_WRITE)
+		regmap_update_bits(host->regmap, MESON_SDHC_PDMA,
+				   MESON_SDHC_PDMA_RD_BURST,
+				   FIELD_PREP(MESON_SDHC_PDMA_RD_BURST, 15));
+}
+
+static void meson_mx_sdhc_wait_before_send_meson8(struct mmc_host *mmc)
+{
+	struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+	u32 val;
+	int ret;
+
+	ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, val,
+				       val == 0,
+				       MESON_SDHC_WAIT_BEFORE_SEND_SLEEP_US,
+				       MESON_SDHC_WAIT_BEFORE_SEND_TIMEOUT_US);
+	if (ret)
+		dev_warn(mmc_dev(mmc),
+			 "Failed to wait for ESTA to clear: 0x%08x\n", val);
+
+	if (host->cmd->data && host->cmd->data->flags & MMC_DATA_WRITE) {
+		ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_STAT,
+					       val, val & MESON_SDHC_STAT_TXFIFO_CNT,
+					       MESON_SDHC_WAIT_BEFORE_SEND_SLEEP_US,
+					       MESON_SDHC_WAIT_BEFORE_SEND_TIMEOUT_US);
+		if (ret)
+			dev_warn(mmc_dev(mmc),
+				 "Failed to wait 
for TX FIFO to fill\n"); + } +} + +static void meson_mx_sdhc_init_hw_meson8m2(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + + regmap_write(host->regmap, MESON_SDHC_MISC, + FIELD_PREP(MESON_SDHC_MISC_TXSTART_THRES, 6) | + FIELD_PREP(MESON_SDHC_MISC_WCRC_ERR_PATT, 5) | + FIELD_PREP(MESON_SDHC_MISC_WCRC_OK_PATT, 2)); + + regmap_write(host->regmap, MESON_SDHC_ENHC, + FIELD_PREP(MESON_SDHC_ENHC_RXFIFO_TH, 64) | + FIELD_PREP(MESON_SDHC_ENHC_MESON8M2_DEBUG, 1) | + MESON_SDHC_ENHC_MESON8M2_WRRSP_MODE | + FIELD_PREP(MESON_SDHC_ENHC_SDIO_IRQ_PERIOD, 12)); +} + +static void meson_mx_sdhc_set_pdma_meson8m2(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + + regmap_update_bits(host->regmap, MESON_SDHC_PDMA, + MESON_SDHC_PDMA_DMA_MODE, MESON_SDHC_PDMA_DMA_MODE); +} + +static void meson_mx_sdhc_init_hw(struct mmc_host *mmc) +{ + struct meson_mx_sdhc_host *host = mmc_priv(mmc); + + meson_mx_sdhc_hw_reset(mmc); + + regmap_write(host->regmap, MESON_SDHC_CTRL, + FIELD_PREP(MESON_SDHC_CTRL_RX_PERIOD, 0xf) | + FIELD_PREP(MESON_SDHC_CTRL_RX_TIMEOUT, 0x7f) | + FIELD_PREP(MESON_SDHC_CTRL_RX_ENDIAN, 0x7) | + FIELD_PREP(MESON_SDHC_CTRL_TX_ENDIAN, 0x7)); + + /* + * start with a valid divider and enable the memory (un-setting + * MESON_SDHC_CLKC_MEM_PWR_OFF). + */ + regmap_write(host->regmap, MESON_SDHC_CLKC, MESON_SDHC_CLKC_CLK_DIV); + + regmap_write(host->regmap, MESON_SDHC_CLK2, + FIELD_PREP(MESON_SDHC_CLK2_SD_CLK_PHASE, 1)); + + regmap_write(host->regmap, MESON_SDHC_PDMA, + MESON_SDHC_PDMA_DMA_URGENT | + FIELD_PREP(MESON_SDHC_PDMA_WR_BURST, 7) | + FIELD_PREP(MESON_SDHC_PDMA_TXFIFO_TH, 49) | + FIELD_PREP(MESON_SDHC_PDMA_RD_BURST, 15) | + FIELD_PREP(MESON_SDHC_PDMA_RXFIFO_TH, 7)); + + /* some initialization bits depend on the SoC: */ + host->platform->init_hw(mmc); + + /* disable and mask all interrupts: */ + regmap_write(host->regmap, MESON_SDHC_ICTL, 0); + regmap_write(host->regmap, MESON_SDHC_ISTA, MESON_SDHC_ISTA_ALL_IRQS); +} + +static int meson_mx_sdhc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct meson_mx_sdhc_host *host; + struct mmc_host *mmc; + void __iomem *base; + int ret, irq; + + mmc = mmc_alloc_host(sizeof(*host), dev); + if (!mmc) + return -ENOMEM; + + ret = devm_add_action_or_reset(dev, (void(*)(void *))mmc_free_host, + mmc); + if (ret) { + dev_err(dev, "Failed to register mmc_free_host action\n"); + return ret; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + + platform_set_drvdata(pdev, host); + + host->platform = device_get_match_data(dev); + if (!host->platform) + return -EINVAL; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + host->regmap = devm_regmap_init_mmio(dev, base, + &meson_mx_sdhc_regmap_config); + if (IS_ERR(host->regmap)) + return PTR_ERR(host->regmap); + + host->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(host->pclk)) + return PTR_ERR(host->pclk); + + /* accessing any register requires the module clock to be enabled: */ + ret = clk_prepare_enable(host->pclk); + if (ret) { + dev_err(dev, "Failed to enable 'pclk' clock\n"); + return ret; + } + + meson_mx_sdhc_init_hw(mmc); + + ret = meson_mx_sdhc_register_clkc(dev, base, host->bulk_clks); + if (ret) + goto err_disable_pclk; + + host->sd_clk = host->bulk_clks[1].clk; + + /* Get regulators and the supported OCR mask */ + ret = mmc_regulator_get_supply(mmc); + if (ret) + goto err_disable_pclk; + + mmc->max_req_size = SZ_128K; + mmc->max_seg_size = mmc->max_req_size; + 
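	/*
+	 * Clarifying note (added, not in the original patch):
+	 * FIELD_GET(mask, ~0) is an idiom for "the largest value that fits
+	 * into the field". MESON_SDHC_SEND_TOTAL_PACK is GENMASK(31, 16),
+	 * so the expression below evaluates to:
+	 *
+	 *   FIELD_GET(GENMASK(31, 16), ~0) == 0xffff
+	 *
+	 * i.e. a maximum block count of 65535.
+	 */
+	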
mmc->max_blk_count = FIELD_GET(MESON_SDHC_SEND_TOTAL_PACK, ~0); + mmc->max_blk_size = MESON_SDHC_MAX_BLK_SIZE; + mmc->max_busy_timeout = 30 * MSEC_PER_SEC; + mmc->f_min = clk_round_rate(host->sd_clk, 1); + mmc->f_max = clk_round_rate(host->sd_clk, ULONG_MAX); + mmc->max_current_180 = 300; + mmc->max_current_330 = 300; + mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_HW_RESET; + mmc->ops = &meson_mx_sdhc_ops; + + ret = mmc_of_parse(mmc); + if (ret) + goto err_disable_pclk; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_disable_pclk; + } + + ret = devm_request_threaded_irq(dev, irq, meson_mx_sdhc_irq, + meson_mx_sdhc_irq_thread, IRQF_ONESHOT, + NULL, host); + if (ret) + goto err_disable_pclk; + + ret = mmc_add_host(mmc); + if (ret) + goto err_disable_pclk; + + return 0; + +err_disable_pclk: + clk_disable_unprepare(host->pclk); + return ret; +} + +static int meson_mx_sdhc_remove(struct platform_device *pdev) +{ + struct meson_mx_sdhc_host *host = platform_get_drvdata(pdev); + + mmc_remove_host(host->mmc); + + meson_mx_sdhc_disable_clks(host->mmc); + + clk_disable_unprepare(host->pclk); + + return 0; +} + +static const struct meson_mx_sdhc_data meson_mx_sdhc_data_meson8 = { + .init_hw = meson_mx_sdhc_init_hw_meson8, + .set_pdma = meson_mx_sdhc_set_pdma_meson8, + .wait_before_send = meson_mx_sdhc_wait_before_send_meson8, + .hardware_flush_all_cmds = false, +}; + +static const struct meson_mx_sdhc_data meson_mx_sdhc_data_meson8m2 = { + .init_hw = meson_mx_sdhc_init_hw_meson8m2, + .set_pdma = meson_mx_sdhc_set_pdma_meson8m2, + .hardware_flush_all_cmds = true, +}; + +static const struct of_device_id meson_mx_sdhc_of_match[] = { + { + .compatible = "amlogic,meson8-sdhc", + .data = &meson_mx_sdhc_data_meson8 + }, + { + .compatible = "amlogic,meson8b-sdhc", + .data = &meson_mx_sdhc_data_meson8 + }, + { + .compatible = "amlogic,meson8m2-sdhc", + .data = &meson_mx_sdhc_data_meson8m2 + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, meson_mx_sdhc_of_match); + +static struct platform_driver meson_mx_sdhc_driver = { + .probe = meson_mx_sdhc_probe, + .remove = meson_mx_sdhc_remove, + .driver = { + .name = "meson-mx-sdhc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(meson_mx_sdhc_of_match), + }, +}; + +module_platform_driver(meson_mx_sdhc_driver); + +MODULE_DESCRIPTION("Meson6, Meson8, Meson8b and Meson8m2 SDHC Host Driver"); +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/meson-mx-sdhc.h b/drivers/mmc/host/meson-mx-sdhc.h new file mode 100644 index 000000000..230e8fbe6 --- /dev/null +++ b/drivers/mmc/host/meson-mx-sdhc.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2020 Martin Blumenstingl + */ + +#ifndef _MESON_MX_SDHC_H_ +#define _MESON_MX_SDHC_H_ + +#include + +#define MESON_SDHC_ARGU 0x00 + +#define MESON_SDHC_SEND 0x04 + #define MESON_SDHC_SEND_CMD_INDEX GENMASK(5, 0) + #define MESON_SDHC_SEND_CMD_HAS_RESP BIT(6) + #define MESON_SDHC_SEND_CMD_HAS_DATA BIT(7) + #define MESON_SDHC_SEND_RESP_LEN BIT(8) + #define MESON_SDHC_SEND_RESP_NO_CRC BIT(9) + #define MESON_SDHC_SEND_DATA_DIR BIT(10) + #define MESON_SDHC_SEND_DATA_STOP BIT(11) + #define MESON_SDHC_SEND_R1B BIT(12) + #define MESON_SDHC_SEND_TOTAL_PACK GENMASK(31, 16) + +#define MESON_SDHC_CTRL 0x08 + #define MESON_SDHC_CTRL_DAT_TYPE GENMASK(1, 0) + #define MESON_SDHC_CTRL_DDR_MODE BIT(2) + #define MESON_SDHC_CTRL_TX_CRC_NOCHECK BIT(3) + #define MESON_SDHC_CTRL_PACK_LEN GENMASK(12, 4) + #define 
MESON_SDHC_CTRL_RX_TIMEOUT GENMASK(19, 13) + #define MESON_SDHC_CTRL_RX_PERIOD GENMASK(23, 20) + #define MESON_SDHC_CTRL_RX_ENDIAN GENMASK(26, 24) + #define MESON_SDHC_CTRL_SDIO_IRQ_MODE BIT(27) + #define MESON_SDHC_CTRL_DAT0_IRQ_SEL BIT(28) + #define MESON_SDHC_CTRL_TX_ENDIAN GENMASK(31, 29) + +#define MESON_SDHC_STAT 0x0c + #define MESON_SDHC_STAT_CMD_BUSY BIT(0) + #define MESON_SDHC_STAT_DAT3_0 GENMASK(4, 1) + #define MESON_SDHC_STAT_CMD BIT(5) + #define MESON_SDHC_STAT_RXFIFO_CNT GENMASK(12, 6) + #define MESON_SDHC_STAT_TXFIFO_CNT GENMASK(19, 13) + #define MESON_SDHC_STAT_DAT7_4 GENMASK(23, 20) + +#define MESON_SDHC_CLKC 0x10 + #define MESON_SDHC_CLKC_CLK_DIV GENMASK(11, 0) + #define MESON_SDHC_CLKC_CLK_JIC BIT(24) + #define MESON_SDHC_CLKC_MEM_PWR_OFF GENMASK(26, 25) + +#define MESON_SDHC_ADDR 0x14 + +#define MESON_SDHC_PDMA 0x18 + #define MESON_SDHC_PDMA_DMA_MODE BIT(0) + #define MESON_SDHC_PDMA_PIO_RDRESP GENMASK(3, 1) + #define MESON_SDHC_PDMA_DMA_URGENT BIT(4) + #define MESON_SDHC_PDMA_WR_BURST GENMASK(9, 5) + #define MESON_SDHC_PDMA_RD_BURST GENMASK(14, 10) + #define MESON_SDHC_PDMA_RXFIFO_TH GENMASK(21, 15) + #define MESON_SDHC_PDMA_TXFIFO_TH GENMASK(28, 22) + #define MESON_SDHC_PDMA_RXFIFO_MANUAL_FLUSH GENMASK(30, 29) + #define MESON_SDHC_PDMA_TXFIFO_FILL BIT(31) + +#define MESON_SDHC_MISC 0x1c + #define MESON_SDHC_MISC_WCRC_ERR_PATT GENMASK(6, 4) + #define MESON_SDHC_MISC_WCRC_OK_PATT GENMASK(9, 7) + #define MESON_SDHC_MISC_BURST_NUM GENMASK(21, 16) + #define MESON_SDHC_MISC_THREAD_ID GENMASK(27, 22) + #define MESON_SDHC_MISC_MANUAL_STOP BIT(28) + #define MESON_SDHC_MISC_TXSTART_THRES GENMASK(31, 29) + +#define MESON_SDHC_DATA 0x20 + +#define MESON_SDHC_ICTL 0x24 + #define MESON_SDHC_ICTL_RESP_OK BIT(0) + #define MESON_SDHC_ICTL_RESP_TIMEOUT BIT(1) + #define MESON_SDHC_ICTL_RESP_ERR_CRC BIT(2) + #define MESON_SDHC_ICTL_RESP_OK_NOCLEAR BIT(3) + #define MESON_SDHC_ICTL_DATA_1PACK_OK BIT(4) + #define MESON_SDHC_ICTL_DATA_TIMEOUT BIT(5) + #define MESON_SDHC_ICTL_DATA_ERR_CRC BIT(6) + #define MESON_SDHC_ICTL_DATA_XFER_OK BIT(7) + #define MESON_SDHC_ICTL_RX_HIGHER BIT(8) + #define MESON_SDHC_ICTL_RX_LOWER BIT(9) + #define MESON_SDHC_ICTL_DAT1_IRQ BIT(10) + #define MESON_SDHC_ICTL_DMA_DONE BIT(11) + #define MESON_SDHC_ICTL_RXFIFO_FULL BIT(12) + #define MESON_SDHC_ICTL_TXFIFO_EMPTY BIT(13) + #define MESON_SDHC_ICTL_ADDI_DAT1_IRQ BIT(14) + #define MESON_SDHC_ICTL_ALL_IRQS GENMASK(14, 0) + #define MESON_SDHC_ICTL_DAT1_IRQ_DELAY GENMASK(17, 16) + +#define MESON_SDHC_ISTA 0x28 + #define MESON_SDHC_ISTA_RESP_OK BIT(0) + #define MESON_SDHC_ISTA_RESP_TIMEOUT BIT(1) + #define MESON_SDHC_ISTA_RESP_ERR_CRC BIT(2) + #define MESON_SDHC_ISTA_RESP_OK_NOCLEAR BIT(3) + #define MESON_SDHC_ISTA_DATA_1PACK_OK BIT(4) + #define MESON_SDHC_ISTA_DATA_TIMEOUT BIT(5) + #define MESON_SDHC_ISTA_DATA_ERR_CRC BIT(6) + #define MESON_SDHC_ISTA_DATA_XFER_OK BIT(7) + #define MESON_SDHC_ISTA_RX_HIGHER BIT(8) + #define MESON_SDHC_ISTA_RX_LOWER BIT(9) + #define MESON_SDHC_ISTA_DAT1_IRQ BIT(10) + #define MESON_SDHC_ISTA_DMA_DONE BIT(11) + #define MESON_SDHC_ISTA_RXFIFO_FULL BIT(12) + #define MESON_SDHC_ISTA_TXFIFO_EMPTY BIT(13) + #define MESON_SDHC_ISTA_ADDI_DAT1_IRQ BIT(14) + #define MESON_SDHC_ISTA_ALL_IRQS GENMASK(14, 0) + +#define MESON_SDHC_SRST 0x2c + #define MESON_SDHC_SRST_MAIN_CTRL BIT(0) + #define MESON_SDHC_SRST_RXFIFO BIT(1) + #define MESON_SDHC_SRST_TXFIFO BIT(2) + #define MESON_SDHC_SRST_DPHY_RX BIT(3) + #define MESON_SDHC_SRST_DPHY_TX BIT(4) + #define MESON_SDHC_SRST_DMA_IF BIT(5) + +#define 
MESON_SDHC_ESTA 0x30 + #define MESON_SDHC_ESTA_11_13 GENMASK(13, 11) + +#define MESON_SDHC_ENHC 0x34 + #define MESON_SDHC_ENHC_MESON8M2_WRRSP_MODE BIT(0) + #define MESON_SDHC_ENHC_MESON8M2_CHK_WRRSP BIT(1) + #define MESON_SDHC_ENHC_MESON8M2_CHK_DMA BIT(2) + #define MESON_SDHC_ENHC_MESON8M2_DEBUG GENMASK(5, 3) + #define MESON_SDHC_ENHC_MESON6_RX_TIMEOUT GENMASK(7, 0) + #define MESON_SDHC_ENHC_MESON6_DMA_RD_RESP BIT(16) + #define MESON_SDHC_ENHC_MESON6_DMA_WR_RESP BIT(17) + #define MESON_SDHC_ENHC_SDIO_IRQ_PERIOD GENMASK(15, 8) + #define MESON_SDHC_ENHC_RXFIFO_TH GENMASK(24, 18) + #define MESON_SDHC_ENHC_TXFIFO_TH GENMASK(31, 25) + +#define MESON_SDHC_CLK2 0x38 + #define MESON_SDHC_CLK2_RX_CLK_PHASE GENMASK(11, 0) + #define MESON_SDHC_CLK2_SD_CLK_PHASE GENMASK(23, 12) + +struct clk_bulk_data; + +int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base, + struct clk_bulk_data *clk_bulk_data); + +#endif /* _MESON_MX_SDHC_H_ */ diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c new file mode 100644 index 000000000..264aae2a2 --- /dev/null +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller + * + * Copyright (C) 2015 Endless Mobile, Inc. + * Author: Carlo Caione + * Copyright (C) 2017 Martin Blumenstingl + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define MESON_MX_SDIO_ARGU 0x00 + +#define MESON_MX_SDIO_SEND 0x04 + #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0) + #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8) + #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16) + #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17) + #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18) + #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19) + #define MESON_MX_SDIO_SEND_DATA BIT(20) + #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21) + #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24) + +#define MESON_MX_SDIO_CONF 0x08 + #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0 + #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10 + #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10) + #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11) + #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12) + #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18) + #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19) + #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20) + #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21) + #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23) + #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29) + +#define MESON_MX_SDIO_IRQS 0x0c + #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0) + #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4) + #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5) + #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6) + #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7) + #define MESON_MX_SDIO_IRQS_IF_INT BIT(8) + #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9) + #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12) + #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16) + #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17) + #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18) + #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19) + +#define 
MESON_MX_SDIO_IRQC 0x10 + #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3) + #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4) + #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10) + #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) + #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) + #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) + +#define MESON_MX_SDIO_MULT 0x14 + #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0) + #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2) + #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3) + #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4) + #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5) + #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8) + #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10) + #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11) + #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12) + +#define MESON_MX_SDIO_ADDR 0x18 + +#define MESON_MX_SDIO_EXT 0x1c + #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16) + +#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024) +#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1) +#define MESON_MX_SDIO_MAX_SLOTS 3 + +struct meson_mx_mmc_host { + struct device *controller_dev; + + struct clk *parent_clk; + struct clk *core_clk; + struct clk_divider cfg_div; + struct clk *cfg_div_clk; + struct clk_fixed_factor fixed_factor; + struct clk *fixed_factor_clk; + + void __iomem *base; + int irq; + spinlock_t irq_lock; + + struct timer_list cmd_timeout; + + unsigned int slot_id; + struct mmc_host *mmc; + + struct mmc_request *mrq; + struct mmc_command *cmd; + int error; +}; + +static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask, + u32 val) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 regval; + + regval = readl(host->base + reg); + regval &= ~mask; + regval |= (val & mask); + + writel(regval, host->base + reg); +} + +static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host) +{ + writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC); + udelay(2); +} + +static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) + return cmd->mrq->cmd; + else if (mmc_op_multi(cmd->opcode) && + (!cmd->mrq->sbc || cmd->error || cmd->data->error)) + return cmd->mrq->stop; + else + return NULL; +} + +static void meson_mx_mmc_start_cmd(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + unsigned int pack_size; + unsigned long irqflags, timeout; + u32 mult, send = 0, ext = 0; + + host->cmd = cmd; + + if (cmd->busy_timeout) + timeout = msecs_to_jiffies(cmd->busy_timeout); + else + timeout = msecs_to_jiffies(1000); + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + case MMC_RSP_R3: + /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */ + send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45); + break; + case MMC_RSP_R2: + /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */ + send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133); + send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8; + break; + default: + break; + } + + if (!(cmd->flags & MMC_RSP_CRC)) + send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7; + + if (cmd->flags & MMC_RSP_BUSY) + send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY; + + if (cmd->data) { + send |= 
FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK, + (cmd->data->blocks - 1)); + + pack_size = cmd->data->blksz * BITS_PER_BYTE; + if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) + pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4; + else + pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1; + + ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK, + pack_size); + + if (cmd->data->flags & MMC_DATA_WRITE) + send |= MESON_MX_SDIO_SEND_DATA; + else + send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA; + + cmd->data->bytes_xfered = 0; + } + + send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK, + (0x40 | cmd->opcode)); + + spin_lock_irqsave(&host->irq_lock, irqflags); + + mult = readl(host->base + MESON_MX_SDIO_MULT); + mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK; + mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id); + mult |= BIT(31); + writel(mult, host->base + MESON_MX_SDIO_MULT); + + /* enable the CMD done interrupt */ + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC, + MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN, + MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN); + + /* clear pending interrupts */ + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS, + MESON_MX_SDIO_IRQS_CMD_INT, + MESON_MX_SDIO_IRQS_CMD_INT); + + writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU); + writel(ext, host->base + MESON_MX_SDIO_EXT); + writel(send, host->base + MESON_MX_SDIO_SEND); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + mod_timer(&host->cmd_timeout, jiffies + timeout); +} + +static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host) +{ + struct mmc_request *mrq; + + mrq = host->mrq; + + if (host->cmd->error) + meson_mx_mmc_soft_reset(host); + + host->mrq = NULL; + host->cmd = NULL; + + mmc_request_done(host->mmc, mrq); +} + +static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + unsigned short vdd = ios->vdd; + unsigned long clk_rate = ios->clock; + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF, + MESON_MX_SDIO_CONF_BUS_WIDTH, 0); + break; + + case MMC_BUS_WIDTH_4: + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF, + MESON_MX_SDIO_CONF_BUS_WIDTH, + MESON_MX_SDIO_CONF_BUS_WIDTH); + break; + + case MMC_BUS_WIDTH_8: + default: + dev_err(mmc_dev(mmc), "unsupported bus width: %d\n", + ios->bus_width); + host->error = -EINVAL; + return; + } + + host->error = clk_set_rate(host->cfg_div_clk, ios->clock); + if (host->error) { + dev_warn(mmc_dev(mmc), + "failed to set MMC clock to %lu: %d\n", + clk_rate, host->error); + return; + } + + mmc->actual_clock = clk_get_rate(host->cfg_div_clk); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + vdd = 0; + fallthrough; + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + host->error = mmc_regulator_set_ocr(mmc, + mmc->supply.vmmc, + vdd); + if (host->error) + return; + } + break; + } +} + +static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + int dma_len; + struct scatterlist *sg; + + if (!data) + return 0; + + sg = data->sg; + if (sg->offset & 3 || sg->length & 3) { + dev_err(mmc_dev(mmc), + "unaligned scatterlist: offset %x length %d\n", + sg->offset, sg->length); + return -EINVAL; + } + + dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (dma_len <= 0) { + dev_err(mmc_dev(mmc), "dma_map_sg failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request 
*mrq) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + + if (!host->error) + host->error = meson_mx_mmc_map_dma(mmc, mrq); + + if (host->error) { + cmd->error = host->error; + mmc_request_done(mmc, mrq); + return; + } + + host->mrq = mrq; + + if (mrq->data) + writel(sg_dma_address(mrq->data->sg), + host->base + MESON_MX_SDIO_ADDR); + + if (mrq->sbc) + meson_mx_mmc_start_cmd(mmc, mrq->sbc); + else + meson_mx_mmc_start_cmd(mmc, mrq->cmd); +} + +static void meson_mx_mmc_read_response(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 mult; + int i, resp[4]; + + mult = readl(host->base + MESON_MX_SDIO_MULT); + mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX; + mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK; + mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0); + writel(mult, host->base + MESON_MX_SDIO_MULT); + + if (cmd->flags & MMC_RSP_136) { + for (i = 0; i <= 3; i++) + resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU); + cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff); + cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff); + cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff); + cmd->resp[3] = (resp[3] << 8); + } else if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU); + } +} + +static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host, + u32 irqs, u32 send) +{ + struct mmc_command *cmd = host->cmd; + + /* + * NOTE: even though it shouldn't happen we sometimes get command + * interrupts twice (at least this is what it looks like). Ideally + * we find out why this happens and warn here as soon as it occurs. + */ + if (!cmd) + return IRQ_HANDLED; + + cmd->error = 0; + meson_mx_mmc_read_response(host->mmc, cmd); + + if (cmd->data) { + if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) || + (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK))) + cmd->error = -EILSEQ; + } else { + if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) || + (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7))) + cmd->error = -EILSEQ; + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t meson_mx_mmc_irq(int irq, void *data) +{ + struct meson_mx_mmc_host *host = (void *) data; + u32 irqs, send; + unsigned long irqflags; + irqreturn_t ret; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + irqs = readl(host->base + MESON_MX_SDIO_IRQS); + send = readl(host->base + MESON_MX_SDIO_SEND); + + if (irqs & MESON_MX_SDIO_IRQS_CMD_INT) + ret = meson_mx_mmc_process_cmd_irq(host, irqs, send); + else + ret = IRQ_HANDLED; + + /* finally ACK all pending interrupts */ + writel(irqs, host->base + MESON_MX_SDIO_IRQS); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + return ret; +} + +static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data) +{ + struct meson_mx_mmc_host *host = (void *) irq_data; + struct mmc_command *cmd = host->cmd, *next_cmd; + + if (WARN_ON(!cmd)) + return IRQ_HANDLED; + + del_timer_sync(&host->cmd_timeout); + + if (cmd->data) { + dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg, + cmd->data->sg_len, + mmc_get_dma_dir(cmd->data)); + + cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks; + } + + next_cmd = meson_mx_mmc_get_next_cmd(cmd); + if (next_cmd) + meson_mx_mmc_start_cmd(host->mmc, next_cmd); + else + meson_mx_mmc_request_done(host); + + return IRQ_HANDLED; +} + +static void meson_mx_mmc_timeout(struct timer_list *t) +{ + struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout); + 
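	/*
+	 * Note added for clarity: from_timer() is a container_of() wrapper,
+	 * so the line above recovers the meson_mx_mmc_host structure that
+	 * embeds this timer_list, roughly equivalent to:
+	 *
+	 *   host = container_of(t, struct meson_mx_mmc_host, cmd_timeout);
+	 */
+	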
unsigned long irqflags; + u32 irqc; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + /* disable the CMD interrupt */ + irqc = readl(host->base + MESON_MX_SDIO_IRQC); + irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN; + writel(irqc, host->base + MESON_MX_SDIO_IRQC); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + /* + * skip the timeout handling if the interrupt handler already processed + * the command. + */ + if (!host->cmd) + return; + + dev_dbg(mmc_dev(host->mmc), + "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n", + host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS), + readl(host->base + MESON_MX_SDIO_ARGU)); + + host->cmd->error = -ETIMEDOUT; + + meson_mx_mmc_request_done(host); +} + +static struct mmc_host_ops meson_mx_mmc_ops = { + .request = meson_mx_mmc_request, + .set_ios = meson_mx_mmc_set_ios, + .get_cd = mmc_gpio_get_cd, + .get_ro = mmc_gpio_get_ro, +}; + +static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) +{ + struct device_node *slot_node; + struct platform_device *pdev; + + /* + * TODO: the MMC core framework currently does not support + * controllers with multiple slots properly. So we only register + * the first slot for now + */ + slot_node = of_get_compatible_child(parent->of_node, "mmc-slot"); + if (!slot_node) { + dev_warn(parent, "no 'mmc-slot' sub-node found\n"); + return ERR_PTR(-ENOENT); + } + + pdev = of_platform_device_create(slot_node, NULL, parent); + of_node_put(slot_node); + + return pdev; +} + +static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *slot_dev = mmc_dev(mmc); + int ret; + + if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) { + dev_err(slot_dev, "missing 'reg' property\n"); + return -EINVAL; + } + + if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) { + dev_err(slot_dev, "invalid 'reg' property value %d\n", + host->slot_id); + return -EINVAL; + } + + /* Get regulators and the supported OCR mask */ + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + + mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE; + mmc->max_seg_size = mmc->max_req_size; + mmc->max_blk_count = + FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK, + 0xffffffff); + mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK, + 0xffffffff); + mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS); + mmc->max_blk_size /= BITS_PER_BYTE; + + /* Get the min and max supported clock rates */ + mmc->f_min = clk_round_rate(host->cfg_div_clk, 1); + mmc->f_max = clk_round_rate(host->cfg_div_clk, + clk_get_rate(host->parent_clk)); + + mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; + mmc->ops = &meson_mx_mmc_ops; + + ret = mmc_of_parse(mmc); + if (ret) + return ret; + + ret = mmc_add_host(mmc); + if (ret) + return ret; + + return 0; +} + +static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host) +{ + struct clk_init_data init; + const char *clk_div_parent, *clk_fixed_factor_parent; + + clk_fixed_factor_parent = __clk_get_name(host->parent_clk); + init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, + "%s#fixed_factor", + dev_name(host->controller_dev)); + if (!init.name) + return -ENOMEM; + + init.ops = &clk_fixed_factor_ops; + init.flags = 0; + init.parent_names = &clk_fixed_factor_parent; + init.num_parents = 1; + host->fixed_factor.div = 2; + host->fixed_factor.mult = 1; + host->fixed_factor.hw.init = &init; + + host->fixed_factor_clk = devm_clk_register(host->controller_dev, + &host->fixed_factor.hw); + 
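	/*
+	 * Added sketch (not from the original patch) of the clock chain this
+	 * function builds, with the "clkin" parent taken from the probe code
+	 * below:
+	 *
+	 *   clkin --> fixed_factor (divide by 2) --> cfg_div --> MMC clock
+	 *
+	 * cfg_div is backed by the CMD_CLK_DIV bits of the CONF register, so
+	 * clk_set_rate() on cfg_div_clk programs the divider hardware.
+	 */
+	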
if (WARN_ON(IS_ERR(host->fixed_factor_clk))) + return PTR_ERR(host->fixed_factor_clk); + + clk_div_parent = __clk_get_name(host->fixed_factor_clk); + init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, + "%s#div", dev_name(host->controller_dev)); + if (!init.name) + return -ENOMEM; + + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = &clk_div_parent; + init.num_parents = 1; + host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF; + host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT; + host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH; + host->cfg_div.hw.init = &init; + host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO; + + host->cfg_div_clk = devm_clk_register(host->controller_dev, + &host->cfg_div.hw); + if (WARN_ON(IS_ERR(host->cfg_div_clk))) + return PTR_ERR(host->cfg_div_clk); + + return 0; +} + +static int meson_mx_mmc_probe(struct platform_device *pdev) +{ + struct platform_device *slot_pdev; + struct mmc_host *mmc; + struct meson_mx_mmc_host *host; + int ret, irq; + u32 conf; + + slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev); + if (!slot_pdev) + return -ENODEV; + else if (IS_ERR(slot_pdev)) + return PTR_ERR(slot_pdev); + + mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto error_unregister_slot_pdev; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->controller_dev = &pdev->dev; + + spin_lock_init(&host->irq_lock); + timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0); + + platform_set_drvdata(pdev, host); + + host->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto error_free_mmc; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto error_free_mmc; + } + + ret = devm_request_threaded_irq(host->controller_dev, irq, + meson_mx_mmc_irq, + meson_mx_mmc_irq_thread, IRQF_ONESHOT, + NULL, host); + if (ret) + goto error_free_mmc; + + host->core_clk = devm_clk_get(host->controller_dev, "core"); + if (IS_ERR(host->core_clk)) { + ret = PTR_ERR(host->core_clk); + goto error_free_mmc; + } + + host->parent_clk = devm_clk_get(host->controller_dev, "clkin"); + if (IS_ERR(host->parent_clk)) { + ret = PTR_ERR(host->parent_clk); + goto error_free_mmc; + } + + ret = meson_mx_mmc_register_clks(host); + if (ret) + goto error_free_mmc; + + ret = clk_prepare_enable(host->core_clk); + if (ret) { + dev_err(host->controller_dev, "Failed to enable core clock\n"); + goto error_free_mmc; + } + + ret = clk_prepare_enable(host->cfg_div_clk); + if (ret) { + dev_err(host->controller_dev, "Failed to enable MMC clock\n"); + goto error_disable_core_clk; + } + + conf = 0; + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2); + writel(conf, host->base + MESON_MX_SDIO_CONF); + + meson_mx_mmc_soft_reset(host); + + ret = meson_mx_mmc_add_host(host); + if (ret) + goto error_disable_clks; + + return 0; + +error_disable_clks: + clk_disable_unprepare(host->cfg_div_clk); +error_disable_core_clk: + clk_disable_unprepare(host->core_clk); +error_free_mmc: + mmc_free_host(mmc); +error_unregister_slot_pdev: + of_platform_device_destroy(&slot_pdev->dev, NULL); + return ret; +} + +static int meson_mx_mmc_remove(struct platform_device *pdev) +{ + struct meson_mx_mmc_host *host = platform_get_drvdata(pdev); + struct device *slot_dev = 
mmc_dev(host->mmc);
+
+	del_timer_sync(&host->cmd_timeout);
+
+	mmc_remove_host(host->mmc);
+
+	of_platform_device_destroy(slot_dev, NULL);
+
+	clk_disable_unprepare(host->cfg_div_clk);
+	clk_disable_unprepare(host->core_clk);
+
+	mmc_free_host(host->mmc);
+
+	return 0;
+}
+
+static const struct of_device_id meson_mx_mmc_of_match[] = {
+	{ .compatible = "amlogic,meson8-sdio", },
+	{ .compatible = "amlogic,meson8b-sdio", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
+
+static struct platform_driver meson_mx_mmc_driver = {
+	.probe   = meson_mx_mmc_probe,
+	.remove  = meson_mx_mmc_remove,
+	.driver = {
+		.name = "meson-mx-sdio",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.of_match_table = of_match_ptr(meson_mx_mmc_of_match),
+	},
+};
+
+module_platform_driver(meson_mx_mmc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
+MODULE_AUTHOR("Carlo Caione ");
+MODULE_AUTHOR("Martin Blumenstingl ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
new file mode 100644
index 000000000..9d35453e7
--- /dev/null
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * MMC software queue support based on command queue interfaces
+ *
+ * Copyright (C) 2019 Linaro, Inc.
+ * Author: Baolin Wang 
+ */
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+
+#include "mmc_hsq.h"
+
+#define HSQ_NUM_SLOTS	64
+#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS
+
+static void mmc_hsq_retry_handler(struct work_struct *work)
+{
+	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
+	struct mmc_host *mmc = hsq->mmc;
+
+	mmc->ops->request(mmc, hsq->mrq);
+}
+
+static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
+{
+	struct mmc_host *mmc = hsq->mmc;
+	struct hsq_slot *slot;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&hsq->lock, flags);
+
+	/* Make sure we are not already running a request now */
+	if (hsq->mrq || hsq->recovery_halt) {
+		spin_unlock_irqrestore(&hsq->lock, flags);
+		return;
+	}
+
+	/* Make sure there are remaining requests that need to be pumped */
+	if (!hsq->qcnt || !hsq->enabled) {
+		spin_unlock_irqrestore(&hsq->lock, flags);
+		return;
+	}
+
+	slot = &hsq->slot[hsq->next_tag];
+	hsq->mrq = slot->mrq;
+	hsq->qcnt--;
+
+	spin_unlock_irqrestore(&hsq->lock, flags);
+
+	if (mmc->ops->request_atomic)
+		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
+	else
+		mmc->ops->request(mmc, hsq->mrq);
+
+	/*
+	 * If request_atomic() returns BUSY, the card may be busy now, so we
+	 * should switch to a non-atomic context to try again for this unusual
+	 * case, to avoid time-consuming operations in the atomic context.
+	 *
+	 * Note: we just give a warning for other error cases, since the host
+	 * driver will handle them.
+	 */
+	if (ret == -EBUSY)
+		schedule_work(&hsq->retry_work);
+	else
+		WARN_ON_ONCE(ret);
+}
+
+static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
+{
+	struct hsq_slot *slot;
+	int tag;
+
+	/*
+	 * If there are no remaining requests in the software queue, then set
+	 * an invalid tag.
+	 */
+	if (!remains) {
+		hsq->next_tag = HSQ_INVALID_TAG;
+		return;
+	}
+
+	/*
+	 * Increase the next tag and check if the corresponding request is
+	 * available; if yes, then we have found a candidate request.
+	 */
+	if (++hsq->next_tag != HSQ_INVALID_TAG) {
+		slot = &hsq->slot[hsq->next_tag];
+		if (slot->mrq)
+			return;
+	}
+
+	/* Otherwise we should iterate over all slots to find an available tag. */
+	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+		slot = &hsq->slot[tag];
+		if (slot->mrq)
+			break;
+	}
+
+	if (tag == HSQ_NUM_SLOTS)
+		tag = HSQ_INVALID_TAG;
+
+	hsq->next_tag = tag;
+}
+
+static void mmc_hsq_post_request(struct mmc_hsq *hsq)
+{
+	unsigned long flags;
+	int remains;
+
+	spin_lock_irqsave(&hsq->lock, flags);
+
+	remains = hsq->qcnt;
+	hsq->mrq = NULL;
+
+	/* Update the next available tag to be queued. */
+	mmc_hsq_update_next_tag(hsq, remains);
+
+	if (hsq->waiting_for_idle && !remains) {
+		hsq->waiting_for_idle = false;
+		wake_up(&hsq->wait_queue);
+	}
+
+	/* Do not pump a new request in recovery mode. */
+	if (hsq->recovery_halt) {
+		spin_unlock_irqrestore(&hsq->lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&hsq->lock, flags);
+
+	/*
+	 * Try to pump a new request to the host controller as fast as
+	 * possible after completing the previous request.
+	 */
+	if (remains > 0)
+		mmc_hsq_pump_requests(hsq);
+}
+
+/**
+ * mmc_hsq_finalize_request - finalize one request if the request is done
+ * @mmc: the host controller
+ * @mrq: the request that needs to be finalized
+ *
+ * Return true if we finalized the corresponding request in the software
+ * queue, otherwise return false.
+ */
+bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct mmc_hsq *hsq = mmc->cqe_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hsq->lock, flags);
+
+	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
+		spin_unlock_irqrestore(&hsq->lock, flags);
+		return false;
+	}
+
+	/*
+	 * Clear the completed slot's request to make room for a new request.
+	 */
+	hsq->slot[hsq->next_tag].mrq = NULL;
+
+	spin_unlock_irqrestore(&hsq->lock, flags);
+
+	mmc_cqe_request_done(mmc, hsq->mrq);
+
+	mmc_hsq_post_request(hsq);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
+
+static void mmc_hsq_recovery_start(struct mmc_host *mmc)
+{
+	struct mmc_hsq *hsq = mmc->cqe_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hsq->lock, flags);
+
+	hsq->recovery_halt = true;
+
+	spin_unlock_irqrestore(&hsq->lock, flags);
+}
+
+static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
+{
+	struct mmc_hsq *hsq = mmc->cqe_private;
+	int remains;
+
+	spin_lock_irq(&hsq->lock);
+
+	hsq->recovery_halt = false;
+	remains = hsq->qcnt;
+
+	spin_unlock_irq(&hsq->lock);
+
+	/*
+	 * Try to pump new requests if there are requests pending in the
+	 * software queue after finishing recovery.
+	 */
+	if (remains > 0)
+		mmc_hsq_pump_requests(hsq);
+}
+
+static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct mmc_hsq *hsq = mmc->cqe_private;
+	int tag = mrq->tag;
+
+	spin_lock_irq(&hsq->lock);
+
+	if (!hsq->enabled) {
+		spin_unlock_irq(&hsq->lock);
+		return -ESHUTDOWN;
+	}
+
+	/* Do not queue any new requests in recovery mode. */
+	if (hsq->recovery_halt) {
+		spin_unlock_irq(&hsq->lock);
+		return -EBUSY;
+	}
+
+	hsq->slot[tag].mrq = mrq;
+
+	/*
+	 * Set the next tag to the current request's tag if there is no
+	 * available next tag.
+	 */
+	if (hsq->next_tag == HSQ_INVALID_TAG)
+		hsq->next_tag = tag;
+
+	hsq->qcnt++;
+
+	spin_unlock_irq(&hsq->lock);
+
+	mmc_hsq_pump_requests(hsq);
+
+	return 0;
+}
+
+static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	if (mmc->ops->post_req)
+		mmc->ops->post_req(mmc, mrq, 0);
+}
+
+static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
+{
+	bool is_idle;
+
+	spin_lock_irq(&hsq->lock);
+
+	is_idle = (!hsq->mrq && !hsq->qcnt) ||
+		hsq->recovery_halt;
+
+	*ret = hsq->recovery_halt ?
-EBUSY : 0; + hsq->waiting_for_idle = !is_idle; + + spin_unlock_irq(&hsq->lock); + + return is_idle; +} + +static int mmc_hsq_wait_for_idle(struct mmc_host *mmc) +{ + struct mmc_hsq *hsq = mmc->cqe_private; + int ret; + + wait_event(hsq->wait_queue, + mmc_hsq_queue_is_idle(hsq, &ret)); + + return ret; +} + +static void mmc_hsq_disable(struct mmc_host *mmc) +{ + struct mmc_hsq *hsq = mmc->cqe_private; + u32 timeout = 500; + int ret; + + spin_lock_irq(&hsq->lock); + + if (!hsq->enabled) { + spin_unlock_irq(&hsq->lock); + return; + } + + spin_unlock_irq(&hsq->lock); + + ret = wait_event_timeout(hsq->wait_queue, + mmc_hsq_queue_is_idle(hsq, &ret), + msecs_to_jiffies(timeout)); + if (ret == 0) { + pr_warn("could not stop mmc software queue\n"); + return; + } + + spin_lock_irq(&hsq->lock); + + hsq->enabled = false; + + spin_unlock_irq(&hsq->lock); +} + +static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card) +{ + struct mmc_hsq *hsq = mmc->cqe_private; + + spin_lock_irq(&hsq->lock); + + if (hsq->enabled) { + spin_unlock_irq(&hsq->lock); + return -EBUSY; + } + + hsq->enabled = true; + + spin_unlock_irq(&hsq->lock); + + return 0; +} + +static const struct mmc_cqe_ops mmc_hsq_ops = { + .cqe_enable = mmc_hsq_enable, + .cqe_disable = mmc_hsq_disable, + .cqe_request = mmc_hsq_request, + .cqe_post_req = mmc_hsq_post_req, + .cqe_wait_for_idle = mmc_hsq_wait_for_idle, + .cqe_recovery_start = mmc_hsq_recovery_start, + .cqe_recovery_finish = mmc_hsq_recovery_finish, +}; + +int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc) +{ + hsq->num_slots = HSQ_NUM_SLOTS; + hsq->next_tag = HSQ_INVALID_TAG; + + hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots, + sizeof(struct hsq_slot), GFP_KERNEL); + if (!hsq->slot) + return -ENOMEM; + + hsq->mmc = mmc; + hsq->mmc->cqe_private = hsq; + mmc->cqe_ops = &mmc_hsq_ops; + + INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler); + spin_lock_init(&hsq->lock); + init_waitqueue_head(&hsq->wait_queue); + + return 0; +} +EXPORT_SYMBOL_GPL(mmc_hsq_init); + +void mmc_hsq_suspend(struct mmc_host *mmc) +{ + mmc_hsq_disable(mmc); +} +EXPORT_SYMBOL_GPL(mmc_hsq_suspend); + +int mmc_hsq_resume(struct mmc_host *mmc) +{ + return mmc_hsq_enable(mmc, NULL); +} +EXPORT_SYMBOL_GPL(mmc_hsq_resume); + +MODULE_DESCRIPTION("MMC Host Software Queue support"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h new file mode 100644 index 000000000..ffdd9cd17 --- /dev/null +++ b/drivers/mmc/host/mmc_hsq.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_MMC_HSQ_H +#define LINUX_MMC_HSQ_H + +struct hsq_slot { + struct mmc_request *mrq; +}; + +struct mmc_hsq { + struct mmc_host *mmc; + struct mmc_request *mrq; + wait_queue_head_t wait_queue; + struct hsq_slot *slot; + spinlock_t lock; + struct work_struct retry_work; + + int next_tag; + int num_slots; + int qcnt; + + bool enabled; + bool waiting_for_idle; + bool recovery_halt; +}; + +int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc); +void mmc_hsq_suspend(struct mmc_host *mmc); +int mmc_hsq_resume(struct mmc_host *mmc); +bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq); + +#endif diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c new file mode 100644 index 000000000..1d814919e --- /dev/null +++ b/drivers/mmc/host/mmc_spi.c @@ -0,0 +1,1546 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Access SD/MMC cards through SPI master controllers + * + * (C) Copyright 2005, Intec Automation, + * Mike Lavender 
(mike@steroidmicros) + * (C) Copyright 2006-2007, David Brownell + * (C) Copyright 2007, Axis Communications, + * Hans-Peter Nilsson (hp@axis.com) + * (C) Copyright 2007, ATRON electronic GmbH, + * Jan Nikitenko + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include /* for R1_SPI_* bit values */ +#include + +#include +#include + +#include + + +/* NOTES: + * + * - For now, we won't try to interoperate with a real mmc/sd/sdio + * controller, although some of them do have hardware support for + * SPI protocol. The main reason for such configs would be mmc-ish + * cards like DataFlash, which don't support that "native" protocol. + * + * We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to + * switch between driver stacks, and in any case if "native" mode + * is available, it will be faster and hence preferable. + * + * - MMC depends on a different chipselect management policy than the + * SPI interface currently supports for shared bus segments: it needs + * to issue multiple spi_message requests with the chipselect active, + * using the results of one message to decide the next one to issue. + * + * Pending updates to the programming interface, this driver expects + * that it not share the bus with other drivers (precluding conflicts). + * + * - We tell the controller to keep the chipselect active from the + * beginning of an mmc_host_ops.request until the end. So beware + * of SPI controller drivers that mis-handle the cs_change flag! + * + * However, many cards seem OK with chipselect flapping up/down + * during that time ... at least on unshared bus segments. + */ + + +/* + * Local protocol constants, internal to data block protocols. + */ + +/* Response tokens used to ack each block written: */ +#define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f) +#define SPI_RESPONSE_ACCEPTED ((2 << 1)|1) +#define SPI_RESPONSE_CRC_ERR ((5 << 1)|1) +#define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1) + +/* Read and write blocks start with these tokens and end with crc; + * on error, read tokens act like a subset of R2_SPI_* values. + */ +#define SPI_TOKEN_SINGLE 0xfe /* single block r/w, multiblock read */ +#define SPI_TOKEN_MULTI_WRITE 0xfc /* multiblock write */ +#define SPI_TOKEN_STOP_TRAN 0xfd /* terminate multiblock write */ + +#define MMC_SPI_BLOCKSIZE 512 + +#define MMC_SPI_R1B_TIMEOUT_MS 3000 +#define MMC_SPI_INIT_TIMEOUT_MS 3000 + +/* One of the critical speed parameters is the amount of data which may + * be transferred in one command. If this value is too low, the SD card + * controller has to do multiple partial block writes (argggh!). With + * today (2008) SD cards there is little speed gain if we transfer more + * than 64 KBytes at a time. So use this value until there is any indication + * that we should do more here. 
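+ *
+ * (Added arithmetic note: 128 blocks of 512 bytes each is exactly the
+ * 64 KBytes mentioned above, since 128 * 512 = 65536 bytes.)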
+ */ +#define MMC_SPI_BLOCKSATONCE 128 + +/****************************************************************************/ + +/* + * Local Data Structures + */ + +/* "scratch" is per-{command,block} data exchanged with the card */ +struct scratch { + u8 status[29]; + u8 data_token; + __be16 crc_val; +}; + +struct mmc_spi_host { + struct mmc_host *mmc; + struct spi_device *spi; + + unsigned char power_mode; + u16 powerup_msecs; + + struct mmc_spi_platform_data *pdata; + + /* for bulk data transfers */ + struct spi_transfer token, t, crc, early_status; + struct spi_message m; + + /* for status readback */ + struct spi_transfer status; + struct spi_message readback; + + /* underlying DMA-aware controller, or null */ + struct device *dma_dev; + + /* buffer used for commands and for message "overhead" */ + struct scratch *data; + dma_addr_t data_dma; + + /* Specs say to write ones most of the time, even when the card + * has no need to read its input data; and many cards won't care. + * This is our source of those ones. + */ + void *ones; + dma_addr_t ones_dma; +}; + + +/****************************************************************************/ + +/* + * MMC-over-SPI protocol glue, used by the MMC stack interface + */ + +static inline int mmc_cs_off(struct mmc_spi_host *host) +{ + /* chipselect will always be inactive after setup() */ + return spi_setup(host->spi); +} + +static int +mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) +{ + int status; + + if (len > sizeof(*host->data)) { + WARN_ON(1); + return -EIO; + } + + host->status.len = len; + + if (host->dma_dev) + dma_sync_single_for_device(host->dma_dev, + host->data_dma, sizeof(*host->data), + DMA_FROM_DEVICE); + + status = spi_sync_locked(host->spi, &host->readback); + + if (host->dma_dev) + dma_sync_single_for_cpu(host->dma_dev, + host->data_dma, sizeof(*host->data), + DMA_FROM_DEVICE); + + return status; +} + +static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, + unsigned n, u8 byte) +{ + u8 *cp = host->data->status; + unsigned long start = jiffies; + + while (1) { + int status; + unsigned i; + + status = mmc_spi_readbytes(host, n); + if (status < 0) + return status; + + for (i = 0; i < n; i++) { + if (cp[i] != byte) + return cp[i]; + } + + if (time_is_before_jiffies(start + timeout)) + break; + + /* If we need long timeouts, we may release the CPU. + * We use jiffies here because we want to have a relation + * between elapsed time and the blocking of the scheduler. + */ + if (time_is_before_jiffies(start + 1)) + schedule(); + } + return -ETIMEDOUT; +} + +static inline int +mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout) +{ + return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); +} + +static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout) +{ + return mmc_spi_skip(host, timeout, 1, 0xff); +} + + +/* + * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol + * hosts return! The low byte holds R1_SPI bits. The next byte may hold + * R2_SPI bits ... for SEND_STATUS, or after data read errors. + * + * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on + * newer cards R7 (IF_COND). 
+ */ + +static char *maptype(struct mmc_command *cmd) +{ + switch (mmc_spi_resp_type(cmd)) { + case MMC_RSP_SPI_R1: return "R1"; + case MMC_RSP_SPI_R1B: return "R1B"; + case MMC_RSP_SPI_R2: return "R2/R5"; + case MMC_RSP_SPI_R3: return "R3/R4/R7"; + default: return "?"; + } +} + +/* return zero, else negative errno after setting cmd->error */ +static int mmc_spi_response_get(struct mmc_spi_host *host, + struct mmc_command *cmd, int cs_on) +{ + unsigned long timeout_ms; + u8 *cp = host->data->status; + u8 *end = cp + host->t.len; + int value = 0; + int bitshift; + u8 leftover = 0; + unsigned short rotator; + int i; + char tag[32]; + + snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s", + cmd->opcode, maptype(cmd)); + + /* Except for data block reads, the whole response will already + * be stored in the scratch buffer. It's somewhere after the + * command and the first byte we read after it. We ignore that + * first byte. After STOP_TRANSMISSION command it may include + * two data bits, but otherwise it's all ones. + */ + cp += 8; + while (cp < end && *cp == 0xff) + cp++; + + /* Data block reads (R1 response types) may need more data... */ + if (cp == end) { + cp = host->data->status; + end = cp+1; + + /* Card sends N(CR) (== 1..8) bytes of all-ones then one + * status byte ... and we already scanned 2 bytes. + * + * REVISIT block read paths use nasty byte-at-a-time I/O + * so it can always DMA directly into the target buffer. + * It'd probably be better to memcpy() the first chunk and + * avoid extra i/o calls... + * + * Note we check for more than 8 bytes, because in practice, + * some SD cards are slow... + */ + for (i = 2; i < 16; i++) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + if (*cp != 0xff) + goto checkstatus; + } + value = -ETIMEDOUT; + goto done; + } + +checkstatus: + bitshift = 0; + if (*cp & 0x80) { + /* Houston, we have an ugly card with a bit-shifted response */ + rotator = *cp++ << 8; + /* read the next byte */ + if (cp == end) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + cp = host->data->status; + end = cp+1; + } + rotator |= *cp++; + while (rotator & 0x8000) { + bitshift++; + rotator <<= 1; + } + cmd->resp[0] = rotator >> 8; + leftover = rotator; + } else { + cmd->resp[0] = *cp++; + } + cmd->error = 0; + + /* Status byte: the entire seven-bit R1 response. */ + if (cmd->resp[0] != 0) { + if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS) + & cmd->resp[0]) + value = -EFAULT; /* Bad address */ + else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0]) + value = -ENOSYS; /* Function not implemented */ + else if (R1_SPI_COM_CRC & cmd->resp[0]) + value = -EILSEQ; /* Illegal byte sequence */ + else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET) + & cmd->resp[0]) + value = -EIO; /* I/O error */ + /* else R1_SPI_IDLE, "it's resetting" */ + } + + switch (mmc_spi_resp_type(cmd)) { + + /* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads) + * and less-common stuff like various erase operations. + */ + case MMC_RSP_SPI_R1B: + /* maybe we read all the busy tokens already */ + while (cp < end && *cp == 0) + cp++; + if (cp == end) { + timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : + MMC_SPI_R1B_TIMEOUT_MS; + mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms)); + } + break; + + /* SPI R2 == R1 + second status byte; SEND_STATUS + * SPI R5 == R1 + data byte; IO_RW_DIRECT + */ + case MMC_RSP_SPI_R2: + /* read the next byte */ + if (cp == end) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + cp = host->data->status; + end = cp+1; + } + if (bitshift) { + rotator = leftover << 8; + rotator |= *cp << bitshift; + cmd->resp[0] |= (rotator & 0xFF00); + } else { + cmd->resp[0] |= *cp << 8; + } + break; + + /* SPI R3, R4, or R7 == R1 + 4 bytes */ + case MMC_RSP_SPI_R3: + rotator = leftover << 8; + cmd->resp[1] = 0; + for (i = 0; i < 4; i++) { + cmd->resp[1] <<= 8; + /* read the next byte */ + if (cp == end) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + cp = host->data->status; + end = cp+1; + } + if (bitshift) { + rotator |= *cp++ << bitshift; + cmd->resp[1] |= (rotator >> 8); + rotator <<= 8; + } else { + cmd->resp[1] |= *cp++; + } + } + break; + + /* SPI R1 == just one status byte */ + case MMC_RSP_SPI_R1: + break; + + default: + dev_dbg(&host->spi->dev, "bad response type %04x\n", + mmc_spi_resp_type(cmd)); + if (value >= 0) + value = -EINVAL; + goto done; + } + + if (value < 0) + dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n", + tag, cmd->resp[0], cmd->resp[1]); + + /* disable chipselect on errors and some success cases */ + if (value >= 0 && cs_on) + return value; +done: + if (value < 0) + cmd->error = value; + mmc_cs_off(host); + return value; +} + +/* Issue command and read its response. + * Returns zero on success, negative for error. + * + * On error, caller must cope with mmc core retry mechanism. That + * means immediate low-level resubmit, which affects the bus lock... + */ +static int +mmc_spi_command_send(struct mmc_spi_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd, int cs_on) +{ + struct scratch *data = host->data; + u8 *cp = data->status; + int status; + struct spi_transfer *t; + + /* We can handle most commands (except block reads) in one full + * duplex I/O operation before either starting the next transfer + * (data block or command) or else deselecting the card. + * + * First, write 7 bytes: + * - an all-ones byte to ensure the card is ready + * - opcode byte (plus start and transmission bits) + * - four bytes of big-endian argument + * - crc7 (plus end bit) ... always computed, it's cheap + * + * We init the whole buffer to all-ones, which is what we need + * to write while we're reading (later) response data. + */ + memset(cp, 0xff, sizeof(data->status)); + + cp[1] = 0x40 | cmd->opcode; + put_unaligned_be32(cmd->arg, cp + 2); + cp[6] = crc7_be(0, cp + 1, 5) | 0x01; + cp += 7; + + /* Then, read up to 13 bytes (while writing all-ones): + * - N(CR) (== 1..8) bytes of all-ones + * - status byte (for all response types) + * - the rest of the response, either: + * + nothing, for R1 or R1B responses + * + second status byte, for R2 responses + * + four data bytes, for R3 and R7 responses + * + * Finally, read some more bytes ... in the nice cases we know in + * advance how many, and reading 1 more is always OK: + * - N(EC) (== 0..N) bytes of all-ones, before deselect/finish + * - N(RC) (== 1..N) bytes of all-ones, before next command + * - N(WR) (== 1..N) bytes of all-ones, before data write + * + * So in those cases one full duplex I/O of at most 21 bytes will + * handle the whole command, leaving the card ready to receive a + * data block or new command. 
We do that whenever we can, shaving + * CPU and IRQ costs (especially when using DMA or FIFOs). + * + * There are two other cases, where it's not generally practical + * to rely on a single I/O: + * + * - R1B responses need at least N(EC) bytes of all-zeroes. + * + * In this case we can *try* to fit it into one I/O, then + * maybe read more data later. + * + * - Data block reads are more troublesome, since a variable + * number of padding bytes precede the token and data. + * + N(CX) (== 0..8) bytes of all-ones, before CSD or CID + * + N(AC) (== 1..many) bytes of all-ones + * + * In this case we currently only have minimal speedups here: + * when N(CR) == 1 we can avoid I/O in response_get(). + */ + if (cs_on && (mrq->data->flags & MMC_DATA_READ)) { + cp += 2; /* min(N(CR)) + status */ + /* R1 */ + } else { + cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */ + if (cmd->flags & MMC_RSP_SPI_S2) /* R2/R5 */ + cp++; + else if (cmd->flags & MMC_RSP_SPI_B4) /* R3/R4/R7 */ + cp += 4; + else if (cmd->flags & MMC_RSP_BUSY) /* R1B */ + cp = data->status + sizeof(data->status); + /* else: R1 (most commands) */ + } + + dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n", + cmd->opcode, maptype(cmd)); + + /* send command, leaving chipselect active */ + spi_message_init(&host->m); + + t = &host->t; + memset(t, 0, sizeof(*t)); + t->tx_buf = t->rx_buf = data->status; + t->tx_dma = t->rx_dma = host->data_dma; + t->len = cp - data->status; + t->cs_change = 1; + spi_message_add_tail(t, &host->m); + + if (host->dma_dev) { + host->m.is_dma_mapped = 1; + dma_sync_single_for_device(host->dma_dev, + host->data_dma, sizeof(*host->data), + DMA_BIDIRECTIONAL); + } + status = spi_sync_locked(host->spi, &host->m); + + if (host->dma_dev) + dma_sync_single_for_cpu(host->dma_dev, + host->data_dma, sizeof(*host->data), + DMA_BIDIRECTIONAL); + if (status < 0) { + dev_dbg(&host->spi->dev, " ... write returned %d\n", status); + cmd->error = status; + return status; + } + + /* after no-data commands and STOP_TRANSMISSION, chipselect off */ + return mmc_spi_response_get(host, cmd, cs_on); +} + +/* Build data message with up to four separate transfers. For TX, we + * start by writing the data token. And in most cases, we finish with + * a status transfer. + * + * We always provide TX data for data and CRC. The MMC/SD protocol + * requires us to write ones; but Linux defaults to writing zeroes; + * so we explicitly initialize it to all ones on RX paths. + * + * We also handle DMA mapping, so the underlying SPI controller does + * not need to (re)do it for each message. + */ +static void +mmc_spi_setup_data_message( + struct mmc_spi_host *host, + int multiple, + enum dma_data_direction direction) +{ + struct spi_transfer *t; + struct scratch *scratch = host->data; + dma_addr_t dma = host->data_dma; + + spi_message_init(&host->m); + if (dma) + host->m.is_dma_mapped = 1; + + /* for reads, readblock() skips 0xff bytes before finding + * the token; for writes, this transfer issues that token. + */ + if (direction == DMA_TO_DEVICE) { + t = &host->token; + memset(t, 0, sizeof(*t)); + t->len = 1; + if (multiple) + scratch->data_token = SPI_TOKEN_MULTI_WRITE; + else + scratch->data_token = SPI_TOKEN_SINGLE; + t->tx_buf = &scratch->data_token; + if (dma) + t->tx_dma = dma + offsetof(struct scratch, data_token); + spi_message_add_tail(t, &host->m); + } + + /* Body of transfer is buffer, then CRC ... + * either TX-only, or RX with TX-ones. 
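+ *
+ * For illustration: a single 512-byte write ends up on the wire as
+ * the one-byte token transfer (SPI_TOKEN_SINGLE), then the 512 data
+ * bytes, then the two crc_val bytes; a read clocks out all-ones over
+ * the same span while the card drives the data and CRC back.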
+ */
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = host->ones;
+ t->tx_dma = host->ones_dma;
+ /* length and actual buffer info are written later */
+ spi_message_add_tail(t, &host->m);
+
+ t = &host->crc;
+ memset(t, 0, sizeof(*t));
+ t->len = 2;
+ if (direction == DMA_TO_DEVICE) {
+ /* the actual CRC may get written later */
+ t->tx_buf = &scratch->crc_val;
+ if (dma)
+ t->tx_dma = dma + offsetof(struct scratch, crc_val);
+ } else {
+ t->tx_buf = host->ones;
+ t->tx_dma = host->ones_dma;
+ t->rx_buf = &scratch->crc_val;
+ if (dma)
+ t->rx_dma = dma + offsetof(struct scratch, crc_val);
+ }
+ spi_message_add_tail(t, &host->m);
+
+ /*
+ * A single block read is followed by N(EC) [0+] all-ones bytes
+ * before deselect ... don't bother.
+ *
+ * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
+ * the next block is read, or a STOP_TRANSMISSION is issued. We'll
+ * collect that single byte, so readblock() doesn't need to.
+ *
+ * For a write, the one-byte data response follows immediately, then
+ * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
+ * Then single block reads may deselect, and multiblock ones issue
+ * the next token (next data block, or STOP_TRAN). We can try to
+ * minimize I/O ops by using a single read to collect end-of-busy.
+ */
+ if (multiple || direction == DMA_TO_DEVICE) {
+ t = &host->early_status;
+ memset(t, 0, sizeof(*t));
+ t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ t->tx_buf = host->ones;
+ t->tx_dma = host->ones_dma;
+ t->rx_buf = scratch->status;
+ if (dma)
+ t->rx_dma = dma + offsetof(struct scratch, status);
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+ }
+}
+
+/*
+ * Write one block:
+ * - caller handled preceding N(WR) [1+] all-ones bytes
+ * - data block
+ * + token
+ * + data bytes
+ * + crc16
+ * - an all-ones byte ... card writes a data-response byte
+ * - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
+ *
+ * Return negative errno, else success.
+ */
+static int
+mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ unsigned long timeout)
+{
+ struct spi_device *spi = host->spi;
+ int status, i;
+ struct scratch *scratch = host->data;
+ u32 pattern;
+
+ if (host->mmc->use_spi_crc)
+ scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
+ if (host->dma_dev)
+ dma_sync_single_for_device(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+
+ status = spi_sync_locked(spi, &host->m);
+
+ if (status != 0) {
+ dev_dbg(&spi->dev, "write error (%d)\n", status);
+ return status;
+ }
+
+ if (host->dma_dev)
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+
+ /*
+ * Get the transmission data-response reply. It must follow
+ * immediately after the data block we transferred. This reply
+ * doesn't necessarily tell whether the write operation succeeded;
+ * it just says if the transmission was ok and whether *earlier*
+ * writes succeeded; see the standard.
+ *
+ * In practice, some cards (even modern SDHC cards) are late in
+ * sending the response and miss the time frame by a few bits,
+ * so we have to cope with this situation and check the response
+ * bit-by-bit. Arggh!!!
+ */
+ pattern = get_unaligned_be32(scratch->status);
+
+ /* The first 3 bits of the pattern are undefined */
+ pattern |= 0xE0000000;
+
+ /* left-adjust to leading 0 bit */
+ while (pattern & 0x80000000)
+ pattern <<= 1;
+ /* right-adjust for pattern matching. Code is in bit 4..0 now.
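+ e.g. an unshifted "accepted" token arrives as 0xE5 (xxx0sss1 with
+ sss == 010): pattern starts as 0xE5FFFFFF, the three forced leading
+ ones are shifted out, and >> 27 leaves 0b00101, which is
+ SPI_RESPONSE_ACCEPTED.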
*/ + pattern >>= 27; + + switch (pattern) { + case SPI_RESPONSE_ACCEPTED: + status = 0; + break; + case SPI_RESPONSE_CRC_ERR: + /* host shall then issue MMC_STOP_TRANSMISSION */ + status = -EILSEQ; + break; + case SPI_RESPONSE_WRITE_ERR: + /* host shall then issue MMC_STOP_TRANSMISSION, + * and should MMC_SEND_STATUS to sort it out + */ + status = -EIO; + break; + default: + status = -EPROTO; + break; + } + if (status != 0) { + dev_dbg(&spi->dev, "write error %02x (%d)\n", + scratch->status[0], status); + return status; + } + + t->tx_buf += t->len; + if (host->dma_dev) + t->tx_dma += t->len; + + /* Return when not busy. If we didn't collect that status yet, + * we'll need some more I/O. + */ + for (i = 4; i < sizeof(scratch->status); i++) { + /* card is non-busy if the most recent bit is 1 */ + if (scratch->status[i] & 0x01) + return 0; + } + return mmc_spi_wait_unbusy(host, timeout); +} + +/* + * Read one block: + * - skip leading all-ones bytes ... either + * + N(AC) [1..f(clock,CSD)] usually, else + * + N(CX) [0..8] when reading CSD or CID + * - data block + * + token ... if error token, no data or crc + * + data bytes + * + crc16 + * + * After single block reads, we're done; N(EC) [0+] all-ones bytes follow + * before dropping chipselect. + * + * For multiblock reads, caller either reads the next block or issues a + * STOP_TRANSMISSION command. + */ +static int +mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, + unsigned long timeout) +{ + struct spi_device *spi = host->spi; + int status; + struct scratch *scratch = host->data; + unsigned int bitshift; + u8 leftover; + + /* At least one SD card sends an all-zeroes byte when N(CX) + * applies, before the all-ones bytes ... just cope with that. + */ + status = mmc_spi_readbytes(host, 1); + if (status < 0) + return status; + status = scratch->status[0]; + if (status == 0xff || status == 0) + status = mmc_spi_readtoken(host, timeout); + + if (status < 0) { + dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status); + return status; + } + + /* The token may be bit-shifted... + * the first 0-bit precedes the data stream. + */ + bitshift = 7; + while (status & 0x80) { + status <<= 1; + bitshift--; + } + leftover = status << 1; + + if (host->dma_dev) { + dma_sync_single_for_device(host->dma_dev, + host->data_dma, sizeof(*scratch), + DMA_BIDIRECTIONAL); + dma_sync_single_for_device(host->dma_dev, + t->rx_dma, t->len, + DMA_FROM_DEVICE); + } + + status = spi_sync_locked(spi, &host->m); + if (status < 0) { + dev_dbg(&spi->dev, "read error %d\n", status); + return status; + } + + if (host->dma_dev) { + dma_sync_single_for_cpu(host->dma_dev, + host->data_dma, sizeof(*scratch), + DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(host->dma_dev, + t->rx_dma, t->len, + DMA_FROM_DEVICE); + } + + if (bitshift) { + /* Walk through the data and the crc and do + * all the magic to get byte-aligned data. 
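+ * e.g. with bitshift == 2 each output byte becomes
+ * (prev << 6) | (cur >> 2), carrying the leftover bits forward.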
+ */
+ u8 *cp = t->rx_buf;
+ unsigned int len;
+ unsigned int bitright = 8 - bitshift;
+ u8 temp;
+ for (len = t->len; len; len--) {
+ temp = *cp;
+ *cp++ = leftover | (temp >> bitshift);
+ leftover = temp << bitright;
+ }
+ cp = (u8 *) &scratch->crc_val;
+ temp = *cp;
+ *cp++ = leftover | (temp >> bitshift);
+ leftover = temp << bitright;
+ temp = *cp;
+ *cp = leftover | (temp >> bitshift);
+ }
+
+ if (host->mmc->use_spi_crc) {
+ u16 crc = crc_itu_t(0, t->rx_buf, t->len);
+
+ be16_to_cpus(&scratch->crc_val);
+ if (scratch->crc_val != crc) {
+ dev_dbg(&spi->dev,
+ "read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
+ scratch->crc_val, crc, t->len);
+ return -EILSEQ;
+ }
+ }
+
+ t->rx_buf += t->len;
+ if (host->dma_dev)
+ t->rx_dma += t->len;
+
+ return 0;
+}
+
+/*
+ * An MMC/SD data stage includes one or more blocks, optional CRCs,
+ * and inline handshaking. That handshaking makes it unlike most
+ * other SPI protocol stacks.
+ */
+static void
+mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ struct mmc_data *data, u32 blk_size)
+{
+ struct spi_device *spi = host->spi;
+ struct device *dma_dev = host->dma_dev;
+ struct spi_transfer *t;
+ enum dma_data_direction direction;
+ struct scatterlist *sg;
+ unsigned n_sg;
+ int multiple = (data->blocks > 1);
+ u32 clock_rate;
+ unsigned long timeout;
+
+ direction = mmc_get_dma_dir(data);
+ mmc_spi_setup_data_message(host, multiple, direction);
+ t = &host->t;
+
+ if (t->speed_hz)
+ clock_rate = t->speed_hz;
+ else
+ clock_rate = spi->max_speed_hz;
+
+ timeout = data->timeout_ns / 1000 +
+ data->timeout_clks * 1000000 / clock_rate;
+ timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
+
+ /* Handle scatterlist segments one at a time, with sync for
+ * each 512-byte block
+ */
+ for_each_sg(data->sg, sg, data->sg_len, n_sg) {
+ int status = 0;
+ dma_addr_t dma_addr = 0;
+ void *kmap_addr;
+ unsigned length = sg->length;
+ enum dma_data_direction dir = direction;
+
+ /* set up dma mapping for controller drivers that might
+ * use DMA ... though they may fall back to PIO
+ */
+ if (dma_dev) {
+ /* never invalidate whole *shared* pages ... */
+ if ((sg->offset != 0 || length != PAGE_SIZE)
+ && dir == DMA_FROM_DEVICE)
+ dir = DMA_BIDIRECTIONAL;
+
+ dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
+ PAGE_SIZE, dir);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
+ data->error = -EFAULT;
+ break;
+ }
+ if (direction == DMA_TO_DEVICE)
+ t->tx_dma = dma_addr + sg->offset;
+ else
+ t->rx_dma = dma_addr + sg->offset;
+ }
+
+ /* allow pio too; we don't allow highmem */
+ kmap_addr = kmap(sg_page(sg));
+ if (direction == DMA_TO_DEVICE)
+ t->tx_buf = kmap_addr + sg->offset;
+ else
+ t->rx_buf = kmap_addr + sg->offset;
+
+ /* transfer each block, and update request status */
+ while (length) {
+ t->len = min(length, blk_size);
+
+ dev_dbg(&host->spi->dev,
+ " mmc_spi: %s block, %d bytes\n",
+ (direction == DMA_TO_DEVICE) ? "write" : "read",
+ t->len);
+
+ if (direction == DMA_TO_DEVICE)
+ status = mmc_spi_writeblock(host, t, timeout);
+ else
+ status = mmc_spi_readblock(host, t, timeout);
+ if (status < 0)
+ break;
+
+ data->bytes_xfered += t->len;
+ length -= t->len;
+
+ if (!multiple)
+ break;
+ }
+
+ /* discard mappings */
+ if (direction == DMA_FROM_DEVICE)
+ flush_kernel_dcache_page(sg_page(sg));
+ kunmap(sg_page(sg));
+ if (dma_dev)
+ dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
+
+ if (status < 0) {
+ data->error = status;
+ dev_dbg(&spi->dev, "%s status %d\n",
"write" : "read", + status); + break; + } + } + + /* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that + * can be issued before multiblock writes. Unlike its more widely + * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23), + * that can affect the STOP_TRAN logic. Complete (and current) + * MMC specs should sort that out before Linux starts using CMD23. + */ + if (direction == DMA_TO_DEVICE && multiple) { + struct scratch *scratch = host->data; + int tmp; + const unsigned statlen = sizeof(scratch->status); + + dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n"); + + /* Tweak the per-block message we set up earlier by morphing + * it to hold single buffer with the token followed by some + * all-ones bytes ... skip N(BR) (0..1), scan the rest for + * "not busy any longer" status, and leave chip selected. + */ + INIT_LIST_HEAD(&host->m.transfers); + list_add(&host->early_status.transfer_list, + &host->m.transfers); + + memset(scratch->status, 0xff, statlen); + scratch->status[0] = SPI_TOKEN_STOP_TRAN; + + host->early_status.tx_buf = host->early_status.rx_buf; + host->early_status.tx_dma = host->early_status.rx_dma; + host->early_status.len = statlen; + + if (host->dma_dev) + dma_sync_single_for_device(host->dma_dev, + host->data_dma, sizeof(*scratch), + DMA_BIDIRECTIONAL); + + tmp = spi_sync_locked(spi, &host->m); + + if (host->dma_dev) + dma_sync_single_for_cpu(host->dma_dev, + host->data_dma, sizeof(*scratch), + DMA_BIDIRECTIONAL); + + if (tmp < 0) { + if (!data->error) + data->error = tmp; + return; + } + + /* Ideally we collected "not busy" status with one I/O, + * avoiding wasteful byte-at-a-time scanning... but more + * I/O is often needed. + */ + for (tmp = 2; tmp < statlen; tmp++) { + if (scratch->status[tmp] != 0) + return; + } + tmp = mmc_spi_wait_unbusy(host, timeout); + if (tmp < 0 && !data->error) + data->error = tmp; + } +} + +/****************************************************************************/ + +/* + * MMC driver implementation -- the interface to the MMC stack + */ + +static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmc_spi_host *host = mmc_priv(mmc); + int status = -EINVAL; + int crc_retry = 5; + struct mmc_command stop; + +#ifdef DEBUG + /* MMC core and layered drivers *MUST* issue SPI-aware commands */ + { + struct mmc_command *cmd; + int invalid = 0; + + cmd = mrq->cmd; + if (!mmc_spi_resp_type(cmd)) { + dev_dbg(&host->spi->dev, "bogus command\n"); + cmd->error = -EINVAL; + invalid = 1; + } + + cmd = mrq->stop; + if (cmd && !mmc_spi_resp_type(cmd)) { + dev_dbg(&host->spi->dev, "bogus STOP command\n"); + cmd->error = -EINVAL; + invalid = 1; + } + + if (invalid) { + dump_stack(); + mmc_request_done(host->mmc, mrq); + return; + } + } +#endif + + /* request exclusive bus access */ + spi_bus_lock(host->spi->master); + +crc_recover: + /* issue command; then optionally data and stop */ + status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL); + if (status == 0 && mrq->data) { + mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz); + + /* + * The SPI bus is not always reliable for large data transfers. + * If an occasional crc error is reported by the SD device with + * data read/write over SPI, it may be recovered by repeating + * the last SD command again. The retry count is set to 5 to + * ensure the driver passes stress tests. 
+ */
+ if (mrq->data->error == -EILSEQ && crc_retry) {
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ status = mmc_spi_command_send(host, mrq, &stop, 0);
+ crc_retry--;
+ mrq->data->error = 0;
+ goto crc_recover;
+ }
+
+ if (mrq->stop)
+ status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
+ else
+ mmc_cs_off(host);
+ }
+
+ /* release the bus */
+ spi_bus_unlock(host->spi->master);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+/* See Section 6.4.1 in the SD "Simplified Physical Layer Specification 2.0"
+ *
+ * NOTE that here we can't know that the card has just been powered up;
+ * not all MMC/SD sockets support power switching.
+ *
+ * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
+ * this doesn't seem to do the right thing at all...
+ */
+static void mmc_spi_initsequence(struct mmc_spi_host *host)
+{
+ /* Try to be very sure any previous command has completed;
+ * wait till not-busy, skip debris from any old commands.
+ */
+ mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
+ mmc_spi_readbytes(host, 10);
+
+ /*
+ * Do a burst with chipselect active-high. We need to do this to
+ * meet the requirement of 74 clock cycles with both chipselect
+ * and CMD (MOSI) high before CMD0 ... after the card has been
+ * powered up to Vdd(min), and so is ready to take commands.
+ *
+ * Some cards are particularly needy of this (e.g. Viking "SD256")
+ * while most others don't seem to care.
+ *
+ * Note that this is one of the places MMC/SD plays games with the
+ * SPI protocol. Another is that when chipselect is released while
+ * the card returns BUSY status, the clock must issue several cycles
+ * with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases like when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+ * inverted by gpiolib, so if we want to make sure it is driven high
+ * we should toggle the default with an XOR as we do here.
+ */
+ host->spi->mode ^= SPI_CS_HIGH;
+ if (spi_setup(host->spi) != 0) {
+ /* Just warn; most cards work without it. */
+ dev_warn(&host->spi->dev,
+ "can't change chip-select polarity\n");
+ host->spi->mode ^= SPI_CS_HIGH;
+ } else {
+ mmc_spi_readbytes(host, 18);
+
+ host->spi->mode ^= SPI_CS_HIGH;
+ if (spi_setup(host->spi) != 0) {
+ /* Wot, we can't get the same setup we had before? */
+ dev_err(&host->spi->dev,
+ "can't restore chip-select polarity\n");
+ }
+ }
+}
+
+static char *mmc_powerstring(u8 power_mode)
+{
+ switch (power_mode) {
+ case MMC_POWER_OFF: return "off";
+ case MMC_POWER_UP: return "up";
+ case MMC_POWER_ON: return "on";
+ }
+ return "?";
+}
+
+static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmc_spi_host *host = mmc_priv(mmc);
+
+ if (host->power_mode != ios->power_mode) {
+ int canpower;
+
+ canpower = host->pdata && host->pdata->setpower;
+
+ dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
+ mmc_powerstring(ios->power_mode),
+ ios->vdd,
+ canpower ? ", can switch" : "");
+
+ /* switch power on/off if possible, accounting for
+ * max 250msec powerup time if needed.
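+ * (mmc_spi_probe() clamps powerup_msecs to at most 250 msec.)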
+ */ + if (canpower) { + switch (ios->power_mode) { + case MMC_POWER_OFF: + case MMC_POWER_UP: + host->pdata->setpower(&host->spi->dev, + ios->vdd); + if (ios->power_mode == MMC_POWER_UP) + msleep(host->powerup_msecs); + } + } + + /* See 6.4.1 in the simplified SD card physical spec 2.0 */ + if (ios->power_mode == MMC_POWER_ON) + mmc_spi_initsequence(host); + + /* If powering down, ground all card inputs to avoid power + * delivery from data lines! On a shared SPI bus, this + * will probably be temporary; 6.4.2 of the simplified SD + * spec says this must last at least 1msec. + * + * - Clock low means CPOL 0, e.g. mode 0 + * - MOSI low comes from writing zero + * - Chipselect is usually active low... + */ + if (canpower && ios->power_mode == MMC_POWER_OFF) { + int mres; + u8 nullbyte = 0; + + host->spi->mode &= ~(SPI_CPOL|SPI_CPHA); + mres = spi_setup(host->spi); + if (mres < 0) + dev_dbg(&host->spi->dev, + "switch to SPI mode 0 failed\n"); + + if (spi_write(host->spi, &nullbyte, 1) < 0) + dev_dbg(&host->spi->dev, + "put spi signals to low failed\n"); + + /* + * Now clock should be low due to spi mode 0; + * MOSI should be low because of written 0x00; + * chipselect should be low (it is active low) + * power supply is off, so now MMC is off too! + * + * FIXME no, chipselect can be high since the + * device is inactive and SPI_CS_HIGH is clear... + */ + msleep(10); + if (mres == 0) { + host->spi->mode |= (SPI_CPOL|SPI_CPHA); + mres = spi_setup(host->spi); + if (mres < 0) + dev_dbg(&host->spi->dev, + "switch back to SPI mode 3 failed\n"); + } + } + + host->power_mode = ios->power_mode; + } + + if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) { + int status; + + host->spi->max_speed_hz = ios->clock; + status = spi_setup(host->spi); + dev_dbg(&host->spi->dev, + "mmc_spi: clock to %d Hz, %d\n", + host->spi->max_speed_hz, status); + } +} + +static const struct mmc_host_ops mmc_spi_ops = { + .request = mmc_spi_request, + .set_ios = mmc_spi_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, +}; + + +/****************************************************************************/ + +/* + * SPI driver implementation + */ + +static irqreturn_t +mmc_spi_detect_irq(int irq, void *mmc) +{ + struct mmc_spi_host *host = mmc_priv(mmc); + u16 delay_msec = max(host->pdata->detect_delay, (u16)100); + + mmc_detect_change(mmc, msecs_to_jiffies(delay_msec)); + return IRQ_HANDLED; +} + +#ifdef CONFIG_HAS_DMA +static int mmc_spi_dma_alloc(struct mmc_spi_host *host) +{ + struct spi_device *spi = host->spi; + struct device *dev; + + if (!spi->master->dev.parent->dma_mask) + return 0; + + dev = spi->master->dev.parent; + + host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, host->ones_dma)) + return -ENOMEM; + + host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, host->data_dma)) { + dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE, + DMA_TO_DEVICE); + return -ENOMEM; + } + + dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data), + DMA_BIDIRECTIONAL); + + host->dma_dev = dev; + return 0; +} + +static void mmc_spi_dma_free(struct mmc_spi_host *host) +{ + if (!host->dma_dev) + return; + + dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE, + DMA_TO_DEVICE); + dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), + DMA_BIDIRECTIONAL); +} +#else +static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 
0; }
+static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+#endif
+
+static int mmc_spi_probe(struct spi_device *spi)
+{
+ void *ones;
+ struct mmc_host *mmc;
+ struct mmc_spi_host *host;
+ int status;
+ bool has_ro = false;
+
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
+ /* MMC and SD specs only seem to care that sampling is on the
+ * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
+ * should be legit. We'll use mode 0 since the steady state is 0,
+ * which is appropriate for hotplugging, unless the platform data
+ * specifies mode 3 (if the hardware is not compatible with mode 0).
+ */
+ if (spi->mode != SPI_MODE_3)
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+
+ status = spi_setup(spi);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
+ spi->mode, spi->max_speed_hz / 1000,
+ status);
+ return status;
+ }
+
+ /* We need a supply of ones to transmit. This is the only time
+ * the CPU touches these, so cache coherency isn't a concern.
+ *
+ * NOTE if many systems use more than one MMC-over-SPI connector
+ * it'd save some memory to share this. That's evidently rare.
+ */
+ status = -ENOMEM;
+ ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
+ if (!ones)
+ goto nomem;
+ memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
+
+ mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
+ if (!mmc)
+ goto nomem;
+
+ mmc->ops = &mmc_spi_ops;
+ mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
+ mmc->max_segs = MMC_SPI_BLOCKSATONCE;
+ mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
+ mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
+
+ mmc->caps = MMC_CAP_SPI;
+
+ /* SPI doesn't need the lowspeed device identification thing for
+ * MMC or SD cards, since it never comes up in open drain mode.
+ * That's good; some SPI masters can't handle very low speeds!
+ *
+ * However, low speed SDIO cards need not handle over 400 KHz;
+ * that's the only reason not to use a few MHz for f_min (until
+ * the upper layer reads the target frequency from the CSD).
+ */
+ mmc->f_min = 400000;
+ mmc->f_max = spi->max_speed_hz;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->spi = spi;
+
+ host->ones = ones;
+
+ /* Platform data is used to hook up things like card sensing
+ * and power switching gpios.
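+ * When no OCR mask is supplied this falls back, with a warning,
+ * to assuming a 3.2-3.4 V slot (MMC_VDD_32_33 | MMC_VDD_33_34).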
+ */ + host->pdata = mmc_spi_get_pdata(spi); + if (host->pdata) + mmc->ocr_avail = host->pdata->ocr_mask; + if (!mmc->ocr_avail) { + dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n"); + mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; + } + if (host->pdata && host->pdata->setpower) { + host->powerup_msecs = host->pdata->powerup_msecs; + if (!host->powerup_msecs || host->powerup_msecs > 250) + host->powerup_msecs = 250; + } + + dev_set_drvdata(&spi->dev, mmc); + + /* preallocate dma buffers */ + host->data = kmalloc(sizeof(*host->data), GFP_KERNEL); + if (!host->data) + goto fail_nobuf1; + + status = mmc_spi_dma_alloc(host); + if (status) + goto fail_dma; + + /* setup message for status/busy readback */ + spi_message_init(&host->readback); + host->readback.is_dma_mapped = (host->dma_dev != NULL); + + spi_message_add_tail(&host->status, &host->readback); + host->status.tx_buf = host->ones; + host->status.tx_dma = host->ones_dma; + host->status.rx_buf = &host->data->status; + host->status.rx_dma = host->data_dma + offsetof(struct scratch, status); + host->status.cs_change = 1; + + /* register card detect irq */ + if (host->pdata && host->pdata->init) { + status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc); + if (status != 0) + goto fail_glue_init; + } + + /* pass platform capabilities, if any */ + if (host->pdata) { + mmc->caps |= host->pdata->caps; + mmc->caps2 |= host->pdata->caps2; + } + + status = mmc_add_host(mmc); + if (status != 0) + goto fail_glue_init; + + /* + * Index 0 is card detect + * Old boardfiles were specifying 1 ms as debounce + */ + status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); + if (status == -EPROBE_DEFER) + goto fail_gpiod_request; + if (!status) { + /* + * The platform has a CD GPIO signal that may support + * interrupts, so let mmc_gpiod_request_cd_irq() decide + * if polling is needed or not. + */ + mmc->caps &= ~MMC_CAP_NEEDS_POLL; + mmc_gpiod_request_cd_irq(mmc); + } + mmc_detect_change(mmc, 0); + + /* Index 1 is write protect/read only */ + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); + if (status == -EPROBE_DEFER) + goto fail_gpiod_request; + if (!status) + has_ro = true; + + dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", + dev_name(&mmc->class_dev), + host->dma_dev ? "" : ", no DMA", + has_ro ? "" : ", no WP", + (host->pdata && host->pdata->setpower) + ? "" : ", no poweroff", + (mmc->caps & MMC_CAP_NEEDS_POLL) + ? 
", cd polling" : ""); + return 0; + +fail_gpiod_request: + mmc_remove_host(mmc); +fail_glue_init: + mmc_spi_dma_free(host); +fail_dma: + kfree(host->data); +fail_nobuf1: + mmc_free_host(mmc); + mmc_spi_put_pdata(spi); +nomem: + kfree(ones); + return status; +} + + +static int mmc_spi_remove(struct spi_device *spi) +{ + struct mmc_host *mmc = dev_get_drvdata(&spi->dev); + struct mmc_spi_host *host = mmc_priv(mmc); + + /* prevent new mmc_detect_change() calls */ + if (host->pdata && host->pdata->exit) + host->pdata->exit(&spi->dev, mmc); + + mmc_remove_host(mmc); + + mmc_spi_dma_free(host); + kfree(host->data); + kfree(host->ones); + + spi->max_speed_hz = mmc->f_max; + mmc_free_host(mmc); + mmc_spi_put_pdata(spi); + return 0; +} + +static const struct of_device_id mmc_spi_of_match_table[] = { + { .compatible = "mmc-spi-slot", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table); + +static struct spi_driver mmc_spi_driver = { + .driver = { + .name = "mmc_spi", + .of_match_table = mmc_spi_of_match_table, + }, + .probe = mmc_spi_probe, + .remove = mmc_spi_remove, +}; + +module_spi_driver(mmc_spi_driver); + +MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko"); +MODULE_DESCRIPTION("SPI SD/MMC host driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:mmc_spi"); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c new file mode 100644 index 000000000..a72ca7416 --- /dev/null +++ b/drivers/mmc/host/mmci.c @@ -0,0 +1,2401 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver + * + * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. + * Copyright (C) 2010 ST-Ericsson SA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mmci.h" + +#define DRIVER_NAME "mmci-pl18x" + +static void mmci_variant_init(struct mmci_host *host); +static void ux500_variant_init(struct mmci_host *host); +static void ux500v2_variant_init(struct mmci_host *host); + +static unsigned int fmax = 515633; + +static struct variant_data variant_arm = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datalength_bits = 16, + .datactrl_blocksz = 11, + .pwrreg_powerup = MCI_PWR_UP, + .f_max = 100000000, + .reversed_irq_handling = true, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, + .init = mmci_variant_init, +}; + +static struct variant_data variant_arm_extended_fifo = { + .fifosize = 128 * 4, + .fifohalfsize = 64 * 4, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datalength_bits = 16, + .datactrl_blocksz = 11, + .pwrreg_powerup = MCI_PWR_UP, + .f_max = 100000000, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, + .init = mmci_variant_init, +}; + +static struct variant_data variant_arm_extended_fifo_hwfc = { + .fifosize = 128 * 4, + .fifohalfsize = 64 * 4, + .clkreg_enable = 
MCI_ARM_HWFCEN, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datalength_bits = 16, + .datactrl_blocksz = 11, + .pwrreg_powerup = MCI_PWR_UP, + .f_max = 100000000, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, + .init = mmci_variant_init, +}; + +static struct variant_data variant_u300 = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .clkreg_enable = MCI_ST_U300_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datalength_bits = 16, + .datactrl_blocksz = 11, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .st_sdio = true, + .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, + .signal_direction = true, + .pwrreg_clkgate = true, + .pwrreg_nopower = true, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, + .init = mmci_variant_init, +}; + +static struct variant_data variant_nomadik = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .st_sdio = true, + .st_clkdiv = true, + .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, + .signal_direction = true, + .pwrreg_clkgate = true, + .pwrreg_nopower = true, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, + .init = mmci_variant_init, +}; + +static struct variant_data variant_ux500 = { + .fifosize = 30 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_enable = MCI_ST_UX500_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .st_sdio = true, + .st_clkdiv = true, + .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, + .signal_direction = true, + .pwrreg_clkgate = true, + .busy_detect = true, + .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE, + .busy_detect_flag = MCI_ST_CARDBUSY, + .busy_detect_mask = MCI_ST_BUSYENDMASK, + .pwrreg_nopower = true, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, + .init = ux500_variant_init, +}; + +static struct variant_data variant_ux500v2 = { + .fifosize = 30 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_enable = MCI_ST_UX500_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, + .datalength_bits = 24, + .datactrl_blocksz = 11, + 
.datactrl_any_blocksz = true, + .dma_power_of_2 = true, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .st_sdio = true, + .st_clkdiv = true, + .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, + .signal_direction = true, + .pwrreg_clkgate = true, + .busy_detect = true, + .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE, + .busy_detect_flag = MCI_ST_CARDBUSY, + .busy_detect_mask = MCI_ST_BUSYENDMASK, + .pwrreg_nopower = true, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_OD, + .init = ux500v2_variant_init, +}; + +static struct variant_data variant_stm32 = { + .fifosize = 32 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_enable = MCI_ST_UX500_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .st_sdio = true, + .st_clkdiv = true, + .pwrreg_powerup = MCI_PWR_ON, + .f_max = 48000000, + .pwrreg_clkgate = true, + .pwrreg_nopower = true, + .init = mmci_variant_init, +}; + +static struct variant_data variant_stm32_sdmmc = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .f_max = 208000000, + .stm32_clkdiv = true, + .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC, + .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC, + .cmdreg_srsp = MCI_CPSM_STM32_SRSP, + .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP, + .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS, + .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK, + .datactrl_first = true, + .datacnt_useless = true, + .datalength_bits = 25, + .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .stm32_idmabsize_mask = GENMASK(12, 5), + .busy_timeout = true, + .busy_detect = true, + .busy_detect_flag = MCI_STM32_BUSYD0, + .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK, + .init = sdmmc_variant_init, +}; + +static struct variant_data variant_stm32_sdmmcv2 = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .f_max = 208000000, + .stm32_clkdiv = true, + .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC, + .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC, + .cmdreg_srsp = MCI_CPSM_STM32_SRSP, + .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP, + .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS, + .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK, + .datactrl_first = true, + .datacnt_useless = true, + .datalength_bits = 25, + .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, + .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, + .stm32_idmabsize_mask = GENMASK(16, 5), + .dma_lli = true, + .busy_timeout = true, + .busy_detect = true, + .busy_detect_flag = MCI_STM32_BUSYD0, + .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK, + .init = sdmmc_variant_init, +}; + +static struct variant_data variant_qcom = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_enable = MCI_QCOM_CLK_FLOWENA | + MCI_QCOM_CLK_SELECT_IN_FBCLK, + .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8, + .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE, + .cmdreg_cpsm_enable = MCI_CPSM_ENABLE, + .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP, + .cmdreg_srsp_crc = MCI_CPSM_RESPONSE, + .cmdreg_srsp = MCI_CPSM_RESPONSE, + .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, + 
.datalength_bits = 24, + .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .pwrreg_powerup = MCI_PWR_UP, + .f_max = 208000000, + .explicit_mclk_control = true, + .qcom_fifo = true, + .qcom_dml = true, + .mmcimask1 = true, + .irq_pio_mask = MCI_IRQ_PIO_MASK, + .start_err = MCI_STARTBITERR, + .opendrain = MCI_ROD, + .init = qcom_variant_init, +}; + +/* Busy detection for the ST Micro variant */ +static int mmci_card_busy(struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + int busy = 0; + + spin_lock_irqsave(&host->lock, flags); + if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag) + busy = 1; + spin_unlock_irqrestore(&host->lock, flags); + + return busy; +} + +static void mmci_reg_delay(struct mmci_host *host) +{ + /* + * According to the spec, at least three feedback clock cycles + * of max 52 MHz must pass between two writes to the MMCICLOCK reg. + * Three MCLK clock cycles must pass between two MMCIPOWER reg writes. + * Worst delay time during card init is at 100 kHz => 30 us. + * Worst delay time when up and running is at 25 MHz => 120 ns. + */ + if (host->cclk < 25000000) + udelay(30); + else + ndelay(120); +} + +/* + * This must be called with host->lock held + */ +void mmci_write_clkreg(struct mmci_host *host, u32 clk) +{ + if (host->clk_reg != clk) { + host->clk_reg = clk; + writel(clk, host->base + MMCICLOCK); + } +} + +/* + * This must be called with host->lock held + */ +void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) +{ + if (host->pwr_reg != pwr) { + host->pwr_reg = pwr; + writel(pwr, host->base + MMCIPOWER); + } +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) +{ + /* Keep busy mode in DPSM if enabled */ + datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag; + + if (host->datactrl_reg != datactrl) { + host->datactrl_reg = datactrl; + writel(datactrl, host->base + MMCIDATACTRL); + } +} + +/* + * This must be called with host->lock held + */ +static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) +{ + struct variant_data *variant = host->variant; + u32 clk = variant->clkreg; + + /* Make sure cclk reflects the current calculated clock */ + host->cclk = 0; + + if (desired) { + if (variant->explicit_mclk_control) { + host->cclk = host->mclk; + } else if (desired >= host->mclk) { + clk = MCI_CLK_BYPASS; + if (variant->st_clkdiv) + clk |= MCI_ST_UX500_NEG_EDGE; + host->cclk = host->mclk; + } else if (variant->st_clkdiv) { + /* + * DB8500 TRM says f = mclk / (clkdiv + 2) + * => clkdiv = (mclk / f) - 2 + * Round the divider up so we don't exceed the max + * frequency + */ + clk = DIV_ROUND_UP(host->mclk, desired) - 2; + if (clk >= 256) + clk = 255; + host->cclk = host->mclk / (clk + 2); + } else { + /* + * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) + * => clkdiv = mclk / (2 * f) - 1 + */ + clk = host->mclk / (2 * desired) - 1; + if (clk >= 256) + clk = 255; + host->cclk = host->mclk / (2 * (clk + 1)); + } + + clk |= variant->clkreg_enable; + clk |= MCI_CLK_ENABLE; + /* This hasn't proven to be worthwhile */ + /* clk |= MCI_CLK_PWRSAVE; */ + } + + /* Set actual clock for debug */ + host->mmc->actual_clock = host->cclk; + + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) + clk |= MCI_4BIT_BUS; + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) + clk |= variant->clkreg_8bit_bus_enable; + + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || + host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) + clk |= 
variant->clkreg_neg_edge_enable; + + mmci_write_clkreg(host, clk); +} + +static void mmci_dma_release(struct mmci_host *host) +{ + if (host->ops && host->ops->dma_release) + host->ops->dma_release(host); + + host->use_dma = false; +} + +static void mmci_dma_setup(struct mmci_host *host) +{ + if (!host->ops || !host->ops->dma_setup) + return; + + if (host->ops->dma_setup(host)) + return; + + /* initialize pre request cookie */ + host->next_cookie = 1; + + host->use_dma = true; +} + +/* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + struct variant_data *variant = host->variant; + + if (!data) + return 0; + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { + dev_err(mmc_dev(host->mmc), + "unsupported block size (%d bytes)\n", data->blksz); + return -EINVAL; + } + + if (host->ops && host->ops->validate_data) + return host->ops->validate_data(host, data); + + return 0; +} + +static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next) +{ + int err; + + if (!host->ops || !host->ops->prep_data) + return 0; + + err = host->ops->prep_data(host, data, next); + + if (next && !err) + data->host_cookie = ++host->next_cookie < 0 ? + 1 : host->next_cookie; + + return err; +} + +static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data, + int err) +{ + if (host->ops && host->ops->unprep_data) + host->ops->unprep_data(host, data, err); + + data->host_cookie = 0; +} + +static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) +{ + WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie); + + if (host->ops && host->ops->get_next_data) + host->ops->get_next_data(host, data); +} + +static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) +{ + struct mmc_data *data = host->data; + int ret; + + if (!host->use_dma) + return -EINVAL; + + ret = mmci_prep_data(host, data, false); + if (ret) + return ret; + + if (!host->ops || !host->ops->dma_start) + return -EINVAL; + + /* Okay, go for it. */ + dev_vdbg(mmc_dev(host->mmc), + "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", + data->sg_len, data->blksz, data->blocks, data->flags); + + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; + + /* Trigger the DMA transfer */ + mmci_write_datactrlreg(host, datactrl); + + /* + * Let the MMCI say when the data is ended and it's time + * to fire next DMA request. 
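+ * (The DATAEND interrupt, unmasked just below, signals this.)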
+ * When that happens, MMCI will call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_finalize)
+ host->ops->dma_finalize(host, data);
+}
+
+static void mmci_dma_error(struct mmci_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_error)
+ host->ops->dma_error(host);
+}
+
+static void
+mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
+{
+ writel(0, host->base + MMCICOMMAND);
+
+ BUG_ON(host->data);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+{
+ void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
+
+ if (host->singleirq) {
+ unsigned int mask0 = readl(base + MMCIMASK0);
+
+ mask0 &= ~variant->irq_pio_mask;
+ mask0 |= mask;
+
+ writel(mask0, base + MMCIMASK0);
+ }
+
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
+ host->mask1_reg = mask;
+}
+
+static void mmci_stop_data(struct mmci_host *host)
+{
+ mmci_write_datactrlreg(host, 0);
+ mmci_set_mask1(host, 0);
+ host->data = NULL;
+}
+
+static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int flags = SG_MITER_ATOMIC;
+
+ if (data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+}
+
+static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
+}
+
+static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
+{
+ return MCI_DPSM_ENABLE | (host->data->blksz << 16);
+}
+
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here, to allow the busy bit to be set.
+ * Potentially we may even need to poll the register for a
+ * while, to allow it to be set, but tests indicate that it
+ * isn't needed.
+ */
+ if (!host->busy_status && !(status & err_msk) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts need to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
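+ *
+ * In short, the sequence is: (1) on a successfully sent command,
+ * unmask the busy end IRQ and latch host->busy_status; (2) clear
+ * the busy start edge and keep waiting; (3) on the busy end edge,
+ * clear and mask the IRQ and resume command processing.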
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
+ */
+ if (host->busy_status) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface;
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+struct mmci_dmae_next {
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+};
+
+struct mmci_dmae_priv {
+ struct dma_chan *cur;
+ struct dma_chan *rx_channel;
+ struct dma_chan *tx_channel;
+ struct dma_async_tx_descriptor *desc_current;
+ struct mmci_dmae_next next_data;
+};
+
+int mmci_dmae_setup(struct mmci_host *host)
+{
+ const char *rxname, *txname;
+ struct mmci_dmae_priv *dmae;
+
+ dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
+ if (!dmae)
+ return -ENOMEM;
+
+ host->dma_priv = dmae;
+
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }
+
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally, however if it is
+ * specified but cannot be located, DMA will be disabled.
+ */
+ if (dmae->rx_channel && !dmae->tx_channel)
+ dmae->tx_channel = dmae->rx_channel;
+
+ if (dmae->rx_channel)
+ rxname = dma_chan_name(dmae->rx_channel);
+ else
+ rxname = "none";
+
+ if (dmae->tx_channel)
+ txname = dma_chan_name(dmae->tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (dmae->tx_channel) {
+ struct device *dev = dmae->tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (dmae->rx_channel) {
+ struct device *dev = dmae->rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
+ if (!dmae->tx_channel || !dmae->rx_channel) {
+ mmci_dmae_release(host);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Release the DMA channels. Called when DMA setup fails
+ * and when the host is torn down.
+ */ +void mmci_dmae_release(struct mmci_host *host) +{ + struct mmci_dmae_priv *dmae = host->dma_priv; + + if (dmae->rx_channel) + dma_release_channel(dmae->rx_channel); + if (dmae->tx_channel) + dma_release_channel(dmae->tx_channel); + dmae->rx_channel = dmae->tx_channel = NULL; +} + +static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ + struct mmci_dmae_priv *dmae = host->dma_priv; + struct dma_chan *chan; + + if (data->flags & MMC_DATA_READ) + chan = dmae->rx_channel; + else + chan = dmae->tx_channel; + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); +} + +void mmci_dmae_error(struct mmci_host *host) +{ + struct mmci_dmae_priv *dmae = host->dma_priv; + + if (!dma_inprogress(host)) + return; + + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(dmae->cur); + host->dma_in_progress = false; + dmae->cur = NULL; + dmae->desc_current = NULL; + host->data->host_cookie = 0; + + mmci_dma_unmap(host, host->data); +} + +void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data) +{ + struct mmci_dmae_priv *dmae = host->dma_priv; + u32 status; + int i; + + if (!dma_inprogress(host)) + return; + + /* Wait up to 1ms for the DMA to complete */ + for (i = 0; ; i++) { + status = readl(host->base + MMCISTATUS); + if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) + break; + udelay(10); + } + + /* + * Check to see whether we still have some data left in the FIFO - + * this catches DMA controllers which are unable to monitor the + * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- + * contiguous buffers. On TX, we'll get a FIFO underrun error. + */ + if (status & MCI_RXDATAAVLBLMASK) { + mmci_dma_error(host); + if (!data->error) + data->error = -EIO; + } else if (!data->host_cookie) { + mmci_dma_unmap(host, data); + } + + /* + * Use of DMA with scatter-gather is impossible. + * Give up with DMA and switch back to PIO mode. + */ + if (status & MCI_RXDATAAVLBLMASK) { + dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); + mmci_dma_release(host); + } + + host->dma_in_progress = false; + dmae->cur = NULL; + dmae->desc_current = NULL; +} + +/* prepares DMA channel and DMA descriptor, returns non-zero on failure */ +static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, + struct dma_chan **dma_chan, + struct dma_async_tx_descriptor **dma_desc) +{ + struct mmci_dmae_priv *dmae = host->dma_priv; + struct variant_data *variant = host->variant; + struct dma_slave_config conf = { + .src_addr = host->phybase + MMCIFIFO, + .dst_addr = host->phybase + MMCIFIFO, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ + .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ + .device_fc = false, + }; + struct dma_chan *chan; + struct dma_device *device; + struct dma_async_tx_descriptor *desc; + int nr_sg; + unsigned long flags = DMA_CTRL_ACK; + + if (data->flags & MMC_DATA_READ) { + conf.direction = DMA_DEV_TO_MEM; + chan = dmae->rx_channel; + } else { + conf.direction = DMA_MEM_TO_DEV; + chan = dmae->tx_channel; + } + + /* If there's no DMA channel, fall back to PIO */ + if (!chan) + return -EINVAL; + + /* If less than or equal to the fifo size, don't bother with DMA */ + if (data->blksz * data->blocks <= variant->fifosize) + return -EINVAL; + + /* + * This is necessary to get SDIO working on the Ux500. 
We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * However, some power-of-two blocks (such as 64 bytes) are sent
+ * regularly during SDIO traffic and those work fine, so for these
+ * we enable DMA transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ if (host->variant->qcom_dml)
+ flags |= DMA_PREP_INTERRUPT;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, flags);
+ if (!desc)
+ goto unmap_exit;
+
+ *dma_chan = chan;
+ *dma_desc = desc;
+
+ return 0;
+
+ unmap_exit:
+ dma_unmap_sg(device->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ return -ENOMEM;
+}
+
+int mmci_dmae_prep_data(struct mmci_host *host,
+ struct mmc_data *data,
+ bool next)
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *nd = &dmae->next_data;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ if (next)
+ return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
+ /* Check if next job is already prepared. */
+ if (dmae->cur && dmae->desc_current)
+ return 0;
+
+ /* No job was prepared, thus do it now. */
+ return _mmci_dmae_prep_data(host, data, &dmae->cur,
+ &dmae->desc_current);
+}
+
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ int ret;
+
+ host->dma_in_progress = true;
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
+ dma_async_issue_pending(dmae->cur);
+
+ *datactrl |= MCI_DPSM_DMAENABLE;
+
+ return 0;
+}
+
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *next = &dmae->next_data;
+
+ if (!host->use_dma)
+ return;
+
+ WARN_ON(!data->host_cookie && (next->desc || next->chan));
+
+ dmae->desc_current = next->desc;
+ dmae->cur = next->chan;
+ next->desc = NULL;
+ next->chan = NULL;
+}
+
+void mmci_dmae_unprep_data(struct mmci_host *host,
+ struct mmc_data *data, int err)
+
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+
+ if (!host->use_dma)
+ return;
+
+ mmci_dma_unmap(host, data);
+
+ if (err) {
+ struct mmci_dmae_next *next = &dmae->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = dmae->rx_channel;
+ else
+ chan = dmae->tx_channel;
+ dmaengine_terminate_all(chan);
+
+ if (dmae->desc_current == next->desc)
+ dmae->desc_current = NULL;
+
+ if (dmae->cur == next->chan) {
+ host->dma_in_progress = false;
+ dmae->cur = NULL;
+ }
+
+ next->desc = NULL;
+ next->chan = NULL;
+ }
+}
+
+static struct mmci_host_ops mmci_variant_ops = {
+ .prep_data = mmci_dmae_prep_data,
+ .unprep_data = mmci_dmae_unprep_data,
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+ .get_next_data = mmci_dmae_get_next_data,
+ .dma_setup = mmci_dmae_setup,
+ .dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
+ .dma_finalize = mmci_dmae_finalize,
+ .dma_error = mmci_dmae_error,
+};
+#else
+static struct mmci_host_ops mmci_variant_ops = {
+ .get_datactrl_cfg = mmci_get_dctrl_cfg,
+};
+#endif
+
+static void mmci_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+}
+
+static void ux500_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
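+ /* the Ux500 variants also hook up ST Micro busy-end detection */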
+	host->ops->busy_complete = ux500_busy_complete;
+}
+
+static void ux500v2_variant_init(struct mmci_host *host)
+{
+	host->ops = &mmci_variant_ops;
+	host->ops->busy_complete = ux500_busy_complete;
+	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
+}
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (!data)
+		return;
+
+	WARN_ON(data->host_cookie);
+
+	if (mmci_validate_data(host, data))
+		return;
+
+	mmci_prep_data(host, data, true);
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+			      int err)
+{
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (!data || !data->host_cookie)
+		return;
+
+	mmci_unprep_data(host, data, err);
+}
+
+static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+{
+	struct variant_data *variant = host->variant;
+	unsigned int datactrl, timeout, irqmask;
+	unsigned long long clks;
+	void __iomem *base;
+
+	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
+		data->blksz, data->blocks, data->flags);
+
+	host->data = data;
+	host->size = data->blksz * data->blocks;
+	data->bytes_xfered = 0;
+
+	clks = (unsigned long long)data->timeout_ns * host->cclk;
+	do_div(clks, NSEC_PER_SEC);
+
+	timeout = data->timeout_clks + (unsigned int)clks;
+
+	base = host->base;
+	writel(timeout, base + MMCIDATATIMER);
+	writel(host->size, base + MMCIDATALENGTH);
+
+	datactrl = host->ops->get_datactrl_cfg(host);
+	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
+
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+		u32 clk;
+
+		datactrl |= variant->datactrl_mask_sdio;
+
+		/*
+		 * The ST Micro variant for SDIO small write transfers
+		 * needs to have clock H/W flow control disabled,
+		 * otherwise the transfer will not start. The threshold
+		 * depends on the rate of MCLK.
+		 */
+		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
+		    (host->size < 8 ||
+		     (host->size <= 8 && host->mclk > 50000000)))
+			clk = host->clk_reg & ~variant->clkreg_enable;
+		else
+			clk = host->clk_reg | variant->clkreg_enable;
+
+		mmci_write_clkreg(host, clk);
+	}
+
+	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
+		datactrl |= variant->datactrl_mask_ddrmode;
+
+	/*
+	 * Attempt to use DMA operation mode; if this
+	 * fails, fall back to PIO mode.
+	 */
+	if (!mmci_dma_start(host, datactrl))
+		return;
+
+	/* IRQ mode, map the SG list for CPU reading/writing */
+	mmci_init_sg(host, data);
+
+	if (data->flags & MMC_DATA_READ) {
+		irqmask = MCI_RXFIFOHALFFULLMASK;
+
+		/*
+		 * If we have less than the fifo 'half-full' threshold to
+		 * transfer, trigger a PIO interrupt as soon as any data
+		 * is available.
+		 */
+		if (host->size < variant->fifohalfsize)
+			irqmask |= MCI_RXDATAAVLBLMASK;
+	} else {
+		/*
+		 * We don't actually need to include "FIFO empty" here
+		 * since it's implicit in "FIFO half empty".
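+		 * (TXFIFOEMPTY implies TXFIFOHALFEMPTY, so one mask suffices.)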
+ */ + irqmask = MCI_TXFIFOHALFEMPTYMASK; + } + + mmci_write_datactrlreg(host, datactrl); + writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); + mmci_set_mask1(host, irqmask); +} + +static void +mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) +{ + void __iomem *base = host->base; + unsigned long long clks; + + dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", + cmd->opcode, cmd->arg, cmd->flags); + + if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) { + writel(0, base + MMCICOMMAND); + mmci_reg_delay(host); + } + + if (host->variant->cmdreg_stop && + cmd->opcode == MMC_STOP_TRANSMISSION) + c |= host->variant->cmdreg_stop; + + c |= cmd->opcode | host->variant->cmdreg_cpsm_enable; + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + c |= host->variant->cmdreg_lrsp_crc; + else if (cmd->flags & MMC_RSP_CRC) + c |= host->variant->cmdreg_srsp_crc; + else + c |= host->variant->cmdreg_srsp; + } + + if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) { + if (!cmd->busy_timeout) + cmd->busy_timeout = 10 * MSEC_PER_SEC; + + if (cmd->busy_timeout > host->mmc->max_busy_timeout) + clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk; + else + clks = (unsigned long long)cmd->busy_timeout * host->cclk; + + do_div(clks, MSEC_PER_SEC); + writel_relaxed(clks, host->base + MMCIDATATIMER); + } + + if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE) + host->ops->pre_sig_volt_switch(host); + + if (/*interrupt*/0) + c |= MCI_CPSM_INTERRUPT; + + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) + c |= host->variant->data_cmd_enable; + + host->cmd = cmd; + + writel(cmd->arg, base + MMCIARGUMENT); + writel(c, base + MMCICOMMAND); +} + +static void mmci_stop_command(struct mmci_host *host) +{ + host->stop_abort.error = 0; + mmci_start_command(host, &host->stop_abort, 0); +} + +static void +mmci_data_irq(struct mmci_host *host, struct mmc_data *data, + unsigned int status) +{ + unsigned int status_err; + + /* Make sure we have data to handle */ + if (!data) + return; + + /* First check for errors */ + status_err = status & (host->variant->start_err | + MCI_DATACRCFAIL | MCI_DATATIMEOUT | + MCI_TXUNDERRUN | MCI_RXOVERRUN); + + if (status_err) { + u32 remain, success; + + /* Terminate the DMA transfer */ + mmci_dma_error(host); + + /* + * Calculate how far we are into the transfer. Note that + * the data counter gives the number of bytes transferred + * on the MMC bus, not on the host side. On reads, this + * can be as much as a FIFO-worth of data ahead. This + * matters for FIFO overruns only. 
+		 */
+		if (!host->variant->datacnt_useless) {
+			remain = readl(host->base + MMCIDATACNT);
+			success = data->blksz * data->blocks - remain;
+		} else {
+			success = 0;
+		}
+
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+			status_err, success);
+		if (status_err & MCI_DATACRCFAIL) {
+			/* Last block was not successful */
+			success -= 1;
+			data->error = -EILSEQ;
+		} else if (status_err & MCI_DATATIMEOUT) {
+			data->error = -ETIMEDOUT;
+		} else if (status_err & MCI_STARTBITERR) {
+			data->error = -ECOMM;
+		} else if (status_err & MCI_TXUNDERRUN) {
+			data->error = -EIO;
+		} else if (status_err & MCI_RXOVERRUN) {
+			if (success > host->variant->fifosize)
+				success -= host->variant->fifosize;
+			else
+				success = 0;
+			data->error = -EIO;
+		}
+		data->bytes_xfered = round_down(success, data->blksz);
+	}
+
+	if (status & MCI_DATABLOCKEND)
+		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
+
+	if (status & MCI_DATAEND || data->error) {
+		mmci_dma_finalize(host, data);
+
+		mmci_stop_data(host);
+
+		if (!data->error)
+			/* The error clause is handled above, success! */
+			data->bytes_xfered = data->blksz * data->blocks;
+
+		if (!data->stop) {
+			if (host->variant->cmdreg_stop && data->error)
+				mmci_stop_command(host);
+			else
+				mmci_request_end(host, data->mrq);
+		} else if (host->mrq->sbc && !data->error) {
+			mmci_request_end(host, data->mrq);
+		} else {
+			mmci_start_command(host, data->stop, 0);
+		}
+	}
+}
+
+static void
+mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+	     unsigned int status)
+{
+	u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
+	void __iomem *base = host->base;
+	bool sbc, busy_resp;
+
+	if (!cmd)
+		return;
+
+	sbc = (cmd == host->mrq->sbc);
+	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
+
+	/*
+	 * One of these interrupts must be pending for the event to be
+	 * considered worth handling. Note that we tag on any latent IRQs
+	 * postponed due to waiting for busy status.
+	 */
+	if (host->variant->busy_timeout && busy_resp)
+		err_msk |= MCI_DATATIMEOUT;
+
+	if (!((status | host->busy_status) &
+	      (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
+		return;
+
+	/* Handle busy detection on DAT0 if the variant supports it.
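+	 * The ->busy_complete() callback returns false while the card is
+	 * still signalling busy, in which case we keep waiting.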
+	 */
+	if (busy_resp && host->variant->busy_detect)
+		if (!host->ops->busy_complete(host, status, err_msk))
+			return;
+
+	host->cmd = NULL;
+
+	if (status & MCI_CMDTIMEOUT) {
+		cmd->error = -ETIMEDOUT;
+	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
+		cmd->error = -EILSEQ;
+	} else if (host->variant->busy_timeout && busy_resp &&
+		   status & MCI_DATATIMEOUT) {
+		cmd->error = -ETIMEDOUT;
+		host->irq_action = IRQ_WAKE_THREAD;
+	} else {
+		cmd->resp[0] = readl(base + MMCIRESPONSE0);
+		cmd->resp[1] = readl(base + MMCIRESPONSE1);
+		cmd->resp[2] = readl(base + MMCIRESPONSE2);
+		cmd->resp[3] = readl(base + MMCIRESPONSE3);
+	}
+
+	if ((!sbc && !cmd->data) || cmd->error) {
+		if (host->data) {
+			/* Terminate the DMA transfer */
+			mmci_dma_error(host);
+
+			mmci_stop_data(host);
+			if (host->variant->cmdreg_stop && cmd->error) {
+				mmci_stop_command(host);
+				return;
+			}
+		}
+
+		if (host->irq_action != IRQ_WAKE_THREAD)
+			mmci_request_end(host, host->mrq);
+
+	} else if (sbc) {
+		mmci_start_command(host, host->mrq->cmd, 0);
+	} else if (!host->variant->datactrl_first &&
+		   !(cmd->data->flags & MMC_DATA_READ)) {
+		mmci_start_data(host, cmd->data);
+	}
+}
+
+static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
+{
+	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
+}
+
+static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
+{
+	/*
+	 * On Qcom SDCC4, only 8 words are used in each burst, so only 8
+	 * addresses from the fifo range should be used.
+	 */
+	if (status & MCI_RXFIFOHALFFULL)
+		return host->variant->fifohalfsize;
+	else if (status & MCI_RXDATAAVLBL)
+		return 4;
+
+	return 0;
+}
+
+static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
+{
+	void __iomem *base = host->base;
+	char *ptr = buffer;
+	u32 status = readl(host->base + MMCISTATUS);
+	int host_remain = host->size;
+
+	do {
+		int count = host->get_rx_fifocnt(host, status, host_remain);
+
+		if (count > remain)
+			count = remain;
+
+		if (count <= 0)
+			break;
+
+		/*
+		 * SDIO especially may want to send something that is
+		 * not divisible by 4 (as opposed to card sectors
+		 * etc). Therefore make sure to always read the last bytes
+		 * while only doing full 32-bit reads towards the FIFO.
+		 */
+		if (unlikely(count & 0x3)) {
+			if (count < 4) {
+				unsigned char buf[4];
+				ioread32_rep(base + MMCIFIFO, buf, 1);
+				memcpy(ptr, buf, count);
+			} else {
+				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
+				count &= ~0x3;
+			}
+		} else {
+			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
+		}
+
+		ptr += count;
+		remain -= count;
+		host_remain -= count;
+
+		if (remain == 0)
+			break;
+
+		status = readl(base + MMCISTATUS);
+	} while (status & MCI_RXDATAAVLBL);
+
+	return ptr - buffer;
+}
+
+static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
+{
+	struct variant_data *variant = host->variant;
+	void __iomem *base = host->base;
+	char *ptr = buffer;
+
+	do {
+		unsigned int count, maxcnt;
+
+		maxcnt = status & MCI_TXFIFOEMPTY ?
+			 variant->fifosize : variant->fifohalfsize;
+		count = min(remain, maxcnt);
+
+		/*
+		 * SDIO especially may want to send something that is
+		 * not divisible by 4 (as opposed to card sectors
+		 * etc), and the FIFO only accepts full 32-bit writes.
+		 * So compensate by adding +3 on the count: a single
+		 * byte becomes a 32-bit write, 7 bytes will be two
+		 * 32-bit writes, etc.
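+		 * (The data path only transmits up to the programmed
+		 * MMCIDATALENGTH, so the padding bytes never reach the bus.)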
+ */ + iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2); + + ptr += count; + remain -= count; + + if (remain == 0) + break; + + status = readl(base + MMCISTATUS); + } while (status & MCI_TXFIFOHALFEMPTY); + + return ptr - buffer; +} + +/* + * PIO data transfer IRQ handler. + */ +static irqreturn_t mmci_pio_irq(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + + do { + unsigned int remain, len; + char *buffer; + + /* + * For write, we only need to test the half-empty flag + * here - if the FIFO is completely empty, then by + * definition it is more than half empty. + * + * For read, check for data available. + */ + if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) + break; + + if (!sg_miter_next(sg_miter)) + break; + + buffer = sg_miter->addr; + remain = sg_miter->length; + + len = 0; + if (status & MCI_RXACTIVE) + len = mmci_pio_read(host, buffer, remain); + if (status & MCI_TXACTIVE) + len = mmci_pio_write(host, buffer, remain, status); + + sg_miter->consumed = len; + + host->size -= len; + remain -= len; + + if (remain) + break; + + status = readl(base + MMCISTATUS); + } while (1); + + sg_miter_stop(sg_miter); + + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. + */ + if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) + mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); + + /* + * If we run out of data, disable the data IRQs; this + * prevents a race where the FIFO becomes empty before + * the chip itself has disabled the data path, and + * stops us racing with our data end IRQ. + */ + if (host->size == 0) { + mmci_set_mask1(host, 0); + writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); + } + + return IRQ_HANDLED; +} + +/* + * Handle completion of command and data transfers. + */ +static irqreturn_t mmci_irq(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + u32 status; + + spin_lock(&host->lock); + host->irq_action = IRQ_HANDLED; + + do { + status = readl(host->base + MMCISTATUS); + + if (host->singleirq) { + if (status & host->mask1_reg) + mmci_pio_irq(irq, dev_id); + + status &= ~host->variant->irq_pio_mask; + } + + /* + * Busy detection is managed by mmci_cmd_irq(), including to + * clear the corresponding IRQ. + */ + status &= readl(host->base + MMCIMASK0); + if (host->variant->busy_detect) + writel(status & ~host->variant->busy_detect_mask, + host->base + MMCICLEAR); + else + writel(status, host->base + MMCICLEAR); + + dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); + + if (host->variant->reversed_irq_handling) { + mmci_data_irq(host, host->data, status); + mmci_cmd_irq(host, host->cmd, status); + } else { + mmci_cmd_irq(host, host->cmd, status); + mmci_data_irq(host, host->data, status); + } + + /* + * Busy detection has been handled by mmci_cmd_irq() above. + * Clear the status bit to prevent polling in IRQ context. + */ + if (host->variant->busy_detect_flag) + status &= ~host->variant->busy_detect_flag; + + } while (status); + + spin_unlock(&host->lock); + + return host->irq_action; +} + +/* + * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW. 
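+ * @irq: the IRQ number (unused here)
+ * @dev_id: pointer to our struct mmci_host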
+ * + * A reset is needed for some variants, where a datatimeout for a R1B request + * causes the DPSM to stay busy (non-functional). + */ +static irqreturn_t mmci_irq_thread(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + unsigned long flags; + + if (host->rst) { + reset_control_assert(host->rst); + udelay(2); + reset_control_deassert(host->rst); + } + + spin_lock_irqsave(&host->lock, flags); + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->pwr_reg, host->base + MMCIPOWER); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + + host->irq_action = IRQ_HANDLED; + mmci_request_end(host, host->mrq); + spin_unlock_irqrestore(&host->lock, flags); + + return host->irq_action; +} + +static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + + WARN_ON(host->mrq != NULL); + + mrq->cmd->error = mmci_validate_data(host, mrq->data); + if (mrq->cmd->error) { + mmc_request_done(mmc, mrq); + return; + } + + spin_lock_irqsave(&host->lock, flags); + + host->mrq = mrq; + + if (mrq->data) + mmci_get_next_data(host, mrq->data); + + if (mrq->data && + (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ)) + mmci_start_data(host, mrq->data); + + if (mrq->sbc) + mmci_start_command(host, mrq->sbc, 0); + else + mmci_start_command(host, mrq->cmd, 0); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void mmci_set_max_busy_timeout(struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + u32 max_busy_timeout = 0; + + if (!host->variant->busy_detect) + return; + + if (host->variant->busy_timeout && mmc->actual_clock) + max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock, + MSEC_PER_SEC); + + mmc->max_busy_timeout = max_busy_timeout; +} + +static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mmci_host *host = mmc_priv(mmc); + struct variant_data *variant = host->variant; + u32 pwr = 0; + unsigned long flags; + int ret; + + if (host->plat->ios_handler && + host->plat->ios_handler(mmc_dev(mmc), ios)) + dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { + regulator_disable(mmc->supply.vqmmc); + host->vqmmc_enabled = false; + } + + break; + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + + /* + * The ST Micro variant doesn't have the PL180s MCI_PWR_UP + * and instead uses MCI_PWR_ON so apply whatever value is + * configured in the variant data. + */ + pwr |= variant->pwrreg_powerup; + + break; + case MMC_POWER_ON: + if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret < 0) + dev_err(mmc_dev(mmc), + "failed to enable vqmmc regulator\n"); + else + host->vqmmc_enabled = true; + } + + pwr |= MCI_PWR_ON; + break; + } + + if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { + /* + * The ST Micro variant has some additional bits + * indicating signal direction for the signals in + * the SD/MMC bus and feedback-clock usage. 
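+		 * (the MCI_ST_*DIREN and MCI_ST_FBCLKEN bits defined in mmci.h)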
+ */ + pwr |= host->pwr_reg_add; + + if (ios->bus_width == MMC_BUS_WIDTH_4) + pwr &= ~MCI_ST_DATA74DIREN; + else if (ios->bus_width == MMC_BUS_WIDTH_1) + pwr &= (~MCI_ST_DATA74DIREN & + ~MCI_ST_DATA31DIREN & + ~MCI_ST_DATA2DIREN); + } + + if (variant->opendrain) { + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) + pwr |= variant->opendrain; + } else { + /* + * If the variant cannot configure the pads by its own, then we + * expect the pinctrl to be able to do that for us + */ + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) + pinctrl_select_state(host->pinctrl, host->pins_opendrain); + else + pinctrl_select_default_state(mmc_dev(mmc)); + } + + /* + * If clock = 0 and the variant requires the MMCIPOWER to be used for + * gating the clock, the MCI_PWR_ON bit is cleared. + */ + if (!ios->clock && variant->pwrreg_clkgate) + pwr &= ~MCI_PWR_ON; + + if (host->variant->explicit_mclk_control && + ios->clock != host->clock_cache) { + ret = clk_set_rate(host->clk, ios->clock); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "Error setting clock rate (%d)\n", ret); + else + host->mclk = clk_get_rate(host->clk); + } + host->clock_cache = ios->clock; + + spin_lock_irqsave(&host->lock, flags); + + if (host->ops && host->ops->set_clkreg) + host->ops->set_clkreg(host, ios->clock); + else + mmci_set_clkreg(host, ios->clock); + + mmci_set_max_busy_timeout(mmc); + + if (host->ops && host->ops->set_pwrreg) + host->ops->set_pwrreg(host, pwr); + else + mmci_write_pwrreg(host, pwr); + + mmci_reg_delay(host); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static int mmci_get_cd(struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + struct mmci_platform_data *plat = host->plat; + unsigned int status = mmc_gpio_get_cd(mmc); + + if (status == -ENOSYS) { + if (!plat->status) + return 1; /* Assume always present */ + + status = plat->status(mmc_dev(host->mmc)); + } + return status; +} + +static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mmci_host *host = mmc_priv(mmc); + int ret; + + ret = mmc_regulator_set_vqmmc(mmc, ios); + + if (!ret && host->ops && host->ops->post_sig_volt_switch) + ret = host->ops->post_sig_volt_switch(host, ios); + else if (ret) + ret = 0; + + if (ret < 0) + dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); + + return ret; +} + +static struct mmc_host_ops mmci_ops = { + .request = mmci_request, + .pre_req = mmci_pre_request, + .post_req = mmci_post_request, + .set_ios = mmci_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmci_get_cd, + .start_signal_voltage_switch = mmci_sig_volt_switch, +}; + +static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + int ret = mmc_of_parse(mmc); + + if (ret) + return ret; + + if (of_get_property(np, "st,sig-dir-dat0", NULL)) + host->pwr_reg_add |= MCI_ST_DATA0DIREN; + if (of_get_property(np, "st,sig-dir-dat2", NULL)) + host->pwr_reg_add |= MCI_ST_DATA2DIREN; + if (of_get_property(np, "st,sig-dir-dat31", NULL)) + host->pwr_reg_add |= MCI_ST_DATA31DIREN; + if (of_get_property(np, "st,sig-dir-dat74", NULL)) + host->pwr_reg_add |= MCI_ST_DATA74DIREN; + if (of_get_property(np, "st,sig-dir-cmd", NULL)) + host->pwr_reg_add |= MCI_ST_CMDDIREN; + if (of_get_property(np, "st,sig-pin-fbclk", NULL)) + host->pwr_reg_add |= MCI_ST_FBCLKEN; + if (of_get_property(np, "st,sig-dir", NULL)) + host->pwr_reg_add |= MCI_STM32_DIRPOL; + if (of_get_property(np, "st,neg-edge", NULL)) + host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE; + if (of_get_property(np, "st,use-ckin", 
NULL)) + host->clk_reg_add |= MCI_STM32_CLK_SELCKIN; + + if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + + return 0; +} + +static int mmci_probe(struct amba_device *dev, + const struct amba_id *id) +{ + struct mmci_platform_data *plat = dev->dev.platform_data; + struct device_node *np = dev->dev.of_node; + struct variant_data *variant = id->data; + struct mmci_host *host; + struct mmc_host *mmc; + int ret; + + /* Must have platform data or Device Tree. */ + if (!plat && !np) { + dev_err(&dev->dev, "No plat data or DT found\n"); + return -EINVAL; + } + + if (!plat) { + plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + } + + mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); + if (!mmc) + return -ENOMEM; + + ret = mmci_of_parse(np, mmc); + if (ret) + goto host_free; + + host = mmc_priv(mmc); + host->mmc = mmc; + host->mmc_ops = &mmci_ops; + mmc->ops = &mmci_ops; + + /* + * Some variant (STM32) doesn't have opendrain bit, nevertheless + * pins can be set accordingly using pinctrl + */ + if (!variant->opendrain) { + host->pinctrl = devm_pinctrl_get(&dev->dev); + if (IS_ERR(host->pinctrl)) { + dev_err(&dev->dev, "failed to get pinctrl"); + ret = PTR_ERR(host->pinctrl); + goto host_free; + } + + host->pins_opendrain = pinctrl_lookup_state(host->pinctrl, + MMCI_PINCTRL_STATE_OPENDRAIN); + if (IS_ERR(host->pins_opendrain)) { + dev_err(mmc_dev(mmc), "Can't select opendrain pins\n"); + ret = PTR_ERR(host->pins_opendrain); + goto host_free; + } + } + + host->hw_designer = amba_manf(dev); + host->hw_revision = amba_rev(dev); + dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); + dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); + + host->clk = devm_clk_get(&dev->dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto host_free; + } + + ret = clk_prepare_enable(host->clk); + if (ret) + goto host_free; + + if (variant->qcom_fifo) + host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt; + else + host->get_rx_fifocnt = mmci_get_rx_fifocnt; + + host->plat = plat; + host->variant = variant; + host->mclk = clk_get_rate(host->clk); + /* + * According to the spec, mclk is max 100 MHz, + * so we try to adjust the clock down to this, + * (if possible). + */ + if (host->mclk > variant->f_max) { + ret = clk_set_rate(host->clk, variant->f_max); + if (ret < 0) + goto clk_disable; + host->mclk = clk_get_rate(host->clk); + dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", + host->mclk); + } + + host->phybase = dev->res.start; + host->base = devm_ioremap_resource(&dev->dev, &dev->res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto clk_disable; + } + + if (variant->init) + variant->init(host); + + /* + * The ARM and ST versions of the block have slightly different + * clock divider equations which means that the minimum divider + * differs too. 
+	 * On Qualcomm-like controllers, get the nearest minimum clock to
+	 * 100 kHz.
+	 */
+	if (variant->st_clkdiv)
+		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+	else if (variant->stm32_clkdiv)
+		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
+	else if (variant->explicit_mclk_control)
+		mmc->f_min = clk_round_rate(host->clk, 100000);
+	else
+		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
+	/*
+	 * If no maximum operating frequency is supplied, fall back to use
+	 * the module parameter, which has a (low) default value in case it
+	 * is not specified. Either value must not exceed the clock rate into
+	 * the block, of course.
+	 */
+	if (mmc->f_max)
+		mmc->f_max = variant->explicit_mclk_control ?
+			     min(variant->f_max, mmc->f_max) :
+			     min(host->mclk, mmc->f_max);
+	else
+		mmc->f_max = variant->explicit_mclk_control ?
+			     fmax : min(host->mclk, fmax);
+
+	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
+
+	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
+	if (IS_ERR(host->rst)) {
+		ret = PTR_ERR(host->rst);
+		goto clk_disable;
+	}
+
+	/* Get regulators and the supported OCR mask */
+	ret = mmc_regulator_get_supply(mmc);
+	if (ret)
+		goto clk_disable;
+
+	if (!mmc->ocr_avail)
+		mmc->ocr_avail = plat->ocr_mask;
+	else if (plat->ocr_mask)
+		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
+	/* We support these capabilities. */
+	mmc->caps |= MMC_CAP_CMD23;
+
+	/*
+	 * Enable busy detection.
+	 */
+	if (variant->busy_detect) {
+		mmci_ops.card_busy = mmci_card_busy;
+		/*
+		 * Not all variants have a flag to enable busy detection
+		 * in the DPSM, but if they do, set it here.
+		 */
+		if (variant->busy_dpsm_flag)
+			mmci_write_datactrlreg(host,
+					       host->variant->busy_dpsm_flag);
+		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+	}
+
+	/* Variants with a mandatory busy timeout in HW need R1B responses. */
+	if (variant->busy_timeout)
+		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
+	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
+	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
+	host->stop_abort.arg = 0;
+	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+	/* We support these PM capabilities. */
+	mmc->pm_caps |= MMC_PM_KEEP_POWER;
+
+	/*
+	 * We support scatter-gather I/O.
+	 */
+	mmc->max_segs = NR_SG;
+
+	/*
+	 * Since only a certain number of bits are valid in the data length
+	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
+	 * single request.
+	 */
+	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
+
+	/*
+	 * Set the maximum segment size. Since we aren't doing DMA
+	 * (yet) we are only limited by the data length register.
+	 */
+	mmc->max_seg_size = mmc->max_req_size;
+
+	/*
+	 * Block size can be up to 2048 bytes, but must be a power of two.
+	 */
+	mmc->max_blk_size = 1 << variant->datactrl_blocksz;
+
+	/*
+	 * Limit the number of blocks transferred so that we don't overflow
+	 * the maximum request size.
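+	 * (max_req_size above already reflects the width of the DATALENGTH
+	 * register, so max_blk_count is derived directly from it.)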
+	 */
+	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
+
+	spin_lock_init(&host->lock);
+
+	writel(0, host->base + MMCIMASK0);
+
+	if (variant->mmcimask1)
+		writel(0, host->base + MMCIMASK1);
+
+	writel(0xfff, host->base + MMCICLEAR);
+
+	/*
+	 * If we are not using DT but rely on a descriptor table, or use such
+	 * a table alongside DT, look up the descriptors named "cd" and "wp"
+	 * right here, and fail silently if they do not exist.
+	 */
+	if (!np) {
+		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
+		if (ret == -EPROBE_DEFER)
+			goto clk_disable;
+
+		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
+		if (ret == -EPROBE_DEFER)
+			goto clk_disable;
+	}
+
+	ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+					mmci_irq_thread, IRQF_SHARED,
+					DRIVER_NAME " (cmd)", host);
+	if (ret)
+		goto clk_disable;
+
+	if (!dev->irq[1])
+		host->singleirq = true;
+	else {
+		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
+				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
+		if (ret)
+			goto clk_disable;
+	}
+
+	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
+
+	amba_set_drvdata(dev, mmc);
+
+	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+		 amba_rev(dev), (unsigned long long)dev->res.start,
+		 dev->irq[0], dev->irq[1]);
+
+	mmci_dma_setup(host);
+
+	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+	pm_runtime_use_autosuspend(&dev->dev);
+
+	ret = mmc_add_host(mmc);
+	if (ret)
+		goto clk_disable;
+
+	pm_runtime_put(&dev->dev);
+	return 0;
+
+ clk_disable:
+	clk_disable_unprepare(host->clk);
+ host_free:
+	mmc_free_host(mmc);
+	return ret;
+}
+
+static void mmci_remove(struct amba_device *dev)
+{
+	struct mmc_host *mmc = amba_get_drvdata(dev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		struct variant_data *variant = host->variant;
+
+		/*
+		 * Undo pm_runtime_put() in probe.  We use the _sync
+		 * version here so that we can access the primecell.
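+		 * (the runtime-PM resume callback re-enables the bus clock
+		 * before the register writes below)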
+ */ + pm_runtime_get_sync(&dev->dev); + + mmc_remove_host(mmc); + + writel(0, host->base + MMCIMASK0); + + if (variant->mmcimask1) + writel(0, host->base + MMCIMASK1); + + writel(0, host->base + MMCICOMMAND); + writel(0, host->base + MMCIDATACTRL); + + mmci_dma_release(host); + clk_disable_unprepare(host->clk); + mmc_free_host(mmc); + } +} + +#ifdef CONFIG_PM +static void mmci_save(struct mmci_host *host) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + writel(0, host->base + MMCIMASK0); + if (host->variant->pwrreg_nopower) { + writel(0, host->base + MMCIDATACTRL); + writel(0, host->base + MMCIPOWER); + writel(0, host->base + MMCICLOCK); + } + mmci_reg_delay(host); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void mmci_restore(struct mmci_host *host) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (host->variant->pwrreg_nopower) { + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->datactrl_reg, host->base + MMCIDATACTRL); + writel(host->pwr_reg, host->base + MMCIPOWER); + } + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + mmci_reg_delay(host); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static int mmci_runtime_suspend(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + pinctrl_pm_select_sleep_state(dev); + mmci_save(host); + clk_disable_unprepare(host->clk); + } + + return 0; +} + +static int mmci_runtime_resume(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + clk_prepare_enable(host->clk); + mmci_restore(host); + pinctrl_select_default_state(dev); + } + + return 0; +} +#endif + +static const struct dev_pm_ops mmci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) +}; + +static const struct amba_id mmci_ids[] = { + { + .id = 0x00041180, + .mask = 0xff0fffff, + .data = &variant_arm, + }, + { + .id = 0x01041180, + .mask = 0xff0fffff, + .data = &variant_arm_extended_fifo, + }, + { + .id = 0x02041180, + .mask = 0xff0fffff, + .data = &variant_arm_extended_fifo_hwfc, + }, + { + .id = 0x00041181, + .mask = 0x000fffff, + .data = &variant_arm, + }, + /* ST Micro variants */ + { + .id = 0x00180180, + .mask = 0x00ffffff, + .data = &variant_u300, + }, + { + .id = 0x10180180, + .mask = 0xf0ffffff, + .data = &variant_nomadik, + }, + { + .id = 0x00280180, + .mask = 0x00ffffff, + .data = &variant_nomadik, + }, + { + .id = 0x00480180, + .mask = 0xf0ffffff, + .data = &variant_ux500, + }, + { + .id = 0x10480180, + .mask = 0xf0ffffff, + .data = &variant_ux500v2, + }, + { + .id = 0x00880180, + .mask = 0x00ffffff, + .data = &variant_stm32, + }, + { + .id = 0x10153180, + .mask = 0xf0ffffff, + .data = &variant_stm32_sdmmc, + }, + { + .id = 0x00253180, + .mask = 0xf0ffffff, + .data = &variant_stm32_sdmmcv2, + }, + /* Qualcomm variants */ + { + .id = 0x00051180, + .mask = 0x000fffff, + .data = &variant_qcom, + }, + { 0, 0 }, +}; + +MODULE_DEVICE_TABLE(amba, mmci_ids); + +static struct amba_driver mmci_driver = { + .drv = { + .name = DRIVER_NAME, + .pm = &mmci_dev_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .probe = mmci_probe, + .remove = mmci_remove, + .id_table = mmci_ids, +}; + 
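+/* module_amba_driver() expands to module_init()/module_exit() helpers that
+ * register and unregister this amba_driver. */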
+module_amba_driver(mmci_driver);
+
+module_param(fmax, uint, 0444);
+
+MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
new file mode 100644
index 000000000..e1a9b96a3
--- /dev/null
+++ b/drivers/mmc/host/mmci.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/mmc/host/mmci.h - ARM PrimeCell MMCI PL180/1 driver
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ */
+#define MMCIPOWER		0x000
+#define MCI_PWR_OFF		0x00
+#define MCI_PWR_UP		0x02
+#define MCI_PWR_ON		0x03
+#define MCI_OD			(1 << 6)
+#define MCI_ROD			(1 << 7)
+/*
+ * The ST Micro version does not have ROD and reuses the voltage registers
+ * for direction settings.
+ */
+#define MCI_ST_DATA2DIREN	(1 << 2)
+#define MCI_ST_CMDDIREN		(1 << 3)
+#define MCI_ST_DATA0DIREN	(1 << 4)
+#define MCI_ST_DATA31DIREN	(1 << 5)
+#define MCI_ST_FBCLKEN		(1 << 7)
+#define MCI_ST_DATA74DIREN	(1 << 8)
+/*
+ * The STM32 sdmmc does not have PWR_UP/OD/ROD and uses the power register
+ * for power cycling (MCI_STM32_PWR_CYC) and the voltage-switch handshake.
+ */
+#define MCI_STM32_PWR_CYC	0x02
+#define MCI_STM32_VSWITCH	BIT(2)
+#define MCI_STM32_VSWITCHEN	BIT(3)
+#define MCI_STM32_DIRPOL	BIT(4)
+
+#define MMCICLOCK		0x004
+#define MCI_CLK_ENABLE		(1 << 8)
+#define MCI_CLK_PWRSAVE		(1 << 9)
+#define MCI_CLK_BYPASS		(1 << 10)
+#define MCI_4BIT_BUS		(1 << 11)
+/*
+ * 8-bit wide buses, hardware flow control, negative edges and clock inversion
+ * are supported in ST Micro U300 and Ux500 versions.
+ */
+#define MCI_ST_8BIT_BUS		(1 << 12)
+#define MCI_ST_U300_HWFCEN	(1 << 13)
+#define MCI_ST_UX500_NEG_EDGE	(1 << 13)
+#define MCI_ST_UX500_HWFCEN	(1 << 14)
+#define MCI_ST_UX500_CLK_INV	(1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN		(1 << 12)
+
+/* Modified on Qualcomm Integrations */
+#define MCI_QCOM_CLK_WIDEBUS_8	(BIT(10) | BIT(11))
+#define MCI_QCOM_CLK_FLOWENA	BIT(12)
+#define MCI_QCOM_CLK_INVERTOUT	BIT(13)
+
+/* select in latch data and command in */
+#define MCI_QCOM_CLK_SELECT_IN_FBCLK	BIT(15)
+#define MCI_QCOM_CLK_SELECT_IN_DDR_MODE	(BIT(14) | BIT(15))
+
+/* Modified on STM32 sdmmc */
+#define MCI_STM32_CLK_CLKDIV_MSK	GENMASK(9, 0)
+#define MCI_STM32_CLK_WIDEBUS_4		BIT(14)
+#define MCI_STM32_CLK_WIDEBUS_8		BIT(15)
+#define MCI_STM32_CLK_NEGEDGE		BIT(16)
+#define MCI_STM32_CLK_HWFCEN		BIT(17)
+#define MCI_STM32_CLK_DDR		BIT(18)
+#define MCI_STM32_CLK_BUSSPEED		BIT(19)
+#define MCI_STM32_CLK_SEL_MSK		GENMASK(21, 20)
+#define MCI_STM32_CLK_SELCK		(0 << 20)
+#define MCI_STM32_CLK_SELCKIN		(1 << 20)
+#define MCI_STM32_CLK_SELFBCK		(2 << 20)
+
+#define MMCIARGUMENT		0x008
+
+/* The command register controls the Command Path State Machine (CPSM) */
+#define MMCICOMMAND		0x00c
+#define MCI_CPSM_RESPONSE	BIT(6)
+#define MCI_CPSM_LONGRSP	BIT(7)
+#define MCI_CPSM_INTERRUPT	BIT(8)
+#define MCI_CPSM_PENDING	BIT(9)
+#define MCI_CPSM_ENABLE		BIT(10)
+/* Command register flag extensions in the ST Micro versions */
+#define MCI_CPSM_ST_SDIO_SUSP		BIT(11)
+#define MCI_CPSM_ST_ENCMD_COMPL		BIT(12)
+#define MCI_CPSM_ST_NIEN		BIT(13)
+#define MCI_CPSM_ST_CE_ATACMD		BIT(14)
+/* Command register flag extensions in the Qualcomm versions */
+#define MCI_CPSM_QCOM_PROGENA		BIT(11)
+#define MCI_CPSM_QCOM_DATCMD		BIT(12)
+#define MCI_CPSM_QCOM_MCIABORT		BIT(13)
+#define MCI_CPSM_QCOM_CCSENABLE		BIT(14)
+#define MCI_CPSM_QCOM_CCSDISABLE	BIT(15)
+#define MCI_CPSM_QCOM_AUTO_CMD19	BIT(16)
+#define MCI_CPSM_QCOM_AUTO_CMD21	BIT(21)
+/* Command register in STM32 sdmmc versions
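+ * (the CPSM enable moved to bit 12 and the response type is encoded in the
+ * two-bit WAITRESP field)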
*/ +#define MCI_CPSM_STM32_CMDTRANS BIT(6) +#define MCI_CPSM_STM32_CMDSTOP BIT(7) +#define MCI_CPSM_STM32_WAITRESP_MASK GENMASK(9, 8) +#define MCI_CPSM_STM32_NORSP (0 << 8) +#define MCI_CPSM_STM32_SRSP_CRC (1 << 8) +#define MCI_CPSM_STM32_SRSP (2 << 8) +#define MCI_CPSM_STM32_LRSP_CRC (3 << 8) +#define MCI_CPSM_STM32_ENABLE BIT(12) + +#define MMCIRESPCMD 0x010 +#define MMCIRESPONSE0 0x014 +#define MMCIRESPONSE1 0x018 +#define MMCIRESPONSE2 0x01c +#define MMCIRESPONSE3 0x020 +#define MMCIDATATIMER 0x024 +#define MMCIDATALENGTH 0x028 + +/* The data control register controls the Data Path State Machine (DPSM) */ +#define MMCIDATACTRL 0x02c +#define MCI_DPSM_ENABLE BIT(0) +#define MCI_DPSM_DIRECTION BIT(1) +#define MCI_DPSM_MODE BIT(2) +#define MCI_DPSM_DMAENABLE BIT(3) +#define MCI_DPSM_BLOCKSIZE BIT(4) +/* Control register extensions in the ST Micro U300 and Ux500 versions */ +#define MCI_DPSM_ST_RWSTART BIT(8) +#define MCI_DPSM_ST_RWSTOP BIT(9) +#define MCI_DPSM_ST_RWMOD BIT(10) +#define MCI_DPSM_ST_SDIOEN BIT(11) +/* Control register extensions in the ST Micro Ux500 versions */ +#define MCI_DPSM_ST_DMAREQCTL BIT(12) +#define MCI_DPSM_ST_DBOOTMODEEN BIT(13) +#define MCI_DPSM_ST_BUSYMODE BIT(14) +#define MCI_DPSM_ST_DDRMODE BIT(15) +/* Control register extensions in the Qualcomm versions */ +#define MCI_DPSM_QCOM_DATA_PEND BIT(17) +#define MCI_DPSM_QCOM_RX_DATA_PEND BIT(20) +/* Control register extensions in STM32 versions */ +#define MCI_DPSM_STM32_MODE_BLOCK (0 << 2) +#define MCI_DPSM_STM32_MODE_SDIO (1 << 2) +#define MCI_DPSM_STM32_MODE_STREAM (2 << 2) +#define MCI_DPSM_STM32_MODE_BLOCK_STOP (3 << 2) + +#define MMCIDATACNT 0x030 +#define MMCISTATUS 0x034 +#define MCI_CMDCRCFAIL (1 << 0) +#define MCI_DATACRCFAIL (1 << 1) +#define MCI_CMDTIMEOUT (1 << 2) +#define MCI_DATATIMEOUT (1 << 3) +#define MCI_TXUNDERRUN (1 << 4) +#define MCI_RXOVERRUN (1 << 5) +#define MCI_CMDRESPEND (1 << 6) +#define MCI_CMDSENT (1 << 7) +#define MCI_DATAEND (1 << 8) +#define MCI_STARTBITERR (1 << 9) +#define MCI_DATABLOCKEND (1 << 10) +#define MCI_CMDACTIVE (1 << 11) +#define MCI_TXACTIVE (1 << 12) +#define MCI_RXACTIVE (1 << 13) +#define MCI_TXFIFOHALFEMPTY (1 << 14) +#define MCI_RXFIFOHALFFULL (1 << 15) +#define MCI_TXFIFOFULL (1 << 16) +#define MCI_RXFIFOFULL (1 << 17) +#define MCI_TXFIFOEMPTY (1 << 18) +#define MCI_RXFIFOEMPTY (1 << 19) +#define MCI_TXDATAAVLBL (1 << 20) +#define MCI_RXDATAAVLBL (1 << 21) +/* Extended status bits for the ST Micro variants */ +#define MCI_ST_SDIOIT (1 << 22) +#define MCI_ST_CEATAEND (1 << 23) +#define MCI_ST_CARDBUSY (1 << 24) +/* Extended status bits for the STM32 variants */ +#define MCI_STM32_BUSYD0 BIT(20) +#define MCI_STM32_BUSYD0END BIT(21) +#define MCI_STM32_VSWEND BIT(25) + +#define MMCICLEAR 0x038 +#define MCI_CMDCRCFAILCLR (1 << 0) +#define MCI_DATACRCFAILCLR (1 << 1) +#define MCI_CMDTIMEOUTCLR (1 << 2) +#define MCI_DATATIMEOUTCLR (1 << 3) +#define MCI_TXUNDERRUNCLR (1 << 4) +#define MCI_RXOVERRUNCLR (1 << 5) +#define MCI_CMDRESPENDCLR (1 << 6) +#define MCI_CMDSENTCLR (1 << 7) +#define MCI_DATAENDCLR (1 << 8) +#define MCI_STARTBITERRCLR (1 << 9) +#define MCI_DATABLOCKENDCLR (1 << 10) +/* Extended status bits for the ST Micro variants */ +#define MCI_ST_SDIOITC (1 << 22) +#define MCI_ST_CEATAENDC (1 << 23) +#define MCI_ST_BUSYENDC (1 << 24) +/* Extended clear bits for the STM32 variants */ +#define MCI_STM32_VSWENDC BIT(25) +#define MCI_STM32_CKSTOPC BIT(26) + +#define MMCIMASK0 0x03c +#define MCI_CMDCRCFAILMASK (1 << 0) +#define MCI_DATACRCFAILMASK (1 << 1) +#define 
MCI_CMDTIMEOUTMASK (1 << 2) +#define MCI_DATATIMEOUTMASK (1 << 3) +#define MCI_TXUNDERRUNMASK (1 << 4) +#define MCI_RXOVERRUNMASK (1 << 5) +#define MCI_CMDRESPENDMASK (1 << 6) +#define MCI_CMDSENTMASK (1 << 7) +#define MCI_DATAENDMASK (1 << 8) +#define MCI_STARTBITERRMASK (1 << 9) +#define MCI_DATABLOCKENDMASK (1 << 10) +#define MCI_CMDACTIVEMASK (1 << 11) +#define MCI_TXACTIVEMASK (1 << 12) +#define MCI_RXACTIVEMASK (1 << 13) +#define MCI_TXFIFOHALFEMPTYMASK (1 << 14) +#define MCI_RXFIFOHALFFULLMASK (1 << 15) +#define MCI_TXFIFOFULLMASK (1 << 16) +#define MCI_RXFIFOFULLMASK (1 << 17) +#define MCI_TXFIFOEMPTYMASK (1 << 18) +#define MCI_RXFIFOEMPTYMASK (1 << 19) +#define MCI_TXDATAAVLBLMASK (1 << 20) +#define MCI_RXDATAAVLBLMASK (1 << 21) +/* Extended status bits for the ST Micro variants */ +#define MCI_ST_SDIOITMASK (1 << 22) +#define MCI_ST_CEATAENDMASK (1 << 23) +#define MCI_ST_BUSYENDMASK (1 << 24) +/* Extended status bits for the STM32 variants */ +#define MCI_STM32_BUSYD0ENDMASK BIT(21) + +#define MMCIMASK1 0x040 +#define MMCIFIFOCNT 0x048 +#define MMCIFIFO 0x080 /* to 0x0bc */ + +/* STM32 sdmmc registers for IDMA (Internal DMA) */ +#define MMCI_STM32_IDMACTRLR 0x050 +#define MMCI_STM32_IDMAEN BIT(0) +#define MMCI_STM32_IDMALLIEN BIT(1) + +#define MMCI_STM32_IDMABSIZER 0x054 +#define MMCI_STM32_IDMABNDT_SHIFT 5 +#define MMCI_STM32_IDMABNDT_MASK GENMASK(12, 5) + +#define MMCI_STM32_IDMABASE0R 0x058 + +#define MMCI_STM32_IDMALAR 0x64 +#define MMCI_STM32_IDMALA_MASK GENMASK(13, 0) +#define MMCI_STM32_ABR BIT(29) +#define MMCI_STM32_ULS BIT(30) +#define MMCI_STM32_ULA BIT(31) + +#define MMCI_STM32_IDMABAR 0x68 + +#define MCI_IRQENABLE \ + (MCI_CMDCRCFAILMASK | MCI_DATACRCFAILMASK | MCI_CMDTIMEOUTMASK | \ + MCI_DATATIMEOUTMASK | MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK | \ + MCI_CMDRESPENDMASK | MCI_CMDSENTMASK) + +/* These interrupts are directed to IRQ1 when two IRQ lines are available */ +#define MCI_IRQ_PIO_MASK \ + (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \ + MCI_TXFIFOHALFEMPTYMASK) + +#define MCI_IRQ_PIO_STM32_MASK \ + (MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK) + +#define NR_SG 128 + +#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain" + +struct clk; +struct dma_chan; +struct mmci_host; + +/** + * struct variant_data - MMCI variant-specific quirks + * @clkreg: default value for MCICLOCK register + * @clkreg_enable: enable value for MMCICLOCK register + * @clkreg_8bit_bus_enable: enable value for 8 bit bus + * @clkreg_neg_edge_enable: enable value for inverted data/cmd output + * @cmdreg_cpsm_enable: enable value for CPSM + * @cmdreg_lrsp_crc: enable value for long response with crc + * @cmdreg_srsp_crc: enable value for short response with crc + * @cmdreg_srsp: enable value for short response without crc + * @cmdreg_stop: enable value for stop and abort transmission + * @datalength_bits: number of bits in the MMCIDATALENGTH register + * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY + * is asserted (likewise for RX) + * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY + * is asserted (likewise for RX) + * @data_cmd_enable: enable value for data commands. + * @st_sdio: enable ST specific SDIO logic + * @st_clkdiv: true if using a ST-specific clock divider algorithm + * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm + * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. 
+ * @datactrl_mask_sdio: SDIO enable mask in datactrl register
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block size is accepted by the
+ *		hardware, such as with some SDIO traffic that sends
+ *		odd-sized packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
+ * @datactrl_first: true if data must be set up before sending the command
+ * @datacnt_useless: true if the datacnt register cannot be used to read
+ *		remaining data
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @f_max: maximum clk frequency supported by the controller.
+ * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
+ * @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_timeout: true if the variant starts the data timer when the DPSM
+ *		enters the Wait_R or Busy state.
+ * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
+ * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
+ *		indicating that the card is busy
+ * @busy_detect_mask: bitmask identifying the bit in the MMCIMASK0 to mask for
+ *		getting busy end detection interrupts
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the ext. power supply
+ * @explicit_mclk_control: enable explicit mclk control in driver.
+ * @qcom_fifo: enables qcom specific fifo pio read logic.
+ * @qcom_dml: enables qcom specific dma glue for dma transfers.
+ * @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if variant has a MMCIMASK1 register.
+ * @irq_pio_mask: bitmask used to manage interrupt pio transfers in the
+ *		mmcimask register
+ * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
+ *		register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
+ * @dma_lli: true if variant has the dma linked-list feature.
+ * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size.
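+ * @init: optional callback, run at probe time, that installs the variant's
+ *		mmci_host_ops.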
+ */ +struct variant_data { + unsigned int clkreg; + unsigned int clkreg_enable; + unsigned int clkreg_8bit_bus_enable; + unsigned int clkreg_neg_edge_enable; + unsigned int cmdreg_cpsm_enable; + unsigned int cmdreg_lrsp_crc; + unsigned int cmdreg_srsp_crc; + unsigned int cmdreg_srsp; + unsigned int cmdreg_stop; + unsigned int datalength_bits; + unsigned int fifosize; + unsigned int fifohalfsize; + unsigned int data_cmd_enable; + unsigned int datactrl_mask_ddrmode; + unsigned int datactrl_mask_sdio; + unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; + u8 datactrl_first:1; + u8 datacnt_useless:1; + u8 st_sdio:1; + u8 st_clkdiv:1; + u8 stm32_clkdiv:1; + u32 pwrreg_powerup; + u32 f_max; + u8 signal_direction:1; + u8 pwrreg_clkgate:1; + u8 busy_detect:1; + u8 busy_timeout:1; + u32 busy_dpsm_flag; + u32 busy_detect_flag; + u32 busy_detect_mask; + u8 pwrreg_nopower:1; + u8 explicit_mclk_control:1; + u8 qcom_fifo:1; + u8 qcom_dml:1; + u8 reversed_irq_handling:1; + u8 mmcimask1:1; + unsigned int irq_pio_mask; + u32 start_err; + u32 opendrain; + u8 dma_lli:1; + u32 stm32_idmabsize_mask; + void (*init)(struct mmci_host *host); +}; + +/* mmci variant callbacks */ +struct mmci_host_ops { + int (*validate_data)(struct mmci_host *host, struct mmc_data *data); + int (*prep_data)(struct mmci_host *host, struct mmc_data *data, + bool next); + void (*unprep_data)(struct mmci_host *host, struct mmc_data *data, + int err); + u32 (*get_datactrl_cfg)(struct mmci_host *host); + void (*get_next_data)(struct mmci_host *host, struct mmc_data *data); + int (*dma_setup)(struct mmci_host *host); + void (*dma_release)(struct mmci_host *host); + int (*dma_start)(struct mmci_host *host, unsigned int *datactrl); + void (*dma_finalize)(struct mmci_host *host, struct mmc_data *data); + void (*dma_error)(struct mmci_host *host); + void (*set_clkreg)(struct mmci_host *host, unsigned int desired); + void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr); + bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk); + void (*pre_sig_volt_switch)(struct mmci_host *host); + int (*post_sig_volt_switch)(struct mmci_host *host, struct mmc_ios *ios); +}; + +struct mmci_host { + phys_addr_t phybase; + void __iomem *base; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_command stop_abort; + struct mmc_data *data; + struct mmc_host *mmc; + struct clk *clk; + u8 singleirq:1; + + struct reset_control *rst; + + spinlock_t lock; + + unsigned int mclk; + /* cached value of requested clk in set_ios */ + unsigned int clock_cache; + unsigned int cclk; + u32 pwr_reg; + u32 pwr_reg_add; + u32 clk_reg; + u32 clk_reg_add; + u32 datactrl_reg; + u32 busy_status; + u32 mask1_reg; + u8 vqmmc_enabled:1; + struct mmci_platform_data *plat; + struct mmc_host_ops *mmc_ops; + struct mmci_host_ops *ops; + struct variant_data *variant; + void *variant_priv; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_opendrain; + + u8 hw_designer; + u8 hw_revision:4; + + struct timer_list timer; + unsigned int oldstat; + u32 irq_action; + + /* pio stuff */ + struct sg_mapping_iter sg_miter; + unsigned int size; + int (*get_rx_fifocnt)(struct mmci_host *h, u32 status, int remain); + + u8 use_dma:1; + u8 dma_in_progress:1; + void *dma_priv; + + s32 next_cookie; +}; + +#define dma_inprogress(host) ((host)->dma_in_progress) + +void mmci_write_clkreg(struct mmci_host *host, u32 clk); +void mmci_write_pwrreg(struct mmci_host *host, u32 pwr); + +static inline u32 mmci_dctrl_blksz(struct mmci_host 
*host)
+{
+	return (ffs(host->data->blksz) - 1) << 4;
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
+			bool next);
+void mmci_dmae_unprep_data(struct mmci_host *host, struct mmc_data *data,
+			   int err);
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data);
+int mmci_dmae_setup(struct mmci_host *host);
+void mmci_dmae_release(struct mmci_host *host);
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl);
+void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data);
+void mmci_dmae_error(struct mmci_host *host);
+#endif
+
+#ifdef CONFIG_MMC_QCOM_DML
+void qcom_variant_init(struct mmci_host *host);
+#else
+static inline void qcom_variant_init(struct mmci_host *host) {}
+#endif
+
+#ifdef CONFIG_MMC_STM32_SDMMC
+void sdmmc_variant_init(struct mmci_host *host);
+#else
+static inline void sdmmc_variant_init(struct mmci_host *host) {}
+#endif
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
new file mode 100644
index 000000000..3da6112fb
--- /dev/null
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ */
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include "mmci.h"
+
+/* Registers */
+#define DML_CONFIG			0x00
+#define PRODUCER_CRCI_MSK		GENMASK(1, 0)
+#define PRODUCER_CRCI_DISABLE		0
+#define PRODUCER_CRCI_X_SEL		BIT(0)
+#define PRODUCER_CRCI_Y_SEL		BIT(1)
+#define CONSUMER_CRCI_MSK		GENMASK(3, 2)
+#define CONSUMER_CRCI_DISABLE		0
+#define CONSUMER_CRCI_X_SEL		BIT(2)
+#define CONSUMER_CRCI_Y_SEL		BIT(3)
+#define PRODUCER_TRANS_END_EN		BIT(4)
+#define BYPASS				BIT(16)
+#define DIRECT_MODE			BIT(17)
+#define INFINITE_CONS_TRANS		BIT(18)
+
+#define DML_SW_RESET			0x08
+#define DML_PRODUCER_START		0x0c
+#define DML_CONSUMER_START		0x10
+#define DML_PRODUCER_PIPE_LOGICAL_SIZE	0x14
+#define DML_CONSUMER_PIPE_LOGICAL_SIZE	0x18
+#define DML_PIPE_ID			0x1c
+#define PRODUCER_PIPE_ID_SHFT		0
+#define PRODUCER_PIPE_ID_MSK		GENMASK(4, 0)
+#define CONSUMER_PIPE_ID_SHFT		16
+#define CONSUMER_PIPE_ID_MSK		GENMASK(20, 16)
+
+#define DML_PRODUCER_BAM_BLOCK_SIZE	0x24
+#define DML_PRODUCER_BAM_TRANS_SIZE	0x28
+
+/* other definitions */
+#define PRODUCER_PIPE_LOGICAL_SIZE	4096
+#define CONSUMER_PIPE_LOGICAL_SIZE	4096
+
+#define DML_OFFSET			0x800
+
+static int qcom_dma_start(struct mmci_host *host, unsigned int *datactrl)
+{
+	u32 config;
+	void __iomem *base = host->base + DML_OFFSET;
+	struct mmc_data *data = host->data;
+	int ret = mmci_dmae_start(host, datactrl);
+
+	if (ret)
+		return ret;
+
+	if (data->flags & MMC_DATA_READ) {
+		/* Read operation: configure DML for producer operation */
+		/* Set producer CRCI-x and disable consumer CRCI */
+		config = readl_relaxed(base + DML_CONFIG);
+		config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL;
+		config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DISABLE;
+		writel_relaxed(config, base + DML_CONFIG);
+
+		/* Set the Producer BAM block size */
+		writel_relaxed(data->blksz, base + DML_PRODUCER_BAM_BLOCK_SIZE);
+
+		/* Set Producer BAM Transaction size */
+		writel_relaxed(data->blocks * data->blksz,
+			       base + DML_PRODUCER_BAM_TRANS_SIZE);
+		/* Set Producer Transaction End bit */
+		config = readl_relaxed(base + DML_CONFIG);
+		config |= PRODUCER_TRANS_END_EN;
+		writel_relaxed(config, base + DML_CONFIG);
+		/* Trigger producer */
+		writel_relaxed(1, base + DML_PRODUCER_START);
+	} else {
+		/* Write operation: configure DML for consumer operation */
+		/* Set consumer CRCI-x and disable producer CRCI */
+		config = readl_relaxed(base + DML_CONFIG);
+		config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL;
+		config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DISABLE;
+		writel_relaxed(config, base + DML_CONFIG);
+		/* Clear Producer Transaction End bit */
+		config = readl_relaxed(base + DML_CONFIG);
+		config &= ~PRODUCER_TRANS_END_EN;
+		writel_relaxed(config, base + DML_CONFIG);
+		/* Trigger consumer */
+		writel_relaxed(1, base + DML_CONSUMER_START);
+	}
+
+	/* make sure the dml is configured before dma is triggered */
+	wmb();
+	return 0;
+}
+
+static int of_get_dml_pipe_index(struct device_node *np, const char *name)
+{
+	int index;
+	struct of_phandle_args dma_spec;
+
+	index = of_property_match_string(np, "dma-names", name);
+
+	if (index < 0)
+		return -ENODEV;
+
+	if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+				       &dma_spec))
+		return -ENODEV;
+
+	if (dma_spec.args_count)
+		return dma_spec.args[0];
+
+	return -ENODEV;
+}
+
+/* Initialize the DML hardware connected to the SD card controller */
+static int qcom_dma_setup(struct mmci_host *host)
+{
+	u32 config;
+	void __iomem *base;
+	int consumer_id, producer_id;
+	struct device_node *np = host->mmc->parent->of_node;
+
+	if (mmci_dmae_setup(host))
+		return -EINVAL;
+
+	consumer_id = of_get_dml_pipe_index(np, "tx");
+	producer_id = of_get_dml_pipe_index(np, "rx");
+
+	if (producer_id < 0 || consumer_id < 0) {
+		mmci_dmae_release(host);
+		return -EINVAL;
+	}
+
+	base = host->base + DML_OFFSET;
+
+	/* Reset the DML block */
+	writel_relaxed(1, base + DML_SW_RESET);
+
+	/* Disable the producer and consumer CRCI */
+	config = (PRODUCER_CRCI_DISABLE | CONSUMER_CRCI_DISABLE);
+	/*
+	 * Disable the bypass mode. Bypass mode will only be used
+	 * if data transfer is to happen in PIO mode and we don't
+	 * want the BAM interface to connect with SDCC-DML.
+	 */
+	config &= ~BYPASS;
+	/*
+	 * Disable direct mode as we don't want the DML to master the
+	 * AHB bus; the BAM connected with the DML should master it.
+	 */
+	config &= ~DIRECT_MODE;
+	/*
+	 * Disable infinite mode transfer as we won't be doing any
+	 * infinite size data transfers. All data transfers will be
+	 * of finite data size.
+	 */
+	config &= ~INFINITE_CONS_TRANS;
+	writel_relaxed(config, base + DML_CONFIG);
+
+	/*
+	 * Initialize the logical BAM pipe size for producer
+	 * and consumer.
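+	 * (4096 bytes each, per PRODUCER/CONSUMER_PIPE_LOGICAL_SIZE above)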
+	 */
+	writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE,
+		       base + DML_PRODUCER_PIPE_LOGICAL_SIZE);
+	writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE,
+		       base + DML_CONSUMER_PIPE_LOGICAL_SIZE);
+
+	/* Initialize Producer/consumer pipe id */
+	writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT),
+		       base + DML_PIPE_ID);
+
+	/* Make sure dml initialization is finished */
+	mb();
+
+	return 0;
+}
+
+static u32 qcom_get_dctrl_cfg(struct mmci_host *host)
+{
+	return MCI_DPSM_ENABLE | (host->data->blksz << 4);
+}
+
+static struct mmci_host_ops qcom_variant_ops = {
+	.prep_data = mmci_dmae_prep_data,
+	.unprep_data = mmci_dmae_unprep_data,
+	.get_datactrl_cfg = qcom_get_dctrl_cfg,
+	.get_next_data = mmci_dmae_get_next_data,
+	.dma_setup = qcom_dma_setup,
+	.dma_release = mmci_dmae_release,
+	.dma_start = qcom_dma_start,
+	.dma_finalize = mmci_dmae_finalize,
+	.dma_error = mmci_dmae_error,
+};
+
+void qcom_variant_init(struct mmci_host *host)
+{
+	host->ops = &qcom_variant_ops;
+}
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
new file mode 100644
index 000000000..4cceb9bab
--- /dev/null
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Ludovic.barre@st.com for STMicroelectronics.
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/of_address.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include "mmci.h"
+
+#define SDMMC_LLI_BUF_LEN	PAGE_SIZE
+#define SDMMC_IDMA_BURST	BIT(MMCI_STM32_IDMABNDT_SHIFT)
+
+#define DLYB_CR			0x0
+#define DLYB_CR_DEN		BIT(0)
+#define DLYB_CR_SEN		BIT(1)
+
+#define DLYB_CFGR		0x4
+#define DLYB_CFGR_SEL_MASK	GENMASK(3, 0)
+#define DLYB_CFGR_UNIT_MASK	GENMASK(14, 8)
+#define DLYB_CFGR_LNG_MASK	GENMASK(27, 16)
+#define DLYB_CFGR_LNGF		BIT(31)
+
+#define DLYB_NB_DELAY		11
+#define DLYB_CFGR_SEL_MAX	(DLYB_NB_DELAY + 1)
+#define DLYB_CFGR_UNIT_MAX	127
+
+#define DLYB_LNG_TIMEOUT_US	1000
+#define SDMMC_VSWEND_TIMEOUT_US	10000
+
+struct sdmmc_lli_desc {
+	u32 idmalar;
+	u32 idmabase;
+	u32 idmasize;
+};
+
+struct sdmmc_idma {
+	dma_addr_t sg_dma;
+	void *sg_cpu;
+};
+
+struct sdmmc_dlyb {
+	void __iomem *base;
+	u32 unit;
+	u32 max;
+};
+
+static int sdmmc_idma_validate_data(struct mmci_host *host,
+				    struct mmc_data *data)
+{
+	struct scatterlist *sg;
+	int i;
+
+	/*
+	 * The idma has constraints on idmabase & idmasize for each element,
+	 * except the last element, which has no constraint on idmasize.
+	 */
+	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+		    !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
+			dev_err(mmc_dev(host->mmc),
+				"unaligned scatterlist: ofst:%x length:%d\n",
+				sg->offset, sg->length);
+			return -EINVAL;
+		}
+	}
+
+	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+		dev_err(mmc_dev(host->mmc),
+			"unaligned last scatterlist: ofst:%x length:%d\n",
+			sg->offset, sg->length);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _sdmmc_idma_prep_data(struct mmci_host *host,
+				 struct mmc_data *data)
+{
+	int n_elem;
+
+	n_elem = dma_map_sg(mmc_dev(host->mmc),
+			    data->sg,
+			    data->sg_len,
+			    mmc_get_dma_dir(data));
+
+	if (!n_elem) {
+		dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int sdmmc_idma_prep_data(struct mmci_host *host,
+				struct mmc_data *data, bool next)
+{
+	/* Check if the job is already prepared.
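+	 * (mmci_prep_data() stamps data->host_cookie with host->next_cookie
+	 * once the scatterlist has been mapped ahead of time)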
+	 */
+	if (!next && data->host_cookie == host->next_cookie)
+		return 0;
+
+	return _sdmmc_idma_prep_data(host, data);
+}
+
+static void sdmmc_idma_unprep_data(struct mmci_host *host,
+				   struct mmc_data *data, int err)
+{
+	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+		     mmc_get_dma_dir(data));
+}
+
+static int sdmmc_idma_setup(struct mmci_host *host)
+{
+	struct sdmmc_idma *idma;
+	struct device *dev = mmc_dev(host->mmc);
+
+	idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
+	if (!idma)
+		return -ENOMEM;
+
+	host->dma_priv = idma;
+
+	if (host->variant->dma_lli) {
+		idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
+						   &idma->sg_dma, GFP_KERNEL);
+		if (!idma->sg_cpu) {
+			dev_err(dev, "Failed to alloc IDMA descriptor\n");
+			return -ENOMEM;
+		}
+		host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
+			sizeof(struct sdmmc_lli_desc);
+		host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
+	} else {
+		host->mmc->max_segs = 1;
+		host->mmc->max_seg_size = host->mmc->max_req_size;
+	}
+
+	return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+}
+
+static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+{
+	struct sdmmc_idma *idma = host->dma_priv;
+	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
+	struct mmc_data *data = host->data;
+	struct scatterlist *sg;
+	int i;
+
+	if (!host->variant->dma_lli || data->sg_len == 1) {
+		writel_relaxed(sg_dma_address(data->sg),
+			       host->base + MMCI_STM32_IDMABASE0R);
+		writel_relaxed(MMCI_STM32_IDMAEN,
+			       host->base + MMCI_STM32_IDMACTRLR);
+		return 0;
+	}
+
+	for_each_sg(data->sg, sg, data->sg_len, i) {
+		desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
+		desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS
+			| MMCI_STM32_ABR;
+		desc[i].idmabase = sg_dma_address(sg);
+		desc[i].idmasize = sg_dma_len(sg);
+	}
+
+	/* mark the end of the linked list */
+	desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;
+
+	dma_wmb();
+	writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
+	writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
+	writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
+	writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
+	writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
+		       host->base + MMCI_STM32_IDMACTRLR);
+
+	return 0;
+}
+
+static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+
+	if (!data->host_cookie)
+		sdmmc_idma_unprep_data(host, data, 0);
+}
+
+static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
+{
+	unsigned int clk = 0, ddr = 0;
+
+	if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
+	    host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+		ddr = MCI_STM32_CLK_DDR;
+
+	/*
+	 * cclk = mclk / (2 * clkdiv)
+	 * clkdiv 0 => bypass
+	 * bypass is not possible in DDR mode
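+	 * e.g. mclk = 48 MHz, desired = 400 kHz -> clkdiv = 60, cclk = 400 kHz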
+	 */
+	if (desired) {
+		if (desired >= host->mclk && !ddr) {
+			host->cclk = host->mclk;
+		} else {
+			clk = DIV_ROUND_UP(host->mclk, 2 * desired);
+			if (clk > MCI_STM32_CLK_CLKDIV_MSK)
+				clk = MCI_STM32_CLK_CLKDIV_MSK;
+			host->cclk = host->mclk / (2 * clk);
+		}
+	} else {
+		/*
+		 * During the power-on phase the clock cannot be disabled;
+		 * only power-off and power-cycle deactivate it. If the
+		 * desired clock is 0, set the maximum divider.
+		 */
+		clk = MCI_STM32_CLK_CLKDIV_MSK;
+		host->cclk = host->mclk / (2 * clk);
+	}
+
+	/* Set actual clock for debug */
+	if (host->mmc->ios.power_mode == MMC_POWER_ON)
+		host->mmc->actual_clock = host->cclk;
+	else
+		host->mmc->actual_clock = 0;
+
+	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+		clk |= MCI_STM32_CLK_WIDEBUS_4;
+	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+		clk |= MCI_STM32_CLK_WIDEBUS_8;
+
+	clk |= MCI_STM32_CLK_HWFCEN;
+	clk |= host->clk_reg_add;
+	clk |= ddr;
+
+	/*
+	 * SDMMC_FBCK is selected when an external Delay Block is needed
+	 * with SDR104.
+	 */
+	if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) {
+		clk |= MCI_STM32_CLK_BUSSPEED;
+		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) {
+			clk &= ~MCI_STM32_CLK_SEL_MSK;
+			clk |= MCI_STM32_CLK_SELFBCK;
+		}
+	}
+
+	mmci_write_clkreg(host, clk);
+}
+
+static void sdmmc_dlyb_input_ck(struct sdmmc_dlyb *dlyb)
+{
+	if (!dlyb || !dlyb->base)
+		return;
+
+	/* Output clock = Input clock */
+	writel_relaxed(0, dlyb->base + DLYB_CR);
+}
+
+static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
+{
+	struct mmc_ios ios = host->mmc->ios;
+	struct sdmmc_dlyb *dlyb = host->variant_priv;
+
+	/* adds OF options */
+	pwr = host->pwr_reg_add;
+
+	sdmmc_dlyb_input_ck(dlyb);
+
+	if (ios.power_mode == MMC_POWER_OFF) {
+		/* Only a reset can power-off the sdmmc */
+		reset_control_assert(host->rst);
+		udelay(2);
+		reset_control_deassert(host->rst);
+
+		/*
+		 * Set the SDMMC in the power-cycle state: SDMMC_D[7:0],
+		 * SDMMC_CMD and SDMMC_CK are driven low, to prevent the
+		 * card from being supplied through the signal lines.
+		 */
+		mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
+	} else if (ios.power_mode == MMC_POWER_ON) {
+		/*
+		 * After a power-off (reset), the irq mask defined in the
+		 * probe function is lost; the default irq mask must be
+		 * activated again.
+		 */
+		writel(MCI_IRQENABLE | host->variant->start_err,
+		       host->base + MMCIMASK0);
+
+		/* preserve the voltage switch bits */
+		pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
+					MCI_STM32_VSWITCH);
+
+		/*
+		 * After the power-cycle state, the SDMMC must first be set
+		 * to the power-off state (SDMMC_D[7:0], SDMMC_CMD and
+		 * SDMMC_CK driven high); only then can it be moved to the
+		 * power-on state.
+		 */
+		mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
+		mdelay(1);
+		mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
+	}
+}
+
+static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
+{
+	u32 datactrl;
+
+	datactrl = mmci_dctrl_blksz(host);
+
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+	    host->data->blocks == 1)
+		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
+	else if (host->data->stop && !host->mrq->sbc)
+		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
+	else
+		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;
+
+	return datactrl;
+}
+
+static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+	void __iomem *base = host->base;
+	u32 busy_d0, busy_d0end, mask, sdmmc_status;
+
+	mask = readl_relaxed(base + MMCIMASK0);
+	sdmmc_status = readl_relaxed(base + MMCISTATUS);
+	busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
+	busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;
+
+	/* complete if there is an error or busy_d0end */
+	if ((status & err_msk) || busy_d0end)
+		goto complete;
+
+	/*
+	 * On a response, the busy signalling is reflected in the BUSYD0
+	 * flag: if busy_d0 is in progress, enable the busyd0end interrupt
+	 * to wait for its completion; otherwise the request has no busy step.
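+	 * The first busy edge also latches the command-done status bits.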
+ */ + if (busy_d0) { + if (!host->busy_status) { + writel_relaxed(mask | host->variant->busy_detect_mask, + base + MMCIMASK0); + host->busy_status = status & + (MCI_CMDSENT | MCI_CMDRESPEND); + } + return false; + } + +complete: + if (host->busy_status) { + writel_relaxed(mask & ~host->variant->busy_detect_mask, + base + MMCIMASK0); + host->busy_status = 0; + } + + writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR); + + return true; +} + +static void sdmmc_dlyb_set_cfgr(struct sdmmc_dlyb *dlyb, + int unit, int phase, bool sampler) +{ + u32 cfgr; + + writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR); + + cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) | + FIELD_PREP(DLYB_CFGR_SEL_MASK, phase); + writel_relaxed(cfgr, dlyb->base + DLYB_CFGR); + + if (!sampler) + writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR); +} + +static int sdmmc_dlyb_lng_tuning(struct mmci_host *host) +{ + struct sdmmc_dlyb *dlyb = host->variant_priv; + u32 cfgr; + int i, lng, ret; + + for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) { + sdmmc_dlyb_set_cfgr(dlyb, i, DLYB_CFGR_SEL_MAX, true); + + ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr, + (cfgr & DLYB_CFGR_LNGF), + 1, DLYB_LNG_TIMEOUT_US); + if (ret) { + dev_warn(mmc_dev(host->mmc), + "delay line cfg timeout unit:%d cfgr:%d\n", + i, cfgr); + continue; + } + + lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr); + if (lng < BIT(DLYB_NB_DELAY) && lng > 0) + break; + } + + if (i > DLYB_CFGR_UNIT_MAX) + return -EINVAL; + + dlyb->unit = i; + dlyb->max = __fls(lng); + + return 0; +} + +static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode) +{ + struct sdmmc_dlyb *dlyb = host->variant_priv; + int cur_len = 0, max_len = 0, end_of_len = 0; + int phase; + + for (phase = 0; phase <= dlyb->max; phase++) { + sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false); + + if (mmc_send_tuning(host->mmc, opcode, NULL)) { + cur_len = 0; + } else { + cur_len++; + if (cur_len > max_len) { + max_len = cur_len; + end_of_len = phase; + } + } + } + + if (!max_len) { + dev_err(mmc_dev(host->mmc), "no tuning point found\n"); + return -EINVAL; + } + + writel_relaxed(0, dlyb->base + DLYB_CR); + + phase = end_of_len - max_len / 2; + sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false); + + dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n", + dlyb->unit, dlyb->max, phase); + + return 0; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct mmci_host *host = mmc_priv(mmc); + struct sdmmc_dlyb *dlyb = host->variant_priv; + + if (!dlyb || !dlyb->base) + return -EINVAL; + + if (sdmmc_dlyb_lng_tuning(host)) + return -EINVAL; + + return sdmmc_dlyb_phase_tuning(host, opcode); +} + +static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host) +{ + /* clear the voltage switch completion flag */ + writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR); + /* enable Voltage switch procedure */ + mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN); +} + +static int sdmmc_post_sig_volt_switch(struct mmci_host *host, + struct mmc_ios *ios) +{ + unsigned long flags; + u32 status; + int ret = 0; + + spin_lock_irqsave(&host->lock, flags); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 && + host->pwr_reg & MCI_STM32_VSWITCHEN) { + mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH); + spin_unlock_irqrestore(&host->lock, flags); + + /* wait voltage switch completion while 10ms */ + ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS, + status, + (status & MCI_STM32_VSWEND), + 10, SDMMC_VSWEND_TIMEOUT_US); + + 
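+	/* Clear the switch-end/clock-stopped flags even if the poll timed out */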
+		writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
+			       host->base + MMCICLEAR);
+		spin_lock_irqsave(&host->lock, flags);
+		mmci_write_pwrreg(host, host->pwr_reg &
+				  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return ret;
+}
+
+static struct mmci_host_ops sdmmc_variant_ops = {
+	.validate_data = sdmmc_idma_validate_data,
+	.prep_data = sdmmc_idma_prep_data,
+	.unprep_data = sdmmc_idma_unprep_data,
+	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
+	.dma_setup = sdmmc_idma_setup,
+	.dma_start = sdmmc_idma_start,
+	.dma_finalize = sdmmc_idma_finalize,
+	.set_clkreg = mmci_sdmmc_set_clkreg,
+	.set_pwrreg = mmci_sdmmc_set_pwrreg,
+	.busy_complete = sdmmc_busy_complete,
+	.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
+	.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
+};
+
+void sdmmc_variant_init(struct mmci_host *host)
+{
+	struct device_node *np = host->mmc->parent->of_node;
+	void __iomem *base_dlyb;
+	struct sdmmc_dlyb *dlyb;
+
+	host->ops = &sdmmc_variant_ops;
+	host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);
+
+	base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
+	if (IS_ERR(base_dlyb))
+		return;
+
+	dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
+	if (!dlyb)
+		return;
+
+	dlyb->base = base_dlyb;
+	host->variant_priv = dlyb;
+	host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
+}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
new file mode 100644
index 000000000..c4baf8afb
--- /dev/null
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -0,0 +1,727 @@
+/*
+ * MOXA ART MMC host driver.
+ *
+ * Copyright (C) 2014 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technologies Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/bitops.h>
+#include <linux/of_dma.h>
+#include <linux/spinlock.h>
+
+#define REG_COMMAND		0
+#define REG_ARGUMENT		4
+#define REG_RESPONSE0		8
+#define REG_RESPONSE1		12
+#define REG_RESPONSE2		16
+#define REG_RESPONSE3		20
+#define REG_RESPONSE_COMMAND	24
+#define REG_DATA_CONTROL	28
+#define REG_DATA_TIMER		32
+#define REG_DATA_LENGTH		36
+#define REG_STATUS		40
+#define REG_CLEAR		44
+#define REG_INTERRUPT_MASK	48
+#define REG_POWER_CONTROL	52
+#define REG_CLOCK_CONTROL	56
+#define REG_BUS_WIDTH		60
+#define REG_DATA_WINDOW		64
+#define REG_FEATURE		68
+#define REG_REVISION		72
+
+/* REG_COMMAND */
+#define CMD_SDC_RESET		BIT(10)
+#define CMD_EN			BIT(9)
+#define CMD_APP_CMD		BIT(8)
+#define CMD_LONG_RSP		BIT(7)
+#define CMD_NEED_RSP		BIT(6)
+#define CMD_IDX_MASK		0x3f
+
+/* REG_RESPONSE_COMMAND */
+#define RSP_CMD_APP		BIT(6)
+#define RSP_CMD_IDX_MASK	0x3f
+
+/* REG_DATA_CONTROL */
+#define DCR_DATA_FIFO_RESET	BIT(8)
+#define DCR_DATA_THRES		BIT(7)
+#define DCR_DATA_EN		BIT(6)
+#define DCR_DMA_EN		BIT(5)
+#define DCR_DATA_WRITE		BIT(4)
+#define DCR_BLK_SIZE		0x0f
+
+/* REG_DATA_LENGTH */
+#define DATA_LEN_MASK		0xffffff
+
+/* REG_STATUS */
+#define WRITE_PROT		BIT(12)
+#define CARD_DETECT		BIT(11)
+/* Bits 1-10 below can be written to either register: interrupt mask or clear.
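+ * (writing a 1 to REG_CLEAR acknowledges the corresponding status bit)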
*/ +#define CARD_CHANGE BIT(10) +#define FIFO_ORUN BIT(9) +#define FIFO_URUN BIT(8) +#define DATA_END BIT(7) +#define CMD_SENT BIT(6) +#define DATA_CRC_OK BIT(5) +#define RSP_CRC_OK BIT(4) +#define DATA_TIMEOUT BIT(3) +#define RSP_TIMEOUT BIT(2) +#define DATA_CRC_FAIL BIT(1) +#define RSP_CRC_FAIL BIT(0) + +#define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \ + RSP_CRC_OK | CARD_DETECT | CMD_SENT) + +#define MASK_DATA (DATA_CRC_OK | DATA_END | \ + DATA_CRC_FAIL | DATA_TIMEOUT) + +#define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE) + +/* REG_POWER_CONTROL */ +#define SD_POWER_ON BIT(4) +#define SD_POWER_MASK 0x0f + +/* REG_CLOCK_CONTROL */ +#define CLK_HISPD BIT(9) +#define CLK_OFF BIT(8) +#define CLK_SD BIT(7) +#define CLK_DIV_MASK 0x7f + +/* REG_BUS_WIDTH */ +#define BUS_WIDTH_4_SUPPORT BIT(3) +#define BUS_WIDTH_4 BIT(2) +#define BUS_WIDTH_1 BIT(0) + +#define MMC_VDD_360 23 +#define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK) +#define MAX_RETRIES 500000 + +struct moxart_host { + spinlock_t lock; + + void __iomem *base; + + phys_addr_t reg_phys; + + struct dma_chan *dma_chan_tx; + struct dma_chan *dma_chan_rx; + struct dma_async_tx_descriptor *tx_desc; + struct mmc_host *mmc; + struct mmc_request *mrq; + struct scatterlist *cur_sg; + struct completion dma_complete; + struct completion pio_complete; + + u32 num_sg; + u32 data_remain; + u32 data_len; + u32 fifo_width; + u32 timeout; + u32 rate; + + long sysclk; + + bool have_dma; + bool is_removed; +}; + +static inline void moxart_init_sg(struct moxart_host *host, + struct mmc_data *data) +{ + host->cur_sg = data->sg; + host->num_sg = data->sg_len; + host->data_remain = host->cur_sg->length; + + if (host->data_remain > host->data_len) + host->data_remain = host->data_len; +} + +static inline int moxart_next_sg(struct moxart_host *host) +{ + int remain; + struct mmc_data *data = host->mrq->cmd->data; + + host->cur_sg++; + host->num_sg--; + + if (host->num_sg > 0) { + host->data_remain = host->cur_sg->length; + remain = host->data_len - data->bytes_xfered; + if (remain > 0 && remain < host->data_remain) + host->data_remain = remain; + } + + return host->num_sg; +} + +static int moxart_wait_for_status(struct moxart_host *host, + u32 mask, u32 *status) +{ + int ret = -ETIMEDOUT; + u32 i; + + for (i = 0; i < MAX_RETRIES; i++) { + *status = readl(host->base + REG_STATUS); + if (!(*status & mask)) { + udelay(5); + continue; + } + writel(*status & mask, host->base + REG_CLEAR); + ret = 0; + break; + } + + if (ret) + dev_err(mmc_dev(host->mmc), "timed out waiting for status\n"); + + return ret; +} + + +static void moxart_send_command(struct moxart_host *host, + struct mmc_command *cmd) +{ + u32 status, cmdctrl; + + writel(RSP_TIMEOUT | RSP_CRC_OK | + RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR); + writel(cmd->arg, host->base + REG_ARGUMENT); + + cmdctrl = cmd->opcode & CMD_IDX_MASK; + if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND || + cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS || + cmdctrl == SD_APP_SEND_NUM_WR_BLKS) + cmdctrl |= CMD_APP_CMD; + + if (cmd->flags & MMC_RSP_PRESENT) + cmdctrl |= CMD_NEED_RSP; + + if (cmd->flags & MMC_RSP_136) + cmdctrl |= CMD_LONG_RSP; + + writel(cmdctrl | CMD_EN, host->base + REG_COMMAND); + + if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT) + cmd->error = -ETIMEDOUT; + + if (status & RSP_TIMEOUT) { + cmd->error = -ETIMEDOUT; + return; + } + if (status & RSP_CRC_FAIL) { + cmd->error = -EIO; + return; + } + if (status & RSP_CRC_OK) { + if (cmd->flags & MMC_RSP_136) 
{ + cmd->resp[3] = readl(host->base + REG_RESPONSE0); + cmd->resp[2] = readl(host->base + REG_RESPONSE1); + cmd->resp[1] = readl(host->base + REG_RESPONSE2); + cmd->resp[0] = readl(host->base + REG_RESPONSE3); + } else { + cmd->resp[0] = readl(host->base + REG_RESPONSE0); + } + } +} + +static void moxart_dma_complete(void *param) +{ + struct moxart_host *host = param; + + complete(&host->dma_complete); +} + +static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) +{ + u32 len, dir_slave; + long dma_time; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *dma_chan; + + if (host->data_len == data->bytes_xfered) + return; + + if (data->flags & MMC_DATA_WRITE) { + dma_chan = host->dma_chan_tx; + dir_slave = DMA_MEM_TO_DEV; + } else { + dma_chan = host->dma_chan_rx; + dir_slave = DMA_DEV_TO_MEM; + } + + len = dma_map_sg(dma_chan->device->dev, data->sg, + data->sg_len, mmc_get_dma_dir(data)); + + if (len > 0) { + desc = dmaengine_prep_slave_sg(dma_chan, data->sg, + len, dir_slave, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + } else { + dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n"); + } + + if (desc) { + host->tx_desc = desc; + desc->callback = moxart_dma_complete; + desc->callback_param = host; + dmaengine_submit(desc); + dma_async_issue_pending(dma_chan); + } + + data->bytes_xfered += host->data_remain; + + dma_time = wait_for_completion_interruptible_timeout( + &host->dma_complete, host->timeout); + + dma_unmap_sg(dma_chan->device->dev, + data->sg, data->sg_len, + mmc_get_dma_dir(data)); +} + + +static void moxart_transfer_pio(struct moxart_host *host) +{ + struct mmc_data *data = host->mrq->cmd->data; + u32 *sgp, len = 0, remain, status; + + if (host->data_len == data->bytes_xfered) + return; + + sgp = sg_virt(host->cur_sg); + remain = host->data_remain; + + if (data->flags & MMC_DATA_WRITE) { + while (remain > 0) { + if (moxart_wait_for_status(host, FIFO_URUN, &status) + == -ETIMEDOUT) { + data->error = -ETIMEDOUT; + complete(&host->pio_complete); + return; + } + for (len = 0; len < remain && len < host->fifo_width;) { + iowrite32(*sgp, host->base + REG_DATA_WINDOW); + sgp++; + len += 4; + } + remain -= len; + } + + } else { + while (remain > 0) { + if (moxart_wait_for_status(host, FIFO_ORUN, &status) + == -ETIMEDOUT) { + data->error = -ETIMEDOUT; + complete(&host->pio_complete); + return; + } + for (len = 0; len < remain && len < host->fifo_width;) { + *sgp = ioread32(host->base + REG_DATA_WINDOW); + sgp++; + len += 4; + } + remain -= len; + } + } + + data->bytes_xfered += host->data_remain - remain; + host->data_remain = remain; + + if (host->data_len != data->bytes_xfered) + moxart_next_sg(host); + else + complete(&host->pio_complete); +} + +static void moxart_prepare_data(struct moxart_host *host) +{ + struct mmc_data *data = host->mrq->cmd->data; + u32 datactrl; + int blksz_bits; + + if (!data) + return; + + host->data_len = data->blocks * data->blksz; + blksz_bits = ffs(data->blksz) - 1; + BUG_ON(1 << blksz_bits != data->blksz); + + moxart_init_sg(host, data); + + datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE); + + if (data->flags & MMC_DATA_WRITE) + datactrl |= DCR_DATA_WRITE; + + if ((host->data_len > host->fifo_width) && host->have_dma) + datactrl |= DCR_DMA_EN; + + writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); + writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); + writel(host->rate, host->base + REG_DATA_TIMER); + writel(host->data_len, host->base + REG_DATA_LENGTH); + writel(datactrl, 
host->base + REG_DATA_CONTROL); +} + +static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct moxart_host *host = mmc_priv(mmc); + long pio_time; + unsigned long flags; + u32 status; + + spin_lock_irqsave(&host->lock, flags); + + init_completion(&host->dma_complete); + init_completion(&host->pio_complete); + + host->mrq = mrq; + + if (readl(host->base + REG_STATUS) & CARD_DETECT) { + mrq->cmd->error = -ETIMEDOUT; + goto request_done; + } + + moxart_prepare_data(host); + moxart_send_command(host, host->mrq->cmd); + + if (mrq->cmd->data) { + if ((host->data_len > host->fifo_width) && host->have_dma) { + + writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); + + spin_unlock_irqrestore(&host->lock, flags); + + moxart_transfer_dma(mrq->cmd->data, host); + + spin_lock_irqsave(&host->lock, flags); + } else { + + writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK); + + spin_unlock_irqrestore(&host->lock, flags); + + /* PIO transfers start from interrupt. */ + pio_time = wait_for_completion_interruptible_timeout( + &host->pio_complete, host->timeout); + + spin_lock_irqsave(&host->lock, flags); + } + + if (host->is_removed) { + dev_err(mmc_dev(host->mmc), "card removed\n"); + mrq->cmd->error = -ETIMEDOUT; + goto request_done; + } + + if (moxart_wait_for_status(host, MASK_DATA, &status) + == -ETIMEDOUT) { + mrq->cmd->data->error = -ETIMEDOUT; + goto request_done; + } + + if (status & DATA_CRC_FAIL) + mrq->cmd->data->error = -ETIMEDOUT; + + if (mrq->cmd->data->stop) + moxart_send_command(host, mrq->cmd->data->stop); + } + +request_done: + spin_unlock_irqrestore(&host->lock, flags); + mmc_request_done(host->mmc, mrq); +} + +static irqreturn_t moxart_irq(int irq, void *devid) +{ + struct moxart_host *host = (struct moxart_host *)devid; + u32 status; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + status = readl(host->base + REG_STATUS); + if (status & CARD_CHANGE) { + host->is_removed = status & CARD_DETECT; + if (host->is_removed && host->have_dma) { + dmaengine_terminate_all(host->dma_chan_tx); + dmaengine_terminate_all(host->dma_chan_rx); + } + host->mrq = NULL; + writel(MASK_INTR_PIO, host->base + REG_CLEAR); + writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); + mmc_detect_change(host->mmc, 0); + } + if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq) + moxart_transfer_pio(host); + + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_HANDLED; +} + +static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct moxart_host *host = mmc_priv(mmc); + unsigned long flags; + u8 power, div; + u32 ctrl; + + spin_lock_irqsave(&host->lock, flags); + + if (ios->clock) { + for (div = 0; div < CLK_DIV_MASK; ++div) { + if (ios->clock >= host->sysclk / (2 * (div + 1))) + break; + } + ctrl = CLK_SD | div; + host->rate = host->sysclk / (2 * (div + 1)); + if (host->rate > host->sysclk) + ctrl |= CLK_HISPD; + writel(ctrl, host->base + REG_CLOCK_CONTROL); + } + + if (ios->power_mode == MMC_POWER_OFF) { + writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON, + host->base + REG_POWER_CONTROL); + } else { + if (ios->vdd < MIN_POWER) + power = 0; + else + power = ios->vdd - MIN_POWER; + + writel(SD_POWER_ON | (u32) power, + host->base + REG_POWER_CONTROL); + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_4: + writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); + break; + default: + writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); + break; + } + + spin_unlock_irqrestore(&host->lock, flags); +} + + +static int moxart_get_ro(struct 
mmc_host *mmc) +{ + struct moxart_host *host = mmc_priv(mmc); + + return !!(readl(host->base + REG_STATUS) & WRITE_PROT); +} + +static const struct mmc_host_ops moxart_ops = { + .request = moxart_request, + .set_ios = moxart_set_ios, + .get_ro = moxart_get_ro, +}; + +static int moxart_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource res_mmc; + struct mmc_host *mmc; + struct moxart_host *host = NULL; + struct dma_slave_config cfg; + struct clk *clk; + void __iomem *reg_mmc; + int irq, ret; + u32 i; + + mmc = mmc_alloc_host(sizeof(struct moxart_host), dev); + if (!mmc) { + dev_err(dev, "mmc_alloc_host failed\n"); + ret = -ENOMEM; + goto out_mmc; + } + + ret = of_address_to_resource(node, 0, &res_mmc); + if (ret) { + dev_err(dev, "of_address_to_resource failed\n"); + goto out_mmc; + } + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) { + dev_err(dev, "irq_of_parse_and_map failed\n"); + ret = -EINVAL; + goto out_mmc; + } + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto out_mmc; + } + + reg_mmc = devm_ioremap_resource(dev, &res_mmc); + if (IS_ERR(reg_mmc)) { + ret = PTR_ERR(reg_mmc); + goto out_mmc; + } + + ret = mmc_of_parse(mmc); + if (ret) + goto out_mmc; + + host = mmc_priv(mmc); + host->mmc = mmc; + host->base = reg_mmc; + host->reg_phys = res_mmc.start; + host->timeout = msecs_to_jiffies(1000); + host->sysclk = clk_get_rate(clk); + host->fifo_width = readl(host->base + REG_FEATURE) << 2; + host->dma_chan_tx = dma_request_chan(dev, "tx"); + host->dma_chan_rx = dma_request_chan(dev, "rx"); + + spin_lock_init(&host->lock); + + mmc->ops = &moxart_ops; + mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2); + mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2); + mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. 
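+					  * (0xffff00 sets OCR bits 8-23,
+					  * i.e. 2.0 V through 3.6 V)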
					  */
+
+	if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
+		if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
+		    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto out;
+		}
+		if (!IS_ERR(host->dma_chan_tx)) {
+			dma_release_channel(host->dma_chan_tx);
+			host->dma_chan_tx = NULL;
+		}
+		if (!IS_ERR(host->dma_chan_rx)) {
+			dma_release_channel(host->dma_chan_rx);
+			host->dma_chan_rx = NULL;
+		}
+		dev_dbg(dev, "PIO mode transfer enabled\n");
+		host->have_dma = false;
+	} else {
+		dev_dbg(dev, "DMA channels found (%p,%p)\n",
+			host->dma_chan_tx, host->dma_chan_rx);
+		host->have_dma = true;
+
+		memset(&cfg, 0, sizeof(cfg));
+		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+		cfg.direction = DMA_MEM_TO_DEV;
+		cfg.src_addr = 0;
+		cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
+		dmaengine_slave_config(host->dma_chan_tx, &cfg);
+
+		cfg.direction = DMA_DEV_TO_MEM;
+		cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
+		cfg.dst_addr = 0;
+		dmaengine_slave_config(host->dma_chan_rx, &cfg);
+	}
+
+	if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
+		mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	writel(0, host->base + REG_INTERRUPT_MASK);
+
+	writel(CMD_SDC_RESET, host->base + REG_COMMAND);
+	for (i = 0; i < MAX_RETRIES; i++) {
+		if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
+			break;
+		udelay(5);
+	}
+
+	ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
+	if (ret)
+		goto out;
+
+	dev_set_drvdata(dev, mmc);
+	ret = mmc_add_host(mmc);
+	if (ret)
+		goto out;
+
+	dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
+
+	return 0;
+
+out:
+	if (!IS_ERR_OR_NULL(host->dma_chan_tx))
+		dma_release_channel(host->dma_chan_tx);
+	if (!IS_ERR_OR_NULL(host->dma_chan_rx))
+		dma_release_channel(host->dma_chan_rx);
+out_mmc:
+	if (mmc)
+		mmc_free_host(mmc);
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+	struct moxart_host *host = mmc_priv(mmc);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (!IS_ERR_OR_NULL(host->dma_chan_tx))
+		dma_release_channel(host->dma_chan_tx);
+	if (!IS_ERR_OR_NULL(host->dma_chan_rx))
+		dma_release_channel(host->dma_chan_rx);
+	mmc_remove_host(mmc);
+
+	writel(0, host->base + REG_INTERRUPT_MASK);
+	writel(0, host->base + REG_POWER_CONTROL);
+	writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
+	       host->base + REG_CLOCK_CONTROL);
+	mmc_free_host(mmc);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_mmc_match[] = {
+	{ .compatible = "moxa,moxart-mmc" },
+	{ .compatible = "faraday,ftsdc010" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, moxart_mmc_match);
+
+static struct platform_driver moxart_mmc_driver = {
+	.probe      = moxart_probe,
+	.remove     = moxart_remove,
+	.driver     = {
+		.name		= "mmc-moxart",
+		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
+		.of_match_table	= moxart_mmc_match,
+	},
+};
+module_platform_driver(moxart_mmc_driver);
+
+MODULE_ALIAS("platform:mmc-moxart");
+MODULE_DESCRIPTION("MOXA ART MMC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
new file mode 100644
index 000000000..2c9ea5ed0
--- /dev/null
+++ b/drivers/mmc/host/mtk-sd.c
@@ -0,0 +1,2735 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 MediaTek Inc.
+ * Author: Chaotian.Jing <chaotian.jing@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "cqhci.h"
+
+#define MAX_BD_NUM          1024
+
+/*--------------------------------------------------------------------------*/
+/* Common Definition                                                        */
+/*--------------------------------------------------------------------------*/
+#define MSDC_BUS_1BITS          0x0
+#define MSDC_BUS_4BITS          0x1
+#define MSDC_BUS_8BITS          0x2
+
+#define MSDC_BURST_64B          0x6
+
+/*--------------------------------------------------------------------------*/
+/* Register Offset                                                          */
+/*--------------------------------------------------------------------------*/
+#define MSDC_CFG         0x0
+#define MSDC_IOCON       0x04
+#define MSDC_PS          0x08
+#define MSDC_INT         0x0c
+#define MSDC_INTEN       0x10
+#define MSDC_FIFOCS      0x14
+#define SDC_CFG          0x30
+#define SDC_CMD          0x34
+#define SDC_ARG          0x38
+#define SDC_STS          0x3c
+#define SDC_RESP0        0x40
+#define SDC_RESP1        0x44
+#define SDC_RESP2        0x48
+#define SDC_RESP3        0x4c
+#define SDC_BLK_NUM      0x50
+#define SDC_ADV_CFG0     0x64
+#define EMMC_IOCON       0x7c
+#define SDC_ACMD_RESP    0x80
+#define DMA_SA_H4BIT     0x8c
+#define MSDC_DMA_SA      0x90
+#define MSDC_DMA_CTRL    0x98
+#define MSDC_DMA_CFG     0x9c
+#define MSDC_PATCH_BIT   0xb0
+#define MSDC_PATCH_BIT1  0xb4
+#define MSDC_PATCH_BIT2  0xb8
+#define MSDC_PAD_TUNE    0xec
+#define MSDC_PAD_TUNE0   0xf0
+#define PAD_DS_TUNE      0x188
+#define PAD_CMD_TUNE     0x18c
+#define EMMC50_CFG0      0x208
+#define EMMC50_CFG3      0x220
+#define SDC_FIFO_CFG     0x228
+
+/*--------------------------------------------------------------------------*/
+/* Top Pad Register Offset                                                  */
+/*--------------------------------------------------------------------------*/
+#define EMMC_TOP_CONTROL	0x00
+#define EMMC_TOP_CMD		0x04
+#define EMMC50_PAD_DS_TUNE	0x0c
+
+/*--------------------------------------------------------------------------*/
+/* Register Mask                                                            */
+/*--------------------------------------------------------------------------*/
+
+/* MSDC_CFG mask */
+#define MSDC_CFG_MODE           (0x1 << 0)	/* RW */
+#define MSDC_CFG_CKPDN          (0x1 << 1)	/* RW */
+#define MSDC_CFG_RST            (0x1 << 2)	/* RW */
+#define MSDC_CFG_PIO            (0x1 << 3)	/* RW */
+#define MSDC_CFG_CKDRVEN        (0x1 << 4)	/* RW */
+#define MSDC_CFG_BV18SDT        (0x1 << 5)	/* RW */
+#define MSDC_CFG_BV18PSS        (0x1 << 6)	/* R  */
+#define MSDC_CFG_CKSTB          (0x1 << 7)	/* R  */
+#define MSDC_CFG_CKDIV          (0xff << 8)	/* RW */
+#define MSDC_CFG_CKMOD          (0x3 << 16)	/* RW */
+#define MSDC_CFG_HS400_CK_MODE  (0x1 << 18)	/* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA  (0x1 << 22)	/* RW */
+#define MSDC_CFG_CKDIV_EXTRA    (0xfff << 8)	/* RW */
+#define MSDC_CFG_CKMOD_EXTRA    (0x3 << 20)	/* RW */
+
+/* MSDC_IOCON mask */
+#define MSDC_IOCON_SDR104CKS    (0x1 << 0)	/* RW */
+#define MSDC_IOCON_RSPL         (0x1 << 1)	/* RW */
+#define MSDC_IOCON_DSPL         (0x1 << 2)	/* RW */
+#define MSDC_IOCON_DDLSEL       (0x1 << 3)	/* RW */
+#define MSDC_IOCON_DDR50CKD     (0x1 << 4)	/* RW */
+#define MSDC_IOCON_DSPLSEL      (0x1 << 5)	/* RW */
+#define MSDC_IOCON_W_DSPL       (0x1 << 8)	/* RW */
+#define MSDC_IOCON_D0SPL        (0x1 << 16)	/* RW */
+#define MSDC_IOCON_D1SPL        (0x1 << 17)	/* RW */
+#define MSDC_IOCON_D2SPL        (0x1 << 18)	/* RW */
+#define MSDC_IOCON_D3SPL        (0x1 << 19)	/* RW */
+#define MSDC_IOCON_D4SPL        (0x1 << 20)	/* RW */
+#define MSDC_IOCON_D5SPL        (0x1 << 21)	/* RW */
+#define MSDC_IOCON_D6SPL        (0x1 << 22)	/* RW */
+#define MSDC_IOCON_D7SPL        (0x1 << 23)	/* RW */
+#define
MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */ + +/* MSDC_PS mask */ +#define MSDC_PS_CDEN (0x1 << 0) /* RW */ +#define MSDC_PS_CDSTS (0x1 << 1) /* R */ +#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ +#define MSDC_PS_DAT (0xff << 16) /* R */ +#define MSDC_PS_DATA1 (0x1 << 17) /* R */ +#define MSDC_PS_CMD (0x1 << 24) /* R */ +#define MSDC_PS_WP (0x1 << 31) /* R */ + +/* MSDC_INT mask */ +#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */ +#define MSDC_INT_CDSC (0x1 << 1) /* W1C */ +#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */ +#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */ +#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */ +#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */ +#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */ +#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */ +#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */ +#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */ +#define MSDC_INT_CSTA (0x1 << 11) /* R */ +#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */ +#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */ +#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */ +#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */ +#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */ +#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */ +#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */ +#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */ +#define MSDC_INT_CMDQ (0x1 << 28) /* W1C */ + +/* MSDC_INTEN mask */ +#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */ +#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */ +#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */ +#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */ +#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */ +#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */ +#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */ +#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */ +#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */ +#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */ +#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */ +#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */ +#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */ +#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */ +#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */ +#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */ +#define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */ +#define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */ +#define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */ + +/* MSDC_FIFOCS mask */ +#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */ +#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */ +#define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */ + +/* SDC_CFG mask */ +#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */ +#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */ +#define SDC_CFG_WRDTOC (0x1fff << 2) /* RW */ +#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */ +#define SDC_CFG_SDIO (0x1 << 19) /* RW */ +#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */ +#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */ +#define SDC_CFG_DTOC (0xff << 24) /* RW */ + +/* SDC_STS mask */ +#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */ +#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ +#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ + +#define SDC_DAT1_IRQ_TRIGGER (0x1 << 19) /* RW */ +/* SDC_ADV_CFG0 mask */ +#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */ + +/* DMA_SA_H4BIT mask */ +#define DMA_ADDR_HIGH_4BIT (0xf << 0) /* RW */ + +/* MSDC_DMA_CTRL mask */ +#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ +#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ +#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */ +#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */ +#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */ +#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) 
/* RW */ + +/* MSDC_DMA_CFG mask */ +#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */ +#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */ +#define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */ +#define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */ +#define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */ + +/* MSDC_PATCH_BIT mask */ +#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */ +#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7) +#define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10) +#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */ +#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */ +#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */ +#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */ +#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */ +#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */ +#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */ +#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ +#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ + +#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */ +#define MSDC_PB1_BUSY_CHECK_SEL (0x1 << 7) /* RW */ +#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ + +#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ +#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */ +#define MSDC_PB2_SUPPORT_64G (0x1 << 1) /* RW */ +#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */ +#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */ +#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */ + +#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */ +#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */ +#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */ +#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */ +#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */ +#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */ +#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */ +#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */ + +#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */ +#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */ +#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */ + +#define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */ + +#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */ +#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */ +#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */ + +#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */ + +#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */ +#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */ + +/* EMMC_TOP_CONTROL mask */ +#define PAD_RXDLY_SEL (0x1 << 0) /* RW */ +#define DELAY_EN (0x1 << 1) /* RW */ +#define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */ +#define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */ +#define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */ +#define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */ +#define DATA_K_VALUE_SEL (0x1 << 14) /* RW */ +#define SDC_RX_ENH_EN (0x1 << 15) /* TW */ + +/* EMMC_TOP_CMD mask */ +#define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */ +#define PAD_CMD_RXDLY (0x1f << 5) /* RW */ +#define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */ +#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */ +#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */ + +#define REQ_CMD_EIO (0x1 << 0) +#define REQ_CMD_TMO (0x1 << 1) +#define REQ_DAT_ERR (0x1 << 2) +#define REQ_STOP_EIO (0x1 << 3) +#define REQ_STOP_TMO (0x1 << 4) +#define REQ_CMD_BUSY (0x1 << 5) + +#define MSDC_PREPARE_FLAG (0x1 << 0) +#define MSDC_ASYNC_FLAG (0x1 << 1) +#define MSDC_MMAP_FLAG (0x1 << 2) + +#define MTK_MMC_AUTOSUSPEND_DELAY 50 +#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */ +#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */ + +#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */ + +#define PAD_DELAY_MAX 32 /* PAD 
delay cells */ +/*--------------------------------------------------------------------------*/ +/* Descriptor Structure */ +/*--------------------------------------------------------------------------*/ +struct mt_gpdma_desc { + u32 gpd_info; +#define GPDMA_DESC_HWO (0x1 << 0) +#define GPDMA_DESC_BDP (0x1 << 1) +#define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */ +#define GPDMA_DESC_INT (0x1 << 16) +#define GPDMA_DESC_NEXT_H4 (0xf << 24) +#define GPDMA_DESC_PTR_H4 (0xf << 28) + u32 next; + u32 ptr; + u32 gpd_data_len; +#define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */ +#define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */ + u32 arg; + u32 blknum; + u32 cmd; +}; + +struct mt_bdma_desc { + u32 bd_info; +#define BDMA_DESC_EOL (0x1 << 0) +#define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */ +#define BDMA_DESC_BLKPAD (0x1 << 17) +#define BDMA_DESC_DWPAD (0x1 << 18) +#define BDMA_DESC_NEXT_H4 (0xf << 24) +#define BDMA_DESC_PTR_H4 (0xf << 28) + u32 next; + u32 ptr; + u32 bd_data_len; +#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */ +#define BDMA_DESC_BUFLEN_EXT (0xffffff) /* bit0 ~ bit23 */ +}; + +struct msdc_dma { + struct scatterlist *sg; /* I/O scatter list */ + struct mt_gpdma_desc *gpd; /* pointer to gpd array */ + struct mt_bdma_desc *bd; /* pointer to bd array */ + dma_addr_t gpd_addr; /* the physical address of gpd array */ + dma_addr_t bd_addr; /* the physical address of bd array */ +}; + +struct msdc_save_para { + u32 msdc_cfg; + u32 iocon; + u32 sdc_cfg; + u32 pad_tune; + u32 patch_bit0; + u32 patch_bit1; + u32 patch_bit2; + u32 pad_ds_tune; + u32 pad_cmd_tune; + u32 emmc50_cfg0; + u32 emmc50_cfg3; + u32 sdc_fifo_cfg; + u32 emmc_top_control; + u32 emmc_top_cmd; + u32 emmc50_pad_ds_tune; +}; + +struct mtk_mmc_compatible { + u8 clk_div_bits; + bool recheck_sdio_irq; + bool hs400_tune; /* only used for MT8173 */ + u32 pad_tune_reg; + bool async_fifo; + bool data_tune; + bool busy_check; + bool stop_clk_fix; + bool enhance_rx; + bool support_64g; + bool use_internal_cd; +}; + +struct msdc_tune_para { + u32 iocon; + u32 pad_tune; + u32 pad_cmd_tune; + u32 emmc_top_control; + u32 emmc_top_cmd; +}; + +struct msdc_delay_phase { + u8 maxlen; + u8 start; + u8 final_phase; +}; + +struct msdc_host { + struct device *dev; + const struct mtk_mmc_compatible *dev_comp; + int cmd_rsp; + + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + + void __iomem *base; /* host base address */ + void __iomem *top_base; /* host top register base address */ + + struct msdc_dma dma; /* dma channel */ + u64 dma_mask; + + u32 timeout_ns; /* data timeout ns */ + u32 timeout_clks; /* data timeout clks */ + + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_uhs; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + struct reset_control *reset; + + struct clk *src_clk; /* msdc source clock */ + struct clk *h_clk; /* msdc h_clk */ + struct clk *bus_clk; /* bus clock which used to access register */ + struct clk *src_clk_cg; /* msdc source clock control gate */ + u32 mclk; /* mmc subsystem clock frequency */ + u32 src_clk_freq; /* source clock frequency */ + unsigned char timing; + bool vqmmc_enabled; + u32 latch_ck; + u32 hs400_ds_delay; + u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ + u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ + bool hs400_cmd_resp_sel_rising; + /* cmd response sample selection for HS400 */ + bool hs400_mode; /* current eMMC 
will run at hs400 mode */ + bool internal_cd; /* Use internal card-detect logic */ + bool cqhci; /* support eMMC hw cmdq */ + struct msdc_save_para save_para; /* used when gate HCLK */ + struct msdc_tune_para def_tune_para; /* default tune setting */ + struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */ + struct cqhci_host *cq_host; +}; + +static const struct mtk_mmc_compatible mt8135_compat = { + .clk_div_bits = 8, + .recheck_sdio_irq = true, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, + .support_64g = false, +}; + +static const struct mtk_mmc_compatible mt8173_compat = { + .clk_div_bits = 8, + .recheck_sdio_irq = true, + .hs400_tune = true, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, + .support_64g = false, +}; + +static const struct mtk_mmc_compatible mt8183_compat = { + .clk_div_bits = 12, + .recheck_sdio_irq = false, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, + .support_64g = true, +}; + +static const struct mtk_mmc_compatible mt2701_compat = { + .clk_div_bits = 12, + .recheck_sdio_irq = true, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, + .support_64g = false, +}; + +static const struct mtk_mmc_compatible mt2712_compat = { + .clk_div_bits = 12, + .recheck_sdio_irq = false, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, + .support_64g = true, +}; + +static const struct mtk_mmc_compatible mt7622_compat = { + .clk_div_bits = 12, + .recheck_sdio_irq = true, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, + .support_64g = false, +}; + +static const struct mtk_mmc_compatible mt8516_compat = { + .clk_div_bits = 12, + .recheck_sdio_irq = true, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, +}; + +static const struct mtk_mmc_compatible mt7620_compat = { + .clk_div_bits = 8, + .recheck_sdio_irq = true, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, + .use_internal_cd = true, +}; + +static const struct mtk_mmc_compatible mt6779_compat = { + .clk_div_bits = 12, + .recheck_sdio_irq = false, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, + .support_64g = true, +}; + +static const struct of_device_id msdc_of_ids[] = { + { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat}, + { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat}, + { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat}, + { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat}, + { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat}, + { .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat}, + { .compatible = 
"mediatek,mt8516-mmc", .data = &mt8516_compat}, + { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat}, + { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat}, + {} +}; +MODULE_DEVICE_TABLE(of, msdc_of_ids); + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +static void sdr_set_field(void __iomem *reg, u32 field, u32 val) +{ + unsigned int tv = readl(reg); + + tv &= ~field; + tv |= ((val) << (ffs((unsigned int)field) - 1)); + writel(tv, reg); +} + +static void sdr_get_field(void __iomem *reg, u32 field, u32 *val) +{ + unsigned int tv = readl(reg); + + *val = ((tv & field) >> (ffs((unsigned int)field) - 1)); +} + +static void msdc_reset_hw(struct msdc_host *host) +{ + u32 val; + + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST); + while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST) + cpu_relax(); + + sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR); + while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR) + cpu_relax(); + + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); +} + +static void msdc_cmd_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd); +static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb); + +static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR | + MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY | + MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO; +static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | + MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR | + MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT; + +static u8 msdc_dma_calcs(u8 *buf, u32 len) +{ + u32 i, sum = 0; + + for (i = 0; i < len; i++) + sum += buf[i]; + return 0xff - (u8) sum; +} + +static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, + struct mmc_data *data) +{ + unsigned int j, dma_len; + dma_addr_t dma_address; + u32 dma_ctrl; + struct scatterlist *sg; + struct mt_gpdma_desc *gpd; + struct mt_bdma_desc *bd; + + sg = data->sg; + + gpd = dma->gpd; + bd = dma->bd; + + /* modify gpd */ + gpd->gpd_info |= GPDMA_DESC_HWO; + gpd->gpd_info |= GPDMA_DESC_BDP; + /* need to clear first. 
The checksum below is calculated over these bits. */
+	gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
+	gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;
+
+	/* modify bd */
+	for_each_sg(data->sg, sg, data->sg_count, j) {
+		dma_address = sg_dma_address(sg);
+		dma_len = sg_dma_len(sg);
+
+		/* init bd */
+		bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
+		bd[j].bd_info &= ~BDMA_DESC_DWPAD;
+		bd[j].ptr = lower_32_bits(dma_address);
+		if (host->dev_comp->support_64g) {
+			bd[j].bd_info &= ~BDMA_DESC_PTR_H4;
+			bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf)
+					 << 28;
+		}
+
+		if (host->dev_comp->support_64g) {
+			bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT;
+			bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT);
+		} else {
+			bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
+			bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
+		}
+
+		if (j == data->sg_count - 1) /* the last bd */
+			bd[j].bd_info |= BDMA_DESC_EOL;
+		else
+			bd[j].bd_info &= ~BDMA_DESC_EOL;
+
+		/* the checksum field must be cleared first */
+		bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
+		bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
+	}
+
+	sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
+	dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
+	dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
+	dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8);
+	writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
+	if (host->dev_comp->support_64g)
+		sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
+			      upper_32_bits(dma->gpd_addr) & 0xf);
+	writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
+}
+
+static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+
+	if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
+		data->host_cookie |= MSDC_PREPARE_FLAG;
+		data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
+					    mmc_get_dma_dir(data));
+	}
+}
+
+static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
+{
+	struct mmc_data *data = mrq->data;
+
+	if (data->host_cookie & MSDC_ASYNC_FLAG)
+		return;
+
+	if (data->host_cookie & MSDC_PREPARE_FLAG) {
+		dma_unmap_sg(host->dev, data->sg, data->sg_len,
+			     mmc_get_dma_dir(data));
+		data->host_cookie &= ~MSDC_PREPARE_FLAG;
+	}
+}
+
+static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
+{
+	struct mmc_host *mmc = mmc_from_priv(host);
+	u64 timeout, clk_ns;
+	u32 mode = 0;
+
+	if (mmc->actual_clock == 0) {
+		timeout = 0;
+	} else {
+		clk_ns  = 1000000000ULL;
+		do_div(clk_ns, mmc->actual_clock);
+		timeout = ns + clk_ns - 1;
+		do_div(timeout, clk_ns);
+		timeout += clks;
+		/* in units of 1048576 sclk cycles */
+		timeout = DIV_ROUND_UP(timeout, (0x1 << 20));
+		if (host->dev_comp->clk_div_bits == 8)
+			sdr_get_field(host->base + MSDC_CFG,
+				      MSDC_CFG_CKMOD, &mode);
+		else
+			sdr_get_field(host->base + MSDC_CFG,
+				      MSDC_CFG_CKMOD_EXTRA, &mode);
+		/* DDR mode doubles the clock cycles for the data timeout */
+		timeout = mode >= 2 ? timeout * 2 : timeout;
+		timeout = timeout > 1 ? timeout - 1 : 0;
+	}
+	return timeout;
+}
+
+/* clock control primitives */
+static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
+{
+	u64 timeout;
+
+	host->timeout_ns = ns;
+	host->timeout_clks = clks;
+
+	timeout = msdc_timeout_cal(host, ns, clks);
+	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
+		      (u32)(timeout > 255 ? 255 : timeout));
+}
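+
+/*
+ * SDC_CFG_WRDTOC holds the busy (R1b) timeout in the same 2^20-sclk-cycle
+ * units as the data timeout: at 200 MHz one unit is about 5.24 ms, so the
+ * 8191-unit cap below corresponds to roughly 43 seconds.
+ */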
+static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
+{
+	u64 timeout;
+
+	timeout = msdc_timeout_cal(host, ns, clks);
+	sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
+		      (u32)(timeout > 8191 ? 8191 : timeout));
+}
+
+static void msdc_gate_clock(struct msdc_host *host)
+{
+	clk_disable_unprepare(host->src_clk_cg);
+	clk_disable_unprepare(host->src_clk);
+	clk_disable_unprepare(host->bus_clk);
+	clk_disable_unprepare(host->h_clk);
+}
+
+static void msdc_ungate_clock(struct msdc_host *host)
+{
+	clk_prepare_enable(host->h_clk);
+	clk_prepare_enable(host->bus_clk);
+	clk_prepare_enable(host->src_clk);
+	clk_prepare_enable(host->src_clk_cg);
+	while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
+		cpu_relax();
+}
+
+static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
+{
+	struct mmc_host *mmc = mmc_from_priv(host);
+	u32 mode;
+	u32 flags;
+	u32 div;
+	u32 sclk;
+	u32 tune_reg = host->dev_comp->pad_tune_reg;
+
+	if (!hz) {
+		dev_dbg(host->dev, "set mclk to 0\n");
+		host->mclk = 0;
+		mmc->actual_clock = 0;
+		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+		return;
+	}
+
+	flags = readl(host->base + MSDC_INTEN);
+	sdr_clr_bits(host->base + MSDC_INTEN, flags);
+	if (host->dev_comp->clk_div_bits == 8)
+		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+	else
+		sdr_clr_bits(host->base + MSDC_CFG,
+			     MSDC_CFG_HS400_CK_MODE_EXTRA);
+	if (timing == MMC_TIMING_UHS_DDR50 ||
+	    timing == MMC_TIMING_MMC_DDR52 ||
+	    timing == MMC_TIMING_MMC_HS400) {
+		if (timing == MMC_TIMING_MMC_HS400)
+			mode = 0x3;
+		else
+			mode = 0x2; /* ddr mode and use divisor */
+
+		if (hz >= (host->src_clk_freq >> 2)) {
+			div = 0; /* means div = 1/4 */
+			sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
+		} else {
+			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
+			sclk = (host->src_clk_freq >> 2) / div;
+			div = (div >> 1);
+		}
+
+		if (timing == MMC_TIMING_MMC_HS400 &&
+		    hz >= (host->src_clk_freq >> 1)) {
+			if (host->dev_comp->clk_div_bits == 8)
+				sdr_set_bits(host->base + MSDC_CFG,
+					     MSDC_CFG_HS400_CK_MODE);
+			else
+				sdr_set_bits(host->base + MSDC_CFG,
+					     MSDC_CFG_HS400_CK_MODE_EXTRA);
+			sclk = host->src_clk_freq >> 1;
+			div = 0; /* div is ignored when bit18 is set */
+		}
+	} else if (hz >= host->src_clk_freq) {
+		mode = 0x1; /* no divisor */
+		div = 0;
+		sclk = host->src_clk_freq;
+	} else {
+		mode = 0x0; /* use divisor */
+		if (hz >= (host->src_clk_freq >> 1)) {
+			div = 0; /* means div = 1/2 */
+			sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
+		} else {
+			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
+			sclk = (host->src_clk_freq >> 2) / div;
+		}
+	}
+	sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+	/*
+	 * As src_clk and HCLK use the same bit to gate/ungate, gating
+	 * only src_clk requires gating its parent (the mux) instead.
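+	 * The clock is re-enabled below once the new divider is latched.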
+	 */
+	if (host->src_clk_cg)
+		clk_disable_unprepare(host->src_clk_cg);
+	else
+		clk_disable_unprepare(clk_get_parent(host->src_clk));
+	if (host->dev_comp->clk_div_bits == 8)
+		sdr_set_field(host->base + MSDC_CFG,
+			      MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+			      (mode << 8) | div);
+	else
+		sdr_set_field(host->base + MSDC_CFG,
+			      MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
+			      (mode << 12) | div);
+	if (host->src_clk_cg)
+		clk_prepare_enable(host->src_clk_cg);
+	else
+		clk_prepare_enable(clk_get_parent(host->src_clk));
+
+	while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
+		cpu_relax();
+	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+	mmc->actual_clock = sclk;
+	host->mclk = hz;
+	host->timing = timing;
+	/* needed because the clock has changed */
+	msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
+	sdr_set_bits(host->base + MSDC_INTEN, flags);
+
+	/*
+	 * mmc_select_hs400() will drop to 50 MHz and high-speed mode; the
+	 * tuning result for HS200 at 200 MHz is not suitable for 50 MHz.
+	 */
+	if (mmc->actual_clock <= 52000000) {
+		writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
+		if (host->top_base) {
+			writel(host->def_tune_para.emmc_top_control,
+			       host->top_base + EMMC_TOP_CONTROL);
+			writel(host->def_tune_para.emmc_top_cmd,
+			       host->top_base + EMMC_TOP_CMD);
+		} else {
+			writel(host->def_tune_para.pad_tune,
+			       host->base + tune_reg);
+		}
+	} else {
+		writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
+		writel(host->saved_tune_para.pad_cmd_tune,
+		       host->base + PAD_CMD_TUNE);
+		if (host->top_base) {
+			writel(host->saved_tune_para.emmc_top_control,
+			       host->top_base + EMMC_TOP_CONTROL);
+			writel(host->saved_tune_para.emmc_top_cmd,
+			       host->top_base + EMMC_TOP_CMD);
+		} else {
+			writel(host->saved_tune_para.pad_tune,
+			       host->base + tune_reg);
+		}
+	}
+
+	if (timing == MMC_TIMING_MMC_HS400 &&
+	    host->dev_comp->hs400_tune)
+		sdr_set_field(host->base + tune_reg,
+			      MSDC_PAD_TUNE_CMDRRDLY,
+			      host->hs400_cmd_int_delay);
+	dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
+		timing);
+}
+
+static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
+		struct mmc_request *mrq, struct mmc_command *cmd)
+{
+	u32 resp;
+
+	switch (mmc_resp_type(cmd)) {
+	/* Actually, R1, R5, R6, R7 are the same */
+	case MMC_RSP_R1:
+		resp = 0x1;
+		break;
+	case MMC_RSP_R1B:
+		resp = 0x7;
+		break;
+	case MMC_RSP_R2:
+		resp = 0x2;
+		break;
+	case MMC_RSP_R3:
+		resp = 0x3;
+		break;
+	case MMC_RSP_NONE:
+	default:
+		resp = 0x0;
+		break;
+	}
+
+	return resp;
+}
+
+static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
+		struct mmc_request *mrq, struct mmc_command *cmd)
+{
+	struct mmc_host *mmc = mmc_from_priv(host);
+	/* rawcmd :
+	 * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
+	 * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
+	 */
+	u32 opcode = cmd->opcode;
+	u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
+	u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
+
+	host->cmd_rsp = resp;
+
+	if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
+	    opcode == MMC_STOP_TRANSMISSION)
+		rawcmd |= (0x1 << 14);
+	else if (opcode == SD_SWITCH_VOLTAGE)
+		rawcmd |= (0x1 << 30);
+	else if (opcode == SD_APP_SEND_SCR ||
+		 opcode == SD_APP_SEND_NUM_WR_BLKS ||
+		 (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
+		 (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
+		 (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
+		rawcmd |= (0x1 << 11);
+
+	if (cmd->data) {
+		struct mmc_data *data = cmd->data;
+
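+		/*
+		 * Use AutoCMD23 for multi-block eMMC transfers when a plain
+		 * CMD23 is queued (no reliable-write/tag bits in arg[31:16]).
+		 */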
(mmc_op_multi(opcode)) { + if (mmc_card_mmc(mmc->card) && mrq->sbc && + !(mrq->sbc->arg & 0xFFFF0000)) + rawcmd |= 0x2 << 28; /* AutoCMD23 */ + } + + rawcmd |= ((data->blksz & 0xFFF) << 16); + if (data->flags & MMC_DATA_WRITE) + rawcmd |= (0x1 << 13); + if (data->blocks > 1) + rawcmd |= (0x2 << 11); + else + rawcmd |= (0x1 << 11); + /* Always use dma mode */ + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO); + + if (host->timeout_ns != data->timeout_ns || + host->timeout_clks != data->timeout_clks) + msdc_set_timeout(host, data->timeout_ns, + data->timeout_clks); + + writel(data->blocks, host->base + SDC_BLK_NUM); + } + return rawcmd; +} + +static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read; + + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); + msdc_dma_setup(host, &host->dma, data); + sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask); + sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1); + dev_dbg(host->dev, "DMA start\n"); + dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n", + __func__, cmd->opcode, data->blocks, read); +} + +static int msdc_auto_cmd_done(struct msdc_host *host, int events, + struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + + rsp[0] = readl(host->base + SDC_ACMD_RESP); + + if (events & MSDC_INT_ACMDRDY) { + cmd->error = 0; + } else { + msdc_reset_hw(host); + if (events & MSDC_INT_ACMDCRCERR) { + cmd->error = -EILSEQ; + host->error |= REQ_STOP_EIO; + } else if (events & MSDC_INT_ACMDTMO) { + cmd->error = -ETIMEDOUT; + host->error |= REQ_STOP_TMO; + } + dev_err(host->dev, + "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); + } + return cmd->error; +} + +/* + * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost + * + * Host controller may lost interrupt in some special case. + * Add SDIO irq recheck mechanism to make sure all interrupts + * can be processed immediately + */ +static void msdc_recheck_sdio_irq(struct msdc_host *host) +{ + struct mmc_host *mmc = mmc_from_priv(host); + u32 reg_int, reg_inten, reg_ps; + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + reg_inten = readl(host->base + MSDC_INTEN); + if (reg_inten & MSDC_INTEN_SDIOIRQ) { + reg_int = readl(host->base + MSDC_INT); + reg_ps = readl(host->base + MSDC_PS); + if (!(reg_int & MSDC_INT_SDIOIRQ || + reg_ps & MSDC_PS_DATA1)) { + __msdc_enable_sdio_irq(host, 0); + sdio_signal_irq(mmc); + } + } + } +} + +static void msdc_track_cmd_data(struct msdc_host *host, + struct mmc_command *cmd, struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) +{ + unsigned long flags; + + /* + * No need check the return value of cancel_delayed_work, as only ONE + * path will go here! 
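+ * Both msdc_cmd_done() and msdc_data_xfer_done() first claim
+ * host->cmd/host->data under host->lock before completing, so a
+ * request is finished exactly once even when the IRQ handler and
+ * the req_timeout worker race.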
+ */ + cancel_delayed_work(&host->req_timeout); + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + msdc_track_cmd_data(host, mrq->cmd, mrq->data); + if (mrq->data) + msdc_unprepare_data(host, mrq); + if (host->error) + msdc_reset_hw(host); + mmc_request_done(mmc_from_priv(host), mrq); + if (host->dev_comp->recheck_sdio_irq) + msdc_recheck_sdio_irq(host); +} + +/* returns true if command is fully handled; returns false otherwise */ +static bool msdc_cmd_done(struct msdc_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + bool done = false; + bool sbc_error; + unsigned long flags; + u32 *rsp; + + if (mrq->sbc && cmd == mrq->cmd && + (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR + | MSDC_INT_ACMDTMO))) + msdc_auto_cmd_done(host, events, mrq->sbc); + + sbc_error = mrq->sbc && mrq->sbc->error; + + if (!sbc_error && !(events & (MSDC_INT_CMDRDY + | MSDC_INT_RSPCRCERR + | MSDC_INT_CMDTMO))) + return done; + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + rsp = cmd->resp; + + sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[0] = readl(host->base + SDC_RESP3); + rsp[1] = readl(host->base + SDC_RESP2); + rsp[2] = readl(host->base + SDC_RESP1); + rsp[3] = readl(host->base + SDC_RESP0); + } else { + rsp[0] = readl(host->base + SDC_RESP0); + } + } + + if (!sbc_error && !(events & MSDC_INT_CMDRDY)) { + if (events & MSDC_INT_CMDTMO || + (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) + /* + * should not clear fifo/interrupt as the tune data + * may have alreay come when cmd19/cmd21 gets response + * CRC error. + */ + msdc_reset_hw(host); + if (events & MSDC_INT_RSPCRCERR) { + cmd->error = -EILSEQ; + host->error |= REQ_CMD_EIO; + } else if (events & MSDC_INT_CMDTMO) { + cmd->error = -ETIMEDOUT; + host->error |= REQ_CMD_TMO; + } + } + if (cmd->error) + dev_dbg(host->dev, + "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], + cmd->error); + + msdc_cmd_next(host, mrq, cmd); + return true; +} + +/* It is the core layer's responsibility to ensure card status + * is correct before issue a request. but host design do below + * checks recommended. 
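+ * Concretely: SDC_STS_CMDBUSY must clear before any command is
+ * issued, and SDC_STS_SDCBUSY must also clear for R1B or data
+ * commands. Each poll is bounded to 20ms; a bus stuck busy is
+ * completed as a command timeout (MSDC_INT_CMDTMO).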
+ */ +static inline bool msdc_cmd_is_ready(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + /* The max busy time we can endure is 20ms */ + unsigned long tmo = jiffies + msecs_to_jiffies(20); + + while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) && + time_before(jiffies, tmo)) + cpu_relax(); + if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) { + dev_err(host->dev, "CMD bus busy detected\n"); + host->error |= REQ_CMD_BUSY; + msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); + return false; + } + + if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) { + tmo = jiffies + msecs_to_jiffies(20); + /* R1B or with data, should check SDCBUSY */ + while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) && + time_before(jiffies, tmo)) + cpu_relax(); + if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) { + dev_err(host->dev, "Controller busy detected\n"); + host->error |= REQ_CMD_BUSY; + msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); + return false; + } + } + return true; +} + +static void msdc_start_command(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + u32 rawcmd; + unsigned long flags; + + WARN_ON(host->cmd); + host->cmd = cmd; + + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); + if (!msdc_cmd_is_ready(host, mrq, cmd)) + return; + + if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 || + readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) { + dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n"); + msdc_reset_hw(host); + } + + cmd->error = 0; + rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); + spin_unlock_irqrestore(&host->lock, flags); + + writel(cmd->arg, host->base + SDC_ARG); + writel(rawcmd, host->base + SDC_CMD); +} + +static void msdc_cmd_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + if ((cmd->error && + !(cmd->error == -EILSEQ && + (cmd->opcode == MMC_SEND_TUNING_BLOCK || + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) || + (mrq->sbc && mrq->sbc->error)) + msdc_request_done(host, mrq); + else if (cmd == mrq->sbc) + msdc_start_command(host, mrq, mrq->cmd); + else if (!cmd->data) + msdc_request_done(host, mrq); + else + msdc_start_data(host, mrq, cmd, cmd->data); +} + +static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct msdc_host *host = mmc_priv(mmc); + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + if (mrq->data) + msdc_prepare_data(host, mrq); + + /* if SBC is required, we have HW option and SW option. 
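+ * (HW option: let the controller issue CMD23 itself via the
+ * AutoCMD23 bits set in msdc_cmd_prepare_raw_cmd(); SW option:
+ * send mrq->sbc as an ordinary command first. The "special" flags
+ * are the upper 16 bits of the CMD23 argument.)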
+ * if HW option is enabled, and SBC does not have "special" flags, + * use HW option, otherwise use SW option + */ + if (mrq->sbc && (!mmc_card_mmc(mmc->card) || + (mrq->sbc->arg & 0xFFFF0000))) + msdc_start_command(host, mrq, mrq->sbc); + else + msdc_start_command(host, mrq, mrq->cmd); +} + +static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct msdc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + msdc_prepare_data(host, mrq); + data->host_cookie |= MSDC_ASYNC_FLAG; +} + +static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct msdc_host *host = mmc_priv(mmc); + struct mmc_data *data; + + data = mrq->data; + if (!data) + return; + if (data->host_cookie) { + data->host_cookie &= ~MSDC_ASYNC_FLAG; + msdc_unprepare_data(host, mrq); + } +} + +static void msdc_data_xfer_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error && + !mrq->sbc) + msdc_start_command(host, mrq, mrq->stop); + else + msdc_request_done(host, mrq); +} + +static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) +{ + struct mmc_command *stop; + unsigned long flags; + bool done; + unsigned int check_data = events & + (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO + | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR + | MSDC_INT_DMA_PROTECT); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + if (check_data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + stop = data->stop; + + if (check_data || (stop && stop->error)) { + dev_dbg(host->dev, "DMA status: 0x%8X\n", + readl(host->base + MSDC_DMA_CFG)); + sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, + 1); + while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS) + cpu_relax(); + sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask); + dev_dbg(host->dev, "DMA stop\n"); + + if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) { + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_dbg(host->dev, "interrupt events: %x\n", events); + msdc_reset_hw(host); + host->error |= REQ_DAT_ERR; + data->bytes_xfered = 0; + + if (events & MSDC_INT_DATTMO) + data->error = -ETIMEDOUT; + else if (events & MSDC_INT_DATCRCERR) + data->error = -EILSEQ; + + dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", + __func__, mrq->cmd->opcode, data->blocks); + dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", + (int)data->error, data->bytes_xfered); + } + + msdc_data_xfer_next(host, mrq, data); + done = true; + } + return done; +} + +static void msdc_set_buswidth(struct msdc_host *host, u32 width) +{ + u32 val = readl(host->base + SDC_CFG); + + val &= ~SDC_CFG_BUSWIDTH; + + switch (width) { + default: + case MMC_BUS_WIDTH_1: + val |= (MSDC_BUS_1BITS << 16); + break; + case MMC_BUS_WIDTH_4: + val |= (MSDC_BUS_4BITS << 16); + break; + case MMC_BUS_WIDTH_8: + val |= (MSDC_BUS_8BITS << 16); + break; + } + + writel(val, host->base + SDC_CFG); + dev_dbg(host->dev, "Bus Width = %d", width); +} + +static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct msdc_host *host = mmc_priv(mmc); + int ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 && + ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) { + dev_err(host->dev, "Unsupported signal voltage!\n"); + return 
-EINVAL; + } + + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) { + dev_dbg(host->dev, "Regulator set error %d (%d)\n", + ret, ios->signal_voltage); + return ret; + } + + /* Apply different pinctrl settings for different signal voltage */ + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + pinctrl_select_state(host->pinctrl, host->pins_uhs); + else + pinctrl_select_state(host->pinctrl, host->pins_default); + } + return 0; +} + +static int msdc_card_busy(struct mmc_host *mmc) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 status = readl(host->base + MSDC_PS); + + /* only check if data0 is low */ + return !(status & BIT(16)); +} + +static void msdc_request_timeout(struct work_struct *work) +{ + struct msdc_host *host = container_of(work, struct msdc_host, + req_timeout.work); + + /* simulate HW timeout status */ + dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); + if (host->mrq) { + dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, + host->mrq, host->mrq->cmd->opcode); + if (host->cmd) { + dev_err(host->dev, "%s: aborting cmd=%d\n", + __func__, host->cmd->opcode); + msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq, + host->cmd); + } else if (host->data) { + dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", + __func__, host->mrq->cmd->opcode, + host->data->blocks); + msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq, + host->data); + } + } +} + +static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb) +{ + if (enb) { + sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ); + sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); + if (host->dev_comp->recheck_sdio_irq) + msdc_recheck_sdio_irq(host); + } else { + sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ); + sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); + } +} + +static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb) +{ + unsigned long flags; + struct msdc_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __msdc_enable_sdio_irq(host, enb); + spin_unlock_irqrestore(&host->lock, flags); + + if (enb) + pm_runtime_get_noresume(host->dev); + else + pm_runtime_put_noidle(host->dev); +} + +static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts) +{ + struct mmc_host *mmc = mmc_from_priv(host); + int cmd_err = 0, dat_err = 0; + + if (intsts & MSDC_INT_RSPCRCERR) { + cmd_err = -EILSEQ; + dev_err(host->dev, "%s: CMD CRC ERR", __func__); + } else if (intsts & MSDC_INT_CMDTMO) { + cmd_err = -ETIMEDOUT; + dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__); + } + + if (intsts & MSDC_INT_DATCRCERR) { + dat_err = -EILSEQ; + dev_err(host->dev, "%s: DATA CRC ERR", __func__); + } else if (intsts & MSDC_INT_DATTMO) { + dat_err = -ETIMEDOUT; + dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__); + } + + if (cmd_err || dat_err) { + dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x", + cmd_err, dat_err, intsts); + } + + return cqhci_irq(mmc, 0, cmd_err, dat_err); +} + +static irqreturn_t msdc_irq(int irq, void *dev_id) +{ + struct msdc_host *host = (struct msdc_host *) dev_id; + struct mmc_host *mmc = mmc_from_priv(host); + + while (true) { + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events, event_mask; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + MSDC_INT); + event_mask = readl(host->base + MSDC_INTEN); + if ((events & event_mask) & MSDC_INT_SDIOIRQ) + __msdc_enable_sdio_irq(host, 0); + /* clear interrupts */ + writel(events & event_mask, 
host->base + MSDC_INT); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if ((events & event_mask) & MSDC_INT_SDIOIRQ) + sdio_signal_irq(mmc); + + if ((events & event_mask) & MSDC_INT_CDSC) { + if (host->internal_cd) + mmc_detect_change(mmc, msecs_to_jiffies(20)); + events &= ~MSDC_INT_CDSC; + } + + if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ))) + break; + + if ((mmc->caps2 & MMC_CAP2_CQE) && + (events & MSDC_INT_CMDQ)) { + msdc_cmdq_irq(host, events); + /* clear interrupts */ + writel(events, host->base + MSDC_INT); + return IRQ_HANDLED; + } + + if (!mrq) { + dev_err(host->dev, + "%s: MRQ=NULL; events=%08X; event_mask=%08X\n", + __func__, events, event_mask); + WARN_ON(1); + break; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (cmd) + msdc_cmd_done(host, events, mrq, cmd); + else if (data) + msdc_data_xfer_done(host, events, mrq, data); + } + + return IRQ_HANDLED; +} + +static void msdc_init_hw(struct msdc_host *host) +{ + u32 val; + u32 tune_reg = host->dev_comp->pad_tune_reg; + + if (host->reset) { + reset_control_assert(host->reset); + usleep_range(10, 50); + reset_control_deassert(host->reset); + } + + /* Configure to MMC/SD mode, clock free running */ + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); + + /* Reset */ + msdc_reset_hw(host); + + /* Disable and clear all interrupts */ + writel(0, host->base + MSDC_INTEN); + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); + + /* Configure card detection */ + if (host->internal_cd) { + sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE, + DEFAULT_DEBOUNCE); + sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN); + sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC); + sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP); + } else { + sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP); + sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN); + sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC); + } + + if (host->top_base) { + writel(0, host->top_base + EMMC_TOP_CONTROL); + writel(0, host->top_base + EMMC_TOP_CMD); + } else { + writel(0, host->base + tune_reg); + } + writel(0, host->base + MSDC_IOCON); + sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0); + writel(0x403c0046, host->base + MSDC_PATCH_BIT); + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1); + writel(0xffff4089, host->base + MSDC_PATCH_BIT1); + sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL); + + if (host->dev_comp->stop_clk_fix) { + sdr_set_field(host->base + MSDC_PATCH_BIT1, + MSDC_PATCH_BIT1_STOP_DLY, 3); + sdr_clr_bits(host->base + SDC_FIFO_CFG, + SDC_FIFO_CFG_WRVALIDSEL); + sdr_clr_bits(host->base + SDC_FIFO_CFG, + SDC_FIFO_CFG_RDVALIDSEL); + } + + if (host->dev_comp->busy_check) + sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7)); + + if (host->dev_comp->async_fifo) { + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_RESPWAIT, 3); + if (host->dev_comp->enhance_rx) { + if (host->top_base) + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + SDC_RX_ENH_EN); + else + sdr_set_bits(host->base + SDC_ADV_CFG0, + SDC_RX_ENHANCE_EN); + } else { + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_RESPSTSENSEL, 2); + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_CRCSTSENSEL, 2); + } + /* use async fifo, then no need tune internal delay */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT2, + MSDC_PATCH_BIT2_CFGRESP); + sdr_set_bits(host->base + MSDC_PATCH_BIT2, + 
MSDC_PATCH_BIT2_CFGCRCSTS); + } + + if (host->dev_comp->support_64g) + sdr_set_bits(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_SUPPORT_64G); + if (host->dev_comp->data_tune) { + if (host->top_base) { + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY_SEL); + sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL, + DATA_K_VALUE_SEL); + sdr_set_bits(host->top_base + EMMC_TOP_CMD, + PAD_CMD_RD_RXDLY_SEL); + } else { + sdr_set_bits(host->base + tune_reg, + MSDC_PAD_TUNE_RD_SEL | + MSDC_PAD_TUNE_CMD_SEL); + } + } else { + /* choose clock tune */ + if (host->top_base) + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + PAD_RXDLY_SEL); + else + sdr_set_bits(host->base + tune_reg, + MSDC_PAD_TUNE_RXDLYSEL); + } + + /* Configure to enable SDIO mode. + * it's must otherwise sdio cmd5 failed + */ + sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO); + + /* Config SDIO device detect interrupt function */ + sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); + sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER); + + /* Configure to default data timeout */ + sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); + + host->def_tune_para.iocon = readl(host->base + MSDC_IOCON); + host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); + if (host->top_base) { + host->def_tune_para.emmc_top_control = + readl(host->top_base + EMMC_TOP_CONTROL); + host->def_tune_para.emmc_top_cmd = + readl(host->top_base + EMMC_TOP_CMD); + host->saved_tune_para.emmc_top_control = + readl(host->top_base + EMMC_TOP_CONTROL); + host->saved_tune_para.emmc_top_cmd = + readl(host->top_base + EMMC_TOP_CMD); + } else { + host->def_tune_para.pad_tune = readl(host->base + tune_reg); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); + } + dev_dbg(host->dev, "init hardware done!"); +} + +static void msdc_deinit_hw(struct msdc_host *host) +{ + u32 val; + + if (host->internal_cd) { + /* Disabled card-detect */ + sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN); + sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP); + } + + /* Disable and clear all interrupts */ + writel(0, host->base + MSDC_INTEN); + + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); +} + +/* init gpd and bd list in msdc_drv_probe */ +static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma) +{ + struct mt_gpdma_desc *gpd = dma->gpd; + struct mt_bdma_desc *bd = dma->bd; + dma_addr_t dma_addr; + int i; + + memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2); + + dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc); + gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */ + /* gpd->next is must set for desc DMA + * That's why must alloc 2 gpd structure. 
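+ * Resulting layout: gpd[0] is the descriptor actually used,
+ * gpd[0].next chains to the zero-initialised gpd[1] so the list is
+ * terminated, and gpd[0].ptr points at bd[0]; the MAX_BD_NUM buffer
+ * descriptors are pre-linked through bd[i].next below so request
+ * setup only has to fill in the data buffers.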
+ */ + gpd->next = lower_32_bits(dma_addr); + if (host->dev_comp->support_64g) + gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24; + + dma_addr = dma->bd_addr; + gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */ + if (host->dev_comp->support_64g) + gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28; + + memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM); + for (i = 0; i < (MAX_BD_NUM - 1); i++) { + dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1); + bd[i].next = lower_32_bits(dma_addr); + if (host->dev_comp->support_64g) + bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24; + } +} + +static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct msdc_host *host = mmc_priv(mmc); + int ret; + + msdc_set_buswidth(host, ios->bus_width); + + /* Suspend/Resume will do power off/on */ + switch (ios->power_mode) { + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + msdc_init_hw(host); + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, + ios->vdd); + if (ret) { + dev_err(host->dev, "Failed to set vmmc power!\n"); + return; + } + } + break; + case MMC_POWER_ON: + if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret) + dev_err(host->dev, "Failed to set vqmmc power!\n"); + else + host->vqmmc_enabled = true; + } + break; + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { + regulator_disable(mmc->supply.vqmmc); + host->vqmmc_enabled = false; + } + break; + default: + break; + } + + if (host->mclk != ios->clock || host->timing != ios->timing) + msdc_set_mclk(host, ios->timing, ios->clock); +} + +static u32 test_delay_bit(u32 delay, u32 bit) +{ + bit %= PAD_DELAY_MAX; + return delay & (1 << bit); +} + +static int get_delay_len(u32 delay, u32 start_bit) +{ + int i; + + for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) { + if (test_delay_bit(delay, start_bit + i) == 0) + return i; + } + return PAD_DELAY_MAX - start_bit; +} + +static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) +{ + int start = 0, len = 0; + int start_final = 0, len_final = 0; + u8 final_phase = 0xff; + struct msdc_delay_phase delay_phase = { 0, }; + + if (delay == 0) { + dev_err(host->dev, "phase error: [map:%x]\n", delay); + delay_phase.final_phase = final_phase; + return delay_phase; + } + + while (start < PAD_DELAY_MAX) { + len = get_delay_len(delay, start); + if (len_final < len) { + start_final = start; + len_final = len; + } + start += len ? 
len : 1; + if (len >= 12 && start_final < 4) + break; + } + + /* The rule is that to find the smallest delay cell */ + if (start_final == 0) + final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX; + else + final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX; + dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n", + delay, len_final, final_phase); + + delay_phase.maxlen = len_final; + delay_phase.start = start_final; + delay_phase.final_phase = final_phase; + return delay_phase; +} + +static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value) +{ + u32 tune_reg = host->dev_comp->pad_tune_reg; + + if (host->top_base) + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, + value); + else + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, + value); +} + +static inline void msdc_set_data_delay(struct msdc_host *host, u32 value) +{ + u32 tune_reg = host->dev_comp->pad_tune_reg; + + if (host->top_base) + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY, value); + else + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, + value); +} + +static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 rise_delay = 0, fall_delay = 0; + struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; + struct msdc_delay_phase internal_delay_phase; + u8 final_delay, final_maxlen; + u32 internal_delay = 0; + u32 tune_reg = host->dev_comp->pad_tune_reg; + int cmd_err; + int i, j; + + if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || + mmc->ios.timing == MMC_TIMING_UHS_SDR104) + sdr_set_field(host->base + tune_reg, + MSDC_PAD_TUNE_CMDRRDLY, + host->hs200_cmd_int_delay); + + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + for (i = 0 ; i < PAD_DELAY_MAX; i++) { + msdc_set_cmd_delay(host, i); + /* + * Using the same parameters, it may sometimes pass the test, + * but sometimes it may fail. To make sure the parameters are + * more stable, we test each set of parameters 3 times. + */ + for (j = 0; j < 3; j++) { + mmc_send_tuning(mmc, opcode, &cmd_err); + if (!cmd_err) { + rise_delay |= (1 << i); + } else { + rise_delay &= ~(1 << i); + break; + } + } + } + final_rise_delay = get_best_delay(host, rise_delay); + /* if rising edge has enough margin, then do not scan falling edge */ + if (final_rise_delay.maxlen >= 12 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) + goto skip_fall; + + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + for (i = 0; i < PAD_DELAY_MAX; i++) { + msdc_set_cmd_delay(host, i); + /* + * Using the same parameters, it may sometimes pass the test, + * but sometimes it may fail. To make sure the parameters are + * more stable, we test each set of parameters 3 times. 
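+ * Each value of i that passes all three attempts sets bit i of
+ * fall_delay; get_best_delay() then returns the centre of the
+ * longest run of set bits. Illustrative map: 0x0ff0 has its
+ * longest run at bits 4-11, so the final phase is (4 + 8/2) = 8.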
+ */ + for (j = 0; j < 3; j++) { + mmc_send_tuning(mmc, opcode, &cmd_err); + if (!cmd_err) { + fall_delay |= (1 << i); + } else { + fall_delay &= ~(1 << i); + break; + } + } + } + final_fall_delay = get_best_delay(host, fall_delay); + +skip_fall: + final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); + if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4) + final_maxlen = final_fall_delay.maxlen; + if (final_maxlen == final_rise_delay.maxlen) { + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + final_delay = final_rise_delay.final_phase; + } else { + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + final_delay = final_fall_delay.final_phase; + } + msdc_set_cmd_delay(host, final_delay); + + if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) + goto skip_internal; + + for (i = 0; i < PAD_DELAY_MAX; i++) { + sdr_set_field(host->base + tune_reg, + MSDC_PAD_TUNE_CMDRRDLY, i); + mmc_send_tuning(mmc, opcode, &cmd_err); + if (!cmd_err) + internal_delay |= (1 << i); + } + dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); + internal_delay_phase = get_best_delay(host, internal_delay); + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, + internal_delay_phase.final_phase); +skip_internal: + dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); + return final_delay == 0xff ? -EIO : 0; +} + +static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 cmd_delay = 0; + struct msdc_delay_phase final_cmd_delay = { 0,}; + u8 final_delay; + int cmd_err; + int i, j; + + /* select EMMC50 PAD CMD tune */ + sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); + sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); + + if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || + mmc->ios.timing == MMC_TIMING_UHS_SDR104) + sdr_set_field(host->base + MSDC_PAD_TUNE, + MSDC_PAD_TUNE_CMDRRDLY, + host->hs200_cmd_int_delay); + + if (host->hs400_cmd_resp_sel_rising) + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + else + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + for (i = 0 ; i < PAD_DELAY_MAX; i++) { + sdr_set_field(host->base + PAD_CMD_TUNE, + PAD_CMD_TUNE_RX_DLY3, i); + /* + * Using the same parameters, it may sometimes pass the test, + * but sometimes it may fail. To make sure the parameters are + * more stable, we test each set of parameters 3 times. + */ + for (j = 0; j < 3; j++) { + mmc_send_tuning(mmc, opcode, &cmd_err); + if (!cmd_err) { + cmd_delay |= (1 << i); + } else { + cmd_delay &= ~(1 << i); + break; + } + } + } + final_cmd_delay = get_best_delay(host, cmd_delay); + sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3, + final_cmd_delay.final_phase); + final_delay = final_cmd_delay.final_phase; + + dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); + return final_delay == 0xff ? 
-EIO : 0; +} + +static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 rise_delay = 0, fall_delay = 0; + struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; + u8 final_delay, final_maxlen; + int i, ret; + + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, + host->latch_ck); + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); + for (i = 0 ; i < PAD_DELAY_MAX; i++) { + msdc_set_data_delay(host, i); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + rise_delay |= (1 << i); + } + final_rise_delay = get_best_delay(host, rise_delay); + /* if rising edge has enough margin, then do not scan falling edge */ + if (final_rise_delay.maxlen >= 12 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) + goto skip_fall; + + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); + for (i = 0; i < PAD_DELAY_MAX; i++) { + msdc_set_data_delay(host, i); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + fall_delay |= (1 << i); + } + final_fall_delay = get_best_delay(host, fall_delay); + +skip_fall: + final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); + if (final_maxlen == final_rise_delay.maxlen) { + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); + final_delay = final_rise_delay.final_phase; + } else { + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); + final_delay = final_fall_delay.final_phase; + } + msdc_set_data_delay(host, final_delay); + + dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay); + return final_delay == 0xff ? -EIO : 0; +} + +/* + * MSDC IP which supports data tune + async fifo can do CMD/DAT tune + * together, which can save the tuning time. 
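+ * The combined scan applies the same candidate value to both the
+ * CMD and DAT pad delays on each step, so a single pass over the
+ * delay range replaces the separate msdc_tune_response() and
+ * msdc_tune_data() passes.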
+ */ +static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 rise_delay = 0, fall_delay = 0; + struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; + u8 final_delay, final_maxlen; + int i, ret; + + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, + host->latch_ck); + + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_clr_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + for (i = 0 ; i < PAD_DELAY_MAX; i++) { + msdc_set_cmd_delay(host, i); + msdc_set_data_delay(host, i); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + rise_delay |= (1 << i); + } + final_rise_delay = get_best_delay(host, rise_delay); + /* if rising edge has enough margin, then do not scan falling edge */ + if (final_rise_delay.maxlen >= 12 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) + goto skip_fall; + + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_set_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + for (i = 0; i < PAD_DELAY_MAX; i++) { + msdc_set_cmd_delay(host, i); + msdc_set_data_delay(host, i); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + fall_delay |= (1 << i); + } + final_fall_delay = get_best_delay(host, fall_delay); + +skip_fall: + final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); + if (final_maxlen == final_rise_delay.maxlen) { + sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_clr_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + final_delay = final_rise_delay.final_phase; + } else { + sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); + sdr_set_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + final_delay = final_fall_delay.final_phase; + } + + msdc_set_cmd_delay(host, final_delay); + msdc_set_data_delay(host, final_delay); + + dev_dbg(host->dev, "Final pad delay: %x\n", final_delay); + return final_delay == 0xff ? 
-EIO : 0; +} + +static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct msdc_host *host = mmc_priv(mmc); + int ret; + u32 tune_reg = host->dev_comp->pad_tune_reg; + + if (host->dev_comp->data_tune && host->dev_comp->async_fifo) { + ret = msdc_tune_together(mmc, opcode); + if (host->hs400_mode) { + sdr_clr_bits(host->base + MSDC_IOCON, + MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); + msdc_set_data_delay(host, 0); + } + goto tune_done; + } + if (host->hs400_mode && + host->dev_comp->hs400_tune) + ret = hs400_tune_response(mmc, opcode); + else + ret = msdc_tune_response(mmc, opcode); + if (ret == -EIO) { + dev_err(host->dev, "Tune response fail!\n"); + return ret; + } + if (host->hs400_mode == false) { + ret = msdc_tune_data(mmc, opcode); + if (ret == -EIO) + dev_err(host->dev, "Tune data fail!\n"); + } + +tune_done: + host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); + host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); + if (host->top_base) { + host->saved_tune_para.emmc_top_control = readl(host->top_base + + EMMC_TOP_CONTROL); + host->saved_tune_para.emmc_top_cmd = readl(host->top_base + + EMMC_TOP_CMD); + } + return ret; +} + +static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct msdc_host *host = mmc_priv(mmc); + host->hs400_mode = true; + + if (host->top_base) + writel(host->hs400_ds_delay, + host->top_base + EMMC50_PAD_DS_TUNE); + else + writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); + /* hs400 mode must set it to 0 */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); + /* to improve read performance, set outstanding to 2 */ + sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2); + + return 0; +} + +static void msdc_hw_reset(struct mmc_host *mmc) +{ + struct msdc_host *host = mmc_priv(mmc); + + sdr_set_bits(host->base + EMMC_IOCON, 1); + udelay(10); /* 10us is enough */ + sdr_clr_bits(host->base + EMMC_IOCON, 1); +} + +static void msdc_ack_sdio_irq(struct mmc_host *mmc) +{ + unsigned long flags; + struct msdc_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __msdc_enable_sdio_irq(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int msdc_get_cd(struct mmc_host *mmc) +{ + struct msdc_host *host = mmc_priv(mmc); + int val; + + if (mmc->caps & MMC_CAP_NONREMOVABLE) + return 1; + + if (!host->internal_cd) + return mmc_gpio_get_cd(mmc); + + val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS; + if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + return !!val; + else + return !val; +} + +static void msdc_cqe_enable(struct mmc_host *mmc) +{ + struct msdc_host *host = mmc_priv(mmc); + + /* enable cmdq irq */ + writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN); + /* enable busy check */ + sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); + /* default write data / busy timeout 20s */ + msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0); + /* default read data timeout 1s */ + msdc_set_timeout(host, 1000000000ULL, 0); +} + +static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct msdc_host *host = mmc_priv(mmc); + unsigned int val = 0; + + /* disable cmdq irq */ + sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ); + /* disable busy check */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); + + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); + + if (recovery) { + sdr_set_field(host->base + 
MSDC_DMA_CTRL, + MSDC_DMA_CTRL_STOP, 1); + if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val, + !(val & MSDC_DMA_CFG_STS), 1, 3000))) + return; + msdc_reset_hw(host); + } +} + +static void msdc_cqe_pre_enable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 reg; + + reg = cqhci_readl(cq_host, CQHCI_CFG); + reg |= CQHCI_ENABLE; + cqhci_writel(cq_host, reg, CQHCI_CFG); +} + +static void msdc_cqe_post_disable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 reg; + + reg = cqhci_readl(cq_host, CQHCI_CFG); + reg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, reg, CQHCI_CFG); +} + +static const struct mmc_host_ops mt_msdc_ops = { + .post_req = msdc_post_req, + .pre_req = msdc_pre_req, + .request = msdc_ops_request, + .set_ios = msdc_ops_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = msdc_get_cd, + .enable_sdio_irq = msdc_enable_sdio_irq, + .ack_sdio_irq = msdc_ack_sdio_irq, + .start_signal_voltage_switch = msdc_ops_switch_volt, + .card_busy = msdc_card_busy, + .execute_tuning = msdc_execute_tuning, + .prepare_hs400_tuning = msdc_prepare_hs400_tuning, + .hw_reset = msdc_hw_reset, +}; + +static const struct cqhci_host_ops msdc_cmdq_ops = { + .enable = msdc_cqe_enable, + .disable = msdc_cqe_disable, + .pre_enable = msdc_cqe_pre_enable, + .post_disable = msdc_cqe_post_disable, +}; + +static void msdc_of_property_parse(struct platform_device *pdev, + struct msdc_host *host) +{ + of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", + &host->latch_ck); + + of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay", + &host->hs400_ds_delay); + + of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay", + &host->hs200_cmd_int_delay); + + of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay", + &host->hs400_cmd_int_delay); + + if (of_property_read_bool(pdev->dev.of_node, + "mediatek,hs400-cmd-resp-sel-rising")) + host->hs400_cmd_resp_sel_rising = true; + else + host->hs400_cmd_resp_sel_rising = false; + + if (of_property_read_bool(pdev->dev.of_node, + "supports-cqe")) + host->cqhci = true; + else + host->cqhci = false; +} + +static int msdc_drv_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct msdc_host *host; + struct resource *res; + int ret; + + if (!pdev->dev.of_node) { + dev_err(&pdev->dev, "No DT found\n"); + return -EINVAL; + } + + /* Allocate MMC host for this device */ + mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + host->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + host->top_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->top_base)) + host->top_base = NULL; + } + + ret = mmc_regulator_get_supply(mmc); + if (ret) + goto host_free; + + host->src_clk = devm_clk_get(&pdev->dev, "source"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->h_clk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(host->h_clk)) { + ret = PTR_ERR(host->h_clk); + goto host_free; + } + + host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(host->bus_clk)) + host->bus_clk = NULL; + /*source clock control gate is optional clock*/ + host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg"); + if (IS_ERR(host->src_clk_cg)) + host->src_clk_cg = 
NULL; + + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "hrst"); + if (IS_ERR(host->reset)) + return PTR_ERR(host->reset); + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + ret = host->irq; + goto host_free; + } + + host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(host->pinctrl)) { + ret = PTR_ERR(host->pinctrl); + dev_err(&pdev->dev, "Cannot find pinctrl!\n"); + goto host_free; + } + + host->pins_default = pinctrl_lookup_state(host->pinctrl, "default"); + if (IS_ERR(host->pins_default)) { + ret = PTR_ERR(host->pins_default); + dev_err(&pdev->dev, "Cannot find pinctrl default!\n"); + goto host_free; + } + + host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); + if (IS_ERR(host->pins_uhs)) { + ret = PTR_ERR(host->pins_uhs); + dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n"); + goto host_free; + } + + msdc_of_property_parse(pdev, host); + + host->dev = &pdev->dev; + host->dev_comp = of_device_get_match_data(&pdev->dev); + host->src_clk_freq = clk_get_rate(host->src_clk); + /* Set host parameters to mmc */ + mmc->ops = &mt_msdc_ops; + if (host->dev_comp->clk_div_bits == 8) + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); + else + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095); + + if (!(mmc->caps & MMC_CAP_NONREMOVABLE) && + !mmc_can_gpio_cd(mmc) && + host->dev_comp->use_internal_cd) { + /* + * Is removable but no GPIO declared, so + * use internal functionality. + */ + host->internal_cd = true; + } + + if (mmc->caps & MMC_CAP_SDIO_IRQ) + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + mmc->caps |= MMC_CAP_CMD23; + if (host->cqhci) + mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + if (host->dev_comp->support_64g) + mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT; + else + mmc->max_seg_size = BDMA_DESC_BUFLEN; + mmc->max_blk_size = 2048; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + if (host->dev_comp->support_64g) + host->dma_mask = DMA_BIT_MASK(36); + else + host->dma_mask = DMA_BIT_MASK(32); + mmc_dev(mmc)->dma_mask = &host->dma_mask; + + host->timeout_clks = 3 * 1048576; + host->dma.gpd = dma_alloc_coherent(&pdev->dev, + 2 * sizeof(struct mt_gpdma_desc), + &host->dma.gpd_addr, GFP_KERNEL); + host->dma.bd = dma_alloc_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct mt_bdma_desc), + &host->dma.bd_addr, GFP_KERNEL); + if (!host->dma.gpd || !host->dma.bd) { + ret = -ENOMEM; + goto release_mem; + } + msdc_init_gpd_bd(host, &host->dma); + INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout); + spin_lock_init(&host->lock); + + platform_set_drvdata(pdev, mmc); + msdc_ungate_clock(host); + msdc_init_hw(host); + + if (mmc->caps2 & MMC_CAP2_CQE) { + host->cq_host = devm_kzalloc(mmc->parent, + sizeof(*host->cq_host), + GFP_KERNEL); + if (!host->cq_host) { + ret = -ENOMEM; + goto host_free; + } + host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + host->cq_host->mmio = host->base + 0x800; + host->cq_host->ops = &msdc_cmdq_ops; + ret = cqhci_init(host->cq_host, mmc, true); + if (ret) + goto host_free; + mmc->max_segs = 128; + /* cqhci 16bit length */ + /* 0 size, means 65536 so we don't have to -1 here */ + mmc->max_seg_size = 64 * 1024; + } + + ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, + IRQF_TRIGGER_NONE, pdev->name, host); + if (ret) + goto release; + + pm_runtime_set_active(host->dev); + pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY); + 
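/*
+ * With autosuspend armed here, msdc_runtime_suspend() saves the
+ * host registers and gates the clocks after
+ * MTK_MMC_AUTOSUSPEND_DELAY ms of inactivity, and
+ * msdc_runtime_resume() ungates them and restores the saved state
+ * before the next request.
+ */
+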
pm_runtime_use_autosuspend(host->dev); + pm_runtime_enable(host->dev); + ret = mmc_add_host(mmc); + + if (ret) + goto end; + + return 0; +end: + pm_runtime_disable(host->dev); +release: + platform_set_drvdata(pdev, NULL); + msdc_deinit_hw(host); + msdc_gate_clock(host); +release_mem: + if (host->dma.gpd) + dma_free_coherent(&pdev->dev, + 2 * sizeof(struct mt_gpdma_desc), + host->dma.gpd, host->dma.gpd_addr); + if (host->dma.bd) + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct mt_bdma_desc), + host->dma.bd, host->dma.bd_addr); +host_free: + mmc_free_host(mmc); + + return ret; +} + +static int msdc_drv_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct msdc_host *host; + + mmc = platform_get_drvdata(pdev); + host = mmc_priv(mmc); + + pm_runtime_get_sync(host->dev); + + platform_set_drvdata(pdev, NULL); + mmc_remove_host(mmc); + msdc_deinit_hw(host); + msdc_gate_clock(host); + + pm_runtime_disable(host->dev); + pm_runtime_put_noidle(host->dev); + dma_free_coherent(&pdev->dev, + 2 * sizeof(struct mt_gpdma_desc), + host->dma.gpd, host->dma.gpd_addr); + dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc), + host->dma.bd, host->dma.bd_addr); + + mmc_free_host(mmc); + + return 0; +} + +static void msdc_save_reg(struct msdc_host *host) +{ + u32 tune_reg = host->dev_comp->pad_tune_reg; + + host->save_para.msdc_cfg = readl(host->base + MSDC_CFG); + host->save_para.iocon = readl(host->base + MSDC_IOCON); + host->save_para.sdc_cfg = readl(host->base + SDC_CFG); + host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); + host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); + host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2); + host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE); + host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); + host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0); + host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3); + host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG); + if (host->top_base) { + host->save_para.emmc_top_control = + readl(host->top_base + EMMC_TOP_CONTROL); + host->save_para.emmc_top_cmd = + readl(host->top_base + EMMC_TOP_CMD); + host->save_para.emmc50_pad_ds_tune = + readl(host->top_base + EMMC50_PAD_DS_TUNE); + } else { + host->save_para.pad_tune = readl(host->base + tune_reg); + } +} + +static void msdc_restore_reg(struct msdc_host *host) +{ + struct mmc_host *mmc = mmc_from_priv(host); + u32 tune_reg = host->dev_comp->pad_tune_reg; + + writel(host->save_para.msdc_cfg, host->base + MSDC_CFG); + writel(host->save_para.iocon, host->base + MSDC_IOCON); + writel(host->save_para.sdc_cfg, host->base + SDC_CFG); + writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); + writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); + writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2); + writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE); + writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); + writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0); + writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3); + writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG); + if (host->top_base) { + writel(host->save_para.emmc_top_control, + host->top_base + EMMC_TOP_CONTROL); + writel(host->save_para.emmc_top_cmd, + host->top_base + EMMC_TOP_CMD); + writel(host->save_para.emmc50_pad_ds_tune, + host->top_base + EMMC50_PAD_DS_TUNE); + } else { + 
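/* IP variants without a separate top register block keep the
+ * pad delay setup in the per-variant pad_tune_reg instead. */
+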
writel(host->save_para.pad_tune, host->base + tune_reg); + } + + if (sdio_irq_claimed(mmc)) + __msdc_enable_sdio_irq(host, 1); +} + +static int __maybe_unused msdc_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct msdc_host *host = mmc_priv(mmc); + + msdc_save_reg(host); + msdc_gate_clock(host); + return 0; +} + +static int __maybe_unused msdc_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct msdc_host *host = mmc_priv(mmc); + + msdc_ungate_clock(host); + msdc_restore_reg(host); + return 0; +} + +static int __maybe_unused msdc_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + int ret; + u32 val; + + if (mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(mmc); + if (ret) + return ret; + val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT); + writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT); + } + + return pm_runtime_force_suspend(dev); +} + +static int __maybe_unused msdc_resume(struct device *dev) +{ + return pm_runtime_force_resume(dev); +} + +static const struct dev_pm_ops msdc_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume) + SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) +}; + +static struct platform_driver mt_msdc_driver = { + .probe = msdc_drv_probe, + .remove = msdc_drv_remove, + .driver = { + .name = "mtk-msdc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = msdc_of_ids, + .pm = &msdc_dev_pm_ops, + }, +}; + +module_platform_driver(mt_msdc_driver); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver"); diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c new file mode 100644 index 000000000..b4f6a0a2f --- /dev/null +++ b/drivers/mmc/host/mvsdio.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell MMC/SD/SDIO driver + * + * Authors: Maen Suleiman, Nicolas Pitre + * Copyright (C) 2008-2009 Marvell Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mvsdio.h" + +#define DRIVER_NAME "mvsdio" + +static int maxfreq; +static int nodma; + +struct mvsd_host { + void __iomem *base; + struct mmc_request *mrq; + spinlock_t lock; + unsigned int xfer_mode; + unsigned int intr_en; + unsigned int ctrl; + unsigned int pio_size; + void *pio_ptr; + unsigned int sg_frags; + unsigned int ns_per_clk; + unsigned int clock; + unsigned int base_clock; + struct timer_list timer; + struct mmc_host *mmc; + struct device *dev; + struct clk *clk; +}; + +#define mvsd_write(offs, val) writel(val, iobase + (offs)) +#define mvsd_read(offs) readl(iobase + (offs)) + +static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) +{ + void __iomem *iobase = host->base; + unsigned int tmout; + int tmout_index; + + /* + * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE + * register is sometimes not set before a while when some + * "unusual" data block sizes are used (such as with the SWITCH + * command), even despite the fact that the XFER_DONE interrupt + * was raised. And if another data transfer starts before + * this bit comes to good sense (which eventually happens by + * itself) then the new transfer simply fails with a timeout. 
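+ * The workaround below busy-waits up to one second (jiffies + HZ)
+ * for bit 13 (FIFO_EMPTY) of MVSD_HW_STATE to assert before
+ * programming the next transfer, and only warns if it never does.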
+ */ + if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) { + unsigned long t = jiffies + HZ; + unsigned int hw_state, count = 0; + do { + hw_state = mvsd_read(MVSD_HW_STATE); + if (time_after(jiffies, t)) { + dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); + break; + } + count++; + } while (!(hw_state & (1 << 13))); + dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " + "(hw=0x%04x, count=%d, jiffies=%ld)\n", + hw_state, count, jiffies - (t - HZ)); + } + + /* If timeout=0 then maximum timeout index is used. */ + tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); + tmout += data->timeout_clks; + tmout_index = fls(tmout - 1) - 12; + if (tmout_index < 0) + tmout_index = 0; + if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX) + tmout_index = MVSD_HOST_CTRL_TMOUT_MAX; + + dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n", + (data->flags & MMC_DATA_READ) ? "read" : "write", + (u32)sg_virt(data->sg), data->blocks, data->blksz, + tmout, tmout_index); + + host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK; + host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index); + mvsd_write(MVSD_HOST_CTRL, host->ctrl); + mvsd_write(MVSD_BLK_COUNT, data->blocks); + mvsd_write(MVSD_BLK_SIZE, data->blksz); + + if (nodma || (data->blksz | data->sg->offset) & 3 || + ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) { + /* + * We cannot do DMA on a buffer which offset or size + * is not aligned on a 4-byte boundary. + * + * It also appears the host to card DMA can corrupt + * data when the buffer is not aligned on a 64 byte + * boundary. + */ + host->pio_size = data->blocks * data->blksz; + host->pio_ptr = sg_virt(data->sg); + if (!nodma) + dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", + host->pio_ptr, host->pio_size); + return 1; + } else { + dma_addr_t phys_addr; + + host->sg_frags = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + phys_addr = sg_dma_address(data->sg); + mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); + mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); + return 0; + } +} + +static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mvsd_host *host = mmc_priv(mmc); + void __iomem *iobase = host->base; + struct mmc_command *cmd = mrq->cmd; + u32 cmdreg = 0, xfer = 0, intr = 0; + unsigned long flags; + unsigned int timeout; + + BUG_ON(host->mrq != NULL); + host->mrq = mrq; + + dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n", + cmd->opcode, mvsd_read(MVSD_HW_STATE)); + + cmdreg = MVSD_CMD_INDEX(cmd->opcode); + + if (cmd->flags & MMC_RSP_BUSY) + cmdreg |= MVSD_CMD_RSP_48BUSY; + else if (cmd->flags & MMC_RSP_136) + cmdreg |= MVSD_CMD_RSP_136; + else if (cmd->flags & MMC_RSP_PRESENT) + cmdreg |= MVSD_CMD_RSP_48; + else + cmdreg |= MVSD_CMD_RSP_NONE; + + if (cmd->flags & MMC_RSP_CRC) + cmdreg |= MVSD_CMD_CHECK_CMDCRC; + + if (cmd->flags & MMC_RSP_OPCODE) + cmdreg |= MVSD_CMD_INDX_CHECK; + + if (cmd->flags & MMC_RSP_PRESENT) { + cmdreg |= MVSD_UNEXPECTED_RESP; + intr |= MVSD_NOR_UNEXP_RSP; + } + + if (mrq->data) { + struct mmc_data *data = mrq->data; + int pio; + + cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16; + xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN; + if (data->flags & MMC_DATA_READ) + xfer |= MVSD_XFER_MODE_TO_HOST; + + pio = mvsd_setup_data(host, data); + if (pio) { + xfer |= MVSD_XFER_MODE_PIO; + /* PIO section of mvsd_irq has comments on those bits */ + if (data->flags & MMC_DATA_WRITE) + intr |= MVSD_NOR_TX_AVAIL; + else if (host->pio_size > 32) + intr |= MVSD_NOR_RX_FIFO_8W; + else + 
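/* a short read (<= 32 bytes) never trips the
+ * 8-word FIFO threshold, use per-word RX_READY */
+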
intr |= MVSD_NOR_RX_READY; + } + + if (data->stop) { + struct mmc_command *stop = data->stop; + u32 cmd12reg = 0; + + mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff); + mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16); + + if (stop->flags & MMC_RSP_BUSY) + cmd12reg |= MVSD_AUTOCMD12_BUSY; + if (stop->flags & MMC_RSP_OPCODE) + cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK; + cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode); + mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg); + + xfer |= MVSD_XFER_MODE_AUTO_CMD12; + intr |= MVSD_NOR_AUTOCMD12_DONE; + } else { + intr |= MVSD_NOR_XFER_DONE; + } + } else { + intr |= MVSD_NOR_CMD_DONE; + } + + mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff); + mvsd_write(MVSD_ARG_HI, cmd->arg >> 16); + + spin_lock_irqsave(&host->lock, flags); + + host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; + host->xfer_mode |= xfer; + mvsd_write(MVSD_XFER_MODE, host->xfer_mode); + + mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT); + mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); + mvsd_write(MVSD_CMD, cmdreg); + + host->intr_en &= MVSD_NOR_CARD_INT; + host->intr_en |= intr | MVSD_NOR_ERROR; + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + mvsd_write(MVSD_ERR_INTR_EN, 0xffff); + + timeout = cmd->busy_timeout ? cmd->busy_timeout : 5000; + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout)); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd, + u32 err_status) +{ + void __iomem *iobase = host->base; + + if (cmd->flags & MMC_RSP_136) { + unsigned int response[8], i; + for (i = 0; i < 8; i++) + response[i] = mvsd_read(MVSD_RSP(i)); + cmd->resp[0] = ((response[0] & 0x03ff) << 22) | + ((response[1] & 0xffff) << 6) | + ((response[2] & 0xfc00) >> 10); + cmd->resp[1] = ((response[2] & 0x03ff) << 22) | + ((response[3] & 0xffff) << 6) | + ((response[4] & 0xfc00) >> 10); + cmd->resp[2] = ((response[4] & 0x03ff) << 22) | + ((response[5] & 0xffff) << 6) | + ((response[6] & 0xfc00) >> 10); + cmd->resp[3] = ((response[6] & 0x03ff) << 22) | + ((response[7] & 0x3fff) << 8); + } else if (cmd->flags & MMC_RSP_PRESENT) { + unsigned int response[3], i; + for (i = 0; i < 3; i++) + response[i] = mvsd_read(MVSD_RSP(i)); + cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | + ((response[1] & 0xffff) << (14 - 8)) | + ((response[0] & 0x03ff) << (30 - 8)); + cmd->resp[1] = ((response[0] & 0xfc00) >> 10); + cmd->resp[2] = 0; + cmd->resp[3] = 0; + } + + if (err_status & MVSD_ERR_CMD_TIMEOUT) { + cmd->error = -ETIMEDOUT; + } else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT | + MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) { + cmd->error = -EILSEQ; + } + err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC | + MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX | + MVSD_ERR_CMD_STARTBIT); + + return err_status; +} + +static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, + u32 err_status) +{ + void __iomem *iobase = host->base; + + if (host->pio_ptr) { + host->pio_ptr = NULL; + host->pio_size = 0; + } else { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, + mmc_get_dma_dir(data)); + } + + if (err_status & MVSD_ERR_DATA_TIMEOUT) + data->error = -ETIMEDOUT; + else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT)) + data->error = -EILSEQ; + else if (err_status & MVSD_ERR_XFER_SIZE) + data->error = -EBADE; + err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC | + MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE); + + dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n", + 
mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT)); + data->bytes_xfered = + (data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz; + /* We can't be sure about the last block when errors are detected */ + if (data->bytes_xfered && data->error) + data->bytes_xfered -= data->blksz; + + /* Handle auto CMD12 response */ + if (data->stop) { + unsigned int response[3], i; + for (i = 0; i < 3; i++) + response[i] = mvsd_read(MVSD_AUTO_RSP(i)); + data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) | + ((response[1] & 0xffff) << (14 - 8)) | + ((response[0] & 0x03ff) << (30 - 8)); + data->stop->resp[1] = ((response[0] & 0xfc00) >> 10); + data->stop->resp[2] = 0; + data->stop->resp[3] = 0; + + if (err_status & MVSD_ERR_AUTOCMD12) { + u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS); + dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12); + if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE) + data->stop->error = -ENOEXEC; + else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT) + data->stop->error = -ETIMEDOUT; + else if (err_cmd12) + data->stop->error = -EILSEQ; + err_status &= ~MVSD_ERR_AUTOCMD12; + } + } + + return err_status; +} + +static irqreturn_t mvsd_irq(int irq, void *dev) +{ + struct mvsd_host *host = dev; + void __iomem *iobase = host->base; + u32 intr_status, intr_done_mask; + int irq_handled = 0; + + intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); + dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n", + intr_status, mvsd_read(MVSD_NOR_INTR_EN), + mvsd_read(MVSD_HW_STATE)); + + /* + * It looks like the SDIO IP can issue one late, spurious irq + * although all irqs should be disabled. To work around this, + * bail out early if we didn't expect any irqs to occur. + */ + if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) { + dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", + mvsd_read(MVSD_NOR_INTR_STATUS), + mvsd_read(MVSD_NOR_INTR_EN), + mvsd_read(MVSD_ERR_INTR_STATUS), + mvsd_read(MVSD_ERR_INTR_EN)); + return IRQ_HANDLED; + } + + spin_lock(&host->lock); + + /* PIO handling, if needed. Messy business... */ + if (host->pio_size && + (intr_status & host->intr_en & + (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) { + u16 *p = host->pio_ptr; + int s = host->pio_size; + while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) { + readsw(iobase + MVSD_FIFO, p, 16); + p += 16; + s -= 32; + intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); + } + /* + * Normally we'd use < 32 here, but the RX_FIFO_8W bit + * doesn't appear to assert when there are exactly 32 bytes + * (8 words) left to fetch in a transfer. 
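+ * As an illustrative walkthrough, draining a 50-byte PIO read takes + * one 32-byte readsw() burst while RX_FIFO_8W is set, four 4-byte + * reads gated on RX_READY, and a final 2-byte tail assembled through + * the val[] staging buffer below.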
+ */ + if (s <= 32) { + while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) { + put_unaligned(mvsd_read(MVSD_FIFO), p++); + put_unaligned(mvsd_read(MVSD_FIFO), p++); + s -= 4; + intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); + } + if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) { + u16 val[2] = {0, 0}; + val[0] = mvsd_read(MVSD_FIFO); + val[1] = mvsd_read(MVSD_FIFO); + memcpy(p, ((void *)&val) + 4 - s, s); + s = 0; + intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); + } + if (s == 0) { + host->intr_en &= + ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W); + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) { + host->intr_en &= ~MVSD_NOR_RX_FIFO_8W; + host->intr_en |= MVSD_NOR_RX_READY; + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + } + } + dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", + s, intr_status, mvsd_read(MVSD_HW_STATE)); + host->pio_ptr = p; + host->pio_size = s; + irq_handled = 1; + } else if (host->pio_size && + (intr_status & host->intr_en & + (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) { + u16 *p = host->pio_ptr; + int s = host->pio_size; + /* + * The TX_FIFO_8W bit is unreliable. When set, bursting + * 16 halfwords all at once in the FIFO drops data. Actually + * TX_AVAIL does go off after only one word is pushed even if + * TX_FIFO_8W remains set. + */ + while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) { + mvsd_write(MVSD_FIFO, get_unaligned(p++)); + mvsd_write(MVSD_FIFO, get_unaligned(p++)); + s -= 4; + intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); + } + if (s < 4) { + if (s && (intr_status & MVSD_NOR_TX_AVAIL)) { + u16 val[2] = {0, 0}; + memcpy(((void *)&val) + 4 - s, p, s); + mvsd_write(MVSD_FIFO, val[0]); + mvsd_write(MVSD_FIFO, val[1]); + s = 0; + intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); + } + if (s == 0) { + host->intr_en &= + ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + } + } + dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", + s, intr_status, mvsd_read(MVSD_HW_STATE)); + host->pio_ptr = p; + host->pio_size = s; + irq_handled = 1; + } + + mvsd_write(MVSD_NOR_INTR_STATUS, intr_status); + + intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY | + MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W; + if (intr_status & host->intr_en & ~intr_done_mask) { + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + u32 err_status = 0; + + del_timer(&host->timer); + host->mrq = NULL; + + host->intr_en &= MVSD_NOR_CARD_INT; + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + mvsd_write(MVSD_ERR_INTR_EN, 0); + + spin_unlock(&host->lock); + + if (intr_status & MVSD_NOR_UNEXP_RSP) { + cmd->error = -EPROTO; + } else if (intr_status & MVSD_NOR_ERROR) { + err_status = mvsd_read(MVSD_ERR_INTR_STATUS); + dev_dbg(host->dev, "err 0x%04x\n", err_status); + } + + err_status = mvsd_finish_cmd(host, cmd, err_status); + if (mrq->data) + err_status = mvsd_finish_data(host, mrq->data, err_status); + if (err_status) { + dev_err(host->dev, "unhandled error status %#04x\n", + err_status); + cmd->error = -ENOMSG; + } + + mmc_request_done(host->mmc, mrq); + irq_handled = 1; + } else + spin_unlock(&host->lock); + + if (intr_status & MVSD_NOR_CARD_INT) { + mmc_signal_sdio_irq(host->mmc); + irq_handled = 1; + } + + if (irq_handled) + return IRQ_HANDLED; + + dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n", + intr_status, host->intr_en, host->pio_size); + return IRQ_NONE; +} + +static void mvsd_timeout_timer(struct timer_list *t) +{ + struct 
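+ /* + * A sketch of the PIO write tail above: a trailing 1-3 bytes are + * copied into the end of the 4-byte val[] pair so the FIFO always + * receives two 16-bit writes; the unused leading bytes stay zero. + */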
mvsd_host *host = from_timer(host, t, timer); + void __iomem *iobase = host->base; + struct mmc_request *mrq; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + mrq = host->mrq; + if (mrq) { + dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); + dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n", + mvsd_read(MVSD_HW_STATE), + mvsd_read(MVSD_NOR_INTR_STATUS), + mvsd_read(MVSD_NOR_INTR_EN)); + + host->mrq = NULL; + + mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); + + host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN; + mvsd_write(MVSD_XFER_MODE, host->xfer_mode); + + host->intr_en &= MVSD_NOR_CARD_INT; + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + mvsd_write(MVSD_ERR_INTR_EN, 0); + mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); + + mrq->cmd->error = -ETIMEDOUT; + mvsd_finish_cmd(host, mrq->cmd, 0); + if (mrq->data) { + mrq->data->error = -ETIMEDOUT; + mvsd_finish_data(host, mrq->data, 0); + } + } + spin_unlock_irqrestore(&host->lock, flags); + + if (mrq) + mmc_request_done(host->mmc, mrq); +} + +static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mvsd_host *host = mmc_priv(mmc); + void __iomem *iobase = host->base; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (enable) { + host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN; + host->intr_en |= MVSD_NOR_CARD_INT; + } else { + host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN; + host->intr_en &= ~MVSD_NOR_CARD_INT; + } + mvsd_write(MVSD_XFER_MODE, host->xfer_mode); + mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void mvsd_power_up(struct mvsd_host *host) +{ + void __iomem *iobase = host->base; + dev_dbg(host->dev, "power up\n"); + mvsd_write(MVSD_NOR_INTR_EN, 0); + mvsd_write(MVSD_ERR_INTR_EN, 0); + mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); + mvsd_write(MVSD_XFER_MODE, 0); + mvsd_write(MVSD_NOR_STATUS_EN, 0xffff); + mvsd_write(MVSD_ERR_STATUS_EN, 0xffff); + mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); + mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); +} + +static void mvsd_power_down(struct mvsd_host *host) +{ + void __iomem *iobase = host->base; + dev_dbg(host->dev, "power down\n"); + mvsd_write(MVSD_NOR_INTR_EN, 0); + mvsd_write(MVSD_ERR_INTR_EN, 0); + mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW); + mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); + mvsd_write(MVSD_NOR_STATUS_EN, 0); + mvsd_write(MVSD_ERR_STATUS_EN, 0); + mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff); + mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff); +} + +static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mvsd_host *host = mmc_priv(mmc); + void __iomem *iobase = host->base; + u32 ctrl_reg = 0; + + if (ios->power_mode == MMC_POWER_UP) + mvsd_power_up(host); + + if (ios->clock == 0) { + mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK); + mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX); + host->clock = 0; + dev_dbg(host->dev, "clock off\n"); + } else if (ios->clock != host->clock) { + u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1; + if (m > MVSD_BASE_DIV_MAX) + m = MVSD_BASE_DIV_MAX; + mvsd_write(MVSD_CLK_DIV, m); + host->clock = ios->clock; + host->ns_per_clk = 1000000000 / (host->base_clock / (m+1)); + dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n", + ios->clock, host->base_clock / (m+1), m); + } + + /* default transfer mode */ + ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN; + ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST; + + /* default to maximum timeout */ + ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK; + ctrl_reg |= 
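+ /* + * Worked example of the divisor math in mvsd_set_ios() above + * (illustrative numbers): with base_clock = 100 MHz and + * ios->clock = 25 MHz, m = DIV_ROUND_UP(100000000, 25000000) - 1 = 3, + * giving a card clock of 100 MHz / (3 + 1) = 25 MHz and ns_per_clk = + * 1000000000 / 25000000 = 40 ns, later used by mvsd_setup_data() for + * the data timeout. + */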
MVSD_HOST_CTRL_TMOUT_EN; + + if (ios->bus_mode == MMC_BUSMODE_PUSHPULL) + ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN; + + if (ios->bus_width == MMC_BUS_WIDTH_4) + ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; + + /* + * The HI_SPEED_EN bit is causing trouble with many (but not all) + * high-speed SD, SDHC and SDIO cards. Not enabling that bit + * makes all cards work. So let's just ignore that bit for now + * and revisit this issue if problems caused by not enabling this + * bit are ever reported. + */ +#if 0 + if (ios->timing == MMC_TIMING_MMC_HS || + ios->timing == MMC_TIMING_SD_HS) + ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; +#endif + + host->ctrl = ctrl_reg; + mvsd_write(MVSD_HOST_CTRL, ctrl_reg); + dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg, + (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ? + "push-pull" : "open-drain", + (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ? + "4bit-width" : "1bit-width", + (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ? + "high-speed" : ""); + + if (ios->power_mode == MMC_POWER_OFF) + mvsd_power_down(host); +} + +static const struct mmc_host_ops mvsd_ops = { + .request = mvsd_request, + .get_ro = mmc_gpio_get_ro, + .set_ios = mvsd_set_ios, + .enable_sdio_irq = mvsd_enable_sdio_irq, +}; + +static void +mv_conf_mbus_windows(struct mvsd_host *host, + const struct mbus_dram_target_info *dram) +{ + void __iomem *iobase = host->base; + int i; + + for (i = 0; i < 4; i++) { + writel(0, iobase + MVSD_WINDOW_CTRL(i)); + writel(0, iobase + MVSD_WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + iobase + MVSD_WINDOW_CTRL(i)); + writel(cs->base, iobase + MVSD_WINDOW_BASE(i)); + } +} + +static int mvsd_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mmc_host *mmc = NULL; + struct mvsd_host *host = NULL; + const struct mbus_dram_target_info *dram; + int ret, irq; + + if (!np) { + dev_err(&pdev->dev, "no DT node\n"); + return -ENODEV; + } + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto out; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->dev = &pdev->dev; + + /* + * Some non-DT platforms do not pass a clock, and the clock + * frequency is passed through platform_data. On DT platforms, + * a clock must always be passed, even if there is no gatable + * clock associated with the SDIO interface (it can simply be a + * fixed-rate clock). 
+ */ + host->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "no clock associated\n"); + ret = -EINVAL; + goto out; + } + clk_prepare_enable(host->clk); + + mmc->ops = &mvsd_ops; + + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); + mmc->f_max = MVSD_CLOCKRATE_MAX; + + mmc->max_blk_size = 2048; + mmc->max_blk_count = 65535; + + mmc->max_segs = 1; + mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + host->base_clock = clk_get_rate(host->clk) / 2; + ret = mmc_of_parse(mmc); + if (ret < 0) + goto out; + if (maxfreq) + mmc->f_max = maxfreq; + + spin_lock_init(&host->lock); + + host->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto out; + } + + /* (Re-)program MBUS remapping windows if we are asked to. */ + dram = mv_mbus_dram_info(); + if (dram) + mv_conf_mbus_windows(host, dram); + + mvsd_power_down(host); + + ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); + if (ret) { + dev_err(&pdev->dev, "cannot assign irq %d\n", irq); + goto out; + } + + timer_setup(&host->timer, mvsd_timeout_timer, 0); + platform_set_drvdata(pdev, mmc); + ret = mmc_add_host(mmc); + if (ret) + goto out; + + if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) + dev_dbg(&pdev->dev, "using GPIO for card detection\n"); + else + dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n"); + + return 0; + +out: + if (mmc) { + if (!IS_ERR(host->clk)) + clk_disable_unprepare(host->clk); + mmc_free_host(mmc); + } + + return ret; +} + +static int mvsd_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + + struct mvsd_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + del_timer_sync(&host->timer); + mvsd_power_down(host); + + if (!IS_ERR(host->clk)) + clk_disable_unprepare(host->clk); + mmc_free_host(mmc); + + return 0; +} + +static const struct of_device_id mvsdio_dt_ids[] = { + { .compatible = "marvell,orion-sdio" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); + +static struct platform_driver mvsd_driver = { + .probe = mvsd_probe, + .remove = mvsd_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = mvsdio_dt_ids, + }, +}; + +module_platform_driver(mvsd_driver); + +/* maximum card clock frequency (default 50MHz) */ +module_param(maxfreq, int, 0); + +/* force PIO transfers all the time */ +module_param(nodma, int, 0); + +MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); +MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mvsdio"); diff --git a/drivers/mmc/host/mvsdio.h b/drivers/mmc/host/mvsdio.h new file mode 100644 index 000000000..2f1458ac6 --- /dev/null +++ b/drivers/mmc/host/mvsdio.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2008 Marvell Semiconductors, All Rights Reserved. 
+ */ + +#ifndef __MVSDIO_H +#define __MVSDIO_H + +/* + * Clock rates + */ + +#define MVSD_CLOCKRATE_MAX 50000000 +#define MVSD_BASE_DIV_MAX 0x7ff + + +/* + * Register offsets + */ + +#define MVSD_SYS_ADDR_LOW 0x000 +#define MVSD_SYS_ADDR_HI 0x004 +#define MVSD_BLK_SIZE 0x008 +#define MVSD_BLK_COUNT 0x00c +#define MVSD_ARG_LOW 0x010 +#define MVSD_ARG_HI 0x014 +#define MVSD_XFER_MODE 0x018 +#define MVSD_CMD 0x01c +#define MVSD_RSP(i) (0x020 + ((i)<<2)) +#define MVSD_RSP0 0x020 +#define MVSD_RSP1 0x024 +#define MVSD_RSP2 0x028 +#define MVSD_RSP3 0x02c +#define MVSD_RSP4 0x030 +#define MVSD_RSP5 0x034 +#define MVSD_RSP6 0x038 +#define MVSD_RSP7 0x03c +#define MVSD_FIFO 0x040 +#define MVSD_RSP_CRC7 0x044 +#define MVSD_HW_STATE 0x048 +#define MVSD_HOST_CTRL 0x050 +#define MVSD_BLK_GAP_CTRL 0x054 +#define MVSD_CLK_CTRL 0x058 +#define MVSD_SW_RESET 0x05c +#define MVSD_NOR_INTR_STATUS 0x060 +#define MVSD_ERR_INTR_STATUS 0x064 +#define MVSD_NOR_STATUS_EN 0x068 +#define MVSD_ERR_STATUS_EN 0x06c +#define MVSD_NOR_INTR_EN 0x070 +#define MVSD_ERR_INTR_EN 0x074 +#define MVSD_AUTOCMD12_ERR_STATUS 0x078 +#define MVSD_CURR_BYTE_LEFT 0x07c +#define MVSD_CURR_BLK_LEFT 0x080 +#define MVSD_AUTOCMD12_ARG_LOW 0x084 +#define MVSD_AUTOCMD12_ARG_HI 0x088 +#define MVSD_AUTOCMD12_CMD 0x08c +#define MVSD_AUTO_RSP(i) (0x090 + ((i)<<2)) +#define MVSD_AUTO_RSP0 0x090 +#define MVSD_AUTO_RSP1 0x094 +#define MVSD_AUTO_RSP2 0x098 +#define MVSD_CLK_DIV 0x128 + +#define MVSD_WINDOW_CTRL(i) (0x108 + ((i) << 3)) +#define MVSD_WINDOW_BASE(i) (0x10c + ((i) << 3)) + + +/* + * MVSD_CMD + */ + +#define MVSD_CMD_RSP_NONE (0 << 0) +#define MVSD_CMD_RSP_136 (1 << 0) +#define MVSD_CMD_RSP_48 (2 << 0) +#define MVSD_CMD_RSP_48BUSY (3 << 0) + +#define MVSD_CMD_CHECK_DATACRC16 (1 << 2) +#define MVSD_CMD_CHECK_CMDCRC (1 << 3) +#define MVSD_CMD_INDX_CHECK (1 << 4) +#define MVSD_CMD_DATA_PRESENT (1 << 5) +#define MVSD_UNEXPECTED_RESP (1 << 7) +#define MVSD_CMD_INDEX(x) ((x) << 8) + + +/* + * MVSD_AUTOCMD12_CMD + */ + +#define MVSD_AUTOCMD12_BUSY (1 << 0) +#define MVSD_AUTOCMD12_INDX_CHECK (1 << 1) +#define MVSD_AUTOCMD12_INDEX(x) ((x) << 8) + +/* + * MVSD_XFER_MODE + */ + +#define MVSD_XFER_MODE_WR_DATA_START (1 << 0) +#define MVSD_XFER_MODE_HW_WR_DATA_EN (1 << 1) +#define MVSD_XFER_MODE_AUTO_CMD12 (1 << 2) +#define MVSD_XFER_MODE_INT_CHK_EN (1 << 3) +#define MVSD_XFER_MODE_TO_HOST (1 << 4) +#define MVSD_XFER_MODE_STOP_CLK (1 << 5) +#define MVSD_XFER_MODE_PIO (1 << 6) + + +/* + * MVSD_HOST_CTRL + */ + +#define MVSD_HOST_CTRL_PUSH_PULL_EN (1 << 0) + +#define MVSD_HOST_CTRL_CARD_TYPE_MEM_ONLY (0 << 1) +#define MVSD_HOST_CTRL_CARD_TYPE_IO_ONLY (1 << 1) +#define MVSD_HOST_CTRL_CARD_TYPE_IO_MEM_COMBO (2 << 1) +#define MVSD_HOST_CTRL_CARD_TYPE_IO_MMC (3 << 1) +#define MVSD_HOST_CTRL_CARD_TYPE_MASK (3 << 1) + +#define MVSD_HOST_CTRL_BIG_ENDIAN (1 << 3) +#define MVSD_HOST_CTRL_LSB_FIRST (1 << 4) +#define MVSD_HOST_CTRL_DATA_WIDTH_4_BITS (1 << 9) +#define MVSD_HOST_CTRL_HI_SPEED_EN (1 << 10) + +#define MVSD_HOST_CTRL_TMOUT_MAX 0xf +#define MVSD_HOST_CTRL_TMOUT_MASK (0xf << 11) +#define MVSD_HOST_CTRL_TMOUT(x) ((x) << 11) +#define MVSD_HOST_CTRL_TMOUT_EN (1 << 15) + + +/* + * MVSD_SW_RESET + */ + +#define MVSD_SW_RESET_NOW (1 << 8) + + +/* + * Normal interrupt status bits + */ + +#define MVSD_NOR_CMD_DONE (1 << 0) +#define MVSD_NOR_XFER_DONE (1 << 1) +#define MVSD_NOR_BLK_GAP_EVT (1 << 2) +#define MVSD_NOR_DMA_DONE (1 << 3) +#define MVSD_NOR_TX_AVAIL (1 << 4) +#define MVSD_NOR_RX_READY (1 << 5) +#define MVSD_NOR_CARD_INT (1 << 8) +#define 
MVSD_NOR_READ_WAIT_ON (1 << 9) +#define MVSD_NOR_RX_FIFO_8W (1 << 10) +#define MVSD_NOR_TX_FIFO_8W (1 << 11) +#define MVSD_NOR_SUSPEND_ON (1 << 12) +#define MVSD_NOR_AUTOCMD12_DONE (1 << 13) +#define MVSD_NOR_UNEXP_RSP (1 << 14) +#define MVSD_NOR_ERROR (1 << 15) + + +/* + * Error status bits + */ + +#define MVSD_ERR_CMD_TIMEOUT (1 << 0) +#define MVSD_ERR_CMD_CRC (1 << 1) +#define MVSD_ERR_CMD_ENDBIT (1 << 2) +#define MVSD_ERR_CMD_INDEX (1 << 3) +#define MVSD_ERR_DATA_TIMEOUT (1 << 4) +#define MVSD_ERR_DATA_CRC (1 << 5) +#define MVSD_ERR_DATA_ENDBIT (1 << 6) +#define MVSD_ERR_AUTOCMD12 (1 << 8) +#define MVSD_ERR_CMD_STARTBIT (1 << 9) +#define MVSD_ERR_XFER_SIZE (1 << 10) +#define MVSD_ERR_RESP_T_BIT (1 << 11) +#define MVSD_ERR_CRC_ENDBIT (1 << 12) +#define MVSD_ERR_CRC_STARTBIT (1 << 13) +#define MVSD_ERR_CRC_STATUS (1 << 14) + + +/* + * CMD12 error status bits + */ + +#define MVSD_AUTOCMD12_ERR_NOTEXE (1 << 0) +#define MVSD_AUTOCMD12_ERR_TIMEOUT (1 << 1) +#define MVSD_AUTOCMD12_ERR_CRC (1 << 2) +#define MVSD_AUTOCMD12_ERR_ENDBIT (1 << 3) +#define MVSD_AUTOCMD12_ERR_INDEX (1 << 4) +#define MVSD_AUTOCMD12_ERR_RESP_T_BIT (1 << 5) +#define MVSD_AUTOCMD12_ERR_RESP_STARTBIT (1 << 6) + +#endif diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c new file mode 100644 index 000000000..93a105b07 --- /dev/null +++ b/drivers/mmc/host/mxcmmc.c @@ -0,0 +1,1260 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver + * + * This is a driver for the SDHC controller found in Freescale MX2/MX3 + * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c). + * Unlike the hardware found on MX1, this hardware just works and does + * not need all the quirks found in imxmmc.c, hence the separate driver. 
+ * + * Copyright (C) 2008 Sascha Hauer, Pengutronix + * Copyright (C) 2006 Pavel Pisa, PiKRON + * + * derived from pxamci.c by Russell King + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#define DRIVER_NAME "mxc-mmc" +#define MXCMCI_TIMEOUT_MS 10000 + +#define MMC_REG_STR_STP_CLK 0x00 +#define MMC_REG_STATUS 0x04 +#define MMC_REG_CLK_RATE 0x08 +#define MMC_REG_CMD_DAT_CONT 0x0C +#define MMC_REG_RES_TO 0x10 +#define MMC_REG_READ_TO 0x14 +#define MMC_REG_BLK_LEN 0x18 +#define MMC_REG_NOB 0x1C +#define MMC_REG_REV_NO 0x20 +#define MMC_REG_INT_CNTR 0x24 +#define MMC_REG_CMD 0x28 +#define MMC_REG_ARG 0x2C +#define MMC_REG_RES_FIFO 0x34 +#define MMC_REG_BUFFER_ACCESS 0x38 + +#define STR_STP_CLK_RESET (1 << 3) +#define STR_STP_CLK_START_CLK (1 << 1) +#define STR_STP_CLK_STOP_CLK (1 << 0) + +#define STATUS_CARD_INSERTION (1 << 31) +#define STATUS_CARD_REMOVAL (1 << 30) +#define STATUS_YBUF_EMPTY (1 << 29) +#define STATUS_XBUF_EMPTY (1 << 28) +#define STATUS_YBUF_FULL (1 << 27) +#define STATUS_XBUF_FULL (1 << 26) +#define STATUS_BUF_UND_RUN (1 << 25) +#define STATUS_BUF_OVFL (1 << 24) +#define STATUS_SDIO_INT_ACTIVE (1 << 14) +#define STATUS_END_CMD_RESP (1 << 13) +#define STATUS_WRITE_OP_DONE (1 << 12) +#define STATUS_DATA_TRANS_DONE (1 << 11) +#define STATUS_READ_OP_DONE (1 << 11) +#define STATUS_WR_CRC_ERROR_CODE_MASK (3 << 10) +#define STATUS_CARD_BUS_CLK_RUN (1 << 8) +#define STATUS_BUF_READ_RDY (1 << 7) +#define STATUS_BUF_WRITE_RDY (1 << 6) +#define STATUS_RESP_CRC_ERR (1 << 5) +#define STATUS_CRC_READ_ERR (1 << 3) +#define STATUS_CRC_WRITE_ERR (1 << 2) +#define STATUS_TIME_OUT_RESP (1 << 1) +#define STATUS_TIME_OUT_READ (1 << 0) +#define STATUS_ERR_MASK 0x2f + +#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1 << 12) +#define CMD_DAT_CONT_STOP_READWAIT (1 << 11) +#define CMD_DAT_CONT_START_READWAIT (1 << 10) +#define CMD_DAT_CONT_BUS_WIDTH_4 (2 << 8) +#define CMD_DAT_CONT_INIT (1 << 7) +#define CMD_DAT_CONT_WRITE (1 << 4) +#define CMD_DAT_CONT_DATA_ENABLE (1 << 3) +#define CMD_DAT_CONT_RESPONSE_48BIT_CRC (1 << 0) +#define CMD_DAT_CONT_RESPONSE_136BIT (2 << 0) +#define CMD_DAT_CONT_RESPONSE_48BIT (3 << 0) + +#define INT_SDIO_INT_WKP_EN (1 << 18) +#define INT_CARD_INSERTION_WKP_EN (1 << 17) +#define INT_CARD_REMOVAL_WKP_EN (1 << 16) +#define INT_CARD_INSERTION_EN (1 << 15) +#define INT_CARD_REMOVAL_EN (1 << 14) +#define INT_SDIO_IRQ_EN (1 << 13) +#define INT_DAT0_EN (1 << 12) +#define INT_BUF_READ_EN (1 << 4) +#define INT_BUF_WRITE_EN (1 << 3) +#define INT_END_CMD_RES_EN (1 << 2) +#define INT_WRITE_OP_DONE_EN (1 << 1) +#define INT_READ_OP_EN (1 << 0) + +enum mxcmci_type { + IMX21_MMC, + IMX31_MMC, + MPC512X_MMC, +}; + +struct mxcmci_host { + struct mmc_host *mmc; + void __iomem *base; + dma_addr_t phys_base; + int detect_irq; + struct dma_chan *dma; + struct dma_async_tx_descriptor *desc; + int do_dma; + int default_irq_mask; + int use_sdio; + unsigned int power_mode; + struct imxmmc_platform_data *pdata; + + struct mmc_request *req; + struct mmc_command *cmd; + struct mmc_data *data; + + unsigned int datasize; + unsigned int dma_dir; + + u16 rev_no; + unsigned int cmdat; + + struct clk *clk_ipg; + struct clk *clk_per; + + int clock; + + struct work_struct datawork; + spinlock_t lock; + + int burstlen; + int dmareq; + struct dma_slave_config dma_slave_config; + struct imx_dma_data 
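+ /* + * Note on STATUS_ERR_MASK above: 0x2f collects the five error flags + * RESP_CRC_ERR (1<<5) + CRC_READ_ERR (1<<3) + CRC_WRITE_ERR (1<<2) + + * TIME_OUT_RESP (1<<1) + TIME_OUT_READ (1<<0) = + * 0x20 + 0x08 + 0x04 + 0x02 + 0x01 = 0x2f. + */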
dma_data; + + struct timer_list watchdog; + enum mxcmci_type devtype; +}; + +static const struct platform_device_id mxcmci_devtype[] = { + { + .name = "imx21-mmc", + .driver_data = IMX21_MMC, + }, { + .name = "imx31-mmc", + .driver_data = IMX31_MMC, + }, { + .name = "mpc512x-sdhc", + .driver_data = MPC512X_MMC, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, mxcmci_devtype); + +static const struct of_device_id mxcmci_of_match[] = { + { + .compatible = "fsl,imx21-mmc", + .data = &mxcmci_devtype[IMX21_MMC], + }, { + .compatible = "fsl,imx31-mmc", + .data = &mxcmci_devtype[IMX31_MMC], + }, { + .compatible = "fsl,mpc5121-sdhc", + .data = &mxcmci_devtype[MPC512X_MMC], + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, mxcmci_of_match); + +static inline int is_imx31_mmc(struct mxcmci_host *host) +{ + return host->devtype == IMX31_MMC; +} + +static inline int is_mpc512x_mmc(struct mxcmci_host *host) +{ + return host->devtype == MPC512X_MMC; +} + +static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + return ioread32be(host->base + reg); + else + return readl(host->base + reg); +} + +static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + iowrite32be(val, host->base + reg); + else + writel(val, host->base + reg); +} + +static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + return ioread32be(host->base + reg); + else + return readw(host->base + reg); +} + +static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + iowrite32be(val, host->base + reg); + else + writew(val, host->base + reg); +} + +static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); + +static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd) +{ + if (!IS_ERR(host->mmc->supply.vmmc)) { + if (host->power_mode == MMC_POWER_UP) + mmc_regulator_set_ocr(host->mmc, + host->mmc->supply.vmmc, vdd); + else if (host->power_mode == MMC_POWER_OFF) + mmc_regulator_set_ocr(host->mmc, + host->mmc->supply.vmmc, 0); + } + + if (host->pdata && host->pdata->setpower) + host->pdata->setpower(mmc_dev(host->mmc), vdd); +} + +static inline int mxcmci_use_dma(struct mxcmci_host *host) +{ + return host->do_dma; +} + +static void mxcmci_softreset(struct mxcmci_host *host) +{ + int i; + + dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n"); + + /* reset sequence */ + mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK); + mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, + MMC_REG_STR_STP_CLK); + + for (i = 0; i < 8; i++) + mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); + + mxcmci_writew(host, 0xff, MMC_REG_RES_TO); +} + +#if IS_ENABLED(CONFIG_PPC_MPC512x) +static inline void buffer_swap32(u32 *buf, int len) +{ + int i; + + for (i = 0; i < ((len + 3) / 4); i++) { + *buf = swab32(*buf); + buf++; + } +} + +static void mxcmci_swap_buffers(struct mmc_data *data) +{ + struct scatterlist *sg; + int i; + + for_each_sg(data->sg, sg, data->sg_len, i) + buffer_swap32(sg_virt(sg), sg->length); +} +#else +static inline void mxcmci_swap_buffers(struct mmc_data *data) {} +#endif + +static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) +{ + unsigned int nob = data->blocks; + unsigned int blksz = data->blksz; + unsigned int datasize = nob * blksz; + struct scatterlist *sg; + enum dma_transfer_direction slave_dirn; + int i, 
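+ /* + * Note on mxcmci_swap_buffers() above: the MPC512x SDHC sits behind + * a big-endian bus, so buffers are swab32()ed before DMA writes and + * again once the transfer finishes (see mxcmci_finish_data()); e.g. + * swab32(0x11223344) == 0x44332211. On i.MX the stub compiles away. + */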
nents; + + host->data = data; + data->bytes_xfered = 0; + + mxcmci_writew(host, nob, MMC_REG_NOB); + mxcmci_writew(host, blksz, MMC_REG_BLK_LEN); + host->datasize = datasize; + + if (!mxcmci_use_dma(host)) + return 0; + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 3 || sg->length & 3 || sg->length < 512) { + host->do_dma = 0; + return 0; + } + } + + if (data->flags & MMC_DATA_READ) { + host->dma_dir = DMA_FROM_DEVICE; + slave_dirn = DMA_DEV_TO_MEM; + } else { + host->dma_dir = DMA_TO_DEVICE; + slave_dirn = DMA_MEM_TO_DEV; + + mxcmci_swap_buffers(data); + } + + nents = dma_map_sg(host->dma->device->dev, data->sg, + data->sg_len, host->dma_dir); + if (nents != data->sg_len) + return -EINVAL; + + host->desc = dmaengine_prep_slave_sg(host->dma, + data->sg, data->sg_len, slave_dirn, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (!host->desc) { + dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, + host->dma_dir); + host->do_dma = 0; + return 0; /* Fall back to PIO */ + } + wmb(); + + dmaengine_submit(host->desc); + dma_async_issue_pending(host->dma); + + mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); + + return 0; +} + +static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); +static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); + +static void mxcmci_dma_callback(void *data) +{ + struct mxcmci_host *host = data; + u32 stat; + + del_timer(&host->watchdog); + + stat = mxcmci_readl(host, MMC_REG_STATUS); + + dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); + + mxcmci_data_done(host, stat); +} + +static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, + unsigned int cmdat) +{ + u32 int_cntr = host->default_irq_mask; + unsigned long flags; + + WARN_ON(host->cmd != NULL); + host->cmd = cmd; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: /* short CRC, OPCODE */ + case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ + cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC; + break; + case MMC_RSP_R2: /* long 136 bit + CRC */ + cmdat |= CMD_DAT_CONT_RESPONSE_136BIT; + break; + case MMC_RSP_R3: /* short */ + cmdat |= CMD_DAT_CONT_RESPONSE_48BIT; + break; + case MMC_RSP_NONE: + break; + default: + dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n", + mmc_resp_type(cmd)); + cmd->error = -EINVAL; + return -EINVAL; + } + + int_cntr = INT_END_CMD_RES_EN; + + if (mxcmci_use_dma(host)) { + if (host->dma_dir == DMA_FROM_DEVICE) { + host->desc->callback = mxcmci_dma_callback; + host->desc->callback_param = host; + } else { + int_cntr |= INT_WRITE_OP_DONE_EN; + } + } + + spin_lock_irqsave(&host->lock, flags); + if (host->use_sdio) + int_cntr |= INT_SDIO_IRQ_EN; + mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); + spin_unlock_irqrestore(&host->lock, flags); + + mxcmci_writew(host, cmd->opcode, MMC_REG_CMD); + mxcmci_writel(host, cmd->arg, MMC_REG_ARG); + mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT); + + return 0; +} + +static void mxcmci_finish_request(struct mxcmci_host *host, + struct mmc_request *req) +{ + u32 int_cntr = host->default_irq_mask; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->use_sdio) + int_cntr |= INT_SDIO_IRQ_EN; + mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); + spin_unlock_irqrestore(&host->lock, flags); + + host->req = NULL; + host->cmd = NULL; + host->data = NULL; + + mmc_request_done(host->mmc, req); +} + +static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) +{ + struct mmc_data *data = host->data; + int 
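+ /* + * Summary of the response-format mapping in mxcmci_start_cmd() + * above: R1/R1B -> CMD_DAT_CONT_RESPONSE_48BIT_CRC (0x1), R2 -> + * CMD_DAT_CONT_RESPONSE_136BIT (0x2), R3 -> + * CMD_DAT_CONT_RESPONSE_48BIT (0x3, no CRC check); anything else is + * rejected with -EINVAL before the hardware is touched. + */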
data_error; + + if (mxcmci_use_dma(host)) { + dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, + host->dma_dir); + mxcmci_swap_buffers(data); + } + + if (stat & STATUS_ERR_MASK) { + dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", + stat); + if (stat & STATUS_CRC_READ_ERR) { + dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__); + data->error = -EILSEQ; + } else if (stat & STATUS_CRC_WRITE_ERR) { + u32 err_code = (stat >> 9) & 0x3; + if (err_code == 2) { /* No CRC response */ + dev_err(mmc_dev(host->mmc), + "%s: No CRC -ETIMEDOUT\n", __func__); + data->error = -ETIMEDOUT; + } else { + dev_err(mmc_dev(host->mmc), + "%s: -EILSEQ\n", __func__); + data->error = -EILSEQ; + } + } else if (stat & STATUS_TIME_OUT_READ) { + dev_err(mmc_dev(host->mmc), + "%s: read -ETIMEDOUT\n", __func__); + data->error = -ETIMEDOUT; + } else { + dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__); + data->error = -EIO; + } + } else { + data->bytes_xfered = host->datasize; + } + + data_error = data->error; + + host->data = NULL; + + return data_error; +} + +static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat) +{ + struct mmc_command *cmd = host->cmd; + int i; + u32 a, b, c; + + if (!cmd) + return; + + if (stat & STATUS_TIME_OUT_RESP) { + dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); + cmd->error = -ETIMEDOUT; + } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { + dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); + cmd->error = -EILSEQ; + } + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + for (i = 0; i < 4; i++) { + a = mxcmci_readw(host, MMC_REG_RES_FIFO); + b = mxcmci_readw(host, MMC_REG_RES_FIFO); + cmd->resp[i] = a << 16 | b; + } + } else { + a = mxcmci_readw(host, MMC_REG_RES_FIFO); + b = mxcmci_readw(host, MMC_REG_RES_FIFO); + c = mxcmci_readw(host, MMC_REG_RES_FIFO); + cmd->resp[0] = a << 24 | b << 8 | c >> 8; + } + } +} + +static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask) +{ + u32 stat; + unsigned long timeout = jiffies + HZ; + + do { + stat = mxcmci_readl(host, MMC_REG_STATUS); + if (stat & STATUS_ERR_MASK) + return stat; + if (time_after(jiffies, timeout)) { + mxcmci_softreset(host); + mxcmci_set_clk_rate(host, host->clock); + return STATUS_TIME_OUT_READ; + } + if (stat & mask) + return 0; + cpu_relax(); + } while (1); +} + +static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes) +{ + unsigned int stat; + u32 *buf = _buf; + + while (bytes > 3) { + stat = mxcmci_poll_status(host, + STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE); + if (stat) + return stat; + *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); + bytes -= 4; + } + + if (bytes) { + u8 *b = (u8 *)buf; + u32 tmp; + + stat = mxcmci_poll_status(host, + STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE); + if (stat) + return stat; + tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); + memcpy(b, &tmp, bytes); + } + + return 0; +} + +static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes) +{ + unsigned int stat; + u32 *buf = _buf; + + while (bytes > 3) { + stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); + if (stat) + return stat; + mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS); + bytes -= 4; + } + + if (bytes) { + u8 *b = (u8 *)buf; + u32 tmp; + + stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); + if (stat) + return stat; + + memcpy(&tmp, b, bytes); + mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS); + } + + return mxcmci_poll_status(host, 
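+ /* + * Sketch for mxcmci_read_response() above: a short response is read + * as three 16-bit FIFO words a, b, c and reassembled as + * resp[0] = a << 24 | b << 8 | c >> 8, i.e. the low byte of a, all + * of b and the high byte of c form the 32-bit response word, while + * the low byte of c (CRC7 and end bit) is discarded. + */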
STATUS_BUF_WRITE_RDY); +} + +static int mxcmci_transfer_data(struct mxcmci_host *host) +{ + struct mmc_data *data = host->req->data; + struct scatterlist *sg; + int stat, i; + + host->data = data; + host->datasize = 0; + + if (data->flags & MMC_DATA_READ) { + for_each_sg(data->sg, sg, data->sg_len, i) { + stat = mxcmci_pull(host, sg_virt(sg), sg->length); + if (stat) + return stat; + host->datasize += sg->length; + } + } else { + for_each_sg(data->sg, sg, data->sg_len, i) { + stat = mxcmci_push(host, sg_virt(sg), sg->length); + if (stat) + return stat; + host->datasize += sg->length; + } + stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE); + if (stat) + return stat; + } + return 0; +} + +static void mxcmci_datawork(struct work_struct *work) +{ + struct mxcmci_host *host = container_of(work, struct mxcmci_host, + datawork); + int datastat = mxcmci_transfer_data(host); + + mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, + MMC_REG_STATUS); + mxcmci_finish_data(host, datastat); + + if (host->req->stop) { + if (mxcmci_start_cmd(host, host->req->stop, 0)) { + mxcmci_finish_request(host, host->req); + return; + } + } else { + mxcmci_finish_request(host, host->req); + } +} + +static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) +{ + struct mmc_request *req; + int data_error; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (!host->data) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + if (!host->req) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + req = host->req; + if (!req->stop) + host->req = NULL; /* we will handle finish req below */ + + data_error = mxcmci_finish_data(host, stat); + + spin_unlock_irqrestore(&host->lock, flags); + + if (data_error) + return; + + mxcmci_read_response(host, stat); + host->cmd = NULL; + + if (req->stop) { + if (mxcmci_start_cmd(host, req->stop, 0)) { + mxcmci_finish_request(host, req); + return; + } + } else { + mxcmci_finish_request(host, req); + } +} + +static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) +{ + mxcmci_read_response(host, stat); + host->cmd = NULL; + + if (!host->data && host->req) { + mxcmci_finish_request(host, host->req); + return; + } + + /* For the DMA case the DMA engine handles the data transfer + * automatically. For non DMA we have to do it ourselves. + * Don't do it in interrupt context though. 
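+ * (The datawork item, set up with INIT_WORK() in mxcmci_probe(), + * performs that PIO transfer from process context via + * mxcmci_transfer_data().)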
+ */ + if (!mxcmci_use_dma(host) && host->data) + schedule_work(&host->datawork); + +} + +static irqreturn_t mxcmci_irq(int irq, void *devid) +{ + struct mxcmci_host *host = devid; + bool sdio_irq; + u32 stat; + + stat = mxcmci_readl(host, MMC_REG_STATUS); + mxcmci_writel(host, + stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | + STATUS_WRITE_OP_DONE), + MMC_REG_STATUS); + + dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); + + spin_lock(&host->lock); + sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; + spin_unlock(&host->lock); + + if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE))) + mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS); + + if (sdio_irq) { + mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS); + mmc_signal_sdio_irq(host->mmc); + } + + if (stat & STATUS_END_CMD_RESP) + mxcmci_cmd_done(host, stat); + + if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) { + del_timer(&host->watchdog); + mxcmci_data_done(host, stat); + } + + if (host->default_irq_mask && + (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + + return IRQ_HANDLED; +} + +static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct mxcmci_host *host = mmc_priv(mmc); + unsigned int cmdat = host->cmdat; + int error; + + WARN_ON(host->req != NULL); + + host->req = req; + host->cmdat &= ~CMD_DAT_CONT_INIT; + + if (host->dma) + host->do_dma = 1; + + if (req->data) { + error = mxcmci_setup_data(host, req->data); + if (error) { + req->cmd->error = error; + goto out; + } + + + cmdat |= CMD_DAT_CONT_DATA_ENABLE; + + if (req->data->flags & MMC_DATA_WRITE) + cmdat |= CMD_DAT_CONT_WRITE; + } + + error = mxcmci_start_cmd(host, req->cmd, cmdat); + +out: + if (error) + mxcmci_finish_request(host, req); +} + +static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) +{ + unsigned int divider; + int prescaler = 0; + unsigned int clk_in = clk_get_rate(host->clk_per); + + while (prescaler <= 0x800) { + for (divider = 1; divider <= 0xF; divider++) { + int x; + + x = (clk_in / (divider + 1)); + + if (prescaler) + x /= (prescaler * 2); + + if (x <= clk_ios) + break; + } + if (divider < 0x10) + break; + + if (prescaler == 0) + prescaler = 1; + else + prescaler <<= 1; + } + + mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE); + + dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n", + prescaler, divider, clk_in, clk_ios); +} + +static int mxcmci_setup_dma(struct mmc_host *mmc) +{ + struct mxcmci_host *host = mmc_priv(mmc); + struct dma_slave_config *config = &host->dma_slave_config; + + config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; + config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; + config->dst_addr_width = 4; + config->src_addr_width = 4; + config->dst_maxburst = host->burstlen; + config->src_maxburst = host->burstlen; + config->device_fc = false; + + return dmaengine_slave_config(host->dma, config); +} + +static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mxcmci_host *host = mmc_priv(mmc); + int burstlen, ret; + + /* + * use burstlen of 64 (16 words) in 4 bit mode (--> reg value 0) + * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16) + */ + if (ios->bus_width == MMC_BUS_WIDTH_4) + burstlen = 16; + else + burstlen = 4; + + if (mxcmci_use_dma(host) && burstlen != host->burstlen) { + host->burstlen = burstlen; + ret = mxcmci_setup_dma(mmc); + if (ret) { + 
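+ /* + * Walkthrough of mxcmci_set_clk_rate() above (illustrative numbers): + * for clk_in = 66 MHz and clk_ios = 25 MHz the first pass + * (prescaler = 0) tries divider = 1 (33 MHz, too fast), then + * divider = 2 (22 MHz <= 25 MHz), so the loops stop and + * (0 << 4) | 2 is written to MMC_REG_CLK_RATE; the prescaler only + * grows once divider alone (max 0xF) cannot slow the clock enough. + */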
dev_err(mmc_dev(host->mmc), + "failed to config DMA channel. Falling back to PIO\n"); + dma_release_channel(host->dma); + host->do_dma = 0; + host->dma = NULL; + } + } + + if (ios->bus_width == MMC_BUS_WIDTH_4) + host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; + else + host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; + + if (host->power_mode != ios->power_mode) { + host->power_mode = ios->power_mode; + mxcmci_set_power(host, ios->vdd); + + if (ios->power_mode == MMC_POWER_ON) + host->cmdat |= CMD_DAT_CONT_INIT; + } + + if (ios->clock) { + mxcmci_set_clk_rate(host, ios->clock); + mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); + } else { + mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK); + } + + host->clock = ios->clock; +} + +static irqreturn_t mxcmci_detect_irq(int irq, void *data) +{ + struct mmc_host *mmc = data; + + dev_dbg(mmc_dev(mmc), "%s\n", __func__); + + mmc_detect_change(mmc, msecs_to_jiffies(250)); + return IRQ_HANDLED; +} + +static int mxcmci_get_ro(struct mmc_host *mmc) +{ + struct mxcmci_host *host = mmc_priv(mmc); + + if (host->pdata && host->pdata->get_ro) + return !!host->pdata->get_ro(mmc_dev(mmc)); + /* + * If the board doesn't support read-only detection (no mmc_gpio + * context or the gpio is invalid), let the mmc core decide + * what to do. + */ + return mmc_gpio_get_ro(mmc); +} + +static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mxcmci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 int_cntr; + + spin_lock_irqsave(&host->lock, flags); + host->use_sdio = enable; + int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR); + + if (enable) + int_cntr |= INT_SDIO_IRQ_EN; + else + int_cntr &= ~INT_SDIO_IRQ_EN; + + mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) +{ + struct mxcmci_host *mxcmci = mmc_priv(host); + + /* + * MX3 SoCs have a silicon bug which corrupts CRC calculation of + * multi-block transfers when the connected SDIO peripheral doesn't + * drive the BUSY line as required by the specs. + * One way to prevent this is to only allow 1-bit transfers. 
+ */ + + if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO) + host->caps &= ~MMC_CAP_4_BIT_DATA; + else + host->caps |= MMC_CAP_4_BIT_DATA; +} + +static bool filter(struct dma_chan *chan, void *param) +{ + struct mxcmci_host *host = param; + + if (!imx_dma_is_general_purpose(chan)) + return false; + + chan->private = &host->dma_data; + + return true; +} + +static void mxcmci_watchdog(struct timer_list *t) +{ + struct mxcmci_host *host = from_timer(host, t, watchdog); + struct mmc_request *req = host->req; + unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); + + if (host->dma_dir == DMA_FROM_DEVICE) { + dmaengine_terminate_all(host->dma); + dev_err(mmc_dev(host->mmc), + "%s: read time out (status = 0x%08x)\n", + __func__, stat); + } else { + dev_err(mmc_dev(host->mmc), + "%s: write time out (status = 0x%08x)\n", + __func__, stat); + mxcmci_softreset(host); + } + + /* Mark the transfer as erroneous and inform the upper layers */ + + if (host->data) + host->data->error = -ETIMEDOUT; + host->req = NULL; + host->cmd = NULL; + host->data = NULL; + mmc_request_done(host->mmc, req); +} + +static const struct mmc_host_ops mxcmci_ops = { + .request = mxcmci_request, + .set_ios = mxcmci_set_ios, + .get_ro = mxcmci_get_ro, + .enable_sdio_irq = mxcmci_enable_sdio_irq, + .init_card = mxcmci_init_card, +}; + +static int mxcmci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct mxcmci_host *host; + struct resource *res; + int ret = 0, irq; + bool dat3_card_detect = false; + dma_cap_mask_t mask; + const struct of_device_id *of_id; + struct imxmmc_platform_data *pdata = pdev->dev.platform_data; + + pr_info("i.MX/MPC512x SDHC driver\n"); + + of_id = of_match_device(mxcmci_of_match, &pdev->dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto out_free; + } + + host->phys_base = res->start; + + ret = mmc_of_parse(mmc); + if (ret) + goto out_free; + mmc->ops = &mxcmci_ops; + + /* For devicetree parsing, the bus width is read from devicetree */ + if (pdata) + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + else + mmc->caps |= MMC_CAP_SDIO_IRQ; + + /* MMC core transfer sizes tunable parameters */ + mmc->max_blk_size = 2048; + mmc->max_blk_count = 65535; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + + if (of_id) { + const struct platform_device_id *id_entry = of_id->data; + host->devtype = id_entry->driver_data; + } else { + host->devtype = pdev->id_entry->driver_data; + } + + /* adjust max_segs after devtype detection */ + if (!is_mpc512x_mmc(host)) + mmc->max_segs = 64; + + host->mmc = mmc; + host->pdata = pdata; + spin_lock_init(&host->lock); + + if (pdata) + dat3_card_detect = pdata->dat3_card_detect; + else if (mmc_card_is_removable(mmc) + && !of_property_read_bool(pdev->dev.of_node, "cd-gpios")) + dat3_card_detect = true; + + ret = mmc_regulator_get_supply(mmc); + if (ret) + goto out_free; + + if (!mmc->ocr_avail) { + if (pdata && pdata->ocr_avail) + mmc->ocr_avail = pdata->ocr_avail; + else + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + } + + if (dat3_card_detect) + host->default_irq_mask = + INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN; + else + host->default_irq_mask = 0; + + host->clk_ipg = 
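+ /* + * Summary of the dat3_card_detect choice above: platform data wins + * when present; otherwise DAT3-based detection is used only for + * removable cards on DT systems without a "cd-gpios" property, and + * takes effect by enabling the card insertion/removal interrupts + * through default_irq_mask. + */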
devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(host->clk_ipg)) { + ret = PTR_ERR(host->clk_ipg); + goto out_free; + } + + host->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(host->clk_per)) { + ret = PTR_ERR(host->clk_per); + goto out_free; + } + + ret = clk_prepare_enable(host->clk_per); + if (ret) + goto out_free; + + ret = clk_prepare_enable(host->clk_ipg); + if (ret) + goto out_clk_per_put; + + mxcmci_softreset(host); + + host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO); + if (host->rev_no != 0x400) { + ret = -ENODEV; + dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", + host->rev_no); + goto out_clk_put; + } + + mmc->f_min = clk_get_rate(host->clk_per) >> 16; + mmc->f_max = clk_get_rate(host->clk_per) >> 1; + + /* recommended in data sheet */ + mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO); + + mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); + + if (!host->pdata) { + host->dma = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(host->dma)) { + if (PTR_ERR(host->dma) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk_put; + } + + /* Ignore errors to fall back to PIO mode */ + host->dma = NULL; + } + } else { + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res) { + host->dmareq = res->start; + host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; + host->dma_data.priority = DMA_PRIO_LOW; + host->dma_data.dma_request = host->dmareq; + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma = dma_request_channel(mask, filter, host); + } + } + if (host->dma) + mmc->max_seg_size = dma_get_max_seg_size( + host->dma->device->dev); + else + dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n"); + + INIT_WORK(&host->datawork, mxcmci_datawork); + + ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0, + dev_name(&pdev->dev), host); + if (ret) + goto out_free_dma; + + platform_set_drvdata(pdev, mmc); + + if (host->pdata && host->pdata->init) { + ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq, + host->mmc); + if (ret) + goto out_free_dma; + } + + timer_setup(&host->watchdog, mxcmci_watchdog, 0); + + ret = mmc_add_host(mmc); + if (ret) + goto out_free_dma; + + return 0; + +out_free_dma: + if (host->dma) + dma_release_channel(host->dma); + +out_clk_put: + clk_disable_unprepare(host->clk_ipg); +out_clk_per_put: + clk_disable_unprepare(host->clk_per); + +out_free: + mmc_free_host(mmc); + + return ret; +} + +static int mxcmci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct mxcmci_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + if (host->pdata && host->pdata->exit) + host->pdata->exit(&pdev->dev, mmc); + + if (host->dma) + dma_release_channel(host->dma); + + clk_disable_unprepare(host->clk_per); + clk_disable_unprepare(host->clk_ipg); + + mmc_free_host(mmc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mxcmci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct mxcmci_host *host = mmc_priv(mmc); + + clk_disable_unprepare(host->clk_per); + clk_disable_unprepare(host->clk_ipg); + return 0; +} + +static int mxcmci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct mxcmci_host *host = mmc_priv(mmc); + int ret; + + ret = clk_prepare_enable(host->clk_per); + if (ret) + return ret; + + ret = clk_prepare_enable(host->clk_ipg); + if (ret) + clk_disable_unprepare(host->clk_per); + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); + +static 
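+ /* + * Note on the frequency bounds in mxcmci_probe() above: the + * controller divides the "per" clock, so f_min = rate >> 16 and + * f_max = rate >> 1; e.g. an assumed 66 MHz per-clock gives f_min = + * 66000000 / 65536 ~= 1007 Hz and f_max = 33 MHz. + */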
struct platform_driver mxcmci_driver = { + .probe = mxcmci_probe, + .remove = mxcmci_remove, + .id_table = mxcmci_devtype, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &mxcmci_pm_ops, + .of_match_table = mxcmci_of_match, + } +}; + +module_platform_driver(mxcmci_driver); + +MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); +MODULE_AUTHOR("Sascha Hauer, Pengutronix"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mxc-mmc"); diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c new file mode 100644 index 000000000..2ec3eb651 --- /dev/null +++ b/drivers/mmc/host/mxs-mmc.c @@ -0,0 +1,750 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Portions copyright (C) 2003 Russell King, PXA MMCI Driver + * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver + * + * Copyright 2008 Embedded Alley Solutions, Inc. + * Copyright 2009-2011 Freescale Semiconductor, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "mxs-mmc" + +#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ + BM_SSP_CTRL1_RESP_ERR_IRQ | \ + BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ + BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \ + BM_SSP_CTRL1_DATA_CRC_IRQ | \ + BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \ + BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ + BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) + +/* card detect polling timeout */ +#define MXS_MMC_DETECT_TIMEOUT (HZ/2) + +struct mxs_mmc_host { + struct mxs_ssp ssp; + + struct mmc_host *mmc; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + unsigned char bus_width; + spinlock_t lock; + int sdio_irq_en; + bool broken_cd; +}; + +static int mxs_mmc_get_cd(struct mmc_host *mmc) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_ssp *ssp = &host->ssp; + int present, ret; + + if (host->broken_cd) + return -ENOSYS; + + ret = mmc_gpio_get_cd(mmc); + if (ret >= 0) + return ret; + + present = mmc->caps & MMC_CAP_NEEDS_POLL || + !(readl(ssp->base + HW_SSP_STATUS(ssp)) & + BM_SSP_STATUS_CARD_DETECT); + + if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + present = !present; + + return present; +} + +static int mxs_mmc_reset(struct mxs_mmc_host *host) +{ + struct mxs_ssp *ssp = &host->ssp; + u32 ctrl0, ctrl1; + int ret; + + ret = stmp_reset_block(ssp->base); + if (ret) + return ret; + + ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; + ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | + BF_SSP(0x7, CTRL1_WORD_LENGTH) | + BM_SSP_CTRL1_DMA_ENABLE | + BM_SSP_CTRL1_POLARITY | + BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN | + BM_SSP_CTRL1_DATA_CRC_IRQ_EN | + BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN | + BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN | + BM_SSP_CTRL1_RESP_ERR_IRQ_EN; + + writel(BF_SSP(0xffff, TIMING_TIMEOUT) | + BF_SSP(2, TIMING_CLOCK_DIVIDE) | + BF_SSP(0, TIMING_CLOCK_RATE), + ssp->base + HW_SSP_TIMING(ssp)); + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; + } + + writel(ctrl0, ssp->base + HW_SSP_CTRL0); + writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp)); + return 0; +} + +static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, + struct mmc_command *cmd); + +static void mxs_mmc_request_done(struct mxs_mmc_host *host) +{ + struct mmc_command *cmd = host->cmd; + struct mmc_data *data = host->data; + struct mmc_request *mrq = host->mrq; + struct mxs_ssp *ssp = &host->ssp; + + if (mmc_resp_type(cmd) & 
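+ /* + * Summary of mxs_mmc_get_cd() above: a GPIO card-detect line takes + * precedence; without one the card counts as present when polling + * is in use or the (active-low) CARD_DETECT status bit reads zero, + * inverted again for MMC_CAP2_CD_ACTIVE_HIGH slots; broken-cd + * setups report -ENOSYS. + */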
MMC_RSP_PRESENT) { + if (mmc_resp_type(cmd) & MMC_RSP_136) { + cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); + cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp)); + cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp)); + cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp)); + } else { + cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); + } + } + + if (cmd == mrq->sbc) { + /* Finished CMD23, now send actual command. */ + mxs_mmc_start_cmd(host, mrq->cmd); + return; + } else if (data) { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, ssp->dma_dir); + /* + * If there was an error on any block, we mark all + * data blocks as being in error. + */ + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + host->data = NULL; + if (data->stop && (data->error || !mrq->sbc)) { + mxs_mmc_start_cmd(host, mrq->stop); + return; + } + } + + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + +static void mxs_mmc_dma_irq_callback(void *param) +{ + struct mxs_mmc_host *host = param; + + mxs_mmc_request_done(host); +} + +static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) +{ + struct mxs_mmc_host *host = dev_id; + struct mmc_command *cmd = host->cmd; + struct mmc_data *data = host->data; + struct mxs_ssp *ssp = &host->ssp; + u32 stat; + + spin_lock(&host->lock); + + stat = readl(ssp->base + HW_SSP_CTRL1(ssp)); + writel(stat & MXS_MMC_IRQ_BITS, + ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); + + spin_unlock(&host->lock); + + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) + mmc_signal_sdio_irq(host->mmc); + + if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) + cmd->error = -ETIMEDOUT; + else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) + cmd->error = -EIO; + + if (data) { + if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | + BM_SSP_CTRL1_RECV_TIMEOUT_IRQ)) + data->error = -ETIMEDOUT; + else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ) + data->error = -EILSEQ; + else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | + BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)) + data->error = -EIO; + } + + return IRQ_HANDLED; +} + +static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( + struct mxs_mmc_host *host, unsigned long flags) +{ + struct mxs_ssp *ssp = &host->ssp; + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = host->data; + struct scatterlist * sgl; + unsigned int sg_len; + + if (data) { + /* data */ + dma_map_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, ssp->dma_dir); + sgl = data->sg; + sg_len = data->sg_len; + } else { + /* pio */ + sgl = (struct scatterlist *) ssp->ssp_pio_words; + sg_len = SSP_PIO_NUM; + } + + desc = dmaengine_prep_slave_sg(ssp->dmach, + sgl, sg_len, ssp->slave_dirn, flags); + if (desc) { + desc->callback = mxs_mmc_dma_irq_callback; + desc->callback_param = host; + } else { + if (data) + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, ssp->dma_dir); + } + + return desc; +} + +static void mxs_mmc_bc(struct mxs_mmc_host *host) +{ + struct mxs_ssp *ssp = &host->ssp; + struct mmc_command *cmd = host->cmd; + struct dma_async_tx_descriptor *desc; + u32 ctrl0, cmd0, cmd1; + + ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC; + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC; + cmd1 = cmd->arg; + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; + } + + ssp->ssp_pio_words[0] = ctrl0; + ssp->ssp_pio_words[1] = cmd0; + ssp->ssp_pio_words[2] = cmd1; + ssp->dma_dir = DMA_NONE; + 
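+ /* + * Note: even command-only transfers go through the DMA engine. + * mxs_mmc_prep_dma() treats ssp_pio_words[] (CTRL0, CMD0, CMD1) as + * the "scatterlist" when there is no data, with DMA_TRANS_NONE as + * the direction, so command completion is reported through + * mxs_mmc_dma_irq_callback() rather than an SSP interrupt. + */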
ssp->slave_dirn = DMA_TRANS_NONE; + desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END); + if (!desc) + goto out; + + dmaengine_submit(desc); + dma_async_issue_pending(ssp->dmach); + return; + +out: + dev_warn(mmc_dev(host->mmc), + "%s: failed to prep dma\n", __func__); +} + +static void mxs_mmc_ac(struct mxs_mmc_host *host) +{ + struct mxs_ssp *ssp = &host->ssp; + struct mmc_command *cmd = host->cmd; + struct dma_async_tx_descriptor *desc; + u32 ignore_crc, get_resp, long_resp; + u32 ctrl0, cmd0, cmd1; + + ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? + 0 : BM_SSP_CTRL0_IGNORE_CRC; + get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? + BM_SSP_CTRL0_GET_RESP : 0; + long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? + BM_SSP_CTRL0_LONG_RESP : 0; + + ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp; + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + cmd1 = cmd->arg; + + if (cmd->opcode == MMC_STOP_TRANSMISSION) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; + } + + ssp->ssp_pio_words[0] = ctrl0; + ssp->ssp_pio_words[1] = cmd0; + ssp->ssp_pio_words[2] = cmd1; + ssp->dma_dir = DMA_NONE; + ssp->slave_dirn = DMA_TRANS_NONE; + desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END); + if (!desc) + goto out; + + dmaengine_submit(desc); + dma_async_issue_pending(ssp->dmach); + return; + +out: + dev_warn(mmc_dev(host->mmc), + "%s: failed to prep dma\n", __func__); +} + +static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns) +{ + const unsigned int ssp_timeout_mul = 4096; + /* + * Calculate ticks in ms since ns are large numbers + * and might overflow + */ + const unsigned int clock_per_ms = clock_rate / 1000; + const unsigned int ms = ns / 1000; + const unsigned int ticks = ms * clock_per_ms; + const unsigned int ssp_ticks = ticks / ssp_timeout_mul; + + WARN_ON(ssp_ticks == 0); + return ssp_ticks; +} + +static void mxs_mmc_adtc(struct mxs_mmc_host *host) +{ + struct mmc_command *cmd = host->cmd; + struct mmc_data *data = cmd->data; + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl = data->sg, *sg; + unsigned int sg_len = data->sg_len; + unsigned int i; + + unsigned short dma_data_dir, timeout; + enum dma_transfer_direction slave_dirn; + unsigned int data_size = 0, log2_blksz; + unsigned int blocks = data->blocks; + + struct mxs_ssp *ssp = &host->ssp; + + u32 ignore_crc, get_resp, long_resp, read; + u32 ctrl0, cmd0, cmd1, val; + + ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? + 0 : BM_SSP_CTRL0_IGNORE_CRC; + get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? + BM_SSP_CTRL0_GET_RESP : 0; + long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 
+ BM_SSP_CTRL0_LONG_RESP : 0; + + if (data->flags & MMC_DATA_WRITE) { + dma_data_dir = DMA_TO_DEVICE; + slave_dirn = DMA_MEM_TO_DEV; + read = 0; + } else { + dma_data_dir = DMA_FROM_DEVICE; + slave_dirn = DMA_DEV_TO_MEM; + read = BM_SSP_CTRL0_READ; + } + + ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) | + ignore_crc | get_resp | long_resp | + BM_SSP_CTRL0_DATA_XFER | read | + BM_SSP_CTRL0_WAIT_FOR_IRQ | + BM_SSP_CTRL0_ENABLE; + + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + + /* get logarithm to base 2 of block size for setting register */ + log2_blksz = ilog2(data->blksz); + + /* + * take special care of the case that data size from data->sg + * is not equal to blocks x blksz + */ + for_each_sg(sgl, sg, sg_len, i) + data_size += sg->length; + + if (data_size != data->blocks * data->blksz) + blocks = 1; + + /* xfer count, block size and count need to be set differently */ + if (ssp_is_old(ssp)) { + ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); + cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | + BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); + } else { + writel(data_size, ssp->base + HW_SSP_XFER_SIZE); + writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | + BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), + ssp->base + HW_SSP_BLOCK_SIZE); + } + + if (cmd->opcode == SD_IO_RW_EXTENDED) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + + cmd1 = cmd->arg; + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; + } + + /* set the timeout count */ + timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns); + val = readl(ssp->base + HW_SSP_TIMING(ssp)); + val &= ~(BM_SSP_TIMING_TIMEOUT); + val |= BF_SSP(timeout, TIMING_TIMEOUT); + writel(val, ssp->base + HW_SSP_TIMING(ssp)); + + /* pio */ + ssp->ssp_pio_words[0] = ctrl0; + ssp->ssp_pio_words[1] = cmd0; + ssp->ssp_pio_words[2] = cmd1; + ssp->dma_dir = DMA_NONE; + ssp->slave_dirn = DMA_TRANS_NONE; + desc = mxs_mmc_prep_dma(host, 0); + if (!desc) + goto out; + + /* append data sg */ + WARN_ON(host->data != NULL); + host->data = data; + ssp->dma_dir = dma_data_dir; + ssp->slave_dirn = slave_dirn; + desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); + if (!desc) + goto out; + + dmaengine_submit(desc); + dma_async_issue_pending(ssp->dmach); + return; +out: + dev_warn(mmc_dev(host->mmc), + "%s: failed to prep dma\n", __func__); +} + +static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, + struct mmc_command *cmd) +{ + host->cmd = cmd; + + switch (mmc_cmd_type(cmd)) { + case MMC_CMD_BC: + mxs_mmc_bc(host); + break; + case MMC_CMD_BCR: + mxs_mmc_ac(host); + break; + case MMC_CMD_AC: + mxs_mmc_ac(host); + break; + case MMC_CMD_ADTC: + mxs_mmc_adtc(host); + break; + default: + dev_warn(mmc_dev(host->mmc), + "%s: unknown MMC command\n", __func__); + break; + } +} + +static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + + WARN_ON(host->mrq != NULL); + host->mrq = mrq; + + if (mrq->sbc) + mxs_mmc_start_cmd(host, mrq->sbc); + else + mxs_mmc_start_cmd(host, mrq->cmd); +} + +static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + + if (ios->bus_width == MMC_BUS_WIDTH_8) + host->bus_width = 2; + else if (ios->bus_width == MMC_BUS_WIDTH_4) + host->bus_width = 1; + else + host->bus_width = 0; + + if (ios->clock) + mxs_ssp_set_clk_rate(&host->ssp, ios->clock); +} + +static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct 
mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_ssp *ssp = &host->ssp; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + host->sdio_irq_en = enable; + + if (enable) { + writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); + writel(BM_SSP_CTRL1_SDIO_IRQ_EN, + ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET); + } else { + writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); + writel(BM_SSP_CTRL1_SDIO_IRQ_EN, + ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); + } + + spin_unlock_irqrestore(&host->lock, flags); + + if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) & + BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + +} + +static const struct mmc_host_ops mxs_mmc_ops = { + .request = mxs_mmc_request, + .get_ro = mmc_gpio_get_ro, + .get_cd = mxs_mmc_get_cd, + .set_ios = mxs_mmc_set_ios, + .enable_sdio_irq = mxs_mmc_enable_sdio_irq, +}; + +static const struct platform_device_id mxs_ssp_ids[] = { + { + .name = "imx23-mmc", + .driver_data = IMX23_SSP, + }, { + .name = "imx28-mmc", + .driver_data = IMX28_SSP, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, mxs_ssp_ids); + +static const struct of_device_id mxs_mmc_dt_ids[] = { + { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, }, + { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids); + +static void mxs_mmc_regulator_disable(void *regulator) +{ + regulator_disable(regulator); +} + +static int mxs_mmc_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(mxs_mmc_dt_ids, &pdev->dev); + struct device_node *np = pdev->dev.of_node; + struct mxs_mmc_host *host; + struct mmc_host *mmc; + int ret = 0, irq_err; + struct regulator *reg_vmmc; + struct mxs_ssp *ssp; + + irq_err = platform_get_irq(pdev, 0); + if (irq_err < 0) + return irq_err; + + mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + ssp = &host->ssp; + ssp->dev = &pdev->dev; + ssp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ssp->base)) { + ret = PTR_ERR(ssp->base); + goto out_mmc_free; + } + + ssp->devid = (enum mxs_ssp_id) of_id->data; + + host->mmc = mmc; + host->sdio_irq_en = 0; + + reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); + if (!IS_ERR(reg_vmmc)) { + ret = regulator_enable(reg_vmmc); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable vmmc regulator: %d\n", ret); + goto out_mmc_free; + } + + ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable, + reg_vmmc); + if (ret) + goto out_mmc_free; + } + + ssp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(ssp->clk)) { + ret = PTR_ERR(ssp->clk); + goto out_mmc_free; + } + ret = clk_prepare_enable(ssp->clk); + if (ret) + goto out_mmc_free; + + ret = mxs_mmc_reset(host); + if (ret) { + dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret); + goto out_clk_disable; + } + + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { + dev_err(mmc_dev(host->mmc), + "%s: failed to request dma\n", __func__); + ret = PTR_ERR(ssp->dmach); + goto out_clk_disable; + } + + /* set mmc core parameters */ + mmc->ops = &mxs_mmc_ops; + mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23; + + host->broken_cd = of_property_read_bool(np, "broken-cd"); + + mmc->f_min = 400000; + mmc->f_max = 288000000; + + ret = 
mmc_of_parse(mmc); + if (ret) + goto out_free_dma; + + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->max_segs = 52; + mmc->max_blk_size = 1 << 0xf; + mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff; + mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff; + mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev); + + platform_set_drvdata(pdev, mmc); + + spin_lock_init(&host->lock); + + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, + dev_name(&pdev->dev), host); + if (ret) + goto out_free_dma; + + ret = mmc_add_host(mmc); + if (ret) + goto out_free_dma; + + dev_info(mmc_dev(host->mmc), "initialized\n"); + + return 0; + +out_free_dma: + dma_release_channel(ssp->dmach); +out_clk_disable: + clk_disable_unprepare(ssp->clk); +out_mmc_free: + mmc_free_host(mmc); + return ret; +} + +static int mxs_mmc_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_ssp *ssp = &host->ssp; + + mmc_remove_host(mmc); + + if (ssp->dmach) + dma_release_channel(ssp->dmach); + + clk_disable_unprepare(ssp->clk); + + mmc_free_host(mmc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mxs_mmc_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_ssp *ssp = &host->ssp; + + clk_disable_unprepare(ssp->clk); + return 0; +} + +static int mxs_mmc_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_ssp *ssp = &host->ssp; + + return clk_prepare_enable(ssp->clk); +} +#endif + +static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume); + +static struct platform_driver mxs_mmc_driver = { + .probe = mxs_mmc_probe, + .remove = mxs_mmc_remove, + .id_table = mxs_ssp_ids, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &mxs_mmc_pm_ops, + .of_match_table = mxs_mmc_dt_ids, + }, +}; + +module_platform_driver(mxs_mmc_driver); + +MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); +MODULE_AUTHOR("Freescale Semiconductor"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c new file mode 100644 index 000000000..3c4d950a4 --- /dev/null +++ b/drivers/mmc/host/of_mmc_spi.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * OpenFirmware bindings for the MMC-over-SPI driver + * + * Copyright (c) MontaVista Software, Inc. 2008. 
+ * + * Author: Anton Vorontsov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */ +#ifndef NO_IRQ +#define NO_IRQ 0 +#endif + +MODULE_LICENSE("GPL"); + +struct of_mmc_spi { + int detect_irq; + struct mmc_spi_platform_data pdata; +}; + +static struct of_mmc_spi *to_of_mmc_spi(struct device *dev) +{ + return container_of(dev->platform_data, struct of_mmc_spi, pdata); +} + +static int of_mmc_spi_init(struct device *dev, + irqreturn_t (*irqhandler)(int, void *), void *mmc) +{ + struct of_mmc_spi *oms = to_of_mmc_spi(dev); + + return request_threaded_irq(oms->detect_irq, NULL, irqhandler, + IRQF_ONESHOT, dev_name(dev), mmc); +} + +static void of_mmc_spi_exit(struct device *dev, void *mmc) +{ + struct of_mmc_spi *oms = to_of_mmc_spi(dev); + + free_irq(oms->detect_irq, mmc); +} + +struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct device_node *np = dev->of_node; + struct of_mmc_spi *oms; + + if (dev->platform_data || !np) + return dev->platform_data; + + oms = kzalloc(sizeof(*oms), GFP_KERNEL); + if (!oms) + return NULL; + + if (mmc_of_parse_voltage(np, &oms->pdata.ocr_mask) <= 0) + goto err_ocr; + + oms->detect_irq = irq_of_parse_and_map(np, 0); + if (oms->detect_irq != 0) { + oms->pdata.init = of_mmc_spi_init; + oms->pdata.exit = of_mmc_spi_exit; + } else { + oms->pdata.caps |= MMC_CAP_NEEDS_POLL; + } + + dev->platform_data = &oms->pdata; + return dev->platform_data; +err_ocr: + kfree(oms); + return NULL; +} +EXPORT_SYMBOL(mmc_spi_get_pdata); + +void mmc_spi_put_pdata(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct device_node *np = dev->of_node; + struct of_mmc_spi *oms = to_of_mmc_spi(dev); + + if (!dev->platform_data || !np) + return; + + kfree(oms); + dev->platform_data = NULL; +} +EXPORT_SYMBOL(mmc_spi_put_pdata); diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c new file mode 100644 index 000000000..eb978b75d --- /dev/null +++ b/drivers/mmc/host/omap.c @@ -0,0 +1,1516 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/mmc/host/omap.c + * + * Copyright (C) 2004 Nokia Corporation + * Written by Tuukka Tikkanen and Juha Yrjölä + * Misc hacks here and there by Tony Lindgren + * Other hacks (DMA, SD, etc) by David Brownell + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define OMAP_MMC_REG_CMD 0x00 +#define OMAP_MMC_REG_ARGL 0x01 +#define OMAP_MMC_REG_ARGH 0x02 +#define OMAP_MMC_REG_CON 0x03 +#define OMAP_MMC_REG_STAT 0x04 +#define OMAP_MMC_REG_IE 0x05 +#define OMAP_MMC_REG_CTO 0x06 +#define OMAP_MMC_REG_DTO 0x07 +#define OMAP_MMC_REG_DATA 0x08 +#define OMAP_MMC_REG_BLEN 0x09 +#define OMAP_MMC_REG_NBLK 0x0a +#define OMAP_MMC_REG_BUF 0x0b +#define OMAP_MMC_REG_SDIO 0x0d +#define OMAP_MMC_REG_REV 0x0f +#define OMAP_MMC_REG_RSP0 0x10 +#define OMAP_MMC_REG_RSP1 0x11 +#define OMAP_MMC_REG_RSP2 0x12 +#define OMAP_MMC_REG_RSP3 0x13 +#define OMAP_MMC_REG_RSP4 0x14 +#define OMAP_MMC_REG_RSP5 0x15 +#define OMAP_MMC_REG_RSP6 0x16 +#define OMAP_MMC_REG_RSP7 0x17 +#define OMAP_MMC_REG_IOSR 0x18 +#define OMAP_MMC_REG_SYSC 0x19 +#define OMAP_MMC_REG_SYSS 0x1a + +#define OMAP_MMC_STAT_CARD_ERR (1 << 14) +#define OMAP_MMC_STAT_CARD_IRQ (1 << 13) +#define OMAP_MMC_STAT_OCR_BUSY (1 << 12) +#define 
OMAP_MMC_STAT_A_EMPTY (1 << 11) +#define OMAP_MMC_STAT_A_FULL (1 << 10) +#define OMAP_MMC_STAT_CMD_CRC (1 << 8) +#define OMAP_MMC_STAT_CMD_TOUT (1 << 7) +#define OMAP_MMC_STAT_DATA_CRC (1 << 6) +#define OMAP_MMC_STAT_DATA_TOUT (1 << 5) +#define OMAP_MMC_STAT_END_BUSY (1 << 4) +#define OMAP_MMC_STAT_END_OF_DATA (1 << 3) +#define OMAP_MMC_STAT_CARD_BUSY (1 << 2) +#define OMAP_MMC_STAT_END_OF_CMD (1 << 0) + +#define mmc_omap7xx() (host->features & MMC_OMAP7XX) +#define mmc_omap15xx() (host->features & MMC_OMAP15XX) +#define mmc_omap16xx() (host->features & MMC_OMAP16XX) +#define MMC_OMAP1_MASK (MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX) +#define mmc_omap1() (host->features & MMC_OMAP1_MASK) +#define mmc_omap2() (!mmc_omap1()) + +#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) +#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) +#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) + +/* + * Command types + */ +#define OMAP_MMC_CMDTYPE_BC 0 +#define OMAP_MMC_CMDTYPE_BCR 1 +#define OMAP_MMC_CMDTYPE_AC 2 +#define OMAP_MMC_CMDTYPE_ADTC 3 + +#define DRIVER_NAME "mmci-omap" + +/* Specifies how often in millisecs to poll for card status changes + * when the cover switch is open */ +#define OMAP_MMC_COVER_POLL_DELAY 500 + +struct mmc_omap_host; + +struct mmc_omap_slot { + int id; + unsigned int vdd; + u16 saved_con; + u16 bus_mode; + u16 power_mode; + unsigned int fclk_freq; + + struct tasklet_struct cover_tasklet; + struct timer_list cover_timer; + unsigned cover_open; + + struct mmc_request *mrq; + struct mmc_omap_host *host; + struct mmc_host *mmc; + struct omap_mmc_slot_data *pdata; +}; + +struct mmc_omap_host { + int initialized; + struct mmc_request * mrq; + struct mmc_command * cmd; + struct mmc_data * data; + struct mmc_host * mmc; + struct device * dev; + unsigned char id; /* 16xx chips have 2 MMC blocks */ + struct clk * iclk; + struct clk * fclk; + struct dma_chan *dma_rx; + u32 dma_rx_burst; + struct dma_chan *dma_tx; + u32 dma_tx_burst; + void __iomem *virt_base; + unsigned int phys_base; + int irq; + unsigned char bus_mode; + unsigned int reg_shift; + + struct work_struct cmd_abort_work; + unsigned abort:1; + struct timer_list cmd_abort_timer; + + struct work_struct slot_release_work; + struct mmc_omap_slot *next_slot; + struct work_struct send_stop_work; + struct mmc_data *stop_data; + + unsigned int sg_len; + int sg_idx; + u16 * buffer; + u32 buffer_bytes_left; + u32 total_bytes_left; + + unsigned features; + unsigned brs_received:1, dma_done:1; + unsigned dma_in_use:1; + spinlock_t dma_lock; + + struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; + struct mmc_omap_slot *current_slot; + spinlock_t slot_lock; + wait_queue_head_t slot_wq; + int nr_slots; + + struct timer_list clk_timer; + spinlock_t clk_lock; /* for changing enabled state */ + unsigned int fclk_enabled:1; + struct workqueue_struct *mmc_omap_wq; + + struct omap_mmc_platform_data *pdata; +}; + + +static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) +{ + unsigned long tick_ns; + + if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) { + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq); + ndelay(8 * tick_ns); + } +} + +static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable) +{ + unsigned long flags; + + spin_lock_irqsave(&host->clk_lock, flags); + if (host->fclk_enabled != enable) { + host->fclk_enabled = enable; + if (enable) + clk_enable(host->fclk); + 
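/* clk_lock is held: only the non-sleeping clk_enable()/clk_disable() calls are safe here */ +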
else + clk_disable(host->fclk); + } + spin_unlock_irqrestore(&host->clk_lock, flags); +} + +static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed) +{ + struct mmc_omap_host *host = slot->host; + unsigned long flags; + + if (claimed) + goto no_claim; + spin_lock_irqsave(&host->slot_lock, flags); + while (host->mmc != NULL) { + spin_unlock_irqrestore(&host->slot_lock, flags); + wait_event(host->slot_wq, host->mmc == NULL); + spin_lock_irqsave(&host->slot_lock, flags); + } + host->mmc = slot->mmc; + spin_unlock_irqrestore(&host->slot_lock, flags); +no_claim: + del_timer(&host->clk_timer); + if (host->current_slot != slot || !claimed) + mmc_omap_fclk_offdelay(host->current_slot); + + if (host->current_slot != slot) { + OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00); + if (host->pdata->switch_slot != NULL) + host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id); + host->current_slot = slot; + } + + if (claimed) { + mmc_omap_fclk_enable(host, 1); + + /* Doing the dummy read here seems to work around some bug + * at least in OMAP24xx silicon where the command would not + * start after writing the CMD register. Sigh. */ + OMAP_MMC_READ(host, CON); + + OMAP_MMC_WRITE(host, CON, slot->saved_con); + } else + mmc_omap_fclk_enable(host, 0); +} + +static void mmc_omap_start_request(struct mmc_omap_host *host, + struct mmc_request *req); + +static void mmc_omap_slot_release_work(struct work_struct *work) +{ + struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, + slot_release_work); + struct mmc_omap_slot *next_slot = host->next_slot; + struct mmc_request *rq; + + host->next_slot = NULL; + mmc_omap_select_slot(next_slot, 1); + + rq = next_slot->mrq; + next_slot->mrq = NULL; + mmc_omap_start_request(host, rq); +} + +static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled) +{ + struct mmc_omap_host *host = slot->host; + unsigned long flags; + int i; + + BUG_ON(slot == NULL || host->mmc == NULL); + + if (clk_enabled) + /* Keeps clock running for at least 8 cycles on valid freq */ + mod_timer(&host->clk_timer, jiffies + HZ/10); + else { + del_timer(&host->clk_timer); + mmc_omap_fclk_offdelay(slot); + mmc_omap_fclk_enable(host, 0); + } + + spin_lock_irqsave(&host->slot_lock, flags); + /* Check for any pending requests */ + for (i = 0; i < host->nr_slots; i++) { + struct mmc_omap_slot *new_slot; + + if (host->slots[i] == NULL || host->slots[i]->mrq == NULL) + continue; + + BUG_ON(host->next_slot != NULL); + new_slot = host->slots[i]; + /* The current slot should not have a request in queue */ + BUG_ON(new_slot == host->current_slot); + + host->next_slot = new_slot; + host->mmc = new_slot->mmc; + spin_unlock_irqrestore(&host->slot_lock, flags); + queue_work(host->mmc_omap_wq, &host->slot_release_work); + return; + } + + host->mmc = NULL; + wake_up(&host->slot_wq); + spin_unlock_irqrestore(&host->slot_lock, flags); +} + +static inline +int mmc_omap_cover_is_open(struct mmc_omap_slot *slot) +{ + if (slot->pdata->get_cover_state) + return slot->pdata->get_cover_state(mmc_dev(slot->mmc), + slot->id); + return 0; +} + +static ssize_t +mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct mmc_omap_slot *slot = mmc_priv(mmc); + + return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? 
"open" : + "closed"); +} + +static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL); + +static ssize_t +mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct mmc_omap_slot *slot = mmc_priv(mmc); + + return sprintf(buf, "%s\n", slot->pdata->name); +} + +static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL); + +static void +mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd) +{ + u32 cmdreg; + u32 resptype; + u32 cmdtype; + u16 irq_mask; + + host->cmd = cmd; + + resptype = 0; + cmdtype = 0; + + /* Our hardware needs to know exact type */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + break; + case MMC_RSP_R1: + case MMC_RSP_R1B: + /* resp 1, 1b, 6, 7 */ + resptype = 1; + break; + case MMC_RSP_R2: + resptype = 2; + break; + case MMC_RSP_R3: + resptype = 3; + break; + default: + dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd)); + break; + } + + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) { + cmdtype = OMAP_MMC_CMDTYPE_ADTC; + } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) { + cmdtype = OMAP_MMC_CMDTYPE_BC; + } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) { + cmdtype = OMAP_MMC_CMDTYPE_BCR; + } else { + cmdtype = OMAP_MMC_CMDTYPE_AC; + } + + cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); + + if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN) + cmdreg |= 1 << 6; + + if (cmd->flags & MMC_RSP_BUSY) + cmdreg |= 1 << 11; + + if (host->data && !(host->data->flags & MMC_DATA_WRITE)) + cmdreg |= 1 << 15; + + mod_timer(&host->cmd_abort_timer, jiffies + HZ/2); + + OMAP_MMC_WRITE(host, CTO, 200); + OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff); + OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16); + irq_mask = OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL | + OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT | + OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT | + OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR | + OMAP_MMC_STAT_END_OF_DATA; + if (cmd->opcode == MMC_ERASE) + irq_mask &= ~OMAP_MMC_STAT_DATA_TOUT; + OMAP_MMC_WRITE(host, IE, irq_mask); + OMAP_MMC_WRITE(host, CMD, cmdreg); +} + +static void +mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, + int abort) +{ + enum dma_data_direction dma_data_dir; + struct device *dev = mmc_dev(host->mmc); + struct dma_chan *c; + + if (data->flags & MMC_DATA_WRITE) { + dma_data_dir = DMA_TO_DEVICE; + c = host->dma_tx; + } else { + dma_data_dir = DMA_FROM_DEVICE; + c = host->dma_rx; + } + if (c) { + if (data->error) { + dmaengine_terminate_all(c); + /* Claim nothing transferred on error... 
*/ + data->bytes_xfered = 0; + } + dev = c->device->dev; + } + dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); +} + +static void mmc_omap_send_stop_work(struct work_struct *work) +{ + struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, + send_stop_work); + struct mmc_omap_slot *slot = host->current_slot; + struct mmc_data *data = host->stop_data; + unsigned long tick_ns; + + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq); + ndelay(8*tick_ns); + + mmc_omap_start_command(host, data->stop); +} + +static void +mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) +{ + if (host->dma_in_use) + mmc_omap_release_dma(host, data, data->error); + + host->data = NULL; + host->sg_len = 0; + + /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing + * dozens of requests until the card finishes writing data. + * It'd be cheaper to just wait till an EOFB interrupt arrives... + */ + + if (!data->stop) { + struct mmc_host *mmc; + + host->mrq = NULL; + mmc = host->mmc; + mmc_omap_release_slot(host->current_slot, 1); + mmc_request_done(mmc, data->mrq); + return; + } + + host->stop_data = data; + queue_work(host->mmc_omap_wq, &host->send_stop_work); +} + +static void +mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops) +{ + struct mmc_omap_slot *slot = host->current_slot; + unsigned int restarts, passes, timeout; + u16 stat = 0; + + /* Sending abort takes 80 clocks. Have some extra and round up */ + timeout = DIV_ROUND_UP(120 * USEC_PER_SEC, slot->fclk_freq); + restarts = 0; + while (restarts < maxloops) { + OMAP_MMC_WRITE(host, STAT, 0xFFFF); + OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7)); + + passes = 0; + while (passes < timeout) { + stat = OMAP_MMC_READ(host, STAT); + if (stat & OMAP_MMC_STAT_END_OF_CMD) + goto out; + udelay(1); + passes++; + } + + restarts++; + } +out: + OMAP_MMC_WRITE(host, STAT, stat); +} + +static void +mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data) +{ + if (host->dma_in_use) + mmc_omap_release_dma(host, data, 1); + + host->data = NULL; + host->sg_len = 0; + + mmc_omap_send_abort(host, 10000); +} + +static void +mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) +{ + unsigned long flags; + int done; + + if (!host->dma_in_use) { + mmc_omap_xfer_done(host, data); + return; + } + done = 0; + spin_lock_irqsave(&host->dma_lock, flags); + if (host->dma_done) + done = 1; + else + host->brs_received = 1; + spin_unlock_irqrestore(&host->dma_lock, flags); + if (done) + mmc_omap_xfer_done(host, data); +} + +static void +mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) +{ + unsigned long flags; + int done; + + done = 0; + spin_lock_irqsave(&host->dma_lock, flags); + if (host->brs_received) + done = 1; + else + host->dma_done = 1; + spin_unlock_irqrestore(&host->dma_lock, flags); + if (done) + mmc_omap_xfer_done(host, data); +} + +static void +mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) +{ + host->cmd = NULL; + + del_timer(&host->cmd_abort_timer); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = + OMAP_MMC_READ(host, RSP0) | + (OMAP_MMC_READ(host, RSP1) << 16); + cmd->resp[2] = + OMAP_MMC_READ(host, RSP2) | + (OMAP_MMC_READ(host, RSP3) << 16); + cmd->resp[1] = + OMAP_MMC_READ(host, RSP4) | + (OMAP_MMC_READ(host, RSP5) << 16); + cmd->resp[0] = + OMAP_MMC_READ(host, RSP6) | + (OMAP_MMC_READ(host, RSP7) << 16); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + 
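/* + * Each RSPn register is only 16 bits wide, so the 32-bit short + * response is assembled from RSP6 (low half) and RSP7 (high half). + */ +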
cmd->resp[0] = + OMAP_MMC_READ(host, RSP6) | + (OMAP_MMC_READ(host, RSP7) << 16); + } + } + + if (host->data == NULL || cmd->error) { + struct mmc_host *mmc; + + if (host->data != NULL) + mmc_omap_abort_xfer(host, host->data); + host->mrq = NULL; + mmc = host->mmc; + mmc_omap_release_slot(host->current_slot, 1); + mmc_request_done(mmc, cmd->mrq); + } +} + +/* + * Abort stuck command. Can occur when card is removed while it is being + * read. + */ +static void mmc_omap_abort_command(struct work_struct *work) +{ + struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, + cmd_abort_work); + BUG_ON(!host->cmd); + + dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n", + host->cmd->opcode); + + if (host->cmd->error == 0) + host->cmd->error = -ETIMEDOUT; + + if (host->data == NULL) { + struct mmc_command *cmd; + struct mmc_host *mmc; + + cmd = host->cmd; + host->cmd = NULL; + mmc_omap_send_abort(host, 10000); + + host->mrq = NULL; + mmc = host->mmc; + mmc_omap_release_slot(host->current_slot, 1); + mmc_request_done(mmc, cmd->mrq); + } else + mmc_omap_cmd_done(host, host->cmd); + + host->abort = 0; + enable_irq(host->irq); +} + +static void +mmc_omap_cmd_timer(struct timer_list *t) +{ + struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer); + unsigned long flags; + + spin_lock_irqsave(&host->slot_lock, flags); + if (host->cmd != NULL && !host->abort) { + OMAP_MMC_WRITE(host, IE, 0); + disable_irq(host->irq); + host->abort = 1; + queue_work(host->mmc_omap_wq, &host->cmd_abort_work); + } + spin_unlock_irqrestore(&host->slot_lock, flags); +} + +/* PIO only */ +static void +mmc_omap_sg_to_buf(struct mmc_omap_host *host) +{ + struct scatterlist *sg; + + sg = host->data->sg + host->sg_idx; + host->buffer_bytes_left = sg->length; + host->buffer = sg_virt(sg); + if (host->buffer_bytes_left > host->total_bytes_left) + host->buffer_bytes_left = host->total_bytes_left; +} + +static void +mmc_omap_clk_timer(struct timer_list *t) +{ + struct mmc_omap_host *host = from_timer(host, t, clk_timer); + + mmc_omap_fclk_enable(host, 0); +} + +/* PIO only */ +static void +mmc_omap_xfer_data(struct mmc_omap_host *host, int write) +{ + int n, nwords; + + if (host->buffer_bytes_left == 0) { + host->sg_idx++; + BUG_ON(host->sg_idx == host->sg_len); + mmc_omap_sg_to_buf(host); + } + n = 64; + if (n > host->buffer_bytes_left) + n = host->buffer_bytes_left; + + /* Round up to handle odd number of bytes to transfer */ + nwords = DIV_ROUND_UP(n, 2); + + host->buffer_bytes_left -= n; + host->total_bytes_left -= n; + host->data->bytes_xfered += n; + + if (write) { + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); + } else { + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); + } + + host->buffer += nwords; +} + +#ifdef CONFIG_MMC_DEBUG +static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) +{ + static const char *mmc_omap_status_bits[] = { + "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", + "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" + }; + int i; + char res[64], *buf = res; + + buf += sprintf(buf, "MMC IRQ 0x%x:", status); + + for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) + if (status & (1 << i)) + buf += sprintf(buf, " %s", mmc_omap_status_bits[i]); + dev_vdbg(mmc_dev(host->mmc), "%s\n", res); +} +#else +static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) +{ +} +#endif + + +static irqreturn_t mmc_omap_irq(int irq, void *dev_id) +{ + struct mmc_omap_host * host = 
(struct mmc_omap_host *)dev_id; + u16 status; + int end_command; + int end_transfer; + int transfer_error, cmd_error; + + if (host->cmd == NULL && host->data == NULL) { + status = OMAP_MMC_READ(host, STAT); + dev_info(mmc_dev(host->slots[0]->mmc), + "Spurious IRQ 0x%04x\n", status); + if (status != 0) { + OMAP_MMC_WRITE(host, STAT, status); + OMAP_MMC_WRITE(host, IE, 0); + } + return IRQ_HANDLED; + } + + end_command = 0; + end_transfer = 0; + transfer_error = 0; + cmd_error = 0; + + while ((status = OMAP_MMC_READ(host, STAT)) != 0) { + int cmd; + + OMAP_MMC_WRITE(host, STAT, status); + if (host->cmd != NULL) + cmd = host->cmd->opcode; + else + cmd = -1; + dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", + status, cmd); + mmc_omap_report_irq(host, status); + + if (host->total_bytes_left) { + if ((status & OMAP_MMC_STAT_A_FULL) || + (status & OMAP_MMC_STAT_END_OF_DATA)) + mmc_omap_xfer_data(host, 0); + if (status & OMAP_MMC_STAT_A_EMPTY) + mmc_omap_xfer_data(host, 1); + } + + if (status & OMAP_MMC_STAT_END_OF_DATA) + end_transfer = 1; + + if (status & OMAP_MMC_STAT_DATA_TOUT) { + dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n", + cmd); + if (host->data) { + host->data->error = -ETIMEDOUT; + transfer_error = 1; + } + } + + if (status & OMAP_MMC_STAT_DATA_CRC) { + if (host->data) { + host->data->error = -EILSEQ; + dev_dbg(mmc_dev(host->mmc), + "data CRC error, bytes left %d\n", + host->total_bytes_left); + transfer_error = 1; + } else { + dev_dbg(mmc_dev(host->mmc), "data CRC error\n"); + } + } + + if (status & OMAP_MMC_STAT_CMD_TOUT) { + /* Timeouts are routine with some commands */ + if (host->cmd) { + struct mmc_omap_slot *slot = + host->current_slot; + if (slot == NULL || + !mmc_omap_cover_is_open(slot)) + dev_err(mmc_dev(host->mmc), + "command timeout (CMD%d)\n", + cmd); + host->cmd->error = -ETIMEDOUT; + end_command = 1; + cmd_error = 1; + } + } + + if (status & OMAP_MMC_STAT_CMD_CRC) { + if (host->cmd) { + dev_err(mmc_dev(host->mmc), + "command CRC error (CMD%d, arg 0x%08x)\n", + cmd, host->cmd->arg); + host->cmd->error = -EILSEQ; + end_command = 1; + cmd_error = 1; + } else + dev_err(mmc_dev(host->mmc), + "command CRC error without cmd?\n"); + } + + if (status & OMAP_MMC_STAT_CARD_ERR) { + dev_dbg(mmc_dev(host->mmc), + "ignoring card status error (CMD%d)\n", + cmd); + end_command = 1; + } + + /* + * NOTE: On 1610 the END_OF_CMD may come too early when + * starting a write + */ + if ((status & OMAP_MMC_STAT_END_OF_CMD) && + (!(status & OMAP_MMC_STAT_A_EMPTY))) { + end_command = 1; + } + } + + if (cmd_error && host->data) { + del_timer(&host->cmd_abort_timer); + host->abort = 1; + OMAP_MMC_WRITE(host, IE, 0); + disable_irq_nosync(host->irq); + queue_work(host->mmc_omap_wq, &host->cmd_abort_work); + return IRQ_HANDLED; + } + + if (end_command && host->cmd) + mmc_omap_cmd_done(host, host->cmd); + if (host->data != NULL) { + if (transfer_error) + mmc_omap_xfer_done(host, host->data); + else if (end_transfer) + mmc_omap_end_of_data(host, host->data); + } + + return IRQ_HANDLED; +} + +void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed) +{ + int cover_open; + struct mmc_omap_host *host = dev_get_drvdata(dev); + struct mmc_omap_slot *slot = host->slots[num]; + + BUG_ON(num >= host->nr_slots); + + /* Other subsystems can call in here before we're initialised. 
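The check below quietly ignores such early calls. 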
*/ + if (host->nr_slots == 0 || !host->slots[num]) + return; + + cover_open = mmc_omap_cover_is_open(slot); + if (cover_open != slot->cover_open) { + slot->cover_open = cover_open; + sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch"); + } + + tasklet_hi_schedule(&slot->cover_tasklet); +} + +static void mmc_omap_cover_timer(struct timer_list *t) +{ + struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer); + tasklet_schedule(&slot->cover_tasklet); +} + +static void mmc_omap_cover_handler(unsigned long param) +{ + struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param; + int cover_open = mmc_omap_cover_is_open(slot); + + mmc_detect_change(slot->mmc, 0); + if (!cover_open) + return; + + /* + * If no card is inserted, we postpone polling until + * the cover has been closed. + */ + if (slot->mmc->card == NULL) + return; + + mod_timer(&slot->cover_timer, + jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); +} + +static void mmc_omap_dma_callback(void *priv) +{ + struct mmc_omap_host *host = priv; + struct mmc_data *data = host->data; + + /* If we got to the end of DMA, assume everything went well */ + data->bytes_xfered += data->blocks * data->blksz; + + mmc_omap_dma_done(host, data); +} + +static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) +{ + u16 reg; + + reg = OMAP_MMC_READ(host, SDIO); + reg &= ~(1 << 5); + OMAP_MMC_WRITE(host, SDIO, reg); + /* Set maximum timeout */ + OMAP_MMC_WRITE(host, CTO, 0xfd); +} + +static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req) +{ + unsigned int timeout, cycle_ns; + u16 reg; + + cycle_ns = 1000000000 / host->current_slot->fclk_freq; + timeout = req->data->timeout_ns / cycle_ns; + timeout += req->data->timeout_clks; + + /* Check if we need to use timeout multiplier register */ + reg = OMAP_MMC_READ(host, SDIO); + if (timeout > 0xffff) { + reg |= (1 << 5); + timeout /= 1024; + } else + reg &= ~(1 << 5); + OMAP_MMC_WRITE(host, SDIO, reg); + OMAP_MMC_WRITE(host, DTO, timeout); +} + +static void +mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) +{ + struct mmc_data *data = req->data; + int i, use_dma = 1, block_size; + struct scatterlist *sg; + unsigned sg_len; + + host->data = data; + if (data == NULL) { + OMAP_MMC_WRITE(host, BLEN, 0); + OMAP_MMC_WRITE(host, NBLK, 0); + OMAP_MMC_WRITE(host, BUF, 0); + host->dma_in_use = 0; + set_cmd_timeout(host, req); + return; + } + + block_size = data->blksz; + + OMAP_MMC_WRITE(host, NBLK, data->blocks - 1); + OMAP_MMC_WRITE(host, BLEN, block_size - 1); + set_data_timeout(host, req); + + /* cope with calling layer confusion; it issues "single + * block" writes using multi-block scatterlists. + */ + sg_len = (data->blocks == 1) ? 1 : data->sg_len; + + /* Only do DMA for entire blocks */ + for_each_sg(data->sg, sg, sg_len, i) { + if ((sg->length % block_size) != 0) { + use_dma = 0; + break; + } + } + + host->sg_idx = 0; + if (use_dma) { + enum dma_data_direction dma_data_dir; + struct dma_async_tx_descriptor *tx; + struct dma_chan *c; + u32 burst, *bp; + u16 buf; + + /* + * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx + * and 24xx. Use 16 or 32 word frames when the + * blocksize is at least that large. Blocksize is + * usually 512 bytes; but not for some SD reads. + */ + burst = mmc_omap15xx() ? 
32 : 64; + if (burst > data->blksz) + burst = data->blksz; + + burst >>= 1; + + if (data->flags & MMC_DATA_WRITE) { + c = host->dma_tx; + bp = &host->dma_tx_burst; + buf = 0x0f80 | (burst - 1) << 0; + dma_data_dir = DMA_TO_DEVICE; + } else { + c = host->dma_rx; + bp = &host->dma_rx_burst; + buf = 0x800f | (burst - 1) << 8; + dma_data_dir = DMA_FROM_DEVICE; + } + + if (!c) + goto use_pio; + + /* Only reconfigure if we have a different burst size */ + if (*bp != burst) { + struct dma_slave_config cfg = { + .src_addr = host->phys_base + + OMAP_MMC_REG(host, DATA), + .dst_addr = host->phys_base + + OMAP_MMC_REG(host, DATA), + .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, + .src_maxburst = burst, + .dst_maxburst = burst, + }; + + if (dmaengine_slave_config(c, &cfg)) + goto use_pio; + + *bp = burst; + } + + host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, + dma_data_dir); + if (host->sg_len == 0) + goto use_pio; + + tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto use_pio; + + OMAP_MMC_WRITE(host, BUF, buf); + + tx->callback = mmc_omap_dma_callback; + tx->callback_param = host; + dmaengine_submit(tx); + host->brs_received = 0; + host->dma_done = 0; + host->dma_in_use = 1; + return; + } + use_pio: + + /* Revert to PIO? */ + OMAP_MMC_WRITE(host, BUF, 0x1f1f); + host->total_bytes_left = data->blocks * block_size; + host->sg_len = sg_len; + mmc_omap_sg_to_buf(host); + host->dma_in_use = 0; +} + +static void mmc_omap_start_request(struct mmc_omap_host *host, + struct mmc_request *req) +{ + BUG_ON(host->mrq != NULL); + + host->mrq = req; + + /* only touch fifo AFTER the controller readies it */ + mmc_omap_prepare_data(host, req); + mmc_omap_start_command(host, req->cmd); + if (host->dma_in_use) { + struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 
+ host->dma_tx : host->dma_rx; + + dma_async_issue_pending(c); + } +} + +static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct mmc_omap_slot *slot = mmc_priv(mmc); + struct mmc_omap_host *host = slot->host; + unsigned long flags; + + spin_lock_irqsave(&host->slot_lock, flags); + if (host->mmc != NULL) { + BUG_ON(slot->mrq != NULL); + slot->mrq = req; + spin_unlock_irqrestore(&host->slot_lock, flags); + return; + } else + host->mmc = mmc; + spin_unlock_irqrestore(&host->slot_lock, flags); + mmc_omap_select_slot(slot, 1); + mmc_omap_start_request(host, req); +} + +static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on, + int vdd) +{ + struct mmc_omap_host *host; + + host = slot->host; + + if (slot->pdata->set_power != NULL) + slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on, + vdd); + if (mmc_omap2()) { + u16 w; + + if (power_on) { + w = OMAP_MMC_READ(host, CON); + OMAP_MMC_WRITE(host, CON, w | (1 << 11)); + } else { + w = OMAP_MMC_READ(host, CON); + OMAP_MMC_WRITE(host, CON, w & ~(1 << 11)); + } + } +} + +static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mmc_omap_slot *slot = mmc_priv(mmc); + struct mmc_omap_host *host = slot->host; + int func_clk_rate = clk_get_rate(host->fclk); + int dsor; + + if (ios->clock == 0) + return 0; + + dsor = func_clk_rate / ios->clock; + if (dsor < 1) + dsor = 1; + + if (func_clk_rate / dsor > ios->clock) + dsor++; + + if (dsor > 250) + dsor = 250; + + slot->fclk_freq = func_clk_rate / dsor; + + if (ios->bus_width == MMC_BUS_WIDTH_4) + dsor |= 1 << 15; + + return dsor; +} + +static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mmc_omap_slot *slot = mmc_priv(mmc); + struct mmc_omap_host *host = slot->host; + int i, dsor; + int clk_enabled, init_stream; + + mmc_omap_select_slot(slot, 0); + + dsor = mmc_omap_calc_divisor(mmc, ios); + + if (ios->vdd != slot->vdd) + slot->vdd = ios->vdd; + + clk_enabled = 0; + init_stream = 0; + switch (ios->power_mode) { + case MMC_POWER_OFF: + mmc_omap_set_power(slot, 0, ios->vdd); + break; + case MMC_POWER_UP: + /* Cannot touch dsor yet, just power up MMC */ + mmc_omap_set_power(slot, 1, ios->vdd); + slot->power_mode = ios->power_mode; + goto exit; + case MMC_POWER_ON: + mmc_omap_fclk_enable(host, 1); + clk_enabled = 1; + dsor |= 1 << 11; + if (slot->power_mode != MMC_POWER_ON) + init_stream = 1; + break; + } + slot->power_mode = ios->power_mode; + + if (slot->bus_mode != ios->bus_mode) { + if (slot->pdata->set_bus_mode != NULL) + slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id, + ios->bus_mode); + slot->bus_mode = ios->bus_mode; + } + + /* On insanely high arm_per frequencies something sometimes + * goes somehow out of sync, and the POW bit is not being set, + * which results in the while loop below getting stuck. + * Writing to the CON register twice seems to do the trick. 
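The unconditional double write below is harmless when the first write already latches. 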
*/ + for (i = 0; i < 2; i++) + OMAP_MMC_WRITE(host, CON, dsor); + slot->saved_con = dsor; + if (init_stream) { + /* worst case at 400kHz, 80 cycles makes 200 microsecs */ + int usecs = 250; + + /* Send clock cycles, poll completion */ + OMAP_MMC_WRITE(host, IE, 0); + OMAP_MMC_WRITE(host, STAT, 0xffff); + OMAP_MMC_WRITE(host, CMD, 1 << 7); + while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) { + udelay(1); + usecs--; + } + OMAP_MMC_WRITE(host, STAT, 1); + } + +exit: + mmc_omap_release_slot(slot, clk_enabled); +} + +static const struct mmc_host_ops mmc_omap_ops = { + .request = mmc_omap_request, + .set_ios = mmc_omap_set_ios, +}; + +static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) +{ + struct mmc_omap_slot *slot = NULL; + struct mmc_host *mmc; + int r; + + mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev); + if (mmc == NULL) + return -ENOMEM; + + slot = mmc_priv(mmc); + slot->host = host; + slot->mmc = mmc; + slot->id = id; + slot->power_mode = MMC_POWER_UNDEFINED; + slot->pdata = &host->pdata->slots[id]; + + host->slots[id] = slot; + + mmc->caps = 0; + if (host->pdata->slots[id].wires >= 4) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->ops = &mmc_omap_ops; + mmc->f_min = 400000; + + if (mmc_omap2()) + mmc->f_max = 48000000; + else + mmc->f_max = 24000000; + if (host->pdata->max_freq) + mmc->f_max = min(host->pdata->max_freq, mmc->f_max); + mmc->ocr_avail = slot->pdata->ocr_mask; + + /* Use scatterlist DMA to reduce per-transfer costs. + * NOTE max_seg_size assumption that small blocks aren't + * normally used (except e.g. for reading SD registers). + */ + mmc->max_segs = 32; + mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */ + mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */ + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + + if (slot->pdata->get_cover_state != NULL) { + timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0); + tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, + (unsigned long)slot); + } + + r = mmc_add_host(mmc); + if (r < 0) + goto err_remove_host; + + if (slot->pdata->name != NULL) { + r = device_create_file(&mmc->class_dev, + &dev_attr_slot_name); + if (r < 0) + goto err_remove_host; + } + + if (slot->pdata->get_cover_state != NULL) { + r = device_create_file(&mmc->class_dev, + &dev_attr_cover_switch); + if (r < 0) + goto err_remove_slot_name; + tasklet_schedule(&slot->cover_tasklet); + } + + return 0; + +err_remove_slot_name: + if (slot->pdata->name != NULL) + device_remove_file(&mmc->class_dev, &dev_attr_slot_name); +err_remove_host: + mmc_remove_host(mmc); + mmc_free_host(mmc); + return r; +} + +static void mmc_omap_remove_slot(struct mmc_omap_slot *slot) +{ + struct mmc_host *mmc = slot->mmc; + + if (slot->pdata->name != NULL) + device_remove_file(&mmc->class_dev, &dev_attr_slot_name); + if (slot->pdata->get_cover_state != NULL) + device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); + + tasklet_kill(&slot->cover_tasklet); + del_timer_sync(&slot->cover_timer); + flush_workqueue(slot->host->mmc_omap_wq); + + mmc_remove_host(mmc); + mmc_free_host(mmc); +} + +static int mmc_omap_probe(struct platform_device *pdev) +{ + struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; + struct mmc_omap_host *host = NULL; + struct resource *res; + int i, ret = 0; + int irq; + + if (pdata == NULL) { + dev_err(&pdev->dev, "platform data missing\n"); + return -ENXIO; + } + if (pdata->nr_slots == 0) { + dev_err(&pdev->dev, "no slots\n"); + return -EPROBE_DEFER; + 
} + + host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host), + GFP_KERNEL); + if (host == NULL) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->virt_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->virt_base)) + return PTR_ERR(host->virt_base); + + INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work); + INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); + + INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command); + timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0); + + spin_lock_init(&host->clk_lock); + timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0); + + spin_lock_init(&host->dma_lock); + spin_lock_init(&host->slot_lock); + init_waitqueue_head(&host->slot_wq); + + host->pdata = pdata; + host->features = host->pdata->slots[0].features; + host->dev = &pdev->dev; + platform_set_drvdata(pdev, host); + + host->id = pdev->id; + host->irq = irq; + host->phys_base = res->start; + host->iclk = clk_get(&pdev->dev, "ick"); + if (IS_ERR(host->iclk)) + return PTR_ERR(host->iclk); + clk_enable(host->iclk); + + host->fclk = clk_get(&pdev->dev, "fck"); + if (IS_ERR(host->fclk)) { + ret = PTR_ERR(host->fclk); + goto err_free_iclk; + } + + host->dma_tx_burst = -1; + host->dma_rx_burst = -1; + + host->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(host->dma_tx)) { + ret = PTR_ERR(host->dma_tx); + if (ret == -EPROBE_DEFER) { + clk_put(host->fclk); + goto err_free_iclk; + } + + host->dma_tx = NULL; + dev_warn(host->dev, "TX DMA channel request failed\n"); + } + + host->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(host->dma_rx)) { + ret = PTR_ERR(host->dma_rx); + if (ret == -EPROBE_DEFER) { + if (host->dma_tx) + dma_release_channel(host->dma_tx); + clk_put(host->fclk); + goto err_free_iclk; + } + + host->dma_rx = NULL; + dev_warn(host->dev, "RX DMA channel request failed\n"); + } + + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); + if (ret) + goto err_free_dma; + + if (pdata->init != NULL) { + ret = pdata->init(&pdev->dev); + if (ret < 0) + goto err_free_irq; + } + + host->nr_slots = pdata->nr_slots; + host->reg_shift = (mmc_omap7xx() ? 
1 : 2); + + host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); + if (!host->mmc_omap_wq) { + ret = -ENOMEM; + goto err_plat_cleanup; + } + + for (i = 0; i < pdata->nr_slots; i++) { + ret = mmc_omap_new_slot(host, i); + if (ret < 0) { + while (--i >= 0) + mmc_omap_remove_slot(host->slots[i]); + + goto err_destroy_wq; + } + } + + return 0; + +err_destroy_wq: + destroy_workqueue(host->mmc_omap_wq); +err_plat_cleanup: + if (pdata->cleanup) + pdata->cleanup(&pdev->dev); +err_free_irq: + free_irq(host->irq, host); +err_free_dma: + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); + clk_put(host->fclk); +err_free_iclk: + clk_disable(host->iclk); + clk_put(host->iclk); + return ret; +} + +static int mmc_omap_remove(struct platform_device *pdev) +{ + struct mmc_omap_host *host = platform_get_drvdata(pdev); + int i; + + BUG_ON(host == NULL); + + for (i = 0; i < host->nr_slots; i++) + mmc_omap_remove_slot(host->slots[i]); + + if (host->pdata->cleanup) + host->pdata->cleanup(&pdev->dev); + + mmc_omap_fclk_enable(host, 0); + free_irq(host->irq, host); + clk_put(host->fclk); + clk_disable(host->iclk); + clk_put(host->iclk); + + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); + + destroy_workqueue(host->mmc_omap_wq); + + return 0; +} + +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id mmc_omap_match[] = { + { .compatible = "ti,omap2420-mmc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, mmc_omap_match); +#endif + +static struct platform_driver mmc_omap_driver = { + .probe = mmc_omap_probe, + .remove = mmc_omap_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(mmc_omap_match), + }, +}; + +module_platform_driver(mmc_omap_driver); +MODULE_DESCRIPTION("OMAP Multimedia Card driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Juha Yrjölä"); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c new file mode 100644 index 000000000..098075449 --- /dev/null +++ b/drivers/mmc/host/omap_hsmmc.c @@ -0,0 +1,2182 @@ +/* + * drivers/mmc/host/omap_hsmmc.c + * + * Driver for OMAP2430/3430 MMC controller. + * + * Copyright (C) 2007 Texas Instruments. + * + * Authors: + * Syed Mohammed Khasim + * Madhusudhan + * Mohit Jalori + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* OMAP HSMMC Host Controller Registers */ +#define OMAP_HSMMC_SYSSTATUS 0x0014 +#define OMAP_HSMMC_CON 0x002C +#define OMAP_HSMMC_SDMASA 0x0100 +#define OMAP_HSMMC_BLK 0x0104 +#define OMAP_HSMMC_ARG 0x0108 +#define OMAP_HSMMC_CMD 0x010C +#define OMAP_HSMMC_RSP10 0x0110 +#define OMAP_HSMMC_RSP32 0x0114 +#define OMAP_HSMMC_RSP54 0x0118 +#define OMAP_HSMMC_RSP76 0x011C +#define OMAP_HSMMC_DATA 0x0120 +#define OMAP_HSMMC_PSTATE 0x0124 +#define OMAP_HSMMC_HCTL 0x0128 +#define OMAP_HSMMC_SYSCTL 0x012C +#define OMAP_HSMMC_STAT 0x0130 +#define OMAP_HSMMC_IE 0x0134 +#define OMAP_HSMMC_ISE 0x0138 +#define OMAP_HSMMC_AC12 0x013C +#define OMAP_HSMMC_CAPA 0x0140 + +#define VS18 (1 << 26) +#define VS30 (1 << 25) +#define HSS (1 << 21) +#define SDVS18 (0x5 << 9) +#define SDVS30 (0x6 << 9) +#define SDVS33 (0x7 << 9) +#define SDVS_MASK 0x00000E00 +#define SDVSCLR 0xFFFFF1FF +#define SDVSDET 0x00000400 +#define AUTOIDLE 0x1 +#define SDBP (1 << 8) +#define DTO 0xe +#define ICE 0x1 +#define ICS 0x2 +#define CEN (1 << 2) +#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */ +#define CLKD_MASK 0x0000FFC0 +#define CLKD_SHIFT 6 +#define DTO_MASK 0x000F0000 +#define DTO_SHIFT 16 +#define INIT_STREAM (1 << 1) +#define ACEN_ACMD23 (2 << 2) +#define DP_SELECT (1 << 21) +#define DDIR (1 << 4) +#define DMAE 0x1 +#define MSBS (1 << 5) +#define BCE (1 << 1) +#define FOUR_BIT (1 << 1) +#define HSPE (1 << 2) +#define IWE (1 << 24) +#define DDR (1 << 19) +#define CLKEXTFREE (1 << 16) +#define CTPL (1 << 11) +#define DW8 (1 << 5) +#define OD 0x1 +#define STAT_CLEAR 0xFFFFFFFF +#define INIT_STREAM_CMD 0x00000000 +#define DUAL_VOLT_OCR_BIT 7 +#define SRC (1 << 25) +#define SRD (1 << 26) +#define SOFTRESET (1 << 1) + +/* PSTATE */ +#define DLEV_DAT(x) (1 << (20 + (x))) + +/* Interrupt masks for IE and ISE register */ +#define CC_EN (1 << 0) +#define TC_EN (1 << 1) +#define BWR_EN (1 << 4) +#define BRR_EN (1 << 5) +#define CIRQ_EN (1 << 8) +#define ERR_EN (1 << 15) +#define CTO_EN (1 << 16) +#define CCRC_EN (1 << 17) +#define CEB_EN (1 << 18) +#define CIE_EN (1 << 19) +#define DTO_EN (1 << 20) +#define DCRC_EN (1 << 21) +#define DEB_EN (1 << 22) +#define ACE_EN (1 << 24) +#define CERR_EN (1 << 28) +#define BADA_EN (1 << 29) + +#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\ + DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \ + BRR_EN | BWR_EN | TC_EN | CC_EN) + +#define CNI (1 << 7) +#define ACIE (1 << 4) +#define ACEB (1 << 3) +#define ACCE (1 << 2) +#define ACTO (1 << 1) +#define ACNE (1 << 0) + +#define MMC_AUTOSUSPEND_DELAY 100 +#define MMC_TIMEOUT_MS 20 /* 20 mSec */ +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ +#define OMAP_MMC_MIN_CLOCK 400000 +#define OMAP_MMC_MAX_CLOCK 52000000 +#define DRIVER_NAME "omap_hsmmc" + +/* + * One controller can have multiple slots, like on some omap boards using + * omap.c controller driver. Luckily this is not currently done on any known + * omap_hsmmc.c device. 
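+ * With a single slot per controller, the mmc_pdata() accessor below + * reduces to a plain pointer dereference. 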
+ */ +#define mmc_pdata(host) host->pdata + +/* + * MMC Host controller read/write API's + */ +#define OMAP_HSMMC_READ(base, reg) \ + __raw_readl((base) + OMAP_HSMMC_##reg) + +#define OMAP_HSMMC_WRITE(base, reg, val) \ + __raw_writel((val), (base) + OMAP_HSMMC_##reg) + +struct omap_hsmmc_next { + unsigned int dma_len; + s32 cookie; +}; + +struct omap_hsmmc_host { + struct device *dev; + struct mmc_host *mmc; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + struct clk *fclk; + struct clk *dbclk; + struct regulator *pbias; + bool pbias_enabled; + void __iomem *base; + int vqmmc_enabled; + resource_size_t mapbase; + spinlock_t irq_lock; /* Prevent races with irq handler */ + unsigned int dma_len; + unsigned int dma_sg_idx; + unsigned char bus_mode; + unsigned char power_mode; + int suspended; + u32 con; + u32 hctl; + u32 sysctl; + u32 capa; + int irq; + int wake_irq; + int use_dma, dma_ch; + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; + int response_busy; + int context_loss; + int reqs_blocked; + int req_in_progress; + unsigned long clk_rate; + unsigned int flags; +#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */ +#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */ + struct omap_hsmmc_next next_data; + struct omap_hsmmc_platform_data *pdata; +}; + +struct omap_mmc_of_data { + u32 reg_offset; + u8 controller_flags; +}; + +static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host); + +static int omap_hsmmc_enable_supply(struct mmc_host *mmc) +{ + int ret; + struct omap_hsmmc_host *host = mmc_priv(mmc); + struct mmc_ios *ios = &mmc->ios; + + if (!IS_ERR(mmc->supply.vmmc)) { + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + if (ret) + return ret; + } + + /* Enable interface voltage rail, if needed */ + if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret) { + dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n"); + goto err_vqmmc; + } + host->vqmmc_enabled = 1; + } + + return 0; + +err_vqmmc: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + return ret; +} + +static int omap_hsmmc_disable_supply(struct mmc_host *mmc) +{ + int ret; + int status; + struct omap_hsmmc_host *host = mmc_priv(mmc); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { + ret = regulator_disable(mmc->supply.vqmmc); + if (ret) { + dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n"); + return ret; + } + host->vqmmc_enabled = 0; + } + + if (!IS_ERR(mmc->supply.vmmc)) { + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + if (ret) + goto err_set_ocr; + } + + return 0; + +err_set_ocr: + if (!IS_ERR(mmc->supply.vqmmc)) { + status = regulator_enable(mmc->supply.vqmmc); + if (status) + dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n"); + } + + return ret; +} + +static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on) +{ + int ret; + + if (IS_ERR(host->pbias)) + return 0; + + if (power_on) { + if (host->pbias_enabled == 0) { + ret = regulator_enable(host->pbias); + if (ret) { + dev_err(host->dev, "pbias reg enable fail\n"); + return ret; + } + host->pbias_enabled = 1; + } + } else { + if (host->pbias_enabled == 1) { + ret = regulator_disable(host->pbias); + if (ret) { + dev_err(host->dev, "pbias reg disable fail\n"); + return ret; + } + host->pbias_enabled = 0; + } + } + + return 0; +} + +static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on) +{ + struct mmc_host *mmc = host->mmc; + int ret 
= 0; + + /* + * If we don't see a Vcc regulator, assume it's a fixed + * voltage always-on regulator. + */ + if (IS_ERR(mmc->supply.vmmc)) + return 0; + + ret = omap_hsmmc_set_pbias(host, false); + if (ret) + return ret; + + /* + * Assume Vcc regulator is used only to power the card ... OMAP + * VDDS is used to power the pins, optionally with a transceiver to + * support cards using voltages other than VDDS (1.8V nominal). When a + * transceiver is used, DAT3..7 are muxed as transceiver control pins. + * + * In some cases this regulator won't support enable/disable; + * e.g. it's a fixed rail for a WLAN chip. + * + * In other cases vcc_aux switches interface power. Example, for + * eMMC cards it represents VccQ. Sometimes transceivers or SDIO + * chips/cards need an interface voltage rail too. + */ + if (power_on) { + ret = omap_hsmmc_enable_supply(mmc); + if (ret) + return ret; + + ret = omap_hsmmc_set_pbias(host, true); + if (ret) + goto err_set_voltage; + } else { + ret = omap_hsmmc_disable_supply(mmc); + if (ret) + return ret; + } + + return 0; + +err_set_voltage: + omap_hsmmc_disable_supply(mmc); + + return ret; +} + +static int omap_hsmmc_disable_boot_regulator(struct regulator *reg) +{ + int ret; + + if (IS_ERR(reg)) + return 0; + + if (regulator_is_enabled(reg)) { + ret = regulator_enable(reg); + if (ret) + return ret; + + ret = regulator_disable(reg); + if (ret) + return ret; + } + + return 0; +} + +static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + int ret; + + /* + * disable regulators enabled during boot and get the usecount + * right so that regulators can be enabled/disabled by checking + * the return value of regulator_is_enabled + */ + ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc); + if (ret) { + dev_err(host->dev, "fail to disable boot enabled vmmc reg\n"); + return ret; + } + + ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc); + if (ret) { + dev_err(host->dev, + "fail to disable boot enabled vmmc_aux reg\n"); + return ret; + } + + ret = omap_hsmmc_disable_boot_regulator(host->pbias); + if (ret) { + dev_err(host->dev, + "failed to disable boot enabled pbias reg\n"); + return ret; + } + + return 0; +} + +static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) +{ + int ret; + struct mmc_host *mmc = host->mmc; + + + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + + /* Allow an aux regulator */ + if (IS_ERR(mmc->supply.vqmmc)) { + mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, + "vmmc_aux"); + if (IS_ERR(mmc->supply.vqmmc)) { + ret = PTR_ERR(mmc->supply.vqmmc); + if ((ret != -ENODEV) && host->dev->of_node) + return ret; + dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", + PTR_ERR(mmc->supply.vqmmc)); + } + } + + host->pbias = devm_regulator_get_optional(host->dev, "pbias"); + if (IS_ERR(host->pbias)) { + ret = PTR_ERR(host->pbias); + if ((ret != -ENODEV) && host->dev->of_node) { + dev_err(host->dev, + "SD card detect fail? 
enable CONFIG_REGULATOR_PBIAS\n"); + return ret; + } + dev_dbg(host->dev, "unable to get pbias regulator %ld\n", + PTR_ERR(host->pbias)); + } + + /* For eMMC do not power off when not in sleep state */ + if (mmc_pdata(host)->no_regulator_off_init) + return 0; + + ret = omap_hsmmc_disable_boot_regulators(host); + if (ret) + return ret; + + return 0; +} + +/* + * Start clock to the card + */ +static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host) +{ + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); +} + +/* + * Stop clock to the card + */ +static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) +{ + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); + if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) + dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); +} + +static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, + struct mmc_command *cmd) +{ + u32 irq_mask = INT_EN_MASK; + unsigned long flags; + + if (host->use_dma) + irq_mask &= ~(BRR_EN | BWR_EN); + + /* Disable timeout for erases */ + if (cmd->opcode == MMC_ERASE) + irq_mask &= ~DTO_EN; + + spin_lock_irqsave(&host->irq_lock, flags); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + + /* latch pending CIRQ, but don't signal MMC core */ + if (host->flags & HSMMC_SDIO_IRQ_ENABLED) + irq_mask |= CIRQ_EN; + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); + spin_unlock_irqrestore(&host->irq_lock, flags); +} + +static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) +{ + u32 irq_mask = 0; + unsigned long flags; + + spin_lock_irqsave(&host->irq_lock, flags); + /* no transfer running but need to keep cirq if enabled */ + if (host->flags & HSMMC_SDIO_IRQ_ENABLED) + irq_mask |= CIRQ_EN; + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + spin_unlock_irqrestore(&host->irq_lock, flags); +} + +/* Calculate divisor for the given clock frequency */ +static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) +{ + u16 dsor = 0; + + if (ios->clock) { + dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); + if (dsor > CLKD_MAX) + dsor = CLKD_MAX; + } + + return dsor; +} + +static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) +{ + struct mmc_ios *ios = &host->mmc->ios; + unsigned long regval; + unsigned long timeout; + unsigned long clkdiv; + + dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); + + omap_hsmmc_stop_clock(host); + + regval = OMAP_HSMMC_READ(host->base, SYSCTL); + regval = regval & ~(CLKD_MASK | DTO_MASK); + clkdiv = calc_divisor(host, ios); + regval = regval | (clkdiv << 6) | (DTO << 16); + OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); + + /* Wait till the ICS bit is set */ + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS + && time_before(jiffies, timeout)) + cpu_relax(); + + /* + * Enable High-Speed Support + * Pre-Requisites + * - Controller should support High-Speed-Enable Bit + * - Controller should not be using DDR Mode + * - Controller should advertise that it supports High Speed + * in capabilities register + * - MMC/SD clock coming out of controller > 25MHz + */ + if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) && + (ios->timing != MMC_TIMING_MMC_DDR52) && + (ios->timing != 
MMC_TIMING_UHS_DDR50) && + ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { + regval = OMAP_HSMMC_READ(host->base, HCTL); + if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) + regval |= HSPE; + else + regval &= ~HSPE; + + OMAP_HSMMC_WRITE(host->base, HCTL, regval); + } + + omap_hsmmc_start_clock(host); +} + +static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host) +{ + struct mmc_ios *ios = &host->mmc->ios; + u32 con; + + con = OMAP_HSMMC_READ(host->base, CON); + if (ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_DDR50) + con |= DDR; /* configure in DDR mode */ + else + con &= ~DDR; + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + OMAP_HSMMC_WRITE(host->base, CON, con | DW8); + break; + case MMC_BUS_WIDTH_4: + OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); + break; + case MMC_BUS_WIDTH_1: + OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); + break; + } +} + +static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) +{ + struct mmc_ios *ios = &host->mmc->ios; + u32 con; + + con = OMAP_HSMMC_READ(host->base, CON); + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) + OMAP_HSMMC_WRITE(host->base, CON, con | OD); + else + OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); +} + +#ifdef CONFIG_PM + +/* + * Restore the MMC host context, if it was lost as result of a + * power state change. + */ +static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) +{ + struct mmc_ios *ios = &host->mmc->ios; + u32 hctl, capa; + unsigned long timeout; + + if (host->con == OMAP_HSMMC_READ(host->base, CON) && + host->hctl == OMAP_HSMMC_READ(host->base, HCTL) && + host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) && + host->capa == OMAP_HSMMC_READ(host->base, CAPA)) + return 0; + + host->context_loss++; + + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { + if (host->power_mode != MMC_POWER_OFF && + (1 << ios->vdd) <= MMC_VDD_23_24) + hctl = SDVS18; + else + hctl = SDVS30; + capa = VS30 | VS18; + } else { + hctl = SDVS18; + capa = VS18; + } + + if (host->mmc->caps & MMC_CAP_SDIO_IRQ) + hctl |= IWE; + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | hctl); + + OMAP_HSMMC_WRITE(host->base, CAPA, + OMAP_HSMMC_READ(host->base, CAPA) | capa); + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | SDBP); + + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP + && time_before(jiffies, timeout)) + ; + + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + + /* Do not initialize card-specific things if the power is off */ + if (host->power_mode == MMC_POWER_OFF) + goto out; + + omap_hsmmc_set_bus_width(host); + + omap_hsmmc_set_clock(host); + + omap_hsmmc_set_bus_mode(host); + +out: + dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n", + host->context_loss); + return 0; +} + +/* + * Save the MMC host context (store the number of power state changes so far). 
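For illustration, a minimal userspace model of the context-loss test that omap_hsmmc_context_restore() performs above: the controller context counts as lost as soon as any of the four shadowed registers (CON, HCTL, SYSCTL, CAPA) no longer matches its saved copy, and only then is the bus width, clock and bus mode reprogrammed. The register values below are invented examples, not real hardware state.

    /* Illustrative model only, not kernel code. The four fields mirror the
     * registers shadowed by omap_hsmmc_context_save(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ctx { uint32_t con, hctl, sysctl, capa; };

    /* Returns true when the powered-down controller lost its register state
     * and a full restore is required. */
    static bool context_lost(const struct ctx *saved, const struct ctx *now)
    {
        return saved->con != now->con || saved->hctl != now->hctl ||
               saved->sysctl != now->sysctl || saved->capa != now->capa;
    }

    int main(void)
    {
        struct ctx saved = { 0x00000020, 0x00000d00, 0x0000a007, 0x06e00080 };
        struct ctx now = saved;

        now.sysctl = 0; /* e.g. SYSCTL cleared by an off-mode transition */
        printf("context lost: %s\n", context_lost(&saved, &now) ? "yes" : "no");
        return 0;
    }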
+ */ +static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) +{ + host->con = OMAP_HSMMC_READ(host->base, CON); + host->hctl = OMAP_HSMMC_READ(host->base, HCTL); + host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL); + host->capa = OMAP_HSMMC_READ(host->base, CAPA); +} + +#else + +static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) +{ + return 0; +} + +static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) +{ +} + +#endif + +/* + * Send init stream sequence to card + * before sending IDLE command + */ +static void send_init_stream(struct omap_hsmmc_host *host) +{ + int reg = 0; + unsigned long timeout; + + disable_irq(host->irq); + + OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + OMAP_HSMMC_WRITE(host->base, CON, + OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); + OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); + + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((reg != CC_EN) && time_before(jiffies, timeout)) + reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN; + + OMAP_HSMMC_WRITE(host->base, CON, + OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); + + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_READ(host->base, STAT); + + enable_irq(host->irq); +} + +static ssize_t +omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct omap_hsmmc_host *host = mmc_priv(mmc); + + return sprintf(buf, "%s\n", mmc_pdata(host)->name); +} + +static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL); + +/* + * Configure the response type and send the cmd. + */ +static void +omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, + struct mmc_data *data) +{ + int cmdreg = 0, resptype = 0, cmdtype = 0; + + dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", + mmc_hostname(host->mmc), cmd->opcode, cmd->arg); + host->cmd = cmd; + + omap_hsmmc_enable_irq(host, cmd); + + host->response_busy = 0; + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + resptype = 1; + else if (cmd->flags & MMC_RSP_BUSY) { + resptype = 3; + host->response_busy = 1; + } else + resptype = 2; + } + + /* + * Unlike OMAP1 controller, the cmdtype does not seem to be based on + * ac, bc, adtc, bcr. Only commands ending an open ended transfer need + * a val of 0x3, rest 0x0. + */ + if (cmd == host->mrq->stop) + cmdtype = 0x3; + + cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22); + + if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) && + host->mrq->sbc) { + cmdreg |= ACEN_ACMD23; + OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg); + } + if (data) { + cmdreg |= DP_SELECT | MSBS | BCE; + if (data->flags & MMC_DATA_READ) + cmdreg |= DDIR; + else + cmdreg &= ~(DDIR); + } + + if (host->use_dma) + cmdreg |= DMAE; + + host->req_in_progress = 1; + + OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); + OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); +} + +static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? 
host->tx_chan : host->rx_chan; +} + +static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) +{ + int dma_ch; + unsigned long flags; + + spin_lock_irqsave(&host->irq_lock, flags); + host->req_in_progress = 0; + dma_ch = host->dma_ch; + spin_unlock_irqrestore(&host->irq_lock, flags); + + omap_hsmmc_disable_irq(host); + /* Do not complete the request if DMA is still in progress */ + if (mrq->data && host->use_dma && dma_ch != -1) + return; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + +/* + * Notify the transfer complete to MMC core + */ +static void +omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) +{ + if (!data) { + struct mmc_request *mrq = host->mrq; + + /* TC before CC from CMD6 - don't know why, but it happens */ + if (host->cmd && host->cmd->opcode == 6 && + host->response_busy) { + host->response_busy = 0; + return; + } + + omap_hsmmc_request_done(host, mrq); + return; + } + + host->data = NULL; + + if (!data->error) + data->bytes_xfered += data->blocks * (data->blksz); + else + data->bytes_xfered = 0; + + if (data->stop && (data->error || !host->mrq->sbc)) + omap_hsmmc_start_command(host, data->stop, NULL); + else + omap_hsmmc_request_done(host, data->mrq); +} + +/* + * Notify the core about command completion + */ +static void +omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) +{ + if (host->mrq->sbc && (host->cmd == host->mrq->sbc) && + !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) { + host->cmd = NULL; + omap_hsmmc_start_dma_transfer(host); + omap_hsmmc_start_command(host, host->mrq->cmd, + host->mrq->data); + return; + } + + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10); + cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32); + cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54); + cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); + } + } + if ((host->data == NULL && !host->response_busy) || cmd->error) + omap_hsmmc_request_done(host, host->mrq); +} + +/* + * DMA clean up for command errors + */ +static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) +{ + int dma_ch; + unsigned long flags; + + host->data->error = errno; + + spin_lock_irqsave(&host->irq_lock, flags); + dma_ch = host->dma_ch; + host->dma_ch = -1; + spin_unlock_irqrestore(&host->irq_lock, flags); + + if (host->use_dma && dma_ch != -1) { + struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, + host->data->sg, host->data->sg_len, + mmc_get_dma_dir(host->data)); + + host->data->host_cookie = 0; + } + host->data = NULL; +} + +/* + * Readable error output + */ +#ifdef CONFIG_MMC_DEBUG +static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status) +{ + /* --- means reserved bit without definition at documentation */ + static const char *omap_hsmmc_status_bits[] = { + "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" , + "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI", + "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" , + "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---" + }; + char res[256]; + char *buf = res; + int len, i; + + len = sprintf(buf, "MMC IRQ 0x%x :", status); + buf += len; + + for (i = 0; i < 
ARRAY_SIZE(omap_hsmmc_status_bits); i++) + if (status & (1 << i)) { + len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]); + buf += len; + } + + dev_vdbg(mmc_dev(host->mmc), "%s\n", res); +} +#else +static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, + u32 status) +{ +} +#endif /* CONFIG_MMC_DEBUG */ + +/* + * MMC controller internal state machines reset + * + * Used to reset command or data internal state machines, using respectively + * SRC or SRD bit of SYSCTL register + * Can be called from interrupt context + */ +static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, + unsigned long bit) +{ + unsigned long i = 0; + unsigned long limit = MMC_TIMEOUT_US; + + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) | bit); + + /* + * OMAP4 ES2 and greater has an updated reset logic. + * Monitor a 0->1 transition first + */ + if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) { + while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) + && (i++ < limit)) + udelay(1); + } + i = 0; + + while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && + (i++ < limit)) + udelay(1); + + if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit) + dev_err(mmc_dev(host->mmc), + "Timeout waiting on controller reset in %s\n", + __func__); +} + +static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, + int err, int end_cmd) +{ + if (end_cmd) { + omap_hsmmc_reset_controller_fsm(host, SRC); + if (host->cmd) + host->cmd->error = err; + } + + if (host->data) { + omap_hsmmc_reset_controller_fsm(host, SRD); + omap_hsmmc_dma_cleanup(host, err); + } else if (host->mrq && host->mrq->cmd) + host->mrq->cmd->error = err; +} + +static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) +{ + struct mmc_data *data; + int end_cmd = 0, end_trans = 0; + int error = 0; + + data = host->data; + dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); + + if (status & ERR_EN) { + omap_hsmmc_dbg_report_irq(host, status); + + if (status & (CTO_EN | CCRC_EN | CEB_EN)) + end_cmd = 1; + if (host->data || host->response_busy) { + end_trans = !end_cmd; + host->response_busy = 0; + } + if (status & (CTO_EN | DTO_EN)) + hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); + else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | + BADA_EN)) + hsmmc_command_incomplete(host, -EILSEQ, end_cmd); + + if (status & ACE_EN) { + u32 ac12; + ac12 = OMAP_HSMMC_READ(host->base, AC12); + if (!(ac12 & ACNE) && host->mrq->sbc) { + end_cmd = 1; + if (ac12 & ACTO) + error = -ETIMEDOUT; + else if (ac12 & (ACCE | ACEB | ACIE)) + error = -EILSEQ; + host->mrq->sbc->error = error; + hsmmc_command_incomplete(host, error, end_cmd); + } + dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); + } + } + + OMAP_HSMMC_WRITE(host->base, STAT, status); + if (end_cmd || ((status & CC_EN) && host->cmd)) + omap_hsmmc_cmd_done(host, host->cmd); + if ((end_trans || (status & TC_EN)) && host->mrq) + omap_hsmmc_xfer_done(host, data); +} + +/* + * MMC controller IRQ handler + */ +static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) +{ + struct omap_hsmmc_host *host = dev_id; + int status; + + status = OMAP_HSMMC_READ(host->base, STAT); + while (status & (INT_EN_MASK | CIRQ_EN)) { + if (host->req_in_progress) + omap_hsmmc_do_irq(host, status); + + if (status & CIRQ_EN) + mmc_signal_sdio_irq(host->mmc); + + /* Flush posted write */ + status = OMAP_HSMMC_READ(host->base, STAT); + } + + return IRQ_HANDLED; +} + +static void set_sd_bus_power(struct omap_hsmmc_host *host) +{ + unsigned long 
i; + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | SDBP); + for (i = 0; i < loops_per_jiffy; i++) { + if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP) + break; + cpu_relax(); + } +} + +/* + * Switch MMC interface voltage ... only relevant for MMC1. + * + * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver. + * The MMC2 transceiver controls are used instead of DAT4..DAT7. + * Some chips, like eMMC ones, use internal transceivers. + */ +static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) +{ + u32 reg_val = 0; + int ret; + + /* Disable the clocks */ + clk_disable_unprepare(host->dbclk); + + /* Turn the power off */ + ret = omap_hsmmc_set_power(host, 0); + + /* Turn the power ON with given VDD 1.8 or 3.0v */ + if (!ret) + ret = omap_hsmmc_set_power(host, 1); + clk_prepare_enable(host->dbclk); + + if (ret != 0) + goto err; + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR); + reg_val = OMAP_HSMMC_READ(host->base, HCTL); + + /* + * If a MMC dual voltage card is detected, the set_ios fn calls + * this fn with VDD bit set for 1.8V. Upon card removal from the + * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF. + * + * Cope with a bit of slop in the range ... per data sheets: + * - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max, + * but recommended values are 1.71V to 1.89V + * - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max, + * but recommended values are 2.7V to 3.3V + * + * Board setup code shouldn't permit anything very out-of-range. + * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the + * middle range) but VSIM can't power DAT4..DAT7 at more than 3V. + */ + if ((1 << vdd) <= MMC_VDD_23_24) + reg_val |= SDVS18; + else + reg_val |= SDVS30; + + OMAP_HSMMC_WRITE(host->base, HCTL, reg_val); + set_sd_bus_power(host); + + return 0; +err: + dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); + return ret; +} + +static void omap_hsmmc_dma_callback(void *param) +{ + struct omap_hsmmc_host *host = param; + struct dma_chan *chan; + struct mmc_data *data; + int req_in_progress; + + spin_lock_irq(&host->irq_lock); + if (host->dma_ch < 0) { + spin_unlock_irq(&host->irq_lock); + return; + } + + data = host->mrq->data; + chan = omap_hsmmc_get_dma_chan(host, data); + if (!data->host_cookie) + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + req_in_progress = host->req_in_progress; + host->dma_ch = -1; + spin_unlock_irq(&host->irq_lock); + + /* If DMA has finished after TC, complete the request */ + if (!req_in_progress) { + struct mmc_request *mrq = host->mrq; + + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); + } +} + +static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, + struct mmc_data *data, + struct omap_hsmmc_next *next, + struct dma_chan *chan) +{ + int dma_len; + + if (!next && data->host_cookie && + data->host_cookie != host->next_data.cookie) { + dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d" + " host->next_data.cookie %d\n", + __func__, data->host_cookie, host->next_data.cookie); + data->host_cookie = 0; + } + + /* Check if next job is already prepared */ + if (next || data->host_cookie != host->next_data.cookie) { + dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + } else { + dma_len = host->next_data.dma_len; + host->next_data.dma_len = 0; + } + + + if (dma_len == 0) + return -EINVAL; + + if (next) { + next->dma_len = 
dma_len; + data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie; + } else + host->dma_len = dma_len; + + return 0; +} + +/* + * Routine to configure and start DMA for the MMC card + */ +static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, + struct mmc_request *req) +{ + struct dma_async_tx_descriptor *tx; + int ret = 0, i; + struct mmc_data *data = req->data; + struct dma_chan *chan; + struct dma_slave_config cfg = { + .src_addr = host->mapbase + OMAP_HSMMC_DATA, + .dst_addr = host->mapbase + OMAP_HSMMC_DATA, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = data->blksz / 4, + .dst_maxburst = data->blksz / 4, + }; + + /* Sanity check: all the SG entries must be aligned by block size. */ + for (i = 0; i < data->sg_len; i++) { + struct scatterlist *sgl; + + sgl = data->sg + i; + if (sgl->length % data->blksz) + return -EINVAL; + } + if ((data->blksz % 4) != 0) + /* REVISIT: The MMC buffer increments only when MSB is written. + * Return error for blksz which is non multiple of four. + */ + return -EINVAL; + + BUG_ON(host->dma_ch != -1); + + chan = omap_hsmmc_get_dma_chan(host, data); + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) + return ret; + + ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); + if (ret) + return ret; + + tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + /* FIXME: cleanup */ + return -1; + } + + tx->callback = omap_hsmmc_dma_callback; + tx->callback_param = host; + + /* Does not fail */ + dmaengine_submit(tx); + + host->dma_ch = 1; + + return 0; +} + +static void set_data_timeout(struct omap_hsmmc_host *host, + unsigned long long timeout_ns, + unsigned int timeout_clks) +{ + unsigned long long timeout = timeout_ns; + unsigned int cycle_ns; + uint32_t reg, clkd, dto = 0; + + reg = OMAP_HSMMC_READ(host->base, SYSCTL); + clkd = (reg & CLKD_MASK) >> CLKD_SHIFT; + if (clkd == 0) + clkd = 1; + + cycle_ns = 1000000000 / (host->clk_rate / clkd); + do_div(timeout, cycle_ns); + timeout += timeout_clks; + if (timeout) { + while ((timeout & 0x80000000) == 0) { + dto += 1; + timeout <<= 1; + } + dto = 31 - dto; + timeout <<= 1; + if (timeout && dto) + dto += 1; + if (dto >= 13) + dto -= 13; + else + dto = 0; + if (dto > 14) + dto = 14; + } + + reg &= ~DTO_MASK; + reg |= dto << DTO_SHIFT; + OMAP_HSMMC_WRITE(host->base, SYSCTL, reg); +} + +static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host) +{ + struct mmc_request *req = host->mrq; + struct dma_chan *chan; + + if (!req->data) + return; + OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) + | (req->data->blocks << 16)); + set_data_timeout(host, req->data->timeout_ns, + req->data->timeout_clks); + chan = omap_hsmmc_get_dma_chan(host, req->data); + dma_async_issue_pending(chan); +} + +/* + * Configure block length for MMC/SD cards and initiate the transfer. + */ +static int +omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) +{ + int ret; + unsigned long long timeout; + + host->data = req->data; + + if (req->data == NULL) { + OMAP_HSMMC_WRITE(host->base, BLK, 0); + if (req->cmd->flags & MMC_RSP_BUSY) { + timeout = req->cmd->busy_timeout * NSEC_PER_MSEC; + + /* + * Set an arbitrary 100ms data timeout for commands with + * busy signal and no indication of busy_timeout. 
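set_data_timeout() above encodes the timeout as a power-of-two cycle count: the 4-bit DTO field selects a counter that expires after 2^(DTO + 13) functional-clock cycles, clamped to a maximum field value of 14. A standalone sketch of that mapping, with example clock numbers only:

    /* Illustrative model of the DTO computation, not the kernel routine:
     * pick the smallest exponent whose counter covers the requested
     * timeout, clamped to the 4-bit maximum of 14. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t calc_dto(uint64_t timeout_ns, uint32_t clk_hz)
    {
        uint64_t cycles = (timeout_ns * clk_hz) / 1000000000ULL;
        uint32_t dto = 0;

        while (dto < 14 && (1ULL << (dto + 13)) < cycles)
            dto++;
        return dto;
    }

    int main(void)
    {
        /* 100 ms at a 96 MHz functional clock (example values) */
        printf("DTO = %u\n", calc_dto(100000000ULL, 96000000));
        return 0;
    }

With these numbers the request needs about 9.6 million cycles, so the first counter that covers it is 2^24 cycles, i.e. DTO = 11.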
+ */ + if (!timeout) + timeout = 100000000U; + + set_data_timeout(host, timeout, 0); + } + return 0; + } + + if (host->use_dma) { + ret = omap_hsmmc_setup_dma_transfer(host, req); + if (ret != 0) { + dev_err(mmc_dev(host->mmc), "MMC start dma failure\n"); + return ret; + } + } + return 0; +} + +static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (host->use_dma && data->host_cookie) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + + dma_unmap_sg(c->device->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + data->host_cookie = 0; + } +} + +static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + + if (mrq->data->host_cookie) { + mrq->data->host_cookie = 0; + return ; + } + + if (host->use_dma) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + + if (omap_hsmmc_pre_dma_transfer(host, mrq->data, + &host->next_data, c)) + mrq->data->host_cookie = 0; + } +} + +/* + * Request function. for read/write operation + */ +static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + int err; + + BUG_ON(host->req_in_progress); + BUG_ON(host->dma_ch != -1); + if (host->reqs_blocked) + host->reqs_blocked = 0; + WARN_ON(host->mrq != NULL); + host->mrq = req; + host->clk_rate = clk_get_rate(host->fclk); + err = omap_hsmmc_prepare_data(host, req); + if (err) { + req->cmd->error = err; + if (req->data) + req->data->error = err; + host->mrq = NULL; + mmc_request_done(mmc, req); + return; + } + if (req->sbc && !(host->flags & AUTO_CMD23)) { + omap_hsmmc_start_command(host, req->sbc, NULL); + return; + } + + omap_hsmmc_start_dma_transfer(host); + omap_hsmmc_start_command(host, req->cmd, req->data); +} + +/* Routine to configure clock values. Exposed API to core */ +static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + int do_send_init_stream = 0; + + if (ios->power_mode != host->power_mode) { + switch (ios->power_mode) { + case MMC_POWER_OFF: + omap_hsmmc_set_power(host, 0); + break; + case MMC_POWER_UP: + omap_hsmmc_set_power(host, 1); + break; + case MMC_POWER_ON: + do_send_init_stream = 1; + break; + } + host->power_mode = ios->power_mode; + } + + /* FIXME: set registers based only on changes to ios */ + + omap_hsmmc_set_bus_width(host); + + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { + /* Only MMC1 can interface at 3V without some flavor + * of external transceiver; but they all handle 1.8V. + */ + if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) && + (ios->vdd == DUAL_VOLT_OCR_BIT)) { + /* + * The mmc_select_voltage fn of the core does + * not seem to set the power_mode to + * MMC_POWER_UP upon recalculating the voltage. + * vdd 1.8v. 
+ */ + if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0) + dev_dbg(mmc_dev(host->mmc), + "Switch operation failed\n"); + } + } + + omap_hsmmc_set_clock(host); + + if (do_send_init_stream) + send_init_stream(host); + + omap_hsmmc_set_bus_mode(host); +} + +static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + + if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) { + struct device_node *np = mmc_dev(mmc)->of_node; + + /* + * REVISIT: should be moved to sdio core and made more + * general e.g. by expanding the DT bindings of child nodes + * to provide a mechanism to provide this information: + * Documentation/devicetree/bindings/mmc/mmc-card.txt + */ + + np = of_get_compatible_child(np, "ti,wl1251"); + if (np) { + /* + * We have TI wl1251 attached to MMC3. Pass this + * information to the SDIO core because it can't be + * probed by normal methods. + */ + + dev_info(host->dev, "found wl1251\n"); + card->quirks |= MMC_QUIRK_NONSTD_SDIO; + card->cccr.wide_bus = 1; + card->cis.vendor = 0x104c; + card->cis.device = 0x9066; + card->cis.blksize = 512; + card->cis.max_dtr = 24000000; + card->ocr = 0x80; + of_node_put(np); + } + } +} + +static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + u32 irq_mask, con; + unsigned long flags; + + spin_lock_irqsave(&host->irq_lock, flags); + + con = OMAP_HSMMC_READ(host->base, CON); + irq_mask = OMAP_HSMMC_READ(host->base, ISE); + if (enable) { + host->flags |= HSMMC_SDIO_IRQ_ENABLED; + irq_mask |= CIRQ_EN; + con |= CTPL | CLKEXTFREE; + } else { + host->flags &= ~HSMMC_SDIO_IRQ_ENABLED; + irq_mask &= ~CIRQ_EN; + con &= ~(CTPL | CLKEXTFREE); + } + OMAP_HSMMC_WRITE(host->base, CON, con); + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); + + /* + * if enable, piggy back detection on current request + * but always disable immediately + */ + if (!host->req_in_progress || !enable) + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + + /* flush posted write */ + OMAP_HSMMC_READ(host->base, IE); + + spin_unlock_irqrestore(&host->irq_lock, flags); +} + +static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) +{ + int ret; + + /* + * For omaps with wake-up path, wakeirq will be irq from pinctrl and + * for other omaps, wakeirq will be from GPIO (dat line remuxed to + * gpio). wakeirq is needed to detect sdio irq in runtime suspend state + * with functional clock disabled. + */ + if (!host->dev->of_node || !host->wake_irq) + return -ENODEV; + + ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq); + if (ret) { + dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n"); + goto err; + } + + /* + * Some omaps don't have wake-up path from deeper idle states + * and need to remux SDIO DAT1 to GPIO for wake-up from idle. 
+ */ + if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { + struct pinctrl *p = devm_pinctrl_get(host->dev); + if (IS_ERR(p)) { + ret = PTR_ERR(p); + goto err_free_irq; + } + + if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) { + dev_info(host->dev, "missing idle pinctrl state\n"); + devm_pinctrl_put(p); + ret = -EINVAL; + goto err_free_irq; + } + devm_pinctrl_put(p); + } + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | IWE); + return 0; + +err_free_irq: + dev_pm_clear_wake_irq(host->dev); +err: + dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n"); + host->wake_irq = 0; + return ret; +} + +static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) +{ + u32 hctl, capa, value; + + /* Only MMC1 supports 3.0V */ + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { + hctl = SDVS30; + capa = VS30 | VS18; + } else { + hctl = SDVS18; + capa = VS18; + } + + value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK; + OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl); + + value = OMAP_HSMMC_READ(host->base, CAPA); + OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); + + /* Set SD bus power bit */ + set_sd_bus_power(host); +} + +static int omap_hsmmc_multi_io_quirk(struct mmc_card *card, + unsigned int direction, int blk_size) +{ + /* This controller can't do multiblock reads due to hw bugs */ + if (direction == MMC_DATA_READ) + return 1; + + return blk_size; +} + +static struct mmc_host_ops omap_hsmmc_ops = { + .post_req = omap_hsmmc_post_req, + .pre_req = omap_hsmmc_pre_req, + .request = omap_hsmmc_request, + .set_ios = omap_hsmmc_set_ios, + .get_cd = mmc_gpio_get_cd, + .get_ro = mmc_gpio_get_ro, + .init_card = omap_hsmmc_init_card, + .enable_sdio_irq = omap_hsmmc_enable_sdio_irq, +}; + +#ifdef CONFIG_DEBUG_FS + +static int mmc_regs_show(struct seq_file *s, void *data) +{ + struct mmc_host *mmc = s->private; + struct omap_hsmmc_host *host = mmc_priv(mmc); + + seq_printf(s, "mmc%d:\n", mmc->index); + seq_printf(s, "sdio irq mode\t%s\n", + (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling"); + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + seq_printf(s, "sdio irq \t%s\n", + (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? 
"enabled" + : "disabled"); + } + seq_printf(s, "ctx_loss:\t%d\n", host->context_loss); + + pm_runtime_get_sync(host->dev); + seq_puts(s, "\nregs:\n"); + seq_printf(s, "CON:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, CON)); + seq_printf(s, "PSTATE:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, PSTATE)); + seq_printf(s, "HCTL:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, HCTL)); + seq_printf(s, "SYSCTL:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, SYSCTL)); + seq_printf(s, "IE:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, IE)); + seq_printf(s, "ISE:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, ISE)); + seq_printf(s, "CAPA:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, CAPA)); + + pm_runtime_mark_last_busy(host->dev); + pm_runtime_put_autosuspend(host->dev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mmc_regs); + +static void omap_hsmmc_debugfs(struct mmc_host *mmc) +{ + if (mmc->debugfs_root) + debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root, + mmc, &mmc_regs_fops); +} + +#else + +static void omap_hsmmc_debugfs(struct mmc_host *mmc) +{ +} + +#endif + +#ifdef CONFIG_OF +static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = { + /* See 35xx errata 2.1.1.128 in SPRZ278F */ + .controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ, +}; + +static const struct omap_mmc_of_data omap4_mmc_of_data = { + .reg_offset = 0x100, +}; +static const struct omap_mmc_of_data am33xx_mmc_of_data = { + .reg_offset = 0x100, + .controller_flags = OMAP_HSMMC_SWAKEUP_MISSING, +}; + +static const struct of_device_id omap_mmc_of_match[] = { + { + .compatible = "ti,omap2-hsmmc", + }, + { + .compatible = "ti,omap3-pre-es3-hsmmc", + .data = &omap3_pre_es3_mmc_of_data, + }, + { + .compatible = "ti,omap3-hsmmc", + }, + { + .compatible = "ti,omap4-hsmmc", + .data = &omap4_mmc_of_data, + }, + { + .compatible = "ti,am33xx-hsmmc", + .data = &am33xx_mmc_of_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_mmc_of_match); + +static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev) +{ + struct omap_hsmmc_platform_data *pdata, *legacy; + struct device_node *np = dev->of_node; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); /* out of memory */ + + legacy = dev_get_platdata(dev); + if (legacy && legacy->name) + pdata->name = legacy->name; + + if (of_find_property(np, "ti,dual-volt", NULL)) + pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; + + if (of_find_property(np, "ti,non-removable", NULL)) { + pdata->nonremovable = true; + pdata->no_regulator_off_init = true; + } + + if (of_find_property(np, "ti,needs-special-reset", NULL)) + pdata->features |= HSMMC_HAS_UPDATED_RESET; + + if (of_find_property(np, "ti,needs-special-hs-handling", NULL)) + pdata->features |= HSMMC_HAS_HSPE_SUPPORT; + + return pdata; +} +#else +static inline struct omap_hsmmc_platform_data + *of_get_hsmmc_pdata(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static int omap_hsmmc_probe(struct platform_device *pdev) +{ + struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data; + struct mmc_host *mmc; + struct omap_hsmmc_host *host = NULL; + struct resource *res; + int ret, irq; + const struct of_device_id *match; + const struct omap_mmc_of_data *data; + void __iomem *base; + + match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); + if (match) { + pdata = of_get_hsmmc_pdata(&pdev->dev); + + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + + if (match->data) { + data = match->data; + pdata->reg_offset = data->reg_offset; + 
pdata->controller_flags |= data->controller_flags; + } + } + + if (pdata == NULL) { + dev_err(&pdev->dev, "Platform Data is missing\n"); + return -ENXIO; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto err; + } + + ret = mmc_of_parse(mmc); + if (ret) + goto err1; + + host = mmc_priv(mmc); + host->mmc = mmc; + host->pdata = pdata; + host->dev = &pdev->dev; + host->use_dma = 1; + host->dma_ch = -1; + host->irq = irq; + host->mapbase = res->start + pdata->reg_offset; + host->base = base + pdata->reg_offset; + host->power_mode = MMC_POWER_OFF; + host->next_data.cookie = 1; + host->pbias_enabled = 0; + host->vqmmc_enabled = 0; + + platform_set_drvdata(pdev, host); + + if (pdev->dev.of_node) + host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1); + + mmc->ops = &omap_hsmmc_ops; + + mmc->f_min = OMAP_MMC_MIN_CLOCK; + + if (pdata->max_freq > 0) + mmc->f_max = pdata->max_freq; + else if (mmc->f_max == 0) + mmc->f_max = OMAP_MMC_MAX_CLOCK; + + spin_lock_init(&host->irq_lock); + + host->fclk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(host->fclk)) { + ret = PTR_ERR(host->fclk); + host->fclk = NULL; + goto err1; + } + + if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { + dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n"); + omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk; + } + + device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(host->dev); + pm_runtime_get_sync(host->dev); + pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(host->dev); + + omap_hsmmc_context_save(host); + + host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck"); + /* + * MMC can still work without debounce clock. + */ + if (IS_ERR(host->dbclk)) { + host->dbclk = NULL; + } else if (clk_prepare_enable(host->dbclk) != 0) { + dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); + host->dbclk = NULL; + } + + /* Set this to a value that allows allocating an entire descriptor + * list within a page (zero order allocation). */ + mmc->max_segs = 64; + + mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ + mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_CMD23; + + mmc->caps |= mmc_pdata(host)->caps; + if (mmc->caps & MMC_CAP_8_BIT_DATA) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + if (mmc_pdata(host)->nonremovable) + mmc->caps |= MMC_CAP_NONREMOVABLE; + + mmc->pm_caps |= mmc_pdata(host)->pm_caps; + + omap_hsmmc_conf_bus_power(host); + + host->rx_chan = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(host->rx_chan)) { + dev_err(mmc_dev(host->mmc), "RX DMA channel request failed\n"); + ret = PTR_ERR(host->rx_chan); + goto err_irq; + } + + host->tx_chan = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(host->tx_chan)) { + dev_err(mmc_dev(host->mmc), "TX DMA channel request failed\n"); + ret = PTR_ERR(host->tx_chan); + goto err_irq; + } + + /* + * Limit the maximum segment size to the lower of the request size + * and the DMA engine device segment size limits. 
In reality, with + * 32-bit transfers, the DMA engine can do longer segments than this + * but there is no way to represent that in the DMA model - if we + * increase this figure here, we get warnings from the DMA API debug. + */ + mmc->max_seg_size = min3(mmc->max_req_size, + dma_get_max_seg_size(host->rx_chan->device->dev), + dma_get_max_seg_size(host->tx_chan->device->dev)); + + /* Request IRQ for MMC operations */ + ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0, + mmc_hostname(mmc), host); + if (ret) { + dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); + goto err_irq; + } + + ret = omap_hsmmc_reg_get(host); + if (ret) + goto err_irq; + + if (!mmc->ocr_avail) + mmc->ocr_avail = mmc_pdata(host)->ocr_mask; + + omap_hsmmc_disable_irq(host); + + /* + * For now, only support SDIO interrupt if we have a separate + * wake-up interrupt configured from device tree. This is because + * the wake-up interrupt is needed for idle state and some + * platforms need special quirks. And we don't want to add new + * legacy mux platform init code callbacks any longer as we + * are moving to DT based booting anyways. + */ + ret = omap_hsmmc_configure_wake_irq(host); + if (!ret) + mmc->caps |= MMC_CAP_SDIO_IRQ; + + ret = mmc_add_host(mmc); + if (ret) + goto err_irq; + + if (mmc_pdata(host)->name != NULL) { + ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); + if (ret < 0) + goto err_slot_name; + } + + omap_hsmmc_debugfs(mmc); + pm_runtime_mark_last_busy(host->dev); + pm_runtime_put_autosuspend(host->dev); + + return 0; + +err_slot_name: + mmc_remove_host(mmc); +err_irq: + device_init_wakeup(&pdev->dev, false); + if (!IS_ERR_OR_NULL(host->tx_chan)) + dma_release_channel(host->tx_chan); + if (!IS_ERR_OR_NULL(host->rx_chan)) + dma_release_channel(host->rx_chan); + pm_runtime_dont_use_autosuspend(host->dev); + pm_runtime_put_sync(host->dev); + pm_runtime_disable(host->dev); + clk_disable_unprepare(host->dbclk); +err1: + mmc_free_host(mmc); +err: + return ret; +} + +static int omap_hsmmc_remove(struct platform_device *pdev) +{ + struct omap_hsmmc_host *host = platform_get_drvdata(pdev); + + pm_runtime_get_sync(host->dev); + mmc_remove_host(host->mmc); + + dma_release_channel(host->tx_chan); + dma_release_channel(host->rx_chan); + + dev_pm_clear_wake_irq(host->dev); + pm_runtime_dont_use_autosuspend(host->dev); + pm_runtime_put_sync(host->dev); + pm_runtime_disable(host->dev); + device_init_wakeup(&pdev->dev, false); + clk_disable_unprepare(host->dbclk); + + mmc_free_host(host->mmc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int omap_hsmmc_suspend(struct device *dev) +{ + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + + if (!host) + return 0; + + pm_runtime_get_sync(host->dev); + + if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); + } + + clk_disable_unprepare(host->dbclk); + + pm_runtime_put_sync(host->dev); + return 0; +} + +/* Routine to resume the MMC device */ +static int omap_hsmmc_resume(struct device *dev) +{ + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + + if (!host) + return 0; + + pm_runtime_get_sync(host->dev); + + clk_prepare_enable(host->dbclk); + + if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) + omap_hsmmc_conf_bus_power(host); + + pm_runtime_mark_last_busy(host->dev); + pm_runtime_put_autosuspend(host->dev); + return 0; 
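The max_seg_size clamp earlier in the probe path reduces to a three-way minimum of the request size and both DMA engines' per-segment limits. A standalone sketch with made-up DMA limits:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t min3_u32(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t m = a < b ? a : b;
        return m < c ? m : c;
    }

    int main(void)
    {
        uint32_t max_req_size = 512u * 0xFFFF;  /* max_blk_size * max_blk_count */
        uint32_t rx_seg = 0x01000000;           /* hypothetical RX engine limit */
        uint32_t tx_seg = 0x00FFFFFF;           /* hypothetical TX engine limit */

        printf("max_seg_size = %u\n", min3_u32(max_req_size, rx_seg, tx_seg));
        return 0;
    }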
+} +#endif + +static int omap_hsmmc_runtime_suspend(struct device *dev) +{ + struct omap_hsmmc_host *host; + unsigned long flags; + int ret = 0; + + host = dev_get_drvdata(dev); + omap_hsmmc_context_save(host); + dev_dbg(dev, "disabled\n"); + + spin_lock_irqsave(&host->irq_lock, flags); + if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && + (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { + /* disable sdio irq handling to prevent race */ + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + + if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) { + /* + * dat1 line low, pending sdio irq + * race condition: possible irq handler running on + * multi-core, abort + */ + dev_dbg(dev, "pending sdio irq, abort suspend\n"); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); + OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); + pm_runtime_mark_last_busy(dev); + ret = -EBUSY; + goto abort; + } + + pinctrl_pm_select_idle_state(dev); + } else { + pinctrl_pm_select_idle_state(dev); + } + +abort: + spin_unlock_irqrestore(&host->irq_lock, flags); + return ret; +} + +static int omap_hsmmc_runtime_resume(struct device *dev) +{ + struct omap_hsmmc_host *host; + unsigned long flags; + + host = dev_get_drvdata(dev); + omap_hsmmc_context_restore(host); + dev_dbg(dev, "enabled\n"); + + spin_lock_irqsave(&host->irq_lock, flags); + if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && + (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { + + pinctrl_select_default_state(host->dev); + + /* irq lost, if pinmux incorrect */ + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); + OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); + } else { + pinctrl_select_default_state(host->dev); + } + spin_unlock_irqrestore(&host->irq_lock, flags); + return 0; +} + +static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume) + .runtime_suspend = omap_hsmmc_runtime_suspend, + .runtime_resume = omap_hsmmc_runtime_resume, +}; + +static struct platform_driver omap_hsmmc_driver = { + .probe = omap_hsmmc_probe, + .remove = omap_hsmmc_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &omap_hsmmc_dev_pm_ops, + .of_match_table = of_match_ptr(omap_mmc_of_match), + }, +}; + +module_platform_driver(omap_hsmmc_driver); +MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Texas Instruments Inc"); diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c new file mode 100644 index 000000000..8a40cf8a9 --- /dev/null +++ b/drivers/mmc/host/owl-mmc.c @@ -0,0 +1,706 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Actions Semi Owl SoCs SD/MMC driver + * + * Copyright (c) 2014 Actions Semi Inc. 
+ * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: SDIO support
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+/*
+ * SDC registers
+ */
+#define OWL_REG_SD_EN			0x0000
+#define OWL_REG_SD_CTL			0x0004
+#define OWL_REG_SD_STATE		0x0008
+#define OWL_REG_SD_CMD			0x000c
+#define OWL_REG_SD_ARG			0x0010
+#define OWL_REG_SD_RSPBUF0		0x0014
+#define OWL_REG_SD_RSPBUF1		0x0018
+#define OWL_REG_SD_RSPBUF2		0x001c
+#define OWL_REG_SD_RSPBUF3		0x0020
+#define OWL_REG_SD_RSPBUF4		0x0024
+#define OWL_REG_SD_DAT			0x0028
+#define OWL_REG_SD_BLK_SIZE		0x002c
+#define OWL_REG_SD_BLK_NUM		0x0030
+#define OWL_REG_SD_BUF_SIZE		0x0034
+
+/* SD_EN Bits */
+#define OWL_SD_EN_RANE		BIT(31)
+#define OWL_SD_EN_RAN_SEED(x)	(((x) & 0x3f) << 24)
+#define OWL_SD_EN_S18EN		BIT(12)
+#define OWL_SD_EN_RESE		BIT(10)
+#define OWL_SD_EN_DAT1_S	BIT(9)
+#define OWL_SD_EN_CLK_S		BIT(8)
+#define OWL_SD_ENABLE		BIT(7)
+#define OWL_SD_EN_BSEL		BIT(6)
+#define OWL_SD_EN_SDIOEN	BIT(3)
+#define OWL_SD_EN_DDREN		BIT(2)
+#define OWL_SD_EN_DATAWID(x)	(((x) & 0x3) << 0)
+
+/* SD_CTL Bits */
+#define OWL_SD_CTL_TOUTEN	BIT(31)
+#define OWL_SD_CTL_TOUTCNT(x)	(((x) & 0x7f) << 24)
+#define OWL_SD_CTL_DELAY_MSK	GENMASK(23, 16)
+#define OWL_SD_CTL_RDELAY(x)	(((x) & 0xf) << 20)
+#define OWL_SD_CTL_WDELAY(x)	(((x) & 0xf) << 16)
+#define OWL_SD_CTL_CMDLEN	BIT(13)
+#define OWL_SD_CTL_SCC		BIT(12)
+#define OWL_SD_CTL_TCN(x)	(((x) & 0xf) << 8)
+#define OWL_SD_CTL_TS		BIT(7)
+#define OWL_SD_CTL_LBE		BIT(6)
+#define OWL_SD_CTL_C7EN		BIT(5)
+#define OWL_SD_CTL_TM(x)	(((x) & 0xf) << 0)
+
+#define OWL_SD_DELAY_LOW_CLK	0x0f
+#define OWL_SD_DELAY_MID_CLK	0x0a
+#define OWL_SD_DELAY_HIGH_CLK	0x09
+#define OWL_SD_RDELAY_DDR50	0x0a
+#define OWL_SD_WDELAY_DDR50	0x08
+
+/* SD_STATE Bits */
+#define OWL_SD_STATE_DAT1BS	BIT(18)
+#define OWL_SD_STATE_SDIOB_P	BIT(17)
+#define OWL_SD_STATE_SDIOB_EN	BIT(16)
+#define OWL_SD_STATE_TOUTE	BIT(15)
+#define OWL_SD_STATE_BAEP	BIT(14)
+#define OWL_SD_STATE_MEMRDY	BIT(12)
+#define OWL_SD_STATE_CMDS	BIT(11)
+#define OWL_SD_STATE_DAT1AS	BIT(10)
+#define OWL_SD_STATE_SDIOA_P	BIT(9)
+#define OWL_SD_STATE_SDIOA_EN	BIT(8)
+#define OWL_SD_STATE_DAT0S	BIT(7)
+#define OWL_SD_STATE_TEIE	BIT(6)
+#define OWL_SD_STATE_TEI	BIT(5)
+#define OWL_SD_STATE_CLNR	BIT(4)
+#define OWL_SD_STATE_CLC	BIT(3)
+#define OWL_SD_STATE_WC16ER	BIT(2)
+#define OWL_SD_STATE_RC16ER	BIT(1)
+#define OWL_SD_STATE_CRC7ER	BIT(0)
+
+#define OWL_CMD_TIMEOUT_MS	30000
+
+struct owl_mmc_host {
+	struct device *dev;
+	struct reset_control *reset;
+	void __iomem *base;
+	struct clk *clk;
+	struct completion sdc_complete;
+	spinlock_t lock;
+	int irq;
+	u32 clock;
+	bool ddr_50;
+
+	enum dma_data_direction dma_dir;
+	struct dma_chan *dma;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config dma_cfg;
+	struct completion dma_complete;
+
+	struct mmc_host *mmc;
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_data *data;
+};
+
+static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+	unsigned int regval;
+
+	regval = readl(reg);
+
+	if (state)
+		regval |= val;
+	else
+		regval &= ~val;
+
+	writel(regval, reg);
+}
+
+static irqreturn_t owl_irq_handler(int irq, void *devid)
+{
+	struct owl_mmc_host *owl_host = devid;
+	unsigned long flags;
+	u32 state;
+
+	spin_lock_irqsave(&owl_host->lock, flags);
+
+	state = readl(owl_host->base + OWL_REG_SD_STATE);
+	if (state & OWL_SD_STATE_TEI) {
+		state = readl(owl_host->base + OWL_REG_SD_STATE);
+		state |=
OWL_SD_STATE_TEI; + writel(state, owl_host->base + OWL_REG_SD_STATE); + complete(&owl_host->sdc_complete); + } + + spin_unlock_irqrestore(&owl_host->lock, flags); + + return IRQ_HANDLED; +} + +static void owl_mmc_finish_request(struct owl_mmc_host *owl_host) +{ + struct mmc_request *mrq = owl_host->mrq; + struct mmc_data *data = mrq->data; + + /* Should never be NULL */ + WARN_ON(!mrq); + + owl_host->mrq = NULL; + + if (data) + dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len, + owl_host->dma_dir); + + /* Finally finish request */ + mmc_request_done(owl_host->mmc, mrq); +} + +static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + unsigned long timeout; + u32 mode, state, resp[2]; + u32 cmd_rsp_mask = 0; + + init_completion(&owl_host->sdc_complete); + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + mode = OWL_SD_CTL_TM(0); + break; + + case MMC_RSP_R1: + if (data) { + if (data->flags & MMC_DATA_READ) + mode = OWL_SD_CTL_TM(4); + else + mode = OWL_SD_CTL_TM(5); + } else { + mode = OWL_SD_CTL_TM(1); + } + cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER; + + break; + + case MMC_RSP_R1B: + mode = OWL_SD_CTL_TM(3); + cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER; + break; + + case MMC_RSP_R2: + mode = OWL_SD_CTL_TM(2); + cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER; + break; + + case MMC_RSP_R3: + mode = OWL_SD_CTL_TM(1); + cmd_rsp_mask = OWL_SD_STATE_CLNR; + break; + + default: + dev_warn(owl_host->dev, "Unknown MMC command\n"); + cmd->error = -EINVAL; + return; + } + + /* Keep current WDELAY and RDELAY */ + mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16)); + + /* Start to send corresponding command type */ + writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG); + writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD); + + /* Set LBE to send clk at the end of last read block */ + if (data) { + mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000); + } else { + mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE); + mode |= OWL_SD_CTL_TS; + } + + owl_host->cmd = cmd; + + /* Start transfer */ + writel(mode, owl_host->base + OWL_REG_SD_CTL); + + if (data) + return; + + timeout = msecs_to_jiffies(cmd->busy_timeout ? 
cmd->busy_timeout : + OWL_CMD_TIMEOUT_MS); + + if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) { + dev_err(owl_host->dev, "CMD interrupt timeout\n"); + cmd->error = -ETIMEDOUT; + return; + } + + state = readl(owl_host->base + OWL_REG_SD_STATE); + if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { + if (cmd_rsp_mask & state) { + if (state & OWL_SD_STATE_CLNR) { + dev_err(owl_host->dev, "Error CMD_NO_RSP\n"); + cmd->error = -EILSEQ; + return; + } + + if (state & OWL_SD_STATE_CRC7ER) { + dev_err(owl_host->dev, "Error CMD_RSP_CRC\n"); + cmd->error = -EILSEQ; + return; + } + } + + if (mmc_resp_type(cmd) & MMC_RSP_136) { + cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0); + cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1); + cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2); + cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3); + } else { + resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0); + resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1); + cmd->resp[0] = resp[1] << 24 | resp[0] >> 8; + cmd->resp[1] = resp[1] >> 8; + } + } +} + +static void owl_mmc_dma_complete(void *param) +{ + struct owl_mmc_host *owl_host = param; + struct mmc_data *data = owl_host->data; + + if (data) + complete(&owl_host->dma_complete); +} + +static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host, + struct mmc_data *data) +{ + u32 total; + + owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL, + true); + writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM); + writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE); + total = data->blksz * data->blocks; + + if (total < 512) + writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE); + else + writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE); + + if (data->flags & MMC_DATA_WRITE) { + owl_host->dma_dir = DMA_TO_DEVICE; + owl_host->dma_cfg.direction = DMA_MEM_TO_DEV; + } else { + owl_host->dma_dir = DMA_FROM_DEVICE; + owl_host->dma_cfg.direction = DMA_DEV_TO_MEM; + } + + dma_map_sg(owl_host->dma->device->dev, data->sg, + data->sg_len, owl_host->dma_dir); + + dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg); + owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg, + data->sg_len, + owl_host->dma_cfg.direction, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!owl_host->desc) { + dev_err(owl_host->dev, "Can't prepare slave sg\n"); + return -EBUSY; + } + + owl_host->data = data; + + owl_host->desc->callback = owl_mmc_dma_complete; + owl_host->desc->callback_param = (void *)owl_host; + data->error = 0; + + return 0; +} + +static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct owl_mmc_host *owl_host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + int ret; + + owl_host->mrq = mrq; + if (mrq->data) { + ret = owl_mmc_prepare_data(owl_host, data); + if (ret < 0) { + data->error = ret; + goto err_out; + } + + init_completion(&owl_host->dma_complete); + dmaengine_submit(owl_host->desc); + dma_async_issue_pending(owl_host->dma); + } + + owl_mmc_send_cmd(owl_host, mrq->cmd, data); + + if (data) { + if (!wait_for_completion_timeout(&owl_host->sdc_complete, + 10 * HZ)) { + dev_err(owl_host->dev, "CMD interrupt timeout\n"); + mrq->cmd->error = -ETIMEDOUT; + dmaengine_terminate_all(owl_host->dma); + goto err_out; + } + + if (!wait_for_completion_timeout(&owl_host->dma_complete, + 5 * HZ)) { + dev_err(owl_host->dev, "DMA interrupt timeout\n"); + mrq->cmd->error = -ETIMEDOUT; + dmaengine_terminate_all(owl_host->dma); + goto err_out; + } + + if (data->stop) + 
owl_mmc_send_cmd(owl_host, data->stop, NULL); + + data->bytes_xfered = data->blocks * data->blksz; + } + +err_out: + owl_mmc_finish_request(owl_host); +} + +static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host, + unsigned int rate) +{ + unsigned long clk_rate; + int ret; + u32 reg; + + reg = readl(owl_host->base + OWL_REG_SD_CTL); + reg &= ~OWL_SD_CTL_DELAY_MSK; + + /* Set RDELAY and WDELAY based on the clock */ + if (rate <= 1000000) { + writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) | + OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK), + owl_host->base + OWL_REG_SD_CTL); + } else if ((rate > 1000000) && (rate <= 26000000)) { + writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) | + OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK), + owl_host->base + OWL_REG_SD_CTL); + } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) { + writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) | + OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK), + owl_host->base + OWL_REG_SD_CTL); + /* DDR50 mode has special delay chain */ + } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) { + writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) | + OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50), + owl_host->base + OWL_REG_SD_CTL); + } else { + dev_err(owl_host->dev, "SD clock rate not supported\n"); + return -EINVAL; + } + + clk_rate = clk_round_rate(owl_host->clk, rate << 1); + ret = clk_set_rate(owl_host->clk, clk_rate); + + return ret; +} + +static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios) +{ + if (!ios->clock) + return; + + owl_host->clock = ios->clock; + owl_mmc_set_clk_rate(owl_host, ios->clock); +} + +static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host, + struct mmc_ios *ios) +{ + u32 reg; + + reg = readl(owl_host->base + OWL_REG_SD_EN); + reg &= ~0x03; + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + break; + case MMC_BUS_WIDTH_4: + reg |= OWL_SD_EN_DATAWID(1); + break; + case MMC_BUS_WIDTH_8: + reg |= OWL_SD_EN_DATAWID(2); + break; + } + + writel(reg, owl_host->base + OWL_REG_SD_EN); +} + +static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host) +{ + reset_control_assert(owl_host->reset); + udelay(20); + reset_control_deassert(owl_host->reset); +} + +static void owl_mmc_power_on(struct owl_mmc_host *owl_host) +{ + u32 mode; + + init_completion(&owl_host->sdc_complete); + + /* Enable transfer end IRQ */ + owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE, + OWL_SD_STATE_TEIE, true); + + /* Send init clk */ + mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16)); + mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8); + writel(mode, owl_host->base + OWL_REG_SD_CTL); + + if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) { + dev_err(owl_host->dev, "CMD interrupt timeout\n"); + return; + } +} + +static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct owl_mmc_host *owl_host = mmc_priv(mmc); + + switch (ios->power_mode) { + case MMC_POWER_UP: + dev_dbg(owl_host->dev, "Powering card up\n"); + + /* Reset the SDC controller to clear all previous states */ + owl_mmc_ctr_reset(owl_host); + clk_prepare_enable(owl_host->clk); + writel(OWL_SD_ENABLE | OWL_SD_EN_RESE, + owl_host->base + OWL_REG_SD_EN); + + break; + + case MMC_POWER_ON: + dev_dbg(owl_host->dev, "Powering card on\n"); + owl_mmc_power_on(owl_host); + + break; + + case MMC_POWER_OFF: + dev_dbg(owl_host->dev, "Powering card off\n"); + clk_disable_unprepare(owl_host->clk); + + return; + + default: + dev_dbg(owl_host->dev, "Ignoring 
unknown card power state\n"); + break; + } + + if (ios->clock != owl_host->clock) + owl_mmc_set_clk(owl_host, ios); + + owl_mmc_set_bus_width(owl_host, ios); + + /* Enable DDR mode if requested */ + if (ios->timing == MMC_TIMING_UHS_DDR50) { + owl_host->ddr_50 = 1; + owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, + OWL_SD_EN_DDREN, true); + } else { + owl_host->ddr_50 = 0; + } +} + +static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct owl_mmc_host *owl_host = mmc_priv(mmc); + + /* It is enough to change the pad ctrl bit for voltage switch */ + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, + OWL_SD_EN_S18EN, false); + break; + case MMC_SIGNAL_VOLTAGE_180: + owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, + OWL_SD_EN_S18EN, true); + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +static const struct mmc_host_ops owl_mmc_ops = { + .request = owl_mmc_request, + .set_ios = owl_mmc_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, + .start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch, +}; + +static int owl_mmc_probe(struct platform_device *pdev) +{ + struct owl_mmc_host *owl_host; + struct mmc_host *mmc; + struct resource *res; + int ret; + + mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "mmc alloc host failed\n"); + return -ENOMEM; + } + platform_set_drvdata(pdev, mmc); + + owl_host = mmc_priv(mmc); + owl_host->dev = &pdev->dev; + owl_host->mmc = mmc; + spin_lock_init(&owl_host->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + owl_host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(owl_host->base)) { + dev_err(&pdev->dev, "Failed to remap registers\n"); + ret = PTR_ERR(owl_host->base); + goto err_free_host; + } + + owl_host->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(owl_host->clk)) { + dev_err(&pdev->dev, "No clock defined\n"); + ret = PTR_ERR(owl_host->clk); + goto err_free_host; + } + + owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(owl_host->reset)) { + dev_err(&pdev->dev, "Could not get reset control\n"); + ret = PTR_ERR(owl_host->reset); + goto err_free_host; + } + + mmc->ops = &owl_mmc_ops; + mmc->max_blk_count = 512; + mmc->max_blk_size = 512; + mmc->max_segs = 256; + mmc->max_seg_size = 262144; + mmc->max_req_size = 262144; + /* 100kHz ~ 52MHz */ + mmc->f_min = 100000; + mmc->f_max = 52000000; + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_4_BIT_DATA; + mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO); + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | + MMC_VDD_165_195; + + ret = mmc_of_parse(mmc); + if (ret) + goto err_free_host; + + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + owl_host->dma = dma_request_chan(&pdev->dev, "mmc"); + if (IS_ERR(owl_host->dma)) { + dev_err(owl_host->dev, "Failed to get external DMA channel.\n"); + ret = PTR_ERR(owl_host->dma); + goto err_free_host; + } + + dev_info(&pdev->dev, "Using %s for DMA transfers\n", + dma_chan_name(owl_host->dma)); + + owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT; + owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT; + owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + owl_host->dma_cfg.device_fc = false; + + owl_host->irq = 
platform_get_irq(pdev, 0); + if (owl_host->irq < 0) { + ret = owl_host->irq; + goto err_release_channel; + } + + ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler, + 0, dev_name(&pdev->dev), owl_host); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq %d\n", + owl_host->irq); + goto err_release_channel; + } + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "Failed to add host\n"); + goto err_release_channel; + } + + dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n"); + + return 0; + +err_release_channel: + dma_release_channel(owl_host->dma); +err_free_host: + mmc_free_host(mmc); + + return ret; +} + +static int owl_mmc_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct owl_mmc_host *owl_host = mmc_priv(mmc); + + mmc_remove_host(mmc); + disable_irq(owl_host->irq); + dma_release_channel(owl_host->dma); + mmc_free_host(mmc); + + return 0; +} + +static const struct of_device_id owl_mmc_of_match[] = { + {.compatible = "actions,owl-mmc",}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, owl_mmc_of_match); + +static struct platform_driver owl_mmc_driver = { + .driver = { + .name = "owl_mmc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = owl_mmc_of_match, + }, + .probe = owl_mmc_probe, + .remove = owl_mmc_remove, +}; +module_platform_driver(owl_mmc_driver); + +MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver"); +MODULE_AUTHOR("Actions Semi"); +MODULE_AUTHOR("Manivannan Sadhasivam "); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c new file mode 100644 index 000000000..e25e9bb34 --- /dev/null +++ b/drivers/mmc/host/pxamci.c @@ -0,0 +1,829 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/mmc/host/pxa.c - PXA MMCI driver + * + * Copyright (C) 2003 Russell King, All Rights Reserved. + * + * This hardware is really sick: + * - No way to clear interrupts. + * - Have to turn off the clock whenever we touch the device. + * - Doesn't tell you how many data blocks were transferred. + * Yuck! + * + * 1 and 3 byte data transfers not supported + * max block length up to 1023 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "pxamci.h" + +#define DRIVER_NAME "pxa2xx-mci" + +#define NR_SG 1 +#define CLKRT_OFF (~0) + +#define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \ + || cpu_is_pxa935()) + +struct pxamci_host { + struct mmc_host *mmc; + spinlock_t lock; + struct resource *res; + void __iomem *base; + struct clk *clk; + unsigned long clkrate; + unsigned int clkrt; + unsigned int cmdat; + unsigned int imask; + unsigned int power_mode; + unsigned long detect_delay_ms; + bool use_ro_gpio; + struct gpio_desc *power; + struct pxamci_platform_data *pdata; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + struct dma_chan *dma_chan_rx; + struct dma_chan *dma_chan_tx; + dma_cookie_t dma_cookie; + unsigned int dma_len; + unsigned int dma_dir; +}; + +static int pxamci_init_ocr(struct pxamci_host *host) +{ + struct mmc_host *mmc = host->mmc; + int ret; + + ret = mmc_regulator_get_supply(mmc); + if (ret < 0) + return ret; + + if (IS_ERR(mmc->supply.vmmc)) { + /* fall-back to platform data */ + mmc->ocr_avail = host->pdata ? 
+ host->pdata->ocr_mask : + MMC_VDD_32_33 | MMC_VDD_33_34; + } + + return 0; +} + +static inline int pxamci_set_power(struct pxamci_host *host, + unsigned char power_mode, + unsigned int vdd) +{ + struct mmc_host *mmc = host->mmc; + struct regulator *supply = mmc->supply.vmmc; + + if (!IS_ERR(supply)) + return mmc_regulator_set_ocr(mmc, supply, vdd); + + if (host->power) { + bool on = !!((1 << vdd) & host->pdata->ocr_mask); + gpiod_set_value(host->power, on); + } + + if (host->pdata && host->pdata->setpower) + return host->pdata->setpower(mmc_dev(host->mmc), vdd); + + return 0; +} + +static void pxamci_stop_clock(struct pxamci_host *host) +{ + if (readl(host->base + MMC_STAT) & STAT_CLK_EN) { + unsigned long timeout = 10000; + unsigned int v; + + writel(STOP_CLOCK, host->base + MMC_STRPCL); + + do { + v = readl(host->base + MMC_STAT); + if (!(v & STAT_CLK_EN)) + break; + udelay(1); + } while (timeout--); + + if (v & STAT_CLK_EN) + dev_err(mmc_dev(host->mmc), "unable to stop clock\n"); + } +} + +static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->imask &= ~mask; + writel(host->imask, host->base + MMC_I_MASK); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->imask |= mask; + writel(host->imask, host->base + MMC_I_MASK); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void pxamci_dma_irq(void *param); + +static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) +{ + struct dma_async_tx_descriptor *tx; + enum dma_transfer_direction direction; + struct dma_slave_config config; + struct dma_chan *chan; + unsigned int nob = data->blocks; + unsigned long long clks; + unsigned int timeout; + int ret; + + host->data = data; + + writel(nob, host->base + MMC_NOB); + writel(data->blksz, host->base + MMC_BLKLEN); + + clks = (unsigned long long)data->timeout_ns * host->clkrate; + do_div(clks, 1000000000UL); + timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt); + writel((timeout + 255) / 256, host->base + MMC_RDTO); + + memset(&config, 0, sizeof(config)); + config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + config.src_addr = host->res->start + MMC_RXFIFO; + config.dst_addr = host->res->start + MMC_TXFIFO; + config.src_maxburst = 32; + config.dst_maxburst = 32; + + if (data->flags & MMC_DATA_READ) { + host->dma_dir = DMA_FROM_DEVICE; + direction = DMA_DEV_TO_MEM; + chan = host->dma_chan_rx; + } else { + host->dma_dir = DMA_TO_DEVICE; + direction = DMA_MEM_TO_DEV; + chan = host->dma_chan_tx; + } + + config.direction = direction; + + ret = dmaengine_slave_config(chan, &config); + if (ret < 0) { + dev_err(mmc_dev(host->mmc), "dma slave config failed\n"); + return; + } + + host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, + host->dma_dir); + + tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction, + DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + return; + } + + if (!(data->flags & MMC_DATA_READ)) { + tx->callback = pxamci_dma_irq; + tx->callback_param = host; + } + + host->dma_cookie = dmaengine_submit(tx); + + /* + * workaround for erratum #91: + * only start DMA now if we are doing a read, + * otherwise we wait until CMD/RESP has finished + * before starting DMA. 
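+ * For a write on PXA27x the pending descriptor is instead issued + * from pxamci_cmd_done() once END_CMD_RES has arrived.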
+ */ + if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ) + dma_async_issue_pending(chan); +} + +static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat) +{ + WARN_ON(host->cmd != NULL); + host->cmd = cmd; + + if (cmd->flags & MMC_RSP_BUSY) + cmdat |= CMDAT_BUSY; + +#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) + switch (RSP_TYPE(mmc_resp_type(cmd))) { + case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */ + cmdat |= CMDAT_RESP_SHORT; + break; + case RSP_TYPE(MMC_RSP_R3): + cmdat |= CMDAT_RESP_R3; + break; + case RSP_TYPE(MMC_RSP_R2): + cmdat |= CMDAT_RESP_R2; + break; + default: + break; + } + + writel(cmd->opcode, host->base + MMC_CMD); + writel(cmd->arg >> 16, host->base + MMC_ARGH); + writel(cmd->arg & 0xffff, host->base + MMC_ARGL); + writel(cmdat, host->base + MMC_CMDAT); + writel(host->clkrt, host->base + MMC_CLKRT); + + writel(START_CLOCK, host->base + MMC_STRPCL); + + pxamci_enable_irq(host, END_CMD_RES); +} + +static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) +{ + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + mmc_request_done(host->mmc, mrq); +} + +static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat) +{ + struct mmc_command *cmd = host->cmd; + int i; + u32 v; + + if (!cmd) + return 0; + + host->cmd = NULL; + + /* + * Did I mention this is Sick. We always need to + * discard the upper 8 bits of the first 16-bit word. + */ + v = readl(host->base + MMC_RES) & 0xffff; + for (i = 0; i < 4; i++) { + u32 w1 = readl(host->base + MMC_RES) & 0xffff; + u32 w2 = readl(host->base + MMC_RES) & 0xffff; + cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8; + v = w2; + } + + if (stat & STAT_TIME_OUT_RESPONSE) { + cmd->error = -ETIMEDOUT; + } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) { + /* + * workaround for erratum #42: + * Intel PXA27x Family Processor Specification Update Rev 001 + * A bogus CRC error can appear if the msb of a 136 bit + * response is a one. + */ + if (cpu_is_pxa27x() && + (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000)) + pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode); + else + cmd->error = -EILSEQ; + } + + pxamci_disable_irq(host, END_CMD_RES); + if (host->data && !cmd->error) { + pxamci_enable_irq(host, DATA_TRAN_DONE); + /* + * workaround for erratum #91, if doing write + * enable DMA late + */ + if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE) + dma_async_issue_pending(host->dma_chan_tx); + } else { + pxamci_finish_request(host, host->mrq); + } + + return 1; +} + +static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) +{ + struct mmc_data *data = host->data; + struct dma_chan *chan; + + if (!data) + return 0; + + if (data->flags & MMC_DATA_READ) + chan = host->dma_chan_rx; + else + chan = host->dma_chan_tx; + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, host->dma_dir); + + if (stat & STAT_READ_TIME_OUT) + data->error = -ETIMEDOUT; + else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR)) + data->error = -EILSEQ; + + /* + * There appears to be a hardware design bug here. There seems to + * be no way to find out how much data was transferred to the card. + * This means that if there was an error on any block, we mark all + * data blocks as being in error. 
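+ * (e.g. a CRC failure on the last of eight blocks still reports + * bytes_xfered = 0 rather than 7 * blksz)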
+ */ + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + pxamci_disable_irq(host, DATA_TRAN_DONE); + + host->data = NULL; + if (host->mrq->stop) { + pxamci_stop_clock(host); + pxamci_start_cmd(host, host->mrq->stop, host->cmdat); + } else { + pxamci_finish_request(host, host->mrq); + } + + return 1; +} + +static irqreturn_t pxamci_irq(int irq, void *devid) +{ + struct pxamci_host *host = devid; + unsigned int ireg; + int handled = 0; + + ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK); + + if (ireg) { + unsigned stat = readl(host->base + MMC_STAT); + + pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat); + + if (ireg & END_CMD_RES) + handled |= pxamci_cmd_done(host, stat); + if (ireg & DATA_TRAN_DONE) + handled |= pxamci_data_done(host, stat); + if (ireg & SDIO_INT) { + mmc_signal_sdio_irq(host->mmc); + handled = 1; + } + } + + return IRQ_RETVAL(handled); +} + +static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct pxamci_host *host = mmc_priv(mmc); + unsigned int cmdat; + + WARN_ON(host->mrq != NULL); + + host->mrq = mrq; + + pxamci_stop_clock(host); + + cmdat = host->cmdat; + host->cmdat &= ~CMDAT_INIT; + + if (mrq->data) { + pxamci_setup_data(host, mrq->data); + + cmdat &= ~CMDAT_BUSY; + cmdat |= CMDAT_DATAEN | CMDAT_DMAEN; + if (mrq->data->flags & MMC_DATA_WRITE) + cmdat |= CMDAT_WRITE; + } + + pxamci_start_cmd(host, mrq->cmd, cmdat); +} + +static int pxamci_get_ro(struct mmc_host *mmc) +{ + struct pxamci_host *host = mmc_priv(mmc); + + if (host->use_ro_gpio) + return mmc_gpio_get_ro(mmc); + if (host->pdata && host->pdata->get_ro) + return !!host->pdata->get_ro(mmc_dev(mmc)); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; +} + +static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct pxamci_host *host = mmc_priv(mmc); + + if (ios->clock) { + unsigned long rate = host->clkrate; + unsigned int clk = rate / ios->clock; + + if (host->clkrt == CLKRT_OFF) + clk_prepare_enable(host->clk); + + if (ios->clock == 26000000) { + /* to support 26MHz */ + host->clkrt = 7; + } else { + /* to handle (19.5MHz, 26MHz) */ + if (!clk) + clk = 1; + + /* + * clk might result in a lower divisor than we + * desire. check for that condition and adjust + * as appropriate. + */ + if (rate / clk > ios->clock) + clk <<= 1; + host->clkrt = fls(clk) - 1; + } + + /* + * we write clkrt on the next command + */ + } else { + pxamci_stop_clock(host); + if (host->clkrt != CLKRT_OFF) { + host->clkrt = CLKRT_OFF; + clk_disable_unprepare(host->clk); + } + } + + if (host->power_mode != ios->power_mode) { + int ret; + + host->power_mode = ios->power_mode; + + ret = pxamci_set_power(host, ios->power_mode, ios->vdd); + if (ret) { + dev_err(mmc_dev(mmc), "unable to set power\n"); + /* + * The .set_ios() function in the mmc_host_ops + * struct returns void, and failing to set the + * power should be rare so we print an error and + * return here.
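+ * Note that host->power_mode was already updated above, so the + * failed setting is not retried when the same mode is requested again.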
+ */ + return; + } + + if (ios->power_mode == MMC_POWER_ON) + host->cmdat |= CMDAT_INIT; + } + + if (ios->bus_width == MMC_BUS_WIDTH_4) + host->cmdat |= CMDAT_SD_4DAT; + else + host->cmdat &= ~CMDAT_SD_4DAT; + + dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n", + host->clkrt, host->cmdat); +} + +static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) +{ + struct pxamci_host *pxa_host = mmc_priv(host); + + if (enable) + pxamci_enable_irq(pxa_host, SDIO_INT); + else + pxamci_disable_irq(pxa_host, SDIO_INT); +} + +static const struct mmc_host_ops pxamci_ops = { + .request = pxamci_request, + .get_cd = mmc_gpio_get_cd, + .get_ro = pxamci_get_ro, + .set_ios = pxamci_set_ios, + .enable_sdio_irq = pxamci_enable_sdio_irq, +}; + +static void pxamci_dma_irq(void *param) +{ + struct pxamci_host *host = param; + struct dma_tx_state state; + enum dma_status status; + struct dma_chan *chan; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (!host->data) + goto out_unlock; + + if (host->data->flags & MMC_DATA_READ) + chan = host->dma_chan_rx; + else + chan = host->dma_chan_tx; + + status = dmaengine_tx_status(chan, host->dma_cookie, &state); + + if (likely(status == DMA_COMPLETE)) { + writel(BUF_PART_FULL, host->base + MMC_PRTBUF); + } else { + pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc), + host->data->flags & MMC_DATA_READ ? "rx" : "tx"); + host->data->error = -EIO; + pxamci_data_done(host, 0); + } + +out_unlock: + spin_unlock_irqrestore(&host->lock, flags); +} + +static irqreturn_t pxamci_detect_irq(int irq, void *devid) +{ + struct pxamci_host *host = mmc_priv(devid); + + mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms)); + return IRQ_HANDLED; +} + +#ifdef CONFIG_OF +static const struct of_device_id pxa_mmc_dt_ids[] = { + { .compatible = "marvell,pxa-mmc" }, + { } +}; + +MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids); + +static int pxamci_of_init(struct platform_device *pdev, + struct mmc_host *mmc) +{ + struct device_node *np = pdev->dev.of_node; + struct pxamci_host *host = mmc_priv(mmc); + u32 tmp; + int ret; + + if (!np) + return 0; + + /* pxa-mmc specific */ + if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0) + host->detect_delay_ms = tmp; + + ret = mmc_of_parse(mmc); + if (ret < 0) + return ret; + + return 0; +} +#else +static int pxamci_of_init(struct platform_device *pdev, + struct mmc_host *mmc) +{ + return 0; +} +#endif + +static int pxamci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct pxamci_host *host = NULL; + struct device *dev = &pdev->dev; + struct resource *r; + int ret, irq; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev); + if (!mmc) { + ret = -ENOMEM; + goto out; + } + + mmc->ops = &pxamci_ops; + + /* + * We can do SG-DMA, but we don't because we never know how much + * data we successfully wrote to the card. + */ + mmc->max_segs = NR_SG; + + /* + * Our hardware DMA can handle a maximum of one page per SG entry. + */ + mmc->max_seg_size = PAGE_SIZE; + + /* + * Block length register is only 10 bits before PXA27x. + */ + mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048; + + /* + * Block count register is 16 bits. 
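+ * The block count register alone therefore allows up to + * 65535 * 2048 bytes, just under 128 MiB, per request on PXA27x+.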
+ */ + mmc->max_blk_count = 65535; + + ret = pxamci_of_init(pdev, mmc); + if (ret) + goto out; + + host = mmc_priv(mmc); + host->mmc = mmc; + host->pdata = pdev->dev.platform_data; + host->clkrt = CLKRT_OFF; + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + host->clk = NULL; + goto out; + } + + host->clkrate = clk_get_rate(host->clk); + + /* + * Calculate minimum clock rate, rounding up. + */ + mmc->f_min = (host->clkrate + 63) / 64; + mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate; + + ret = pxamci_init_ocr(host); + if (ret < 0) + goto out; + + mmc->caps = 0; + host->cmdat = 0; + if (!cpu_is_pxa25x()) { + mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + host->cmdat |= CMDAT_SDIO_INT_EN; + if (mmc_has_26MHz()) + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SD_HIGHSPEED; + } + + spin_lock_init(&host->lock); + host->res = r; + host->imask = MMC_I_MASK_ALL; + + host->base = devm_ioremap_resource(dev, r); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto out; + } + + /* + * Ensure that the host controller is shut down, and setup + * with our defaults. + */ + pxamci_stop_clock(host); + writel(0, host->base + MMC_SPI); + writel(64, host->base + MMC_RESTO); + writel(host->imask, host->base + MMC_I_MASK); + + ret = devm_request_irq(dev, irq, pxamci_irq, 0, + DRIVER_NAME, host); + if (ret) + goto out; + + platform_set_drvdata(pdev, mmc); + + host->dma_chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->dma_chan_rx)) { + dev_err(dev, "unable to request rx dma channel\n"); + ret = PTR_ERR(host->dma_chan_rx); + host->dma_chan_rx = NULL; + goto out; + } + + host->dma_chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->dma_chan_tx)) { + dev_err(dev, "unable to request tx dma channel\n"); + ret = PTR_ERR(host->dma_chan_tx); + host->dma_chan_tx = NULL; + goto out; + } + + if (host->pdata) { + host->detect_delay_ms = host->pdata->detect_delay_ms; + + host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); + if (IS_ERR(host->power)) { + ret = PTR_ERR(host->power); + dev_err(dev, "Failed requesting gpio_power\n"); + goto out; + } + + /* FIXME: should we pass detection delay to debounce? 
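+ * (presumably host->detect_delay_ms * 1000, since the debounce + * argument of mmc_gpiod_request_cd() is given in microseconds)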
*/ + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); + if (ret && ret != -ENOENT) { + dev_err(dev, "Failed requesting gpio_cd\n"); + goto out; + } + + if (!host->pdata->gpio_card_ro_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); + if (ret && ret != -ENOENT) { + dev_err(dev, "Failed requesting gpio_ro\n"); + goto out; + } + if (!ret) + host->use_ro_gpio = true; + + if (host->pdata->init) + host->pdata->init(dev, pxamci_detect_irq, mmc); + + if (host->power && host->pdata->setpower) + dev_warn(dev, "gpio_power and setpower() both defined\n"); + if (host->use_ro_gpio && host->pdata->get_ro) + dev_warn(dev, "gpio_ro and get_ro() both defined\n"); + } + + ret = mmc_add_host(mmc); + if (ret) { + if (host->pdata && host->pdata->exit) + host->pdata->exit(dev, mmc); + goto out; + } + + return 0; + +out: + if (host) { + if (host->dma_chan_rx) + dma_release_channel(host->dma_chan_rx); + if (host->dma_chan_tx) + dma_release_channel(host->dma_chan_tx); + } + if (mmc) + mmc_free_host(mmc); + return ret; +} + +static int pxamci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + + if (mmc) { + struct pxamci_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + if (host->pdata && host->pdata->exit) + host->pdata->exit(&pdev->dev, mmc); + + pxamci_stop_clock(host); + writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| + END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, + host->base + MMC_I_MASK); + + dmaengine_terminate_all(host->dma_chan_rx); + dmaengine_terminate_all(host->dma_chan_tx); + dma_release_channel(host->dma_chan_rx); + dma_release_channel(host->dma_chan_tx); + + mmc_free_host(mmc); + } + + return 0; +} + +static struct platform_driver pxamci_driver = { + .probe = pxamci_probe, + .remove = pxamci_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(pxa_mmc_dt_ids), + }, +}; + +module_platform_driver(pxamci_driver); + +MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pxa2xx-mci"); diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h new file mode 100644 index 000000000..d301ca18c --- /dev/null +++ b/drivers/mmc/host/pxamci.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#define MMC_STRPCL 0x0000 +#define STOP_CLOCK (1 << 0) +#define START_CLOCK (2 << 0) + +#define MMC_STAT 0x0004 +#define STAT_END_CMD_RES (1 << 13) +#define STAT_PRG_DONE (1 << 12) +#define STAT_DATA_TRAN_DONE (1 << 11) +#define STAT_CLK_EN (1 << 8) +#define STAT_RECV_FIFO_FULL (1 << 7) +#define STAT_XMIT_FIFO_EMPTY (1 << 6) +#define STAT_RES_CRC_ERR (1 << 5) +#define STAT_SPI_READ_ERROR_TOKEN (1 << 4) +#define STAT_CRC_READ_ERROR (1 << 3) +#define STAT_CRC_WRITE_ERROR (1 << 2) +#define STAT_TIME_OUT_RESPONSE (1 << 1) +#define STAT_READ_TIME_OUT (1 << 0) + +#define MMC_CLKRT 0x0008 /* 3 bit */ + +#define MMC_SPI 0x000c +#define SPI_CS_ADDRESS (1 << 3) +#define SPI_CS_EN (1 << 2) +#define CRC_ON (1 << 1) +#define SPI_EN (1 << 0) + +#define MMC_CMDAT 0x0010 +#define CMDAT_SDIO_INT_EN (1 << 11) +#define CMDAT_SD_4DAT (1 << 8) +#define CMDAT_DMAEN (1 << 7) +#define CMDAT_INIT (1 << 6) +#define CMDAT_BUSY (1 << 5) +#define CMDAT_STREAM (1 << 4) /* 1 = stream */ +#define CMDAT_WRITE (1 << 3) /* 1 = write */ +#define CMDAT_DATAEN (1 << 2) +#define CMDAT_RESP_NONE (0 << 0) +#define CMDAT_RESP_SHORT (1 << 0) +#define CMDAT_RESP_R2 (2 << 0) +#define CMDAT_RESP_R3 (3 << 0) + +#define MMC_RESTO 
0x0014 /* 7 bit */ + +#define MMC_RDTO 0x0018 /* 16 bit */ + +#define MMC_BLKLEN 0x001c /* 10 bit */ + +#define MMC_NOB 0x0020 /* 16 bit */ + +#define MMC_PRTBUF 0x0024 +#define BUF_PART_FULL (1 << 0) + +#define MMC_I_MASK 0x0028 + +/*PXA27x MMC interrupts*/ +#define SDIO_SUSPEND_ACK (1 << 12) +#define SDIO_INT (1 << 11) +#define RD_STALLED (1 << 10) +#define RES_ERR (1 << 9) +#define DAT_ERR (1 << 8) +#define TINT (1 << 7) + +/*PXA2xx MMC interrupts*/ +#define TXFIFO_WR_REQ (1 << 6) +#define RXFIFO_RD_REQ (1 << 5) +#define CLK_IS_OFF (1 << 4) +#define STOP_CMD (1 << 3) +#define END_CMD_RES (1 << 2) +#define PRG_DONE (1 << 1) +#define DATA_TRAN_DONE (1 << 0) + +#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) +#define MMC_I_MASK_ALL 0x00001fff +#else +#define MMC_I_MASK_ALL 0x0000007f +#endif + +#define MMC_I_REG 0x002c +/* same as MMC_I_MASK */ + +#define MMC_CMD 0x0030 + +#define MMC_ARGH 0x0034 /* 16 bit */ + +#define MMC_ARGL 0x0038 /* 16 bit */ + +#define MMC_RES 0x003c /* 16 bit */ + +#define MMC_RXFIFO 0x0040 /* 8 bit */ + +#define MMC_TXFIFO 0x0044 /* 8 bit */ diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h new file mode 100644 index 000000000..cb962c788 --- /dev/null +++ b/drivers/mmc/host/renesas_sdhi.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Renesas Mobile SDHI + * + * Copyright (C) 2017 Horms Solutions Ltd., Simon Horman + * Copyright (C) 2017-19 Renesas Electronics Corporation + */ + +#ifndef RENESAS_SDHI_H +#define RENESAS_SDHI_H + +#include +#include "tmio_mmc.h" + +struct renesas_sdhi_scc { + unsigned long clk_rate; /* clock rate for SDR104 */ + u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */ + u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */ +}; + +struct renesas_sdhi_of_data { + unsigned long tmio_flags; + u32 tmio_ocr_mask; + unsigned long capabilities; + unsigned long capabilities2; + enum dma_slave_buswidth dma_buswidth; + dma_addr_t dma_rx_offset; + unsigned int bus_shift; + int scc_offset; + struct renesas_sdhi_scc *taps; + int taps_num; + unsigned int max_blk_count; + unsigned short max_segs; +}; + +#define SDHI_CALIB_TABLE_MAX 32 + +struct renesas_sdhi_quirks { + bool hs400_disabled; + bool hs400_4taps; + u32 hs400_bad_taps; + const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX]; +}; + +struct tmio_mmc_dma { + enum dma_slave_buswidth dma_buswidth; + bool (*filter)(struct dma_chan *chan, void *arg); + void (*enable)(struct tmio_mmc_host *host, bool enable); + struct completion dma_dataend; + struct tasklet_struct dma_complete; +}; + +struct renesas_sdhi { + struct clk *clk; + struct clk *clk_cd; + struct tmio_mmc_data mmc_data; + struct tmio_mmc_dma dma_priv; + const struct renesas_sdhi_quirks *quirks; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default, *pins_uhs; + void __iomem *scc_ctl; + u32 scc_tappos; + u32 scc_tappos_hs400; + const u8 *adjust_hs400_calib_table; + bool needs_adjust_hs400; + + /* Tuning values: 1 for success, 0 for failure */ + DECLARE_BITMAP(taps, BITS_PER_LONG); + /* Sampling data comparison: 1 for match, 0 for mismatch */ + DECLARE_BITMAP(smpcmp, BITS_PER_LONG); + unsigned int tap_num; + unsigned int tap_set; +}; + +#define host_to_priv(host) \ + container_of((host)->pdata, struct renesas_sdhi, mmc_data) + +int renesas_sdhi_probe(struct platform_device *pdev, + const struct tmio_mmc_dma_ops *dma_ops); +int renesas_sdhi_remove(struct platform_device *pdev); +#endif diff --git a/drivers/mmc/host/renesas_sdhi_core.c 
b/drivers/mmc/host/renesas_sdhi_core.c new file mode 100644 index 000000000..24e524a1b --- /dev/null +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -0,0 +1,1165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas SDHI + * + * Copyright (C) 2015-19 Renesas Electronics Corporation + * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang + * Copyright (C) 2016-17 Horms Solutions, Simon Horman + * Copyright (C) 2009 Magnus Damm + * + * Based on "Compaq ASIC3 support": + * + * Copyright 2001 Compaq Computer Corporation. + * Copyright 2004-2005 Phil Blundell + * Copyright 2007-2008 OpenedHand Ltd. + * + * Authors: Phil Blundell , + * Samuel Ortiz + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "renesas_sdhi.h" +#include "tmio_mmc.h" + +#define HOST_MODE 0xe4 + +#define SDHI_VER_GEN2_SDR50 0x490c +#define SDHI_VER_RZ_A1 0x820b +/* very old datasheets said 0x490c for SDR104, too. They are wrong! */ +#define SDHI_VER_GEN2_SDR104 0xcb0d +#define SDHI_VER_GEN3_SD 0xcc10 +#define SDHI_VER_GEN3_SDMMC 0xcd10 + +#define SDHI_GEN3_MMC0_ADDR 0xee140000 + +static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) +{ + u32 val; + + /* + * see also + * renesas_sdhi_of_data :: dma_buswidth + */ + switch (sd_ctrl_read16(host, CTL_VERSION)) { + case SDHI_VER_GEN2_SDR50: + val = (width == 32) ? 0x0001 : 0x0000; + break; + case SDHI_VER_GEN2_SDR104: + val = (width == 32) ? 0x0000 : 0x0001; + break; + case SDHI_VER_GEN3_SD: + case SDHI_VER_GEN3_SDMMC: + if (width == 64) + val = 0x0000; + else if (width == 32) + val = 0x0101; + else + val = 0x0001; + break; + default: + /* nothing to do */ + return; + } + + sd_ctrl_write16(host, HOST_MODE, val); +} + +static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct renesas_sdhi *priv = host_to_priv(host); + int ret; + + ret = clk_prepare_enable(priv->clk_cd); + if (ret < 0) + return ret; + + /* + * The clock driver may not know what maximum frequency + * actually works, so it should be set with the max-frequency + * property which will already have been read to f_max. If it + * was missing, assume the current frequency is the maximum. + */ + if (!mmc->f_max) + mmc->f_max = clk_get_rate(priv->clk); + + /* + * Minimum frequency is the minimum input clock frequency + * divided by our maximum divider. + */ + mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L); + + /* enable 16bit data access on SDBUF as default */ + renesas_sdhi_sdbuf_width(host, 16); + + return 0; +} + +static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + struct renesas_sdhi *priv = host_to_priv(host); + unsigned int freq, diff, best_freq = 0, diff_min = ~0; + int i; + + /* + * We simply return the current rate if a) we are not on a R-Car Gen2+ + * SoC (may work for others, but untested) or b) if the SCC needs its + * clock during tuning, so we don't change the external clock setup. + */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2) || mmc_doing_tune(host->mmc)) + return clk_get_rate(priv->clk); + + /* + * We want the bus clock to be as close as possible to, but no + * greater than, new_clock. As we can divide by 1 << i for + * any i in [0, 9] we want the input clock to be as close as + * possible, but no greater than, new_clock << i. 
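+ * + * E.g. a request for 400 kHz with a parent clock that can run at + * 12.8 MHz is best served by i = 5: 400 kHz << 5 = 12.8 MHz at the + * input, divided back down to exactly 400 kHz on the bus.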
+ */ + for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) { + freq = clk_round_rate(priv->clk, new_clock << i); + if (freq > (new_clock << i)) { + /* Too fast; look for a slightly slower option */ + freq = clk_round_rate(priv->clk, + (new_clock << i) / 4 * 3); + if (freq > (new_clock << i)) + continue; + } + + diff = new_clock - (freq >> i); + if (diff <= diff_min) { + best_freq = freq; + diff_min = diff; + } + } + + clk_set_rate(priv->clk, best_freq); + + return clk_get_rate(priv->clk); +} + +static void renesas_sdhi_set_clock(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + u32 clk = 0, clock; + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + if (new_clock == 0) { + host->mmc->actual_clock = 0; + goto out; + } + + host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock); + clock = host->mmc->actual_clock / 512; + + for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) + clock <<= 1; + + /* 1/1 clock is an option */ + if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1)) { + if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400)) + clk |= 0xff; + else + clk &= ~0xff; + } + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + +out: + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); +} + +static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + clk_disable_unprepare(priv->clk_cd); +} + +static int renesas_sdhi_card_busy(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & + TMIO_STAT_DAT0); +} + +static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct renesas_sdhi *priv = host_to_priv(host); + struct pinctrl_state *pin_state; + int ret; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + pin_state = priv->pins_default; + break; + case MMC_SIGNAL_VOLTAGE_180: + pin_state = priv->pins_uhs; + break; + default: + return -EINVAL; + } + + /* + * If anything is missing, assume signal voltage is fixed at + * 3.3V and succeed/fail accordingly. + */ + if (IS_ERR(priv->pinctrl) || IS_ERR(pin_state)) + return ios->signal_voltage == + MMC_SIGNAL_VOLTAGE_330 ?
0 : -EINVAL; + + ret = mmc_regulator_set_vqmmc(host->mmc, ios); + if (ret < 0) + return ret; + + return pinctrl_select_state(priv->pinctrl, pin_state); +} + +/* SCC registers */ +#define SH_MOBILE_SDHI_SCC_DTCNTL 0x000 +#define SH_MOBILE_SDHI_SCC_TAPSET 0x002 +#define SH_MOBILE_SDHI_SCC_DT2FF 0x004 +#define SH_MOBILE_SDHI_SCC_CKSEL 0x006 +#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008 +#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A +#define SH_MOBILE_SDHI_SCC_SMPCMP 0x00C +#define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E +#define SH_MOBILE_SDHI_SCC_TMPPORT3 0x014 +#define SH_MOBILE_SDHI_SCC_TMPPORT4 0x016 +#define SH_MOBILE_SDHI_SCC_TMPPORT5 0x018 +#define SH_MOBILE_SDHI_SCC_TMPPORT6 0x01A +#define SH_MOBILE_SDHI_SCC_TMPPORT7 0x01C + +#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0) +#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16 +#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff + +#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0) + +#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0) + +#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN BIT(0) +#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP BIT(1) +#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2) + +#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN BIT(8) +#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP BIT(24) +#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR (BIT(8) | BIT(24)) + +#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4) +#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31) + +/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT4 register */ +#define SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START BIT(0) + +/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT5 register */ +#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_R BIT(8) +#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_W (0 << 8) +#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK 0x3F + +/* Definitions for values the SH_MOBILE_SDHI_SCC register */ +#define SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE 0xa5000000 +#define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK 0x1f +#define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE BIT(7) + +static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = { + { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15, + 16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 }, + { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11, + 12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 } +}; + +static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = { + { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 }, + { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 } +}; + +static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10, + 11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 } +}; + +static inline u32 sd_scc_read32(struct tmio_mmc_host *host, + struct renesas_sdhi *priv, int addr) +{ + return readl(priv->scc_ctl + (addr << host->bus_shift)); +} + +static inline void sd_scc_write32(struct tmio_mmc_host *host, + struct renesas_sdhi *priv, + int addr, u32 val) +{ + writel(val, priv->scc_ctl + (addr << host->bus_shift)); +} + +static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv; + + priv = host_to_priv(host); + + /* Initialize SCC */ + sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, 0x0); + + 
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + /* set sampling clock selection range */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, + SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN | + 0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL, + SH_MOBILE_SDHI_SCC_CKSEL_DTSEL | + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, + ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + /* Read TAPNUM */ + return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >> + SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT) & + SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK; +} + +static void renesas_sdhi_hs400_complete(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct renesas_sdhi *priv = host_to_priv(host); + u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + /* Set HS400 mode */ + sd_ctrl_write16(host, CTL_SDIF_MODE, 0x0001 | + sd_ctrl_read16(host, CTL_SDIF_MODE)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, + priv->scc_tappos_hs400); + + /* Gen3 can't do automatic tap correction with HS400, so disable it */ + if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC) + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, + ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, + (SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | + SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, + SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN | + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_DTCNTL)); + + /* Avoid bad TAP */ + if (bad_taps & BIT(priv->tap_set)) { + u32 new_tap = (priv->tap_set + 1) % priv->tap_num; + + if (bad_taps & BIT(new_tap)) + new_tap = (priv->tap_set - 1) % priv->tap_num; + + if (bad_taps & BIT(new_tap)) { + new_tap = priv->tap_set; + dev_dbg(&host->pdev->dev, "Can't handle three bad taps in a row\n"); + } + + priv->tap_set = new_tap; + } + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, + priv->tap_set / (use_4tap ?
2 : 1)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL, + SH_MOBILE_SDHI_SCC_CKSEL_DTSEL | + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL)); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + if (priv->adjust_hs400_calib_table) + priv->needs_adjust_hs400 = true; +} + +static void renesas_sdhi_reset_scc(struct tmio_mmc_host *host, + struct renesas_sdhi *priv) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL, + ~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL & + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_CKSEL)); +} + +static void renesas_sdhi_disable_scc(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct renesas_sdhi *priv = host_to_priv(host); + + renesas_sdhi_reset_scc(host, priv); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, + ~SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN & + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_DTCNTL)); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); +} + +static u32 sd_scc_tmpport_read32(struct tmio_mmc_host *host, + struct renesas_sdhi *priv, u32 addr) +{ + /* read mode */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT5, + SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_R | + (SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK & addr)); + + /* access start and stop */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, + SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, 0); + + return sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT7); +} + +static void sd_scc_tmpport_write32(struct tmio_mmc_host *host, + struct renesas_sdhi *priv, u32 addr, u32 val) +{ + /* write mode */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT5, + SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_W | + (SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK & addr)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT6, val); + + /* access start and stop */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, + SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, 0); +} + +static void renesas_sdhi_adjust_hs400_mode_enable(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + u32 calib_code; + + /* disable write protect */ + sd_scc_tmpport_write32(host, priv, 0x00, + SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE); + /* read calibration code and adjust */ + calib_code = sd_scc_tmpport_read32(host, priv, 0x26); + calib_code &= SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK; + + sd_scc_tmpport_write32(host, priv, 0x22, + SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE | + priv->adjust_hs400_calib_table[calib_code]); + + /* set offset value to TMPPORT3, hardcoded to OFFSET0 (= 0x3) for now */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT3, 0x3); + + /* adjustment done, clear flag */ + priv->needs_adjust_hs400 = false; +} + +static void renesas_sdhi_adjust_hs400_mode_disable(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + /* disable write protect */ + sd_scc_tmpport_write32(host, priv, 0x00, + SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE); + /* disable manual calibration */ + sd_scc_tmpport_write32(host, priv, 0x22, 0); + /* clear offset value of TMPPORT3 */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT3, 0); +} + +static void 
renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host, + struct renesas_sdhi *priv) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + /* Reset HS400 mode */ + sd_ctrl_write16(host, CTL_SDIF_MODE, ~0x0001 & + sd_ctrl_read16(host, CTL_SDIF_MODE)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, + ~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | + SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) & + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); + + if (priv->quirks && (priv->quirks->hs400_calib_table || priv->quirks->hs400_bad_taps)) + renesas_sdhi_adjust_hs400_mode_disable(host); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); +} + +static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + renesas_sdhi_reset_hs400_mode(host, host_to_priv(host)); + return 0; +} + +static void renesas_sdhi_reset(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + if (priv->scc_ctl) { + renesas_sdhi_reset_scc(host, priv); + renesas_sdhi_reset_hs400_mode(host, priv); + priv->needs_adjust_hs400 = false; + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, + ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN & + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); + } + + if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) + sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, + TMIO_MASK_ALL_RCAR2); +} + +#define SH_MOBILE_SDHI_MIN_TAP_ROW 3 + +static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + unsigned int tap_start = 0, tap_end = 0, tap_cnt = 0, rs, re, i; + unsigned int taps_size = priv->tap_num * 2, min_tap_row; + unsigned long *bitmap; + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0); + + /* + * When tuning CMD19 is issued twice for each tap, merge the + * result requiring the tap to be good in both runs before + * considering it for tuning selection. + */ + for (i = 0; i < taps_size; i++) { + int offset = priv->tap_num * (i < priv->tap_num ? 1 : -1); + + if (!test_bit(i, priv->taps)) + clear_bit(i + offset, priv->taps); + + if (!test_bit(i, priv->smpcmp)) + clear_bit(i + offset, priv->smpcmp); + } + + /* + * If all TAP are OK, the sampling clock position is selected by + * identifying the change point of data. + */ + if (bitmap_full(priv->taps, taps_size)) { + bitmap = priv->smpcmp; + min_tap_row = 1; + } else { + bitmap = priv->taps; + min_tap_row = SH_MOBILE_SDHI_MIN_TAP_ROW; + } + + /* + * Find the longest consecutive run of successful probes. If that + * is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the + * center index as the tap, otherwise bail out. 
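+ * + * E.g. with tap_num = 8 (so taps_size = 16) and bits 5..11 set, the + * longest region is rs = 5, re = 12, giving + * tap_set = ((5 + 12) / 2) % 8 = 0.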
+ */ + bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) { + if (re - rs > tap_cnt) { + tap_end = re; + tap_start = rs; + tap_cnt = tap_end - tap_start; + } + } + + if (tap_cnt >= min_tap_row) + priv->tap_set = (tap_start + tap_end) / 2 % priv->tap_num; + else + return -EIO; + + /* Set SCC */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, priv->tap_set); + + /* Enable auto re-tuning */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL, + SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN | + sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); + + return 0; +} + +static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct renesas_sdhi *priv = host_to_priv(host); + int i, ret; + + priv->tap_num = renesas_sdhi_init_tuning(host); + if (!priv->tap_num) + return 0; /* Tuning is not supported */ + + if (priv->tap_num * 2 >= sizeof(priv->taps) * BITS_PER_BYTE) { + dev_err(&host->pdev->dev, + "Too many taps, please update 'taps' in tmio_mmc_host!\n"); + return -EINVAL; + } + + bitmap_zero(priv->taps, priv->tap_num * 2); + bitmap_zero(priv->smpcmp, priv->tap_num * 2); + + /* Issue CMD19 twice for each tap */ + for (i = 0; i < 2 * priv->tap_num; i++) { + int cmd_error = 0; + + /* Set sampling clock position */ + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num); + + if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0) + set_bit(i, priv->taps); + + if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0) + set_bit(i, priv->smpcmp); + + if (cmd_error) + mmc_abort_tuning(mmc, opcode); + } + + ret = renesas_sdhi_select_tuning(host); + if (ret < 0) + renesas_sdhi_reset(host); + return ret; +} + +static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap) +{ + struct renesas_sdhi *priv = host_to_priv(host); + unsigned int new_tap = priv->tap_set, error_tap = priv->tap_set; + u32 val; + + val = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ); + if (!val) + return false; + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0); + + /* Change TAP position according to correction status */ + if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC && + host->mmc->ios.timing == MMC_TIMING_MMC_HS400) { + u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0; + /* + * With HS400, the DAT signal is based on DS, not CLK. + * Therefore, use only CMD status. + */ + u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) & + SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR; + if (!smpcmp) { + return false; /* no error in CMD signal */ + } else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP) { + new_tap++; + error_tap--; + } else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN) { + new_tap--; + error_tap++; + } else { + return true; /* need retune */ + } + + /* + * When new_tap is a bad tap, we cannot change. Then, we compare + * with the HS200 tuning result. When smpcmp[error_tap] is OK, + * we can at least retune. + */ + if (bad_taps & BIT(new_tap % priv->tap_num)) + return test_bit(error_tap % priv->tap_num, priv->smpcmp); + } else { + if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) + return true; /* need retune */ + else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP) + new_tap++; + else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN) + new_tap--; + else + return false; + } + + priv->tap_set = (new_tap % priv->tap_num); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, + priv->tap_set / (use_4tap ? 
2 : 1)); + + return false; +} + +static bool renesas_sdhi_auto_correction(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + /* Check SCC error */ + if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) & + SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) { + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0); + return true; + } + + return false; +} + +static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; + + /* + * Skip checking SCC errors when running on 4 taps in HS400 mode as + * any retuning would still result in the same 4 taps being used. + */ + if (!(host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) && + !(host->mmc->ios.timing == MMC_TIMING_MMC_HS200) && + !(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap)) + return false; + + if (mmc_doing_tune(host->mmc)) + return false; + + if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) & + SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN) + return renesas_sdhi_auto_correction(host); + + return renesas_sdhi_manual_correction(host, use_4tap); +} + +static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit) +{ + int timeout = 1000; + /* CBSY is set when busy, SCLKDIVEN is cleared when busy */ + u32 wait_state = (bit == TMIO_STAT_CMD_BUSY ? TMIO_STAT_CMD_BUSY : 0); + + while (--timeout && (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) + & bit) == wait_state) + udelay(1); + + if (!timeout) { + dev_warn(&host->pdev->dev, "timeout waiting for SD bus idle\n"); + return -EBUSY; + } + + return 0; +} + +static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) +{ + u32 bit = TMIO_STAT_SCLKDIVEN; + + switch (addr) { + case CTL_SD_CMD: + case CTL_STOP_INTERNAL_ACTION: + case CTL_XFER_BLK_COUNT: + case CTL_SD_XFER_LEN: + case CTL_SD_MEM_CARD_OPT: + case CTL_TRANSACTION_CTL: + case CTL_DMA_ENABLE: + case HOST_MODE: + if (host->pdata->flags & TMIO_MMC_HAVE_CBSY) + bit = TMIO_STAT_CMD_BUSY; + fallthrough; + case CTL_SD_CARD_CLK_CTL: + return renesas_sdhi_wait_idle(host, bit); + } + + return 0; +} + +static int renesas_sdhi_multi_io_quirk(struct mmc_card *card, + unsigned int direction, int blk_size) +{ + /* + * In Renesas controllers, when performing a + * multiple block read of one or two blocks, + * depending on the timing with which the + * response register is read, the response + * value may not be read properly. + * Use single block read for this HW bug + */ + if ((direction == MMC_DATA_READ) && + blk_size == 2) + return 1; + + return blk_size; +} + +static void renesas_sdhi_fixup_request(struct tmio_mmc_host *host, struct mmc_request *mrq) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + if (priv->needs_adjust_hs400 && mrq->cmd->opcode == MMC_SEND_STATUS) + renesas_sdhi_adjust_hs400_mode_enable(host); +} +static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable) +{ + /* Iff regs are 8 byte apart, sdbuf is 64 bit. Otherwise always 32. */ + int width = (host->bus_shift == 2) ? 64 : 32; + + sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0); + renesas_sdhi_sdbuf_width(host, enable ? 
width : 16); +} + +static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = { + .hs400_disabled = true, + .hs400_4taps = true, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_4tap = { + .hs400_4taps = true, + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { + .hs400_disabled = true, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = { + .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7), +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = { + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = { + .hs400_4taps = true, + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), + .hs400_calib_table = r8a7796_es13_calib_table, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = { + .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7), + .hs400_calib_table = r8a77965_calib_table, +}; + +static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = { + .hs400_calib_table = r8a77990_calib_table, +}; + +/* + * Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now. + * So, we want to treat them equally, and we only have a match for ES1.2 so + * that this can be enforced if there is ever a way to distinguish ES1.2. + */ +static const struct soc_device_attribute sdhi_quirks_match[] = { + { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, + { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, + { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, + { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 }, + { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, + { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 }, + { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 }, + { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 }, + { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 }, + { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 }, + { /* Sentinel. */ }, +}; + +int renesas_sdhi_probe(struct platform_device *pdev, + const struct tmio_mmc_dma_ops *dma_ops) +{ + struct tmio_mmc_data *mmd = pdev->dev.platform_data; + const struct renesas_sdhi_quirks *quirks = NULL; + const struct renesas_sdhi_of_data *of_data; + const struct soc_device_attribute *attr; + struct tmio_mmc_data *mmc_data; + struct tmio_mmc_dma *dma_priv; + struct tmio_mmc_host *host; + struct renesas_sdhi *priv; + int num_irqs, irq, ret, i; + struct resource *res; + u16 ver; + + of_data = of_device_get_match_data(&pdev->dev); + + attr = soc_device_match(sdhi_quirks_match); + if (attr) + quirks = attr->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct renesas_sdhi), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->quirks = quirks; + mmc_data = &priv->mmc_data; + dma_priv = &priv->dma_priv; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + dev_err(&pdev->dev, "cannot get clock: %d\n", ret); + return ret; + } + + /* + * Some controllers provide a 2nd clock just to run the internal card + * detection logic. Unfortunately, the existing driver architecture does + * not support a separation of clocks for runtime PM usage.
When + * native hotplug is used, the tmio driver assumes that the core + * must continue to run for card detect to stay active, so we cannot + * disable it. + * Additionally, it is prohibited to supply a clock to the core but not + * to the card detect circuit. That leaves us with if separate clocks + * are presented, we must treat them both as virtually 1 clock. + */ + priv->clk_cd = devm_clk_get(&pdev->dev, "cd"); + if (IS_ERR(priv->clk_cd)) + priv->clk_cd = NULL; + + priv->pinctrl = devm_pinctrl_get(&pdev->dev); + if (!IS_ERR(priv->pinctrl)) { + priv->pins_default = pinctrl_lookup_state(priv->pinctrl, + PINCTRL_STATE_DEFAULT); + priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl, + "state_uhs"); + } + + host = tmio_mmc_host_alloc(pdev, mmc_data); + if (IS_ERR(host)) + return PTR_ERR(host); + + if (of_data) { + mmc_data->flags |= of_data->tmio_flags; + mmc_data->ocr_mask = of_data->tmio_ocr_mask; + mmc_data->capabilities |= of_data->capabilities; + mmc_data->capabilities2 |= of_data->capabilities2; + mmc_data->dma_rx_offset = of_data->dma_rx_offset; + mmc_data->max_blk_count = of_data->max_blk_count; + mmc_data->max_segs = of_data->max_segs; + dma_priv->dma_buswidth = of_data->dma_buswidth; + host->bus_shift = of_data->bus_shift; + } + + host->write16_hook = renesas_sdhi_write16_hook; + host->clk_enable = renesas_sdhi_clk_enable; + host->clk_disable = renesas_sdhi_clk_disable; + host->set_clock = renesas_sdhi_set_clock; + host->multi_io_quirk = renesas_sdhi_multi_io_quirk; + host->dma_ops = dma_ops; + + if (quirks && quirks->hs400_disabled) + host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); + + /* For some SoC, we disable internal WP. GPIO may override this */ + if (mmc_can_gpio_ro(host->mmc)) + mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT; + + /* SDR speeds are only available on Gen2+ */ + if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) { + /* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */ + host->ops.card_busy = renesas_sdhi_card_busy; + host->ops.start_signal_voltage_switch = + renesas_sdhi_start_signal_voltage_switch; + host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27; + host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2; + host->reset = renesas_sdhi_reset; + } else { + host->sdcard_irq_mask_all = TMIO_MASK_ALL; + } + + /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */ + if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */ + host->bus_shift = 1; + + if (mmd) + *mmc_data = *mmd; + + dma_priv->filter = shdma_chan_filter; + dma_priv->enable = renesas_sdhi_enable_dma; + + mmc_data->alignment_shift = 1; /* 2-byte alignment */ + mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED; + + /* + * All SDHI blocks support 2-byte and larger block sizes in 4-bit + * bus width mode. + */ + mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES; + + /* + * All SDHI blocks support SDIO IRQ signalling. 
+ */ + mmc_data->flags |= TMIO_MMC_SDIO_IRQ; + + /* All SDHI have CMD12 control bit */ + mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL; + + /* All SDHI have SDIO status bits which must be 1 */ + mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS; + + dev_pm_domain_start(&pdev->dev); + + ret = renesas_sdhi_clk_enable(host); + if (ret) + goto efree; + + ver = sd_ctrl_read16(host, CTL_VERSION); + /* GEN2_SDR104 is first known SDHI to use 32bit block count */ + if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX) + mmc_data->max_blk_count = U16_MAX; + + /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ + if (ver == SDHI_VER_GEN2_SDR50) + mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; + + if (ver == SDHI_VER_GEN3_SDMMC && quirks && quirks->hs400_calib_table) { + host->fixup_request = renesas_sdhi_fixup_request; + priv->adjust_hs400_calib_table = *( + res->start == SDHI_GEN3_MMC0_ADDR ? + quirks->hs400_calib_table : + quirks->hs400_calib_table + 1); + } + + /* Enable tuning iff we have an SCC and a supported mode */ + if (of_data && of_data->scc_offset && + (host->mmc->caps & MMC_CAP_UHS_SDR104 || + host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | + MMC_CAP2_HS400_1_8V))) { + const struct renesas_sdhi_scc *taps = of_data->taps; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; + bool hit = false; + + for (i = 0; i < of_data->taps_num; i++) { + if (taps[i].clk_rate == 0 || + taps[i].clk_rate == host->mmc->f_max) { + priv->scc_tappos = taps->tap; + priv->scc_tappos_hs400 = use_4tap ? + taps->tap_hs400_4tap : + taps->tap; + hit = true; + break; + } + } + + if (!hit) + dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); + + priv->scc_ctl = host->ctl + of_data->scc_offset; + host->check_retune = renesas_sdhi_check_scc_error; + host->ops.execute_tuning = renesas_sdhi_execute_tuning; + host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; + host->ops.hs400_downgrade = renesas_sdhi_disable_scc; + host->ops.hs400_complete = renesas_sdhi_hs400_complete; + } + + sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all); + + num_irqs = platform_irq_count(pdev); + if (num_irqs < 0) { + ret = num_irqs; + goto eirq; + } + + /* There must be at least one IRQ source */ + if (!num_irqs) { + ret = -ENXIO; + goto eirq; + } + + for (i = 0; i < num_irqs; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; + goto eirq; + } + + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, + dev_name(&pdev->dev), host); + if (ret) + goto eirq; + } + + ret = tmio_mmc_host_probe(host); + if (ret < 0) + goto edisclk; + + dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n", + mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000); + + return ret; + +eirq: + tmio_mmc_host_remove(host); +edisclk: + renesas_sdhi_clk_disable(host); +efree: + tmio_mmc_host_free(host); + + return ret; +} +EXPORT_SYMBOL_GPL(renesas_sdhi_probe); + +int renesas_sdhi_remove(struct platform_device *pdev) +{ + struct tmio_mmc_host *host = platform_get_drvdata(pdev); + + tmio_mmc_host_remove(host); + renesas_sdhi_clk_disable(host); + tmio_mmc_host_free(host); + + return 0; +} +EXPORT_SYMBOL_GPL(renesas_sdhi_remove); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c new file mode 100644 index 000000000..f3e76d6b3 --- /dev/null +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA support for Internal DMAC 
with SDHI SD/SDIO controller + * + * Copyright (C) 2016-19 Renesas Electronics Corporation + * Copyright (C) 2016-17 Horms Solutions, Simon Horman + * Copyright (C) 2018-19 Sang Engineering, Wolfram Sang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "renesas_sdhi.h" +#include "tmio_mmc.h" + +#define DM_CM_DTRAN_MODE 0x820 +#define DM_CM_DTRAN_CTRL 0x828 +#define DM_CM_RST 0x830 +#define DM_CM_INFO1 0x840 +#define DM_CM_INFO1_MASK 0x848 +#define DM_CM_INFO2 0x850 +#define DM_CM_INFO2_MASK 0x858 +#define DM_DTRAN_ADDR 0x880 + +/* DM_CM_DTRAN_MODE */ +#define DTRAN_MODE_CH_NUM_CH0 0 /* "downstream" = for write commands */ +#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "upstream" = for read commands */ +#define DTRAN_MODE_BUS_WIDTH (BIT(5) | BIT(4)) +#define DTRAN_MODE_ADDR_MODE BIT(0) /* 1 = Increment address, 0 = Fixed */ + +/* DM_CM_DTRAN_CTRL */ +#define DTRAN_CTRL_DM_START BIT(0) + +/* DM_CM_RST */ +#define RST_DTRANRST1 BIT(9) +#define RST_DTRANRST0 BIT(8) +#define RST_RESERVED_BITS GENMASK_ULL(31, 0) + +/* DM_CM_INFO1 and DM_CM_INFO1_MASK */ +#define INFO1_CLEAR 0 +#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) +#define INFO1_DTRANEND1 BIT(17) +#define INFO1_DTRANEND0 BIT(16) + +/* DM_CM_INFO2 and DM_CM_INFO2_MASK */ +#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) +#define INFO2_DTRANERR1 BIT(17) +#define INFO2_DTRANERR0 BIT(16) + +/* + * Specification of this driver: + * - host->chan_{rx,tx} will be used as flags for enabling/disabling the DMA + * - the DM_* registers above are wider than the normal 16-bit SDHI + * registers (they are written as 64-bit quantities via writeq()), so + * we need a custom accessor + */ + +static unsigned long global_flags; +/* + * Workaround to avoid using the RX DMAC from multiple channels. + * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use the + * RX DMAC simultaneously, sometimes hundreds of bytes of data are not + * stored into system memory even if the DMAC interrupt happened. So, + * this driver uses only one RX DMAC channel.
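 + * The SDHI_INTERNAL_DMAC_RX_IN_USE bit in global_flags below serves as + * the lock for this: start_dma() falls back to PIO when the bit is + * already set, and the bit is cleared again when the RX transfer + * completes or the DMAC is aborted.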
+ */ +#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0 +#define SDHI_INTERNAL_DMAC_RX_IN_USE 1 + +/* RZ/A2 does not have the ADDR_MODE bit */ +#define SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY 2 + +/* Definitions for sampling clocks */ +static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { + { + .clk_rate = 0, + .tap = 0x00000300, + .tap_hs400_4tap = 0x00000100, + }, +}; + +static const struct renesas_sdhi_of_data of_rza2_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | + TMIO_MMC_HAVE_CBSY, + .tmio_ocr_mask = MMC_VDD_32_33, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | + MMC_CAP_CMD23, + .bus_shift = 2, + .scc_offset = 0 - 0x1000, + .taps = rcar_gen3_scc_taps, + .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps), + /* DMAC can handle a 32-bit blk count but only 1 segment */ + .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE, + .max_segs = 1, +}; + +static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | + TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | + MMC_CAP_CMD23, + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE, + .bus_shift = 2, + .scc_offset = 0x1000, + .taps = rcar_gen3_scc_taps, + .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps), + /* DMAC can handle a 32-bit blk count but only 1 segment */ + .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE, + .max_segs = 1, +}; + +static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { + { .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, }, + { .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, + {}, +}; +MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match); + +static void +renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host, + int addr, u64 val) +{ + writeq(val, host->ctl + addr); +} + +static void +renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + if (!host->chan_tx || !host->chan_rx) + return; + + if (!enable) + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1, + INFO1_CLEAR); + + if (priv->dma_priv.enable) + priv->dma_priv.enable(host, enable); +} + +static void +renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) +{ + u64 val = RST_DTRANRST1 | RST_DTRANRST0; + + renesas_sdhi_internal_dmac_enable_dma(host, false); + + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, + RST_RESERVED_BITS & ~val); + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, + RST_RESERVED_BITS | val); + + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + + renesas_sdhi_internal_dmac_enable_dma(host, true); +} + +static void +renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + tasklet_schedule(&priv->dma_priv.dma_complete); +} + +static void +renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct scatterlist *sg = host->sg_ptr; + u32 dtran_mode = DTRAN_MODE_BUS_WIDTH; + + if (!test_bit(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY, &global_flags)) + dtran_mode |= DTRAN_MODE_ADDR_MODE; + + if (!dma_map_sg(&host->pdev->dev, sg, host->sg_len, + mmc_get_dma_dir(data))) + 
goto force_pio; + + /* This DMAC cannot handle buffers that are not 128-byte aligned */ + if (!IS_ALIGNED(sg_dma_address(sg), 128)) + goto force_pio_with_unmap; + + if (data->flags & MMC_DATA_READ) { + dtran_mode |= DTRAN_MODE_CH_NUM_CH1; + if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && + test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) + goto force_pio_with_unmap; + } else { + dtran_mode |= DTRAN_MODE_CH_NUM_CH0; + } + + renesas_sdhi_internal_dmac_enable_dma(host, true); + + /* set dma parameters */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_MODE, + dtran_mode); + renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR, + sg_dma_address(sg)); + + host->dma_on = true; + + return; + +force_pio_with_unmap: + dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data)); + +force_pio: + renesas_sdhi_internal_dmac_enable_dma(host, false); +} + +static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + + /* start the DMAC */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_CTRL, + DTRAN_CTRL_DM_START); +} + +static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host) +{ + enum dma_data_direction dir; + + if (!host->dma_on) + return false; + + if (!host->data) + return false; + + if (host->data->flags & MMC_DATA_READ) + dir = DMA_FROM_DEVICE; + else + dir = DMA_TO_DEVICE; + + renesas_sdhi_internal_dmac_enable_dma(host, false); + dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); + + if (dir == DMA_FROM_DEVICE) + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + + host->dma_on = false; + + return true; +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + spin_lock_irq(&host->lock); + if (!renesas_sdhi_internal_dmac_complete(host)) + goto out; + + tmio_mmc_do_data_irq(host); +out: + spin_unlock_irq(&host->lock); +} + +static void renesas_sdhi_internal_dmac_end_dma(struct tmio_mmc_host *host) +{ + if (host->data) + renesas_sdhi_internal_dmac_complete(host); +} + +static void +renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + /* Disable DMAC interrupts, we don't use them */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, + INFO1_MASK_CLEAR); + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, + INFO2_MASK_CLEAR); + + /* Set each channel pointer to a non-zero value to mark DMA as "enabled" */ + host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; + + tasklet_init(&priv->dma_priv.dma_complete, + renesas_sdhi_internal_dmac_complete_tasklet_fn, + (unsigned long)host); + tasklet_init(&host->dma_issue, + renesas_sdhi_internal_dmac_issue_tasklet_fn, + (unsigned long)host); +} + +static void +renesas_sdhi_internal_dmac_release_dma(struct tmio_mmc_host *host) +{ + /* Set each channel pointer back to NULL to mark DMA as "disabled" */ + host->chan_rx = host->chan_tx = NULL; +} + +static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { + .start = renesas_sdhi_internal_dmac_start_dma, + .enable = renesas_sdhi_internal_dmac_enable_dma, + .request = renesas_sdhi_internal_dmac_request_dma, + .release = renesas_sdhi_internal_dmac_release_dma, + .abort = renesas_sdhi_internal_dmac_abort_dma, + .dataend = renesas_sdhi_internal_dmac_dataend_dma, + .end = 
renesas_sdhi_internal_dmac_end_dma, +}; + +/* + * List of specific SoC (ES) versions that need one of the DMAC quirk flags + * defined above; other versions use this DMAC implementation unmodified. + */ +static const struct soc_device_attribute soc_dma_quirks[] = { + { .soc_id = "r7s9210", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) }, + { .soc_id = "r8a7795", .revision = "ES1.*", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, + { .soc_id = "r8a7796", .revision = "ES1.0", + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, + { /* sentinel */ } +}; + +static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) +{ + const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks); + struct device *dev = &pdev->dev; + + if (soc) + global_flags |= (unsigned long)soc->data; + + /* value is the max of SD_SECCNT. Confirmed by HW engineers */ + dma_set_max_seg_size(dev, 0xffffffff); + + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); +} + +static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, + tmio_mmc_host_runtime_resume, + NULL) +}; + +static struct platform_driver renesas_internal_dmac_sdhi_driver = { + .driver = { + .name = "renesas_sdhi_internal_dmac", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &renesas_sdhi_internal_dmac_dev_pm_ops, + .of_match_table = renesas_sdhi_internal_dmac_of_match, + }, + .probe = renesas_sdhi_internal_dmac_probe, + .remove = renesas_sdhi_remove, +}; + +module_platform_driver(renesas_internal_dmac_sdhi_driver); + +MODULE_DESCRIPTION("Renesas SDHI driver for internal DMAC"); +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c new file mode 100644 index 000000000..c5f789675 --- /dev/null +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA support using SYS DMAC with the SDHI SD/SDIO controller + * + * Copyright (C) 2016-19 Renesas Electronics Corporation + * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang + * Copyright (C) 2017 Horms Solutions, Simon Horman + * Copyright (C) 2010-2011 Guennadi Liakhovetski + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "renesas_sdhi.h" +#include "tmio_mmc.h" + +#define TMIO_MMC_MIN_DMA_LEN 8 + +static const struct renesas_sdhi_of_data of_default_cfg = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, +}; + +static const struct renesas_sdhi_of_data of_rz_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT | + TMIO_MMC_HAVE_CBSY, + .tmio_ocr_mask = MMC_VDD_32_33, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, +}; + +static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, +}; + +/* Definitions for sampling clocks */ +static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = { + { + .clk_rate = 156000000, + .tap = 0x00000703, + }, + { + .clk_rate = 0, + .tap = 0x00000300, + }, +}; + +static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | + TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2, + .capabilities = 
MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | + MMC_CAP_CMD23, + .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, + .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dma_rx_offset = 0x2000, + .scc_offset = 0x0300, + .taps = rcar_gen2_scc_taps, + .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), + .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE, +}; + +static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { + { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, }, + { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, }, + { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, }, + { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, }, + { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-shmobile" }, + {}, +}; +MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); + +static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host, + bool enable) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + if (!host->chan_tx || !host->chan_rx) + return; + + if (priv->dma_priv.enable) + priv->dma_priv.enable(host, enable); +} + +static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) +{ + renesas_sdhi_sys_dmac_enable_dma(host, false); + + if (host->chan_rx) + dmaengine_terminate_all(host->chan_rx); + if (host->chan_tx) + dmaengine_terminate_all(host->chan_tx); + + renesas_sdhi_sys_dmac_enable_dma(host, true); +} + +static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + complete(&priv->dma_priv.dma_dataend); +} + +static void renesas_sdhi_sys_dmac_dma_callback(void *arg) +{ + struct tmio_mmc_host *host = arg; + struct renesas_sdhi *priv = host_to_priv(host); + + spin_lock_irq(&host->lock); + + if (!host->data) + goto out; + + if (host->data->flags & MMC_DATA_READ) + dma_unmap_sg(host->chan_rx->device->dev, + host->sg_ptr, host->sg_len, + DMA_FROM_DEVICE); + else + dma_unmap_sg(host->chan_tx->device->dev, + host->sg_ptr, host->sg_len, + DMA_TO_DEVICE); + + spin_unlock_irq(&host->lock); + + wait_for_completion(&priv->dma_priv.dma_dataend); + + spin_lock_irq(&host->lock); + tmio_mmc_do_data_irq(host); +out: + spin_unlock_irq(&host->lock); +} + +static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + struct scatterlist *sg = host->sg_ptr, *sg_tmp; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_rx; + dma_cookie_t cookie; + int ret, i; + bool aligned = true, multiple = true; + unsigned int align = (1 << host->pdata->alignment_shift) - 1; + + for_each_sg(sg, sg_tmp, host->sg_len, i) { + if (sg_tmp->offset & align) + 
aligned = false; + if (sg_tmp->length & align) { + multiple = false; + break; + } + } + + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || + (align & PAGE_MASK))) || !multiple) { + ret = -EINVAL; + goto pio; + } + + if (sg->length < TMIO_MMC_MIN_DMA_LEN) + return; + + /* The only sg element can be unaligned, use our bounce buffer then */ + if (!aligned) { + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); + host->sg_ptr = &host->bounce_sg; + sg = host->sg_ptr; + } + + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); + if (ret > 0) + desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM, + DMA_CTRL_ACK); + + if (desc) { + reinit_completion(&priv->dma_priv.dma_dataend); + desc->callback = renesas_sdhi_sys_dmac_dma_callback; + desc->callback_param = host; + + cookie = dmaengine_submit(desc); + if (cookie < 0) { + desc = NULL; + ret = cookie; + } + host->dma_on = true; + } +pio: + if (!desc) { + /* DMA failed, fall back to PIO */ + renesas_sdhi_sys_dmac_enable_dma(host, false); + if (ret >= 0) + ret = -EIO; + host->chan_rx = NULL; + dma_release_channel(chan); + /* Free the Tx channel too */ + chan = host->chan_tx; + if (chan) { + host->chan_tx = NULL; + dma_release_channel(chan); + } + dev_warn(&host->pdev->dev, + "DMA failed: %d, falling back to PIO\n", ret); + } +} + +static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) +{ + struct renesas_sdhi *priv = host_to_priv(host); + struct scatterlist *sg = host->sg_ptr, *sg_tmp; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_tx; + dma_cookie_t cookie; + int ret, i; + bool aligned = true, multiple = true; + unsigned int align = (1 << host->pdata->alignment_shift) - 1; + + for_each_sg(sg, sg_tmp, host->sg_len, i) { + if (sg_tmp->offset & align) + aligned = false; + if (sg_tmp->length & align) { + multiple = false; + break; + } + } + + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || + (align & PAGE_MASK))) || !multiple) { + ret = -EINVAL; + goto pio; + } + + if (sg->length < TMIO_MMC_MIN_DMA_LEN) + return; + + /* The only sg element can be unaligned, use our bounce buffer then */ + if (!aligned) { + unsigned long flags; + void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); + + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); + memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); + tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); + host->sg_ptr = &host->bounce_sg; + sg = host->sg_ptr; + } + + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); + if (ret > 0) + desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV, + DMA_CTRL_ACK); + + if (desc) { + reinit_completion(&priv->dma_priv.dma_dataend); + desc->callback = renesas_sdhi_sys_dmac_dma_callback; + desc->callback_param = host; + + cookie = dmaengine_submit(desc); + if (cookie < 0) { + desc = NULL; + ret = cookie; + } + host->dma_on = true; + } +pio: + if (!desc) { + /* DMA failed, fall back to PIO */ + renesas_sdhi_sys_dmac_enable_dma(host, false); + if (ret >= 0) + ret = -EIO; + host->chan_tx = NULL; + dma_release_channel(chan); + /* Free the Rx channel too */ + chan = host->chan_rx; + if (chan) { + host->chan_rx = NULL; + dma_release_channel(chan); + } + dev_warn(&host->pdev->dev, + "DMA failed: %d, falling back to PIO\n", ret); + } +} + +static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + if (data->flags & MMC_DATA_READ) { + if (host->chan_rx) + 
renesas_sdhi_sys_dmac_start_dma_rx(host); + } else { + if (host->chan_tx) + renesas_sdhi_sys_dmac_start_dma_tx(host); + } +} + +static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; + struct dma_chan *chan = NULL; + + spin_lock_irq(&host->lock); + + if (host->data) { + if (host->data->flags & MMC_DATA_READ) + chan = host->chan_rx; + else + chan = host->chan_tx; + } + + spin_unlock_irq(&host->lock); + + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + + if (chan) + dma_async_issue_pending(chan); +} + +static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + struct renesas_sdhi *priv = host_to_priv(host); + + /* We can only either use DMA for both Tx and Rx or not use it at all */ + if (!host->pdev->dev.of_node && + (!pdata->chan_priv_tx || !pdata->chan_priv_rx)) + return; + + if (!host->chan_tx && !host->chan_rx) { + struct resource *res = platform_get_resource(host->pdev, + IORESOURCE_MEM, 0); + struct dma_slave_config cfg = {}; + dma_cap_mask_t mask; + int ret; + + if (!res) + return; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->chan_tx = dma_request_slave_channel_compat(mask, + priv->dma_priv.filter, pdata->chan_priv_tx, + &host->pdev->dev, "tx"); + dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, + host->chan_tx); + + if (!host->chan_tx) + return; + + cfg.direction = DMA_MEM_TO_DEV; + cfg.dst_addr = res->start + + (CTL_SD_DATA_PORT << host->bus_shift); + cfg.dst_addr_width = priv->dma_priv.dma_buswidth; + if (!cfg.dst_addr_width) + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.src_addr = 0; + ret = dmaengine_slave_config(host->chan_tx, &cfg); + if (ret < 0) + goto ecfgtx; + + host->chan_rx = dma_request_slave_channel_compat(mask, + priv->dma_priv.filter, pdata->chan_priv_rx, + &host->pdev->dev, "rx"); + dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, + host->chan_rx); + + if (!host->chan_rx) + goto ereqrx; + + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; + cfg.src_addr_width = priv->dma_priv.dma_buswidth; + if (!cfg.src_addr_width) + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.dst_addr = 0; + ret = dmaengine_slave_config(host->chan_rx, &cfg); + if (ret < 0) + goto ecfgrx; + + host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); + if (!host->bounce_buf) + goto ebouncebuf; + + init_completion(&priv->dma_priv.dma_dataend); + tasklet_init(&host->dma_issue, + renesas_sdhi_sys_dmac_issue_tasklet_fn, + (unsigned long)host); + } + + renesas_sdhi_sys_dmac_enable_dma(host, true); + + return; + +ebouncebuf: +ecfgrx: + dma_release_channel(host->chan_rx); + host->chan_rx = NULL; +ereqrx: +ecfgtx: + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; +} + +static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host) +{ + if (host->chan_tx) { + struct dma_chan *chan = host->chan_tx; + + host->chan_tx = NULL; + dma_release_channel(chan); + } + if (host->chan_rx) { + struct dma_chan *chan = host->chan_rx; + + host->chan_rx = NULL; + dma_release_channel(chan); + } + if (host->bounce_buf) { + free_pages((unsigned long)host->bounce_buf, 0); + host->bounce_buf = NULL; + } +} + +static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { + .start = renesas_sdhi_sys_dmac_start_dma, + .enable = renesas_sdhi_sys_dmac_enable_dma, + .request = renesas_sdhi_sys_dmac_request_dma, + .release = 
renesas_sdhi_sys_dmac_release_dma, + .abort = renesas_sdhi_sys_dmac_abort_dma, + .dataend = renesas_sdhi_sys_dmac_dataend_dma, +}; + +static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) +{ + return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops); +} + +static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, + tmio_mmc_host_runtime_resume, + NULL) +}; + +static struct platform_driver renesas_sys_dmac_sdhi_driver = { + .driver = { + .name = "sh_mobile_sdhi", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &renesas_sdhi_sys_dmac_dev_pm_ops, + .of_match_table = renesas_sdhi_sys_dmac_of_match, + }, + .probe = renesas_sdhi_sys_dmac_probe, + .remove = renesas_sdhi_remove, +}; + +module_platform_driver(renesas_sys_dmac_sdhi_driver); + +MODULE_DESCRIPTION("Renesas SDHI driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sh_mobile_sdhi"); diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c new file mode 100644 index 000000000..b5cb83bb9 --- /dev/null +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -0,0 +1,1502 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Realtek PCI-Express SD/MMC Card Interface driver + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * Author: + * Wei WANG + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct realtek_pci_sdmmc { + struct platform_device *pdev; + struct rtsx_pcr *pcr; + struct mmc_host *mmc; + struct mmc_request *mrq; +#define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq" + + struct work_struct work; + struct mutex host_mutex; + + u8 ssc_depth; + unsigned int clock; + bool vpclk; + bool double_clk; + bool eject; + bool initial_mode; + int prev_power_state; + int sg_count; + s32 cookie; + int cookie_sg_count; + bool using_cookie; +}; + +static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) +{ + return &(host->pdev->dev); +} + +static inline void sd_clear_error(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, CARD_STOP, + SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR); +} + +#ifdef DEBUG +static void dump_reg_range(struct realtek_pci_sdmmc *host, u16 start, u16 end) +{ + u16 len = end - start + 1; + int i; + u8 data[8]; + + for (i = 0; i < len; i += 8) { + int j; + int n = min(8, len - i); + + memset(&data, 0, sizeof(data)); + for (j = 0; j < n; j++) + rtsx_pci_read_register(host->pcr, start + i + j, + data + j); + dev_dbg(sdmmc_dev(host), "0x%04X(%d): %8ph\n", + start + i, n, data); + } +} + +static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) +{ + dump_reg_range(host, 0xFDA0, 0xFDB3); + dump_reg_range(host, 0xFD52, 0xFD69); +} +#else +#define sd_print_debug_regs(host) +#endif /* DEBUG */ + +static inline int sd_get_cd_int(struct realtek_pci_sdmmc *host) +{ + return rtsx_pci_readl(host->pcr, RTSX_BIPR) & SD_EXIST; +} + +static void sd_cmd_set_sd_cmd(struct rtsx_pcr *pcr, struct mmc_command *cmd) +{ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF, + SD_CMD_START | cmd->opcode); + rtsx_pci_write_be32(pcr, SD_CMD1, cmd->arg); +} + +static void sd_cmd_set_data_len(struct rtsx_pcr *pcr, u16 blocks, u16 blksz) +{ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, blocks); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, blocks >> 8); + 
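/* the 16-bit byte count is likewise programmed as a low/high register pair */ + 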
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, blksz); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, blksz >> 8); +} + +static int sd_response_type(struct mmc_command *cmd) +{ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + return SD_RSP_TYPE_R0; + case MMC_RSP_R1: + return SD_RSP_TYPE_R1; + case MMC_RSP_R1_NO_CRC: + return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; + case MMC_RSP_R1B: + return SD_RSP_TYPE_R1b; + case MMC_RSP_R2: + return SD_RSP_TYPE_R2; + case MMC_RSP_R3: + return SD_RSP_TYPE_R3; + default: + return -EINVAL; + } +} + +static int sd_status_index(int resp_type) +{ + if (resp_type == SD_RSP_TYPE_R0) + return 0; + else if (resp_type == SD_RSP_TYPE_R2) + return 16; + + return 5; +} +/* + * sd_pre_dma_transfer - do dma_map_sg() or using cookie + * + * @pre: if called in pre_req() + * return: + * 0 - do dma_map_sg() + * 1 - using cookie + */ +static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host, + struct mmc_data *data, bool pre) +{ + struct rtsx_pcr *pcr = host->pcr; + int read = data->flags & MMC_DATA_READ; + int count = 0; + int using_cookie = 0; + + if (!pre && data->host_cookie && data->host_cookie != host->cookie) { + dev_err(sdmmc_dev(host), + "error: data->host_cookie = %d, host->cookie = %d\n", + data->host_cookie, host->cookie); + data->host_cookie = 0; + } + + if (pre || data->host_cookie != host->cookie) { + count = rtsx_pci_dma_map_sg(pcr, data->sg, data->sg_len, read); + } else { + count = host->cookie_sg_count; + using_cookie = 1; + } + + if (pre) { + host->cookie_sg_count = count; + if (++host->cookie < 0) + host->cookie = 1; + data->host_cookie = host->cookie; + } else { + host->sg_count = count; + } + + return using_cookie; +} + +static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (data->host_cookie) { + dev_err(sdmmc_dev(host), + "error: reset data->host_cookie = %d\n", + data->host_cookie); + data->host_cookie = 0; + } + + sd_pre_dma_transfer(host, data, true); + dev_dbg(sdmmc_dev(host), "pre dma sg: %d\n", host->cookie_sg_count); +} + +static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + struct mmc_data *data = mrq->data; + int read = data->flags & MMC_DATA_READ; + + rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read); + data->host_cookie = 0; +} + +static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, + struct mmc_command *cmd) +{ + struct rtsx_pcr *pcr = host->pcr; + u8 cmd_idx = (u8)cmd->opcode; + u32 arg = cmd->arg; + int err = 0; + int timeout = 100; + int i; + u8 *ptr; + int rsp_type; + int stat_idx; + bool clock_toggled = false; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd_idx, arg); + + rsp_type = sd_response_type(cmd); + if (rsp_type < 0) + goto out; + + stat_idx = sd_status_index(rsp_type); + + if (rsp_type == SD_RSP_TYPE_R1b) + timeout = cmd->busy_timeout ? 
cmd->busy_timeout : 3000; + + if (cmd->opcode == SD_SWITCH_VOLTAGE) { + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, + 0xFF, SD_CLK_TOGGLE_EN); + if (err < 0) + goto out; + + clock_toggled = true; + } + + rtsx_pci_init_cmd(pcr); + sd_cmd_set_sd_cmd(pcr, cmd); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, PINGPONG_BUFFER); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END | SD_STAT_IDLE, + SD_TRANSFER_END | SD_STAT_IDLE); + + if (rsp_type == SD_RSP_TYPE_R2) { + /* Read data from ping-pong buffer */ + for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); + } else if (rsp_type != SD_RSP_TYPE_R0) { + /* Read data from SD_CMDx registers */ + for (i = SD_CMD0; i <= SD_CMD4; i++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); + } + + rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); + + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + sd_clear_error(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd error (err = %d)\n", err); + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R0) { + err = 0; + goto out; + } + + /* Eliminate returned value of CHECK_REG_CMD */ + ptr = rtsx_pci_get_cmd_data(pcr) + 1; + + /* Check (Start,Transmission) bit of Response */ + if ((ptr[0] & 0xC0) != 0) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); + goto out; + } + + /* Check CRC7 */ + if (!(rsp_type & SD_NO_CHECK_CRC7)) { + if (ptr[stat_idx] & SD_CRC7_ERR) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "CRC7 error\n"); + goto out; + } + } + + if (rsp_type == SD_RSP_TYPE_R2) { + /* + * The controller offloads the last byte {CRC-7, end bit 1'b1} + * of response type R2. Assign dummy CRC, 0, and end bit to the + * byte(ptr[16], goes into the LSB of resp[3] later). 
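 + * A native R2 response is 136 bits long; the 16 bytes read back from + * the ping-pong buffer are repacked below into the four 32-bit + * cmd->resp[] words expected by the MMC core.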
+ */ + ptr[16] = 1; + + for (i = 0; i < 4; i++) { + cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); + dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", + i, cmd->resp[i]); + } + } else { + cmd->resp[0] = get_unaligned_be32(ptr + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + +out: + cmd->error = err; + + if (err && clock_toggled) + rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +} + +static int sd_read_data(struct realtek_pci_sdmmc *host, struct mmc_command *cmd, + u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + u8 trans_mode; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd->opcode, cmd->arg); + + if (!buf) + buf_len = 0; + + if (cmd->opcode == MMC_SEND_TUNING_BLOCK) + trans_mode = SD_TM_AUTO_TUNING; + else + trans_mode = SD_TM_NORMAL_READ; + + rtsx_pci_init_cmd(pcr); + sd_cmd_set_sd_cmd(pcr, cmd); + sd_cmd_set_data_len(pcr, 1, byte_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + if (trans_mode != SD_TM_AUTO_TUNING) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, trans_mode | SD_TRANSFER_START); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd fail (err = %d)\n", err); + return err; + } + + if (buf && buf_len) { + err = rtsx_pci_read_ppbuf(pcr, buf, buf_len); + if (err < 0) { + dev_dbg(sdmmc_dev(host), + "rtsx_pci_read_ppbuf fail (err = %d)\n", err); + return err; + } + } + + return 0; +} + +static int sd_write_data(struct realtek_pci_sdmmc *host, + struct mmc_command *cmd, u16 byte_cnt, u8 *buf, int buf_len, + int timeout) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd->opcode, cmd->arg); + + if (!buf) + buf_len = 0; + + sd_send_cmd_get_rsp(host, cmd); + if (cmd->error) + return cmd->error; + + if (buf && buf_len) { + err = rtsx_pci_write_ppbuf(pcr, buf, buf_len); + if (err < 0) { + dev_dbg(sdmmc_dev(host), + "rtsx_pci_write_ppbuf fail (err = %d)\n", err); + return err; + } + } + + rtsx_pci_init_cmd(pcr); + sd_cmd_set_data_len(pcr, 1, byte_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + SD_TRANSFER_START | SD_TM_AUTO_WRITE_3); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd fail (err = %d)\n", err); + return err; + } + + return 0; +} + +static int sd_read_long_data(struct realtek_pci_sdmmc *host, + struct mmc_request *mrq) +{ + struct rtsx_pcr *pcr = host->pcr; + struct mmc_host *mmc = host->mmc; + struct mmc_card *card = mmc->card; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + int uhs = mmc_card_uhs(card); + u8 cfg2 = 0; + int err; + int resp_type; + size_t data_len = data->blksz * data->blocks; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd->opcode, cmd->arg); + 
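+ /* + * The read below is fully offloaded: the command, the block geometry + * and the byte-wise DMA length (DMATC3..DMATC0) are queued up and + * kicked off with SD_TM_AUTO_READ_2, after which the driver only + * waits for the DMA transfer to complete. + */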
+ resp_type = sd_response_type(cmd); + if (resp_type < 0) + return resp_type; + + if (!uhs) + cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; + + rtsx_pci_init_cmd(pcr); + sd_cmd_set_sd_cmd(pcr, cmd); + sd_cmd_set_data_len(pcr, data->blocks, data->blksz); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, + DMA_DONE_INT, DMA_DONE_INT); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, + 0xFF, (u8)(data_len >> 24)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, + 0xFF, (u8)(data_len >> 16)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, + 0xFF, (u8)(data_len >> 8)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_FROM_CARD | DMA_EN | DMA_512); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, RING_BUFFER); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2 | resp_type); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + SD_TRANSFER_START | SD_TM_AUTO_READ_2); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + rtsx_pci_send_cmd_no_wait(pcr); + + err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 1, 10000); + if (err < 0) { + sd_print_debug_regs(host); + sd_clear_error(host); + return err; + } + + return 0; +} + +static int sd_write_long_data(struct realtek_pci_sdmmc *host, + struct mmc_request *mrq) +{ + struct rtsx_pcr *pcr = host->pcr; + struct mmc_host *mmc = host->mmc; + struct mmc_card *card = mmc->card; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + int uhs = mmc_card_uhs(card); + u8 cfg2; + int err; + size_t data_len = data->blksz * data->blocks; + + sd_send_cmd_get_rsp(host, cmd); + if (cmd->error) + return cmd->error; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd->opcode, cmd->arg); + + cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; + + if (!uhs) + cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; + + rtsx_pci_init_cmd(pcr); + sd_cmd_set_data_len(pcr, data->blocks, data->blksz); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, + DMA_DONE_INT, DMA_DONE_INT); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, + 0xFF, (u8)(data_len >> 24)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, + 0xFF, (u8)(data_len >> 16)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, + 0xFF, (u8)(data_len >> 8)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_TO_CARD | DMA_EN | DMA_512); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, RING_BUFFER); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + SD_TRANSFER_START | SD_TM_AUTO_WRITE_3); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + rtsx_pci_send_cmd_no_wait(pcr); + err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 0, 10000); + if (err < 0) { + sd_clear_error(host); + return err; + } + + return 0; +} + +static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + +static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) +{ + struct mmc_data *data = 
mrq->data; + int err; + + if (host->sg_count < 0) { + data->error = host->sg_count; + dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n", + __func__, host->sg_count); + return data->error; + } + + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); + + err = sd_read_long_data(host, mrq); + + if (host->initial_mode) + sd_enable_initial_mode(host); + + return err; + } + + return sd_write_long_data(host, mrq); +} + +static void sd_normal_rw(struct realtek_pci_sdmmc *host, + struct mmc_request *mrq) +{ + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + u8 *buf; + + buf = kzalloc(data->blksz, GFP_NOIO); + if (!buf) { + cmd->error = -ENOMEM; + return; + } + + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); + + cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf, + data->blksz, 200); + + if (host->initial_mode) + sd_enable_initial_mode(host); + + sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); + } else { + sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); + + cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf, + data->blksz, 200); + } + + kfree(buf); +} + +static int sd_change_phase(struct realtek_pci_sdmmc *host, + u8 sample_point, bool rx) +{ + struct rtsx_pcr *pcr = host->pcr; + u16 SD_VP_CTL = 0; + dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", + __func__, rx ? "RX" : "TX", sample_point); + + rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK); + if (rx) { + SD_VP_CTL = SD_VPRX_CTL; + rtsx_pci_write_register(pcr, SD_VPRX_CTL, + PHASE_SELECT_MASK, sample_point); + } else { + SD_VP_CTL = SD_VPTX_CTL; + rtsx_pci_write_register(pcr, SD_VPTX_CTL, + PHASE_SELECT_MASK, sample_point); + } + rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0); + rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, + PHASE_NOT_RESET); + rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + + return 0; +} + +static inline u32 test_phase_bit(u32 phase_map, unsigned int bit) +{ + bit %= RTSX_PHASE_MAX; + return phase_map & (1 << bit); +} + +static int sd_get_phase_len(u32 phase_map, unsigned int start_bit) +{ + int i; + + for (i = 0; i < RTSX_PHASE_MAX; i++) { + if (test_phase_bit(phase_map, start_bit + i) == 0) + return i; + } + return RTSX_PHASE_MAX; +} + +static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map) +{ + int start = 0, len = 0; + int start_final = 0, len_final = 0; + u8 final_phase = 0xFF; + + if (phase_map == 0) { + dev_err(sdmmc_dev(host), "phase error: [map:%x]\n", phase_map); + return final_phase; + } + + while (start < RTSX_PHASE_MAX) { + len = sd_get_phase_len(phase_map, start); + if (len_final < len) { + start_final = start; + len_final = len; + } + start += len ? 
len : 1; + } + + final_phase = (start_final + len_final / 2) % RTSX_PHASE_MAX; + dev_dbg(sdmmc_dev(host), "phase: [map:%x] [maxlen:%d] [final:%d]\n", + phase_map, len_final, final_phase); + + return final_phase; +} + +static void sd_wait_data_idle(struct realtek_pci_sdmmc *host) +{ + int i; + u8 val = 0; + + for (i = 0; i < 100; i++) { + rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val); + if (val & SD_DATA_IDLE) + return; + + udelay(100); + } +} + +static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, + u8 opcode, u8 sample_point) +{ + int err; + struct mmc_command cmd = {}; + struct rtsx_pcr *pcr = host->pcr; + + sd_change_phase(host, sample_point, true); + + rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, + SD_RSP_80CLK_TIMEOUT_EN); + + cmd.opcode = opcode; + err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); + if (err < 0) { + /* Wait till SD DATA IDLE */ + sd_wait_data_idle(host); + sd_clear_error(host); + rtsx_pci_write_register(pcr, SD_CFG3, + SD_RSP_80CLK_TIMEOUT_EN, 0); + return err; + } + + rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); + return 0; +} + +static int sd_tuning_phase(struct realtek_pci_sdmmc *host, + u8 opcode, u32 *phase_map) +{ + int err, i; + u32 raw_phase_map = 0; + + for (i = 0; i < RTSX_PHASE_MAX; i++) { + err = sd_tuning_rx_cmd(host, opcode, (u8)i); + if (err == 0) + raw_phase_map |= 1 << i; + } + + if (phase_map) + *phase_map = raw_phase_map; + + return 0; +} + +static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode) +{ + int err, i; + u32 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; + u8 final_phase; + + for (i = 0; i < RX_TUNING_CNT; i++) { + err = sd_tuning_phase(host, opcode, &(raw_phase_map[i])); + if (err < 0) + return err; + + if (raw_phase_map[i] == 0) + break; + } + + phase_map = 0xFFFFFFFF; + for (i = 0; i < RX_TUNING_CNT; i++) { + dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n", + i, raw_phase_map[i]); + phase_map &= raw_phase_map[i]; + } + dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map); + + if (phase_map) { + final_phase = sd_search_final_phase(host, phase_map); + if (final_phase == 0xFF) + return -EINVAL; + + err = sd_change_phase(host, final_phase, true); + if (err < 0) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +static inline int sdio_extblock_cmd(struct mmc_command *cmd, + struct mmc_data *data) +{ + return (cmd->opcode == SD_IO_RW_EXTENDED) && (data->blksz == 512); +} + +static inline int sd_rw_cmd(struct mmc_command *cmd) +{ + return mmc_op_multi(cmd->opcode) || + (cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (cmd->opcode == MMC_WRITE_BLOCK); +} + +static void sd_request(struct work_struct *work) +{ + struct realtek_pci_sdmmc *host = container_of(work, + struct realtek_pci_sdmmc, work); + struct rtsx_pcr *pcr = host->pcr; + + struct mmc_host *mmc = host->mmc; + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + + unsigned int data_size = 0; + int err; + + if (host->eject || !sd_get_cd_int(host)) { + cmd->error = -ENOMEDIUM; + goto finish; + } + + err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); + if (err) { + cmd->error = err; + goto finish; + } + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, + host->initial_mode, host->double_clk, host->vpclk); + rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, SD_MOD_SEL); + rtsx_pci_write_register(pcr, CARD_SHARE_MODE, + CARD_SHARE_MASK, 
CARD_SHARE_48_SD); + + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + + if (mrq->data) + data_size = data->blocks * data->blksz; + + if (!data_size) { + sd_send_cmd_get_rsp(host, cmd); + } else if (sd_rw_cmd(cmd) || sdio_extblock_cmd(cmd, data)) { + cmd->error = sd_rw_multi(host, mrq); + if (!host->using_cookie) + sdmmc_post_req(host->mmc, host->mrq, 0); + + if (mmc_op_multi(cmd->opcode) && mrq->stop) + sd_send_cmd_get_rsp(host, mrq->stop); + } else { + sd_normal_rw(host, mrq); + } + + if (mrq->data) { + if (cmd->error || data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blocks * data->blksz; + } + + mutex_unlock(&pcr->pcr_mutex); + +finish: + if (cmd->error) { + dev_dbg(sdmmc_dev(host), "CMD %d 0x%08x error(%d)\n", + cmd->opcode, cmd->arg, cmd->error); + } + + mutex_lock(&host->host_mutex); + host->mrq = NULL; + mutex_unlock(&host->host_mutex); + + mmc_request_done(mmc, mrq); +} + +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + + if (sd_rw_cmd(mrq->cmd) || sdio_extblock_cmd(mrq->cmd, data)) + host->using_cookie = sd_pre_dma_transfer(host, data, false); + + schedule_work(&host->work); +} + +static int sd_set_bus_width(struct realtek_pci_sdmmc *host, + unsigned char bus_width) +{ + int err = 0; + u8 width[] = { + [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, + [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, + [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, + }; + + if (bus_width <= MMC_BUS_WIDTH_8) + err = rtsx_pci_write_register(host->pcr, SD_CFG1, + 0x03, width[bus_width]); + + return err; +} + +static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + + if (host->prev_power_state == MMC_POWER_ON) + return 0; + + if (host->prev_power_state == MMC_POWER_UP) { + rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0); + goto finish; + } + + msleep(100); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, + CARD_SHARE_MASK, CARD_SHARE_48_SD); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, + SD_CLK_EN, SD_CLK_EN); + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_SD_CARD); + if (err < 0) + return err; + + err = rtsx_pci_card_power_on(pcr, RTSX_SD_CARD); + if (err < 0) + return err; + + mdelay(1); + + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + if (err < 0) + return err; + + /* send at least 74 clocks */ + rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN); + +finish: + host->prev_power_state = power_mode; + return 0; +} + +static int sd_power_off(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + + host->prev_power_state = MMC_POWER_OFF; + + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); + + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + err = rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + if (err < 0) + return err; + + return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD); +} + +static int sd_set_power_mode(struct realtek_pci_sdmmc *host, + unsigned char power_mode) +{ + int 
err; + + if (power_mode == MMC_POWER_OFF) + err = sd_power_off(host); + else + err = sd_power_on(host, power_mode); + + return err; +} + +static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing) +{ + struct rtsx_pcr *pcr = host->pcr; + int err = 0; + + rtsx_pci_init_cmd(pcr); + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_NOT_RST, + SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + break; + + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_NOT_RST, + SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); + break; + + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, + 0x0C, SD_20_MODE); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_14_DELAY); + break; + + default: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_CFG1, 0x0C, SD_20_MODE); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); + break; + } + + err = rtsx_pci_send_cmd(pcr, 100); + + return err; +} + +static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + + if (host->eject) + return; + + if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD)) + return; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + sd_set_bus_width(host, ios->bus_width); + sd_set_power_mode(host, ios->power_mode); + sd_set_timing(host, ios->timing); + + host->vpclk = false; + host->double_clk = true; + + switch (ios->timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + host->ssc_depth = RTSX_SSC_DEPTH_2M; + host->vpclk = true; + host->double_clk = false; + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_UHS_SDR25: + host->ssc_depth = RTSX_SSC_DEPTH_1M; + break; + default: + host->ssc_depth = RTSX_SSC_DEPTH_500K; + break; + } + + host->initial_mode = (ios->clock <= 1000000) ? 
true : false; + + host->clock = ios->clock; + rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth, + host->initial_mode, host->double_clk, host->vpclk); + + mutex_unlock(&pcr->pcr_mutex); +} + +static int sdmmc_get_ro(struct mmc_host *mmc) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int ro = 0; + u32 val; + + if (host->eject) + return -ENOMEDIUM; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + /* Check SD mechanical write-protect switch */ + val = rtsx_pci_readl(pcr, RTSX_BIPR); + dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); + if (val & SD_WRITE_PROTECT) + ro = 1; + + mutex_unlock(&pcr->pcr_mutex); + + return ro; +} + +static int sdmmc_get_cd(struct mmc_host *mmc) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int cd = 0; + u32 val; + + if (host->eject) + return cd; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + /* Check SD card detect */ + val = rtsx_pci_card_exist(pcr); + dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); + if (val & SD_EXIST) + cd = 1; + + mutex_unlock(&pcr->pcr_mutex); + + return cd; +} + +static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + u8 stat; + + /* Refer to the Signal Voltage Switch Sequence in the SD spec. + * Wait for a period of time so that the card can drive SD_CMD and + * SD_DAT[3:0] low after sending back the CMD11 response. + */ + mdelay(1); + + /* SD_CMD and SD_DAT[3:0] should be driven low by the card. + * If any one of SD_CMD, SD_DAT[3:0] is not low, + * abort the voltage switch sequence. + */ + err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); + if (err < 0) + return err; + + if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | + SD_DAT1_STATUS | SD_DAT0_STATUS)) + return -EINVAL; + + /* Stop toggling the SD clock */ + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, + 0xFF, SD_CLK_FORCE_STOP); + if (err < 0) + return err; + + return 0; +} + +static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + u8 stat, mask, val; + + /* Wait for the card's 1.8V regulator output to become stable */ + msleep(50); + + /* Toggle the SD clock again */ + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN); + if (err < 0) + return err; + + /* Wait for a period of time so that the card can drive + * SD_DAT[3:0] high at 1.8V + */ + msleep(20); + + /* SD_CMD and SD_DAT[3:0] should now be pulled high by the host */ + err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); + if (err < 0) + return err; + + mask = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | + SD_DAT1_STATUS | SD_DAT0_STATUS; + val = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | + SD_DAT1_STATUS | SD_DAT0_STATUS; + if ((stat & mask) != val) { + dev_dbg(sdmmc_dev(host), + "%s: SD_BUS_STAT = 0x%x\n", __func__, stat); + rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); + rtsx_pci_write_register(pcr, CARD_CLK_EN, 0xFF, 0); + return -EINVAL; + } + + return 0; +} + +static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int err = 0; + u8 voltage; + + dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", + __func__, ios->signal_voltage); + + if (host->eject) + return -ENOMEDIUM; + + err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); + if (err) + return
err; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + voltage = OUTPUT_3V3; + else + voltage = OUTPUT_1V8; + + if (voltage == OUTPUT_1V8) { + err = sd_wait_voltage_stable_1(host); + if (err < 0) + goto out; + } + + err = rtsx_pci_switch_output_voltage(pcr, voltage); + if (err < 0) + goto out; + + if (voltage == OUTPUT_1V8) { + err = sd_wait_voltage_stable_2(host); + if (err < 0) + goto out; + } + +out: + /* Stop toggle SD clock in idle */ + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); + + mutex_unlock(&pcr->pcr_mutex); + + return err; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int err = 0; + + if (host->eject) + return -ENOMEDIUM; + + err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); + if (err) + return err; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + /* Set initial TX phase */ + switch (mmc->ios.timing) { + case MMC_TIMING_UHS_SDR104: + err = sd_change_phase(host, SDR104_TX_PHASE(pcr), false); + break; + + case MMC_TIMING_UHS_SDR50: + err = sd_change_phase(host, SDR50_TX_PHASE(pcr), false); + break; + + case MMC_TIMING_UHS_DDR50: + err = sd_change_phase(host, DDR50_TX_PHASE(pcr), false); + break; + + default: + err = 0; + } + + if (err) + goto out; + + /* Tuning RX phase */ + if ((mmc->ios.timing == MMC_TIMING_UHS_SDR104) || + (mmc->ios.timing == MMC_TIMING_UHS_SDR50)) + err = sd_tuning_rx(host, opcode); + else if (mmc->ios.timing == MMC_TIMING_UHS_DDR50) + err = sd_change_phase(host, DDR50_RX_PHASE(pcr), true); + +out: + mutex_unlock(&pcr->pcr_mutex); + + return err; +} + +static const struct mmc_host_ops realtek_pci_sdmmc_ops = { + .pre_req = sdmmc_pre_req, + .post_req = sdmmc_post_req, + .request = sdmmc_request, + .set_ios = sdmmc_set_ios, + .get_ro = sdmmc_get_ro, + .get_cd = sdmmc_get_cd, + .start_signal_voltage_switch = sdmmc_switch_voltage, + .execute_tuning = sdmmc_execute_tuning, +}; + +static void init_extra_caps(struct realtek_pci_sdmmc *host) +{ + struct mmc_host *mmc = host->mmc; + struct rtsx_pcr *pcr = host->pcr; + + dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50) + mmc->caps |= MMC_CAP_UHS_SDR50; + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + mmc->caps |= MMC_CAP_UHS_SDR104; + if (pcr->extra_caps & EXTRA_CAPS_SD_DDR50) + mmc->caps |= MMC_CAP_UHS_DDR50; + if (pcr->extra_caps & EXTRA_CAPS_MMC_HSDDR) + mmc->caps |= MMC_CAP_1_8V_DDR; + if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT) + mmc->caps |= MMC_CAP_8_BIT_DATA; + if (pcr->extra_caps & EXTRA_CAPS_NO_MMC) + mmc->caps2 |= MMC_CAP2_NO_MMC; +} + +static void realtek_init_host(struct realtek_pci_sdmmc *host) +{ + struct mmc_host *mmc = host->mmc; + + mmc->f_min = 250000; + mmc->f_max = 208000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | + MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE; + mmc->max_current_330 = 400; + mmc->max_current_180 = 800; + mmc->ops = &realtek_pci_sdmmc_ops; + + init_extra_caps(host); + + mmc->max_segs = 256; + mmc->max_seg_size = 65536; + mmc->max_blk_size = 512; + mmc->max_blk_count = 65535; + mmc->max_req_size = 524288; +} + +static void rtsx_pci_sdmmc_card_event(struct 
platform_device *pdev) +{ + struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); + + host->cookie = -1; + mmc_detect_change(host->mmc, 0); +} + +static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct realtek_pci_sdmmc *host; + struct rtsx_pcr *pcr; + struct pcr_handle *handle = pdev->dev.platform_data; + + if (!handle) + return -ENXIO; + + pcr = handle->pcr; + if (!pcr) + return -ENXIO; + + dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n"); + + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->pcr = pcr; + mmc->ios.power_delay_ms = 5; + host->mmc = mmc; + host->pdev = pdev; + host->cookie = -1; + host->prev_power_state = MMC_POWER_OFF; + INIT_WORK(&host->work, sd_request); + platform_set_drvdata(pdev, host); + pcr->slots[RTSX_SD_CARD].p_dev = pdev; + pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; + + mutex_init(&host->host_mutex); + + realtek_init_host(host); + + mmc_add_host(mmc); + + return 0; +} + +static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) +{ + struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); + struct rtsx_pcr *pcr; + struct mmc_host *mmc; + + if (!host) + return 0; + + pcr = host->pcr; + pcr->slots[RTSX_SD_CARD].p_dev = NULL; + pcr->slots[RTSX_SD_CARD].card_event = NULL; + mmc = host->mmc; + + cancel_work_sync(&host->work); + + mutex_lock(&host->host_mutex); + if (host->mrq) { + dev_dbg(&(pdev->dev), + "%s: Controller removed during transfer\n", + mmc_hostname(mmc)); + + rtsx_pci_complete_unfinished_transfer(pcr); + + host->mrq->cmd->error = -ENOMEDIUM; + if (host->mrq->stop) + host->mrq->stop->error = -ENOMEDIUM; + mmc_request_done(mmc, host->mrq); + } + mutex_unlock(&host->host_mutex); + + mmc_remove_host(mmc); + host->eject = true; + + flush_work(&host->work); + + mmc_free_host(mmc); + + dev_dbg(&(pdev->dev), + ": Realtek PCI-E SDMMC controller has been removed\n"); + + return 0; +} + +static const struct platform_device_id rtsx_pci_sdmmc_ids[] = { + { + .name = DRV_NAME_RTSX_PCI_SDMMC, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids); + +static struct platform_driver rtsx_pci_sdmmc_driver = { + .probe = rtsx_pci_sdmmc_drv_probe, + .remove = rtsx_pci_sdmmc_drv_remove, + .id_table = rtsx_pci_sdmmc_ids, + .driver = { + .name = DRV_NAME_RTSX_PCI_SDMMC, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(rtsx_pci_sdmmc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Wei WANG "); +MODULE_DESCRIPTION("Realtek PCI-E SD/MMC Card Host Driver"); diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c new file mode 100644 index 000000000..1be3a355f --- /dev/null +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -0,0 +1,1473 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Realtek USB SD/MMC Card Interface driver + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Roger Tseng + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ + defined(CONFIG_MMC_REALTEK_USB_MODULE)) +#include +#include +#define RTSX_USB_USE_LEDS_CLASS +#endif + +struct rtsx_usb_sdmmc { + struct platform_device *pdev; + struct rtsx_ucr *ucr; + struct mmc_host *mmc; + struct mmc_request *mrq; + + struct mutex host_mutex; + + u8 ssc_depth; + unsigned int clock; + bool vpclk; + bool double_clk; + bool host_removal; + bool card_exist; + bool initial_mode; + bool ddr_mode; + + unsigned char power_mode; + +#ifdef RTSX_USB_USE_LEDS_CLASS + struct led_classdev led; + char led_name[32]; + struct work_struct led_work; +#endif +}; + +static inline struct device *sdmmc_dev(struct rtsx_usb_sdmmc *host) +{ + return &(host->pdev->dev); +} + +static inline void sd_clear_error(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + rtsx_usb_ep0_write_register(ucr, CARD_STOP, + SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + rtsx_usb_clear_dma_err(ucr); + rtsx_usb_clear_fsm_err(ucr); +} + +#ifdef DEBUG +static void sd_print_debug_regs(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + u8 val = 0; + + rtsx_usb_ep0_read_register(ucr, SD_STAT1, &val); + dev_dbg(sdmmc_dev(host), "SD_STAT1: 0x%x\n", val); + rtsx_usb_ep0_read_register(ucr, SD_STAT2, &val); + dev_dbg(sdmmc_dev(host), "SD_STAT2: 0x%x\n", val); + rtsx_usb_ep0_read_register(ucr, SD_BUS_STAT, &val); + dev_dbg(sdmmc_dev(host), "SD_BUS_STAT: 0x%x\n", val); +} +#else +#define sd_print_debug_regs(host) +#endif /* DEBUG */ + +static int sd_read_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd, + u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + u8 trans_mode; + + if (!buf) + buf_len = 0; + + rtsx_usb_init_cmd(ucr); + if (cmd != NULL) { + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__ + , cmd->opcode); + if (cmd->opcode == MMC_SEND_TUNING_BLOCK) + trans_mode = SD_TM_AUTO_TUNING; + else + trans_mode = SD_TM_NORMAL_READ; + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD1, 0xFF, (u8)(cmd->arg >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD2, 0xFF, (u8)(cmd->arg >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD3, 0xFF, (u8)(cmd->arg >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD4, 0xFF, (u8)cmd->arg); + } else { + trans_mode = SD_TM_AUTO_READ_3; + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, + 0xFF, (u8)(byte_cnt >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + if (trans_mode != SD_TM_AUTO_TUNING) + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, trans_mode | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + if (cmd != NULL) { + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, 
SD_CMD3, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0); + } + + err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_send_cmd failed (err = %d)\n", err); + return err; + } + + err = rtsx_usb_get_rsp(ucr, !cmd ? 1 : 5, timeout); + if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) { + sd_print_debug_regs(host); + + if (!err) { + dev_dbg(sdmmc_dev(host), + "Transfer failed (SD_TRANSFER = %02x)\n", + ucr->rsp_buf[0]); + err = -EIO; + } else { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_get_rsp failed (err = %d)\n", err); + } + + return err; + } + + if (cmd != NULL) { + cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + + if (buf && buf_len) { + /* 2-byte aligned part */ + err = rtsx_usb_read_ppbuf(ucr, buf, byte_cnt - (byte_cnt % 2)); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_read_ppbuf failed (err = %d)\n", err); + return err; + } + + /* unaligned byte */ + if (byte_cnt % 2) + return rtsx_usb_read_register(ucr, + PPBUF_BASE2 + byte_cnt, + buf + byte_cnt - 1); + } + + return 0; +} + +static int sd_write_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd, + u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + u8 trans_mode; + + if (!buf) + buf_len = 0; + + if (buf && buf_len) { + err = rtsx_usb_write_ppbuf(ucr, buf, buf_len); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_write_ppbuf failed (err = %d)\n", + err); + return err; + } + } + + trans_mode = (cmd != NULL) ? SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3; + rtsx_usb_init_cmd(ucr); + + if (cmd != NULL) { + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, + cmd->opcode); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD1, 0xFF, (u8)(cmd->arg >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD2, 0xFF, (u8)(cmd->arg >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD3, 0xFF, (u8)(cmd->arg >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD4, 0xFF, (u8)cmd->arg); + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, + 0xFF, (u8)(byte_cnt >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + trans_mode | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + if (cmd != NULL) { + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0); + } + + err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_send_cmd failed (err = %d)\n", err); + return err; + } + + err = rtsx_usb_get_rsp(ucr, !cmd ? 
1 : 5, timeout); + if (err) { + sd_print_debug_regs(host); + dev_dbg(sdmmc_dev(host), + "rtsx_usb_get_rsp failed (err = %d)\n", err); + return err; + } + + if (cmd != NULL) { + cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + + return 0; +} + +static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host, + struct mmc_command *cmd) +{ + struct rtsx_ucr *ucr = host->ucr; + u8 cmd_idx = (u8)cmd->opcode; + u32 arg = cmd->arg; + int err = 0; + int timeout = 100; + int i; + u8 *ptr; + int stat_idx = 0; + int len = 2; + u8 rsp_type; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd_idx, arg); + + /* Response type: + * R0 + * R1, R5, R6, R7 + * R1b + * R2 + * R3, R4 + */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + rsp_type = SD_RSP_TYPE_R0; + break; + case MMC_RSP_R1: + rsp_type = SD_RSP_TYPE_R1; + break; + case MMC_RSP_R1_NO_CRC: + rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; + break; + case MMC_RSP_R1B: + rsp_type = SD_RSP_TYPE_R1b; + break; + case MMC_RSP_R2: + rsp_type = SD_RSP_TYPE_R2; + break; + case MMC_RSP_R3: + rsp_type = SD_RSP_TYPE_R3; + break; + default: + dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n"); + err = -EINVAL; + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R1b) + timeout = cmd->busy_timeout ? cmd->busy_timeout : 3000; + + if (cmd->opcode == SD_SWITCH_VOLTAGE) { + err = rtsx_usb_write_register(ucr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, + SD_CLK_TOGGLE_EN); + if (err) + goto out; + } + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, PINGPONG_BUFFER); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END | SD_STAT_IDLE, + SD_TRANSFER_END | SD_STAT_IDLE); + + if (rsp_type == SD_RSP_TYPE_R2) { + /* Read data from ping-pong buffer */ + for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) + rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 16; + } else if (rsp_type != SD_RSP_TYPE_R0) { + /* Read data from SD_CMDx registers */ + for (i = SD_CMD0; i <= SD_CMD4; i++) + rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 5; + } + len += stat_idx; + + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_STAT1, 0, 0); + + err = rtsx_usb_send_cmd(ucr, MODE_CR, 100); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_send_cmd error (err = %d)\n", err); + goto out; + } + + err = rtsx_usb_get_rsp(ucr, len, timeout); + if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) { + sd_print_debug_regs(host); + sd_clear_error(host); + + if (!err) { + dev_dbg(sdmmc_dev(host), + "Transfer failed (SD_TRANSFER = %02x)\n", + ucr->rsp_buf[0]); + err = -EIO; + } else { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_get_rsp failed (err = %d)\n", err); + } + + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R0) { + err = 0; + goto out; + } + + /* Skip result of CHECK_REG_CMD */ + ptr = ucr->rsp_buf + 1; + + /* Check (Start,Transmission) bit of Response */ + if ((ptr[0] & 0xC0) != 0) { + err = -EILSEQ; + 
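/* Bits 7:6 of the first response byte are the start bit and the transmission (direction) bit; both are 0 in a card-to-host response, so any bit left set by the 0xC0 mask above marks the response as malformed. */ +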
dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); + goto out; + } + + /* Check CRC7 */ + if (!(rsp_type & SD_NO_CHECK_CRC7)) { + if (ptr[stat_idx] & SD_CRC7_ERR) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "CRC7 error\n"); + goto out; + } + } + + if (rsp_type == SD_RSP_TYPE_R2) { + /* + * The controller offloads the last byte {CRC-7, end bit 1'b1} + * of response type R2. Assign dummy CRC, 0, and end bit to the + * byte(ptr[16], goes into the LSB of resp[3] later). + */ + ptr[16] = 1; + + for (i = 0; i < 4; i++) { + cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); + dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", + i, cmd->resp[i]); + } + } else { + cmd->resp[0] = get_unaligned_be32(ptr + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + +out: + cmd->error = err; +} + +static int sd_rw_multi(struct rtsx_usb_sdmmc *host, struct mmc_request *mrq) +{ + struct rtsx_ucr *ucr = host->ucr; + struct mmc_data *data = mrq->data; + int read = (data->flags & MMC_DATA_READ) ? 1 : 0; + u8 cfg2, trans_mode; + int err; + u8 flag; + size_t data_len = data->blksz * data->blocks; + unsigned int pipe; + + if (read) { + dev_dbg(sdmmc_dev(host), "%s: read %zu bytes\n", + __func__, data_len); + cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; + trans_mode = SD_TM_AUTO_READ_3; + } else { + dev_dbg(sdmmc_dev(host), "%s: write %zu bytes\n", + __func__, data_len); + cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; + trans_mode = SD_TM_AUTO_WRITE_3; + } + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, + 0xFF, (u8)data->blocks); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, + 0xFF, (u8)(data->blocks >> 8)); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, RING_BUFFER); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC3, + 0xFF, (u8)(data_len >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC2, + 0xFF, (u8)(data_len >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC1, + 0xFF, (u8)(data_len >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC0, + 0xFF, (u8)data_len); + if (read) { + flag = MODE_CDIR; + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_FROM_CARD | DMA_EN | DMA_512); + } else { + flag = MODE_CDOR; + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_TO_CARD | DMA_EN | DMA_512); + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + trans_mode | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + err = rtsx_usb_send_cmd(ucr, flag, 100); + if (err) + return err; + + if (read) + pipe = usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN); + else + pipe = usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT); + + err = rtsx_usb_transfer_data(ucr, pipe, data->sg, data_len, + data->sg_len, NULL, 10000); + if (err) { + dev_dbg(sdmmc_dev(host), "rtsx_usb_transfer_data error %d\n" + , err); + sd_clear_error(host); + return err; + } + + return rtsx_usb_get_rsp(ucr, 1, 2000); +} + +static inline void sd_enable_initial_mode(struct rtsx_usb_sdmmc *host) +{ + rtsx_usb_write_register(host->ucr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void 
sd_disable_initial_mode(struct rtsx_usb_sdmmc *host) +{ + rtsx_usb_write_register(host->ucr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + +static void sd_normal_rw(struct rtsx_usb_sdmmc *host, + struct mmc_request *mrq) +{ + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + u8 *buf; + + buf = kzalloc(data->blksz, GFP_NOIO); + if (!buf) { + cmd->error = -ENOMEM; + return; + } + + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); + + cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf, + data->blksz, 200); + + if (host->initial_mode) + sd_enable_initial_mode(host); + + sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); + } else { + sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); + + cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf, + data->blksz, 200); + } + + kfree(buf); +} + +static int sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx) +{ + struct rtsx_ucr *ucr = host->ucr; + + dev_dbg(sdmmc_dev(host), "%s: %s sample_point = %d\n", + __func__, tx ? "TX" : "RX", sample_point); + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE); + + if (tx) + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, + 0x0F, sample_point); + else + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK1_CTL, + 0x0F, sample_point); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_RST, 0); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static inline u32 get_phase_point(u32 phase_map, unsigned int idx) +{ + idx &= MAX_PHASE; + return phase_map & (1 << idx); +} + +static int get_phase_len(u32 phase_map, unsigned int idx) +{ + int i; + + for (i = 0; i < MAX_PHASE + 1; i++) { + if (get_phase_point(phase_map, idx + i) == 0) + return i; + } + return MAX_PHASE + 1; +} + +static u8 sd_search_final_phase(struct rtsx_usb_sdmmc *host, u32 phase_map) +{ + int start = 0, len = 0; + int start_final = 0, len_final = 0; + u8 final_phase = 0xFF; + + if (phase_map == 0) { + dev_dbg(sdmmc_dev(host), "Phase: [map:%x]\n", phase_map); + return final_phase; + } + + while (start < MAX_PHASE + 1) { + len = get_phase_len(phase_map, start); + if (len_final < len) { + start_final = start; + len_final = len; + } + start += len ? 
len : 1; + } + + final_phase = (start_final + len_final / 2) & MAX_PHASE; + dev_dbg(sdmmc_dev(host), "Phase: [map:%x] [maxlen:%d] [final:%d]\n", + phase_map, len_final, final_phase); + + return final_phase; +} + +static void sd_wait_data_idle(struct rtsx_usb_sdmmc *host) +{ + int i; + u8 val = 0; + + for (i = 0; i < 100; i++) { + rtsx_usb_ep0_read_register(host->ucr, SD_DATA_STATE, &val); + if (val & SD_DATA_IDLE) + return; + + usleep_range(100, 1000); + } +} + +static int sd_tuning_rx_cmd(struct rtsx_usb_sdmmc *host, + u8 opcode, u8 sample_point) +{ + int err; + struct mmc_command cmd = {}; + + err = sd_change_phase(host, sample_point, 0); + if (err) + return err; + + cmd.opcode = MMC_SEND_TUNING_BLOCK; + err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); + if (err) { + /* Wait till SD DATA IDLE */ + sd_wait_data_idle(host); + sd_clear_error(host); + return err; + } + + return 0; +} + +static void sd_tuning_phase(struct rtsx_usb_sdmmc *host, + u8 opcode, u16 *phase_map) +{ + int err, i; + u16 raw_phase_map = 0; + + for (i = MAX_PHASE; i >= 0; i--) { + err = sd_tuning_rx_cmd(host, opcode, (u8)i); + if (!err) + raw_phase_map |= 1 << i; + } + + if (phase_map) + *phase_map = raw_phase_map; +} + +static int sd_tuning_rx(struct rtsx_usb_sdmmc *host, u8 opcode) +{ + int err, i; + u16 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; + u8 final_phase; + + /* setting fixed default TX phase */ + err = sd_change_phase(host, 0x01, 1); + if (err) { + dev_dbg(sdmmc_dev(host), "TX phase setting failed\n"); + return err; + } + + /* tuning RX phase */ + for (i = 0; i < RX_TUNING_CNT; i++) { + sd_tuning_phase(host, opcode, &(raw_phase_map[i])); + + if (raw_phase_map[i] == 0) + break; + } + + phase_map = 0xFFFF; + for (i = 0; i < RX_TUNING_CNT; i++) { + dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%04x\n", + i, raw_phase_map[i]); + phase_map &= raw_phase_map[i]; + } + dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%04x\n", phase_map); + + if (phase_map) { + final_phase = sd_search_final_phase(host, phase_map); + if (final_phase == 0xFF) + return -EINVAL; + + err = sd_change_phase(host, final_phase, 0); + if (err) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +static int sdmmc_get_ro(struct mmc_host *mmc) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err; + u16 val; + + if (host->host_removal) + return -ENOMEDIUM; + + mutex_lock(&ucr->dev_mutex); + + /* Check SD card detect */ + err = rtsx_usb_get_card_status(ucr, &val); + + mutex_unlock(&ucr->dev_mutex); + + + /* Treat failed detection as non-ro */ + if (err) + return 0; + + if (val & SD_WP) + return 1; + + return 0; +} + +static int sdmmc_get_cd(struct mmc_host *mmc) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err; + u16 val; + + if (host->host_removal) + return -ENOMEDIUM; + + mutex_lock(&ucr->dev_mutex); + + /* Check SD card detect */ + err = rtsx_usb_get_card_status(ucr, &val); + + mutex_unlock(&ucr->dev_mutex); + + /* Treat failed detection as non-exist */ + if (err) + goto no_card; + + if (val & SD_CD) { + host->card_exist = true; + return 1; + } + +no_card: + host->card_exist = false; + return 0; +} + +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + unsigned int data_size = 0; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + + if (host->host_removal) { 
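+ /* The USB card reader itself is going away (unplug or driver removal); fail the request as if no medium were present. */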
+ cmd->error = -ENOMEDIUM; + goto finish; + } + + if (!host->card_exist) { + cmd->error = -ENOMEDIUM; + goto finish_detect_card; + } + + mutex_lock(&ucr->dev_mutex); + + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + + if (mrq->data) + data_size = data->blocks * data->blksz; + + if (!data_size) { + sd_send_cmd_get_rsp(host, cmd); + } else if ((!(data_size % 512) && cmd->opcode != MMC_SEND_EXT_CSD) || + mmc_op_multi(cmd->opcode)) { + sd_send_cmd_get_rsp(host, cmd); + + if (!cmd->error) { + sd_rw_multi(host, mrq); + + if (mmc_op_multi(cmd->opcode) && mrq->stop) { + sd_send_cmd_get_rsp(host, mrq->stop); + rtsx_usb_write_register(ucr, MC_FIFO_CTL, + FIFO_FLUSH, FIFO_FLUSH); + } + } + } else { + sd_normal_rw(host, mrq); + } + + if (mrq->data) { + if (cmd->error || data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blocks * data->blksz; + } + + mutex_unlock(&ucr->dev_mutex); + +finish_detect_card: + if (cmd->error) { + /* + * Re-check card presence here: this refreshes the cached + * existence state after a failure and speeds up removal + * detection when the core retries the request. + */ + sdmmc_get_cd(mmc); + dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); + } + +finish: + mutex_lock(&host->host_mutex); + host->mrq = NULL; + mutex_unlock(&host->host_mutex); + + mmc_request_done(mmc, mrq); +} + +static int sd_set_bus_width(struct rtsx_usb_sdmmc *host, + unsigned char bus_width) +{ + int err = 0; + static const u8 width[] = { + [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, + [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, + [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, + }; + + if (bus_width <= MMC_BUS_WIDTH_8) + err = rtsx_usb_write_register(host->ucr, SD_CFG1, + 0x03, width[bus_width]); + + return err; +} + +static int sd_pull_ctl_disable_lqfp48(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_disable_qfn24(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_enable_lqfp48(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0xAA); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA9); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_enable_qfn24(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5); +
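/* The five remaining writes program CARD_PULL_CTL2..CARD_PULL_CTL6; each byte appears to pack per-pin pull-up/pull-down settings for the QFN24 package, with values that are vendor-defined and not individually documented here. */ +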
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x9A); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA5); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x9A); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x65); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x5A); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_power_on(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SHARE_MODE, + CARD_SHARE_MASK, CARD_SHARE_SD); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, + SD_CLK_EN, SD_CLK_EN); + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + if (err) + return err; + + if (CHECK_PKG(ucr, LQFP48)) + err = sd_pull_ctl_enable_lqfp48(ucr); + else + err = sd_pull_ctl_enable_qfn24(ucr); + if (err) + return err; + + err = rtsx_usb_write_register(ucr, CARD_PWR_CTL, + POWER_MASK, PARTIAL_POWER_ON); + if (err) + return err; + + usleep_range(800, 1000); + + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + POWER_MASK|LDO3318_PWR_MASK, POWER_ON|LDO_ON); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, + SD_OUTPUT_EN, SD_OUTPUT_EN); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_power_off(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + POWER_MASK, POWER_OFF); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + POWER_MASK|LDO3318_PWR_MASK, POWER_OFF|LDO_SUSPEND); + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + if (err) + return err; + + if (CHECK_PKG(ucr, LQFP48)) + return sd_pull_ctl_disable_lqfp48(ucr); + return sd_pull_ctl_disable_qfn24(ucr); +} + +static int sd_set_power_mode(struct rtsx_usb_sdmmc *host, + unsigned char power_mode) +{ + int err; + + if (power_mode != MMC_POWER_OFF) + power_mode = MMC_POWER_ON; + + if (power_mode == host->power_mode) + return 0; + + if (power_mode == MMC_POWER_OFF) { + err = sd_power_off(host); + pm_runtime_put_noidle(sdmmc_dev(host)); + } else { + pm_runtime_get_noresume(sdmmc_dev(host)); + err = sd_power_on(host); + } + + if (!err) + host->power_mode = power_mode; + + return err; +} + +static int sd_set_timing(struct rtsx_usb_sdmmc *host, + unsigned char timing, bool *ddr_mode) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + + *ddr_mode = false; + + rtsx_usb_init_cmd(ucr); + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_RST, + SD_30_MODE | SD_ASYNC_FIFO_RST); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + break; + + case MMC_TIMING_UHS_DDR50: + *ddr_mode = true; + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_RST, + SD_DDR_MODE | SD_ASYNC_FIFO_RST); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + 
DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); + break; + + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, + 0x0C, SD_20_MODE); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_14_DELAY); + break; + + default: + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CFG1, 0x0C, SD_20_MODE); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); + break; + } + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + + return err; +} + +static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + mutex_lock(&ucr->dev_mutex); + + sd_set_power_mode(host, ios->power_mode); + sd_set_bus_width(host, ios->bus_width); + sd_set_timing(host, ios->timing, &host->ddr_mode); + + host->vpclk = false; + host->double_clk = true; + + switch (ios->timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + host->ssc_depth = SSC_DEPTH_2M; + host->vpclk = true; + host->double_clk = false; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_UHS_SDR25: + host->ssc_depth = SSC_DEPTH_1M; + break; + default: + host->ssc_depth = SSC_DEPTH_512K; + break; + } + + host->initial_mode = (ios->clock <= 1000000) ? true : false; + host->clock = ios->clock; + + rtsx_usb_switch_clock(host->ucr, host->clock, host->ssc_depth, + host->initial_mode, host->double_clk, host->vpclk); + + mutex_unlock(&ucr->dev_mutex); + dev_dbg(sdmmc_dev(host), "%s end\n", __func__); +} + +static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err = 0; + + dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", + __func__, ios->signal_voltage); + + if (host->host_removal) + return -ENOMEDIUM; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) + return -EPERM; + + mutex_lock(&ucr->dev_mutex); + + err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD); + if (err) { + mutex_unlock(&ucr->dev_mutex); + return err; + } + + /* Let mmc core do the busy checking, simply stop the forced-toggle + * clock(while issuing CMD11) and switch voltage. 
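+ * + * Per the SD spec, after responding to CMD11 the card holds CMD and + * DAT[3:0] low; the host stops the clock, switches the signalling level + * to 1.8V, restarts the clock, and the card releases the lines once its + * regulator has settled. Only the register writes are issued here.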
+ */ + rtsx_usb_init_cmd(ucr); + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_3V3); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, + TUNE_SD18_MASK, TUNE_SD18_3V3); + } else { + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, + SD_CLK_FORCE_STOP); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, + TUNE_SD18_MASK, TUNE_SD18_1V8); + } + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + mutex_unlock(&ucr->dev_mutex); + + return err; +} + +static int sdmmc_card_busy(struct mmc_host *mmc) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err; + u8 stat; + u8 mask = SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS + | SD_DAT0_STATUS; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + + mutex_lock(&ucr->dev_mutex); + + err = rtsx_usb_write_register(ucr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, + SD_CLK_TOGGLE_EN); + if (err) + goto out; + + mdelay(1); + + err = rtsx_usb_read_register(ucr, SD_BUS_STAT, &stat); + if (err) + goto out; + + err = rtsx_usb_write_register(ucr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +out: + mutex_unlock(&ucr->dev_mutex); + + if (err) + return err; + + /* check if any pin between dat[0:3] is low */ + if ((stat & mask) != mask) + return 1; + else + return 0; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err = 0; + + if (host->host_removal) + return -ENOMEDIUM; + + mutex_lock(&ucr->dev_mutex); + + if (!host->ddr_mode) + err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK); + + mutex_unlock(&ucr->dev_mutex); + + return err; +} + +static const struct mmc_host_ops rtsx_usb_sdmmc_ops = { + .request = sdmmc_request, + .set_ios = sdmmc_set_ios, + .get_ro = sdmmc_get_ro, + .get_cd = sdmmc_get_cd, + .start_signal_voltage_switch = sdmmc_switch_voltage, + .card_busy = sdmmc_card_busy, + .execute_tuning = sdmmc_execute_tuning, +}; + +#ifdef RTSX_USB_USE_LEDS_CLASS +static void rtsx_usb_led_control(struct led_classdev *led, + enum led_brightness brightness) +{ + struct rtsx_usb_sdmmc *host = container_of(led, + struct rtsx_usb_sdmmc, led); + + if (host->host_removal) + return; + + host->led.brightness = brightness; + schedule_work(&host->led_work); +} + +static void rtsx_usb_update_led(struct work_struct *work) +{ + struct rtsx_usb_sdmmc *host = + container_of(work, struct rtsx_usb_sdmmc, led_work); + struct rtsx_ucr *ucr = host->ucr; + + pm_runtime_get_noresume(sdmmc_dev(host)); + mutex_lock(&ucr->dev_mutex); + + if (host->power_mode == MMC_POWER_OFF) + goto out; + + if (host->led.brightness == LED_OFF) + rtsx_usb_turn_off_led(ucr); + else + rtsx_usb_turn_on_led(ucr); + +out: + mutex_unlock(&ucr->dev_mutex); + pm_runtime_put_sync_suspend(sdmmc_dev(host)); +} +#endif + +static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host) +{ + struct mmc_host *mmc = host->mmc; + + mmc->f_min = 250000; + mmc->f_max = 208000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | + MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | + MMC_CAP_SYNC_RUNTIME_PM; + mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE | + MMC_CAP2_NO_SDIO; + + 
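/* Maximum current in mA the reader can supply at 3.3V and 1.8V signalling, reported to the mmc core. */ +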
mmc->max_current_330 = 400; + mmc->max_current_180 = 800; + mmc->ops = &rtsx_usb_sdmmc_ops; + mmc->max_segs = 256; + mmc->max_seg_size = 65536; + mmc->max_blk_size = 512; + mmc->max_blk_count = 65535; + mmc->max_req_size = 524288; + + host->power_mode = MMC_POWER_OFF; +} + +static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct rtsx_usb_sdmmc *host; + struct rtsx_ucr *ucr; +#ifdef RTSX_USB_USE_LEDS_CLASS + int err; +#endif + int ret; + + ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); + if (!ucr) + return -ENXIO; + + dev_dbg(&(pdev->dev), ": Realtek USB SD/MMC controller found\n"); + + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->ucr = ucr; + host->mmc = mmc; + host->pdev = pdev; + platform_set_drvdata(pdev, host); + + mutex_init(&host->host_mutex); + rtsx_usb_init_host(host); + pm_runtime_enable(&pdev->dev); + +#ifdef RTSX_USB_USE_LEDS_CLASS + snprintf(host->led_name, sizeof(host->led_name), + "%s::", mmc_hostname(mmc)); + host->led.name = host->led_name; + host->led.brightness = LED_OFF; + host->led.default_trigger = mmc_hostname(mmc); + host->led.brightness_set = rtsx_usb_led_control; + + err = led_classdev_register(mmc_dev(mmc), &host->led); + if (err) + dev_err(&(pdev->dev), + "Failed to register LED device: %d\n", err); + INIT_WORK(&host->led_work, rtsx_usb_update_led); + +#endif + ret = mmc_add_host(mmc); + if (ret) { +#ifdef RTSX_USB_USE_LEDS_CLASS + led_classdev_unregister(&host->led); +#endif + mmc_free_host(mmc); + pm_runtime_disable(&pdev->dev); + return ret; + } + + return 0; +} + +static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev) +{ + struct rtsx_usb_sdmmc *host = platform_get_drvdata(pdev); + struct mmc_host *mmc; + + if (!host) + return 0; + + mmc = host->mmc; + host->host_removal = true; + + mutex_lock(&host->host_mutex); + if (host->mrq) { + dev_dbg(&(pdev->dev), + "%s: Controller removed during transfer\n", + mmc_hostname(mmc)); + host->mrq->cmd->error = -ENOMEDIUM; + if (host->mrq->stop) + host->mrq->stop->error = -ENOMEDIUM; + mmc_request_done(mmc, host->mrq); + } + mutex_unlock(&host->host_mutex); + + mmc_remove_host(mmc); + +#ifdef RTSX_USB_USE_LEDS_CLASS + cancel_work_sync(&host->led_work); + led_classdev_unregister(&host->led); +#endif + + mmc_free_host(mmc); + pm_runtime_disable(&pdev->dev); + platform_set_drvdata(pdev, NULL); + + dev_dbg(&(pdev->dev), + ": Realtek USB SD/MMC module has been removed\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int rtsx_usb_sdmmc_runtime_suspend(struct device *dev) +{ + struct rtsx_usb_sdmmc *host = dev_get_drvdata(dev); + + host->mmc->caps &= ~MMC_CAP_NEEDS_POLL; + return 0; +} + +static int rtsx_usb_sdmmc_runtime_resume(struct device *dev) +{ + struct rtsx_usb_sdmmc *host = dev_get_drvdata(dev); + + host->mmc->caps |= MMC_CAP_NEEDS_POLL; + if (sdmmc_get_cd(host->mmc) == 1) + mmc_detect_change(host->mmc, 0); + return 0; +} +#endif + +static const struct dev_pm_ops rtsx_usb_sdmmc_dev_pm_ops = { + SET_RUNTIME_PM_OPS(rtsx_usb_sdmmc_runtime_suspend, + rtsx_usb_sdmmc_runtime_resume, NULL) +}; + +static const struct platform_device_id rtsx_usb_sdmmc_ids[] = { + { + .name = "rtsx_usb_sdmmc", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, rtsx_usb_sdmmc_ids); + +static struct platform_driver rtsx_usb_sdmmc_driver = { + .probe = rtsx_usb_sdmmc_drv_probe, + .remove = rtsx_usb_sdmmc_drv_remove, + .id_table = rtsx_usb_sdmmc_ids, + .driver = { + .name = "rtsx_usb_sdmmc", + 
.probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &rtsx_usb_sdmmc_dev_pm_ops, + }, +}; +module_platform_driver(rtsx_usb_sdmmc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Roger Tseng "); +MODULE_DESCRIPTION("Realtek USB SD/MMC Card Host Driver"); diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c new file mode 100644 index 000000000..643d54ece --- /dev/null +++ b/drivers/mmc/host/s3cmci.c @@ -0,0 +1,1782 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver + * + * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel + * + * Current driver maintained by Ben Dooks and Simtec Electronics + * Copyright (C) 2008 Simtec Electronics + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "s3cmci.h" + +#define DRIVER_NAME "s3c-mci" + +#define S3C2410_SDICON (0x00) +#define S3C2410_SDIPRE (0x04) +#define S3C2410_SDICMDARG (0x08) +#define S3C2410_SDICMDCON (0x0C) +#define S3C2410_SDICMDSTAT (0x10) +#define S3C2410_SDIRSP0 (0x14) +#define S3C2410_SDIRSP1 (0x18) +#define S3C2410_SDIRSP2 (0x1C) +#define S3C2410_SDIRSP3 (0x20) +#define S3C2410_SDITIMER (0x24) +#define S3C2410_SDIBSIZE (0x28) +#define S3C2410_SDIDCON (0x2C) +#define S3C2410_SDIDCNT (0x30) +#define S3C2410_SDIDSTA (0x34) +#define S3C2410_SDIFSTA (0x38) + +#define S3C2410_SDIDATA (0x3C) +#define S3C2410_SDIIMSK (0x40) + +#define S3C2440_SDIDATA (0x40) +#define S3C2440_SDIIMSK (0x3C) + +#define S3C2440_SDICON_SDRESET (1 << 8) +#define S3C2410_SDICON_SDIOIRQ (1 << 3) +#define S3C2410_SDICON_FIFORESET (1 << 1) +#define S3C2410_SDICON_CLOCKTYPE (1 << 0) + +#define S3C2410_SDICMDCON_LONGRSP (1 << 10) +#define S3C2410_SDICMDCON_WAITRSP (1 << 9) +#define S3C2410_SDICMDCON_CMDSTART (1 << 8) +#define S3C2410_SDICMDCON_SENDERHOST (1 << 6) +#define S3C2410_SDICMDCON_INDEX (0x3f) + +#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12) +#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11) +#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10) +#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9) + +#define S3C2440_SDIDCON_DS_WORD (2 << 22) +#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20) +#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19) +#define S3C2410_SDIDCON_BLOCKMODE (1 << 17) +#define S3C2410_SDIDCON_WIDEBUS (1 << 16) +#define S3C2410_SDIDCON_DMAEN (1 << 15) +#define S3C2410_SDIDCON_STOP (1 << 14) +#define S3C2440_SDIDCON_DATSTART (1 << 14) + +#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12) +#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12) + +#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF) + +#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9) +#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8) +#define S3C2410_SDIDSTA_CRCFAIL (1 << 7) +#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6) +#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5) +#define S3C2410_SDIDSTA_XFERFINISH (1 << 4) +#define S3C2410_SDIDSTA_TXDATAON (1 << 1) +#define S3C2410_SDIDSTA_RXDATAON (1 << 0) + +#define S3C2440_SDIFSTA_FIFORESET (1 << 16) +#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14) +#define S3C2410_SDIFSTA_TFDET (1 << 13) +#define S3C2410_SDIFSTA_RFDET (1 << 12) +#define S3C2410_SDIFSTA_COUNTMASK (0x7f) + +#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17) +#define S3C2410_SDIIMSK_CMDSENT (1 << 16) +#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15) +#define S3C2410_SDIIMSK_RESPONSEND (1 << 14) +#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12) +#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11) +#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10) +#define 
S3C2410_SDIIMSK_DATACRC (1 << 9) +#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8) +#define S3C2410_SDIIMSK_DATAFINISH (1 << 7) +#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4) +#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2) +#define S3C2410_SDIIMSK_RXFIFOHALF (1 << 0) + +enum dbg_channels { + dbg_err = (1 << 0), + dbg_debug = (1 << 1), + dbg_info = (1 << 2), + dbg_irq = (1 << 3), + dbg_sg = (1 << 4), + dbg_dma = (1 << 5), + dbg_pio = (1 << 6), + dbg_fail = (1 << 7), + dbg_conf = (1 << 8), +}; + +static const int dbgmap_err = dbg_fail; +static const int dbgmap_info = dbg_info | dbg_conf; +static const int dbgmap_debug = dbg_err | dbg_debug; + +#define dbg(host, channels, args...) \ + do { \ + if (dbgmap_err & channels) \ + dev_err(&host->pdev->dev, args); \ + else if (dbgmap_info & channels) \ + dev_info(&host->pdev->dev, args); \ + else if (dbgmap_debug & channels) \ + dev_dbg(&host->pdev->dev, args); \ + } while (0) + +static void finalize_request(struct s3cmci_host *host); +static void s3cmci_send_request(struct mmc_host *mmc); +static void s3cmci_reset(struct s3cmci_host *host); + +#ifdef CONFIG_MMC_DEBUG + +static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) +{ + u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer; + u32 datcon, datcnt, datsta, fsta; + + con = readl(host->base + S3C2410_SDICON); + pre = readl(host->base + S3C2410_SDIPRE); + cmdarg = readl(host->base + S3C2410_SDICMDARG); + cmdcon = readl(host->base + S3C2410_SDICMDCON); + cmdsta = readl(host->base + S3C2410_SDICMDSTAT); + r0 = readl(host->base + S3C2410_SDIRSP0); + r1 = readl(host->base + S3C2410_SDIRSP1); + r2 = readl(host->base + S3C2410_SDIRSP2); + r3 = readl(host->base + S3C2410_SDIRSP3); + timer = readl(host->base + S3C2410_SDITIMER); + datcon = readl(host->base + S3C2410_SDIDCON); + datcnt = readl(host->base + S3C2410_SDIDCNT); + datsta = readl(host->base + S3C2410_SDIDSTA); + fsta = readl(host->base + S3C2410_SDIFSTA); + + dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n", + prefix, con, pre, timer); + + dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n", + prefix, cmdcon, cmdarg, cmdsta); + + dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]" + " DSTA:[%08x] DCNT:[%08x]\n", + prefix, datcon, fsta, datsta, datcnt); + + dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]" + " R2:[%08x] R3:[%08x]\n", + prefix, r0, r1, r2, r3); +} + +static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, + int stop) +{ + snprintf(host->dbgmsg_cmd, 300, + "#%u%s op:%i arg:0x%08x flags:0x%08x retries:%u", + host->ccnt, (stop ? " (STOP)" : ""), + cmd->opcode, cmd->arg, cmd->flags, cmd->retries); + + if (cmd->data) { + snprintf(host->dbgmsg_dat, 300, + "#%u bsize:%u blocks:%u bytes:%u", + host->dcnt, cmd->data->blksz, + cmd->data->blocks, + cmd->data->blocks * cmd->data->blksz); + } else { + host->dbgmsg_dat[0] = '\0'; + } +} + +static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd, + int fail) +{ + unsigned int dbglvl = fail ?
dbg_fail : dbg_debug; + + if (!cmd) + return; + + if (cmd->error == 0) { + dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n", + host->dbgmsg_cmd, cmd->resp[0]); + } else { + dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n", + cmd->error, host->dbgmsg_cmd, host->status); + } + + if (!cmd->data) + return; + + if (cmd->data->error == 0) { + dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat); + } else { + dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n", + cmd->data->error, host->dbgmsg_dat, + readl(host->base + S3C2410_SDIDCNT)); + } +} +#else +static void dbg_dumpcmd(struct s3cmci_host *host, + struct mmc_command *cmd, int fail) { } + +static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, + int stop) { } + +static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { } + +#endif /* CONFIG_MMC_DEBUG */ + +/** + * s3cmci_host_usedma - return whether the host is using dma or pio + * @host: The host state + * + * Return true if the host is using DMA to transfer data, else false + * to use PIO mode. Will return static data depending on the driver + * configuration. + */ +static inline bool s3cmci_host_usedma(struct s3cmci_host *host) +{ +#ifdef CONFIG_MMC_S3C_PIO + return false; +#else /* CONFIG_MMC_S3C_DMA */ + return true; +#endif +} + +static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) +{ + u32 newmask; + + newmask = readl(host->base + host->sdiimsk); + newmask |= imask; + + writel(newmask, host->base + host->sdiimsk); + + return newmask; +} + +static inline u32 disable_imask(struct s3cmci_host *host, u32 imask) +{ + u32 newmask; + + newmask = readl(host->base + host->sdiimsk); + newmask &= ~imask; + + writel(newmask, host->base + host->sdiimsk); + + return newmask; +} + +static inline void clear_imask(struct s3cmci_host *host) +{ + u32 mask = readl(host->base + host->sdiimsk); + + /* preserve the SDIO IRQ mask state */ + mask &= S3C2410_SDIIMSK_SDIOIRQ; + writel(mask, host->base + host->sdiimsk); +} + +/** + * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled + * @host: The host to check. + * + * Test to see if the SDIO interrupt is being signalled in case the + * controller has failed to re-detect a card interrupt. Read GPE8 and + * see if it is low and if so, signal a SDIO interrupt. + * + * This is currently called if a request is finished (we assume that the + * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is + * already being indicated. 
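A condensed sketch of the check described above, mirroring the function body that follows (illustrative only, not additional patch content):

	/* poll the DAT1/IRQ line; SDIO signals an interrupt by driving it low */
	if (host->sdio_irqen && host->pdata->bus[3] &&
	    gpiod_get_value(host->pdata->bus[3]) == 0)
		mmc_signal_sdio_irq(host->mmc);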
+*/ +static void s3cmci_check_sdio_irq(struct s3cmci_host *host) +{ + if (host->sdio_irqen) { + if (host->pdata->bus[3] && + gpiod_get_value(host->pdata->bus[3]) == 0) { + pr_debug("%s: signalling irq\n", __func__); + mmc_signal_sdio_irq(host->mmc); + } + } +} + +static inline int get_data_buffer(struct s3cmci_host *host, + u32 *bytes, u32 **pointer) +{ + struct scatterlist *sg; + + if (host->pio_active == XFER_NONE) + return -EINVAL; + + if ((!host->mrq) || (!host->mrq->data)) + return -EINVAL; + + if (host->pio_sgptr >= host->mrq->data->sg_len) { + dbg(host, dbg_debug, "no more buffers (%i/%i)\n", + host->pio_sgptr, host->mrq->data->sg_len); + return -EBUSY; + } + sg = &host->mrq->data->sg[host->pio_sgptr]; + + *bytes = sg->length; + *pointer = sg_virt(sg); + + host->pio_sgptr++; + + dbg(host, dbg_sg, "new buffer (%i/%i)\n", + host->pio_sgptr, host->mrq->data->sg_len); + + return 0; +} + +static inline u32 fifo_count(struct s3cmci_host *host) +{ + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); + + fifostat &= S3C2410_SDIFSTA_COUNTMASK; + return fifostat; +} + +static inline u32 fifo_free(struct s3cmci_host *host) +{ + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); + + fifostat &= S3C2410_SDIFSTA_COUNTMASK; + return 63 - fifostat; +} + +/** + * s3cmci_enable_irq - enable IRQ, after having disabled it. + * @host: The device state. + * @more: True if more IRQs are expected from transfer. + * + * Enable the main IRQ if needed after it has been disabled. + * + * The IRQ can be one of the following states: + * - disabled during IDLE + * - disabled whilst processing data + * - enabled during transfer + * - enabled whilst awaiting SDIO interrupt detection + */ +static void s3cmci_enable_irq(struct s3cmci_host *host, bool more) +{ + unsigned long flags; + bool enable = false; + + local_irq_save(flags); + + host->irq_enabled = more; + host->irq_disabled = false; + + enable = more | host->sdio_irqen; + + if (host->irq_state != enable) { + host->irq_state = enable; + + if (enable) + enable_irq(host->irq); + else + disable_irq(host->irq); + } + + local_irq_restore(flags); +} + +static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer) +{ + unsigned long flags; + + local_irq_save(flags); + + /* pr_debug("%s: transfer %d\n", __func__, transfer); */ + + host->irq_disabled = transfer; + + if (transfer && host->irq_state) { + host->irq_state = false; + disable_irq(host->irq); + } + + local_irq_restore(flags); +} + +static void do_pio_read(struct s3cmci_host *host) +{ + int res; + u32 fifo; + u32 *ptr; + u32 fifo_words; + void __iomem *from_ptr; + + /* write real prescaler to host, it might be set slow to fix */ + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + from_ptr = host->base + host->sdidata; + + while ((fifo = fifo_count(host))) { + if (!host->pio_bytes) { + res = get_data_buffer(host, &host->pio_bytes, + &host->pio_ptr); + if (res) { + host->pio_active = XFER_NONE; + host->complete_what = COMPLETION_FINALIZE; + + dbg(host, dbg_pio, "pio_read(): " + "complete (no more data).\n"); + return; + } + + dbg(host, dbg_pio, + "pio_read(): new target: [%i]@[%p]\n", + host->pio_bytes, host->pio_ptr); + } + + dbg(host, dbg_pio, + "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n", + fifo, host->pio_bytes, + readl(host->base + S3C2410_SDIDCNT)); + + /* If we have reached the end of the block, we can + * read a word and get 1 to 3 bytes. 
If we are in the + * middle of the block, we have to read full words, + * otherwise we will write garbage, so round down to + * an even multiple of 4. */ + if (fifo >= host->pio_bytes) + fifo = host->pio_bytes; + else + fifo -= fifo & 3; + + host->pio_bytes -= fifo; + host->pio_count += fifo; + + fifo_words = fifo >> 2; + ptr = host->pio_ptr; + while (fifo_words--) + *ptr++ = readl(from_ptr); + host->pio_ptr = ptr; + + if (fifo & 3) { + u32 n = fifo & 3; + u32 data = readl(from_ptr); + u8 *p = (u8 *)host->pio_ptr; + + while (n--) { + *p++ = data; + data >>= 8; + } + } + } + + if (!host->pio_bytes) { + res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr); + if (res) { + dbg(host, dbg_pio, + "pio_read(): complete (no more buffers).\n"); + host->pio_active = XFER_NONE; + host->complete_what = COMPLETION_FINALIZE; + + return; + } + } + + enable_imask(host, + S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST); +} + +static void do_pio_write(struct s3cmci_host *host) +{ + void __iomem *to_ptr; + int res; + u32 fifo; + u32 *ptr; + + to_ptr = host->base + host->sdidata; + + while ((fifo = fifo_free(host)) > 3) { + if (!host->pio_bytes) { + res = get_data_buffer(host, &host->pio_bytes, + &host->pio_ptr); + if (res) { + dbg(host, dbg_pio, + "pio_write(): complete (no more data).\n"); + host->pio_active = XFER_NONE; + + return; + } + + dbg(host, dbg_pio, + "pio_write(): new source: [%i]@[%p]\n", + host->pio_bytes, host->pio_ptr); + + } + + /* If we have reached the end of the block, we have to + * write exactly the remaining number of bytes. If we are + * in the middle of the block, we have to write full + * words, so round down to an even multiple of 4. */ + if (fifo >= host->pio_bytes) + fifo = host->pio_bytes; + else + fifo -= fifo & 3; + + host->pio_bytes -= fifo; + host->pio_count += fifo; + + fifo = (fifo + 3) >> 2; + ptr = host->pio_ptr; + while (fifo--) + writel(*ptr++, to_ptr); + host->pio_ptr = ptr; + } + + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); +} + +static void pio_tasklet(unsigned long data) +{ + struct s3cmci_host *host = (struct s3cmci_host *) data; + + s3cmci_disable_irq(host, true); + + if (host->pio_active == XFER_WRITE) + do_pio_write(host); + + if (host->pio_active == XFER_READ) + do_pio_read(host); + + if (host->complete_what == COMPLETION_FINALIZE) { + clear_imask(host); + if (host->pio_active != XFER_NONE) { + dbg(host, dbg_err, "unfinished %s " + "- pio_count:[%u] pio_bytes:[%u]\n", + (host->pio_active == XFER_READ) ? "read" : "write", + host->pio_count, host->pio_bytes); + + if (host->mrq->data) + host->mrq->data->error = -EINVAL; + } + + s3cmci_enable_irq(host, false); + finalize_request(host); + } else + s3cmci_enable_irq(host, true); +} + +/* + * ISR for SDI Interface IRQ + * Communication between driver and ISR works as follows: + * host->mrq points to current request + * host->complete_what indicates when the request is considered done + * COMPLETION_CMDSENT when the command was sent + * COMPLETION_RSPFIN when a response was received + * COMPLETION_XFERFINISH when the data transfer is finished + * COMPLETION_XFERFINISH_RSPFIN both of the above.
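For reference, the completion target above is chosen at command-issue time; s3cmci_send_command() later in this file applies exactly this selection:

	if (cmd->data)
		host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
	else if (cmd->flags & MMC_RSP_PRESENT)
		host->complete_what = COMPLETION_RSPFIN;
	else
		host->complete_what = COMPLETION_CMDSENT;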
+ * host->complete_request is the completion-object the driver waits for + * + * 1) Driver sets up host->mrq and host->complete_what + * 2) Driver prepares the transfer + * 3) Driver enables interrupts + * 4) Driver starts transfer + * 5) Driver waits for host->complete_request + * 6) ISR checks for request status (errors and success) + * 7) ISR sets host->mrq->cmd->error and host->mrq->data->error + * 8) ISR completes host->complete_request + * 9) ISR disables interrupts + * 10) Driver wakes up and takes care of the request + * + * Note: "->error"-fields are expected to be set to 0 before the request + * was issued by mmc.c - therefore they are only set when an error + * condition comes up + */ + +static irqreturn_t s3cmci_irq(int irq, void *dev_id) +{ + struct s3cmci_host *host = dev_id; + struct mmc_command *cmd; + u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; + u32 mci_cclear = 0, mci_dclear; + unsigned long iflags; + + mci_dsta = readl(host->base + S3C2410_SDIDSTA); + mci_imsk = readl(host->base + host->sdiimsk); + + if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) { + if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) { + mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT; + writel(mci_dclear, host->base + S3C2410_SDIDSTA); + + mmc_signal_sdio_irq(host->mmc); + return IRQ_HANDLED; + } + } + + spin_lock_irqsave(&host->complete_lock, iflags); + + mci_csta = readl(host->base + S3C2410_SDICMDSTAT); + mci_dcnt = readl(host->base + S3C2410_SDIDCNT); + mci_fsta = readl(host->base + S3C2410_SDIFSTA); + mci_dclear = 0; + + if ((host->complete_what == COMPLETION_NONE) || + (host->complete_what == COMPLETION_FINALIZE)) { + host->status = "nothing to complete"; + clear_imask(host); + goto irq_out; + } + + if (!host->mrq) { + host->status = "no active mrq"; + clear_imask(host); + goto irq_out; + } + + cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd; + + if (!cmd) { + host->status = "no active cmd"; + clear_imask(host); + goto irq_out; + } + + if (!s3cmci_host_usedma(host)) { + if ((host->pio_active == XFER_WRITE) && + (mci_fsta & S3C2410_SDIFSTA_TFDET)) { + + disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); + tasklet_schedule(&host->pio_tasklet); + host->status = "pio tx"; + } + + if ((host->pio_active == XFER_READ) && + (mci_fsta & S3C2410_SDIFSTA_RFDET)) { + + disable_imask(host, + S3C2410_SDIIMSK_RXFIFOHALF | + S3C2410_SDIIMSK_RXFIFOLAST); + + tasklet_schedule(&host->pio_tasklet); + host->status = "pio rx"; + } + } + + if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) { + dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n"); + cmd->error = -ETIMEDOUT; + host->status = "error: command timeout"; + goto fail_transfer; + } + + if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) { + if (host->complete_what == COMPLETION_CMDSENT) { + host->status = "ok: command sent"; + goto close_transfer; + } + + mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT; + } + + if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) { + if (cmd->flags & MMC_RSP_CRC) { + if (host->mrq->cmd->flags & MMC_RSP_136) { + dbg(host, dbg_irq, + "fixup: ignore CRC fail with long rsp\n"); + } else { + /* note, we used to fail the transfer + * here, but it seems that this is just + * the hardware getting it wrong.
+ * + * cmd->error = -EILSEQ; + * host->status = "error: bad command crc"; + * goto fail_transfer; + */ + } + } + + mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL; + } + + if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) { + if (host->complete_what == COMPLETION_RSPFIN) { + host->status = "ok: command response received"; + goto close_transfer; + } + + if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) + host->complete_what = COMPLETION_XFERFINISH; + + mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN; + } + + /* errors handled after this point are only relevant + when a data transfer is in progress */ + + if (!cmd->data) + goto clear_status_bits; + + /* Check for FIFO failure */ + if (host->is2440) { + if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) { + dbg(host, dbg_err, "FIFO failure\n"); + host->mrq->data->error = -EILSEQ; + host->status = "error: 2440 fifo failure"; + goto fail_transfer; + } + } else { + if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) { + dbg(host, dbg_err, "FIFO failure\n"); + cmd->data->error = -EILSEQ; + host->status = "error: fifo failure"; + goto fail_transfer; + } + } + + if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) { + dbg(host, dbg_err, "bad data crc (outgoing)\n"); + cmd->data->error = -EILSEQ; + host->status = "error: bad data crc (outgoing)"; + goto fail_transfer; + } + + if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) { + dbg(host, dbg_err, "bad data crc (incoming)\n"); + cmd->data->error = -EILSEQ; + host->status = "error: bad data crc (incoming)"; + goto fail_transfer; + } + + if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) { + dbg(host, dbg_err, "data timeout\n"); + cmd->data->error = -ETIMEDOUT; + host->status = "error: data timeout"; + goto fail_transfer; + } + + if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) { + if (host->complete_what == COMPLETION_XFERFINISH) { + host->status = "ok: data transfer completed"; + goto close_transfer; + } + + if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) + host->complete_what = COMPLETION_RSPFIN; + + mci_dclear |= S3C2410_SDIDSTA_XFERFINISH; + } + +clear_status_bits: + writel(mci_cclear, host->base + S3C2410_SDICMDSTAT); + writel(mci_dclear, host->base + S3C2410_SDIDSTA); + + goto irq_out; + +fail_transfer: + host->pio_active = XFER_NONE; + +close_transfer: + host->complete_what = COMPLETION_FINALIZE; + + clear_imask(host); + tasklet_schedule(&host->pio_tasklet); + + goto irq_out; + +irq_out: + dbg(host, dbg_irq, + "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n", + mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status); + + spin_unlock_irqrestore(&host->complete_lock, iflags); + return IRQ_HANDLED; + +} + +static void s3cmci_dma_done_callback(void *arg) +{ + struct s3cmci_host *host = arg; + unsigned long iflags; + + BUG_ON(!host->mrq); + BUG_ON(!host->mrq->data); + + spin_lock_irqsave(&host->complete_lock, iflags); + + dbg(host, dbg_dma, "DMA FINISHED\n"); + + host->dma_complete = 1; + host->complete_what = COMPLETION_FINALIZE; + + tasklet_schedule(&host->pio_tasklet); + spin_unlock_irqrestore(&host->complete_lock, iflags); + +} + +static void finalize_request(struct s3cmci_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd; + int debug_as_failure = 0; + + if (host->complete_what != COMPLETION_FINALIZE) + return; + + if (!mrq) + return; + cmd = host->cmd_is_stop ? 
mrq->stop : mrq->cmd; + + if (cmd->data && (cmd->error == 0) && + (cmd->data->error == 0)) { + if (s3cmci_host_usedma(host) && (!host->dma_complete)) { + dbg(host, dbg_dma, "DMA Missing (%d)!\n", + host->dma_complete); + return; + } + } + + /* Read response from controller. */ + cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0); + cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1); + cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2); + cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3); + + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + if (cmd->error) + debug_as_failure = 1; + + if (cmd->data && cmd->data->error) + debug_as_failure = 1; + + dbg_dumpcmd(host, cmd, debug_as_failure); + + /* Cleanup controller */ + writel(0, host->base + S3C2410_SDICMDARG); + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); + writel(0, host->base + S3C2410_SDICMDCON); + clear_imask(host); + + if (cmd->data && cmd->error) + cmd->data->error = cmd->error; + + if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) { + host->cmd_is_stop = 1; + s3cmci_send_request(host->mmc); + return; + } + + /* If we have no data transfer we are finished here */ + if (!mrq->data) + goto request_done; + + /* Calculate the amount of bytes transferred if there was no error */ + if (mrq->data->error == 0) { + mrq->data->bytes_xfered = + (mrq->data->blocks * mrq->data->blksz); + } else { + mrq->data->bytes_xfered = 0; + } + + /* If we had an error while transferring data we flush the + * DMA channel and the fifo to clear out any garbage. */ + if (mrq->data->error != 0) { + if (s3cmci_host_usedma(host)) + dmaengine_terminate_all(host->dma); + + if (host->is2440) { + /* Clear failure register and reset fifo. */ + writel(S3C2440_SDIFSTA_FIFORESET | + S3C2440_SDIFSTA_FIFOFAIL, + host->base + S3C2410_SDIFSTA); + } else { + u32 mci_con; + + /* reset fifo */ + mci_con = readl(host->base + S3C2410_SDICON); + mci_con |= S3C2410_SDICON_FIFORESET; + + writel(mci_con, host->base + S3C2410_SDICON); + } + } + +request_done: + host->complete_what = COMPLETION_NONE; + host->mrq = NULL; + + s3cmci_check_sdio_irq(host); + mmc_request_done(host->mmc, mrq); +} + +static void s3cmci_send_command(struct s3cmci_host *host, + struct mmc_command *cmd) +{ + u32 ccon, imsk; + + imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT | + S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT | + S3C2410_SDIIMSK_RESPONSECRC; + + enable_imask(host, imsk); + + if (cmd->data) + host->complete_what = COMPLETION_XFERFINISH_RSPFIN; + else if (cmd->flags & MMC_RSP_PRESENT) + host->complete_what = COMPLETION_RSPFIN; + else + host->complete_what = COMPLETION_CMDSENT; + + writel(cmd->arg, host->base + S3C2410_SDICMDARG); + + ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX; + ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART; + + if (cmd->flags & MMC_RSP_PRESENT) + ccon |= S3C2410_SDICMDCON_WAITRSP; + + if (cmd->flags & MMC_RSP_136) + ccon |= S3C2410_SDICMDCON_LONGRSP; + + writel(ccon, host->base + S3C2410_SDICMDCON); +} + +static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data) +{ + u32 dcon, imsk, stoptries = 3; + + if ((data->blksz & 3) != 0) { + /* We cannot deal with unaligned blocks with more than + * one block being transferred.
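The same word-alignment constraint is what drives the FIFO trimming in do_pio_read() and do_pio_write() earlier in this file; a compact sketch of that rule, with a hypothetical helper name that is not part of the driver:

	/* how many bytes one PIO pass may move */
	static u32 pio_chunk(u32 fifo_bytes, u32 bytes_left)
	{
		if (fifo_bytes >= bytes_left)
			return bytes_left;	/* block end: a partial word is allowed */
		return fifo_bytes & ~3u;	/* mid-block: whole 32-bit words only */
	}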
*/ + + if (data->blocks > 1) { + pr_warn("%s: can't do non-word sized block transfers (blksz %d)\n", + __func__, data->blksz); + return -EINVAL; + } + } + + while (readl(host->base + S3C2410_SDIDSTA) & + (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) { + + dbg(host, dbg_err, + "mci_setup_data() transfer still in progress.\n"); + + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); + s3cmci_reset(host); + + if ((stoptries--) == 0) { + dbg_dumpregs(host, "DRF"); + return -EINVAL; + } + } + + dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; + + if (s3cmci_host_usedma(host)) + dcon |= S3C2410_SDIDCON_DMAEN; + + if (host->bus_width == MMC_BUS_WIDTH_4) + dcon |= S3C2410_SDIDCON_WIDEBUS; + + dcon |= S3C2410_SDIDCON_BLOCKMODE; + + if (data->flags & MMC_DATA_WRITE) { + dcon |= S3C2410_SDIDCON_TXAFTERRESP; + dcon |= S3C2410_SDIDCON_XFER_TXSTART; + } + + if (data->flags & MMC_DATA_READ) { + dcon |= S3C2410_SDIDCON_RXAFTERCMD; + dcon |= S3C2410_SDIDCON_XFER_RXSTART; + } + + if (host->is2440) { + dcon |= S3C2440_SDIDCON_DS_WORD; + dcon |= S3C2440_SDIDCON_DATSTART; + } + + writel(dcon, host->base + S3C2410_SDIDCON); + + /* write BSIZE register */ + + writel(data->blksz, host->base + S3C2410_SDIBSIZE); + + /* add to IMASK register */ + imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC | + S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH; + + enable_imask(host, imsk); + + /* write TIMER register */ + + if (host->is2440) { + writel(0x007FFFFF, host->base + S3C2410_SDITIMER); + } else { + writel(0x0000FFFF, host->base + S3C2410_SDITIMER); + + /* FIX: set slow clock to prevent timeouts on read */ + if (data->flags & MMC_DATA_READ) + writel(0xFF, host->base + S3C2410_SDIPRE); + } + + return 0; +} + +#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ) + +static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data) +{ + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); + + host->pio_sgptr = 0; + host->pio_bytes = 0; + host->pio_count = 0; + host->pio_active = rw ?
XFER_WRITE : XFER_READ; + + if (rw) { + do_pio_write(host); + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); + } else { + enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF + | S3C2410_SDIIMSK_RXFIFOLAST); + } + + return 0; +} + +static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) +{ + int rw = data->flags & MMC_DATA_WRITE; + struct dma_async_tx_descriptor *desc; + struct dma_slave_config conf = { + .src_addr = host->mem->start + host->sdidata, + .dst_addr = host->mem->start + host->sdidata, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); + + /* Restore prescaler value */ + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + if (!rw) + conf.direction = DMA_DEV_TO_MEM; + else + conf.direction = DMA_MEM_TO_DEV; + + dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + dmaengine_slave_config(host->dma, &conf); + desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len, + conf.direction, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) + goto unmap_exit; + desc->callback = s3cmci_dma_done_callback; + desc->callback_param = host; + dmaengine_submit(desc); + dma_async_issue_pending(host->dma); + + return 0; + +unmap_exit: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + return -ENOMEM; +} + +static void s3cmci_send_request(struct mmc_host *mmc) +{ + struct s3cmci_host *host = mmc_priv(mmc); + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; + + host->ccnt++; + prepare_dbgmsg(host, cmd, host->cmd_is_stop); + + /* Clear command, data and fifo status registers + Fifo clear only necessary on 2440, but doesn't hurt on 2410 + */ + writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT); + writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA); + writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA); + + if (cmd->data) { + int res = s3cmci_setup_data(host, cmd->data); + + host->dcnt++; + + if (res) { + dbg(host, dbg_err, "setup data error %d\n", res); + cmd->error = res; + cmd->data->error = res; + + mmc_request_done(mmc, mrq); + return; + } + + if (s3cmci_host_usedma(host)) + res = s3cmci_prepare_dma(host, cmd->data); + else + res = s3cmci_prepare_pio(host, cmd->data); + + if (res) { + dbg(host, dbg_err, "data prepare error %d\n", res); + cmd->error = res; + cmd->data->error = res; + + mmc_request_done(mmc, mrq); + return; + } + } + + /* Send command */ + s3cmci_send_command(host, cmd); + + /* Enable Interrupt */ + s3cmci_enable_irq(host, true); +} + +static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct s3cmci_host *host = mmc_priv(mmc); + + host->status = "mmc request"; + host->cmd_is_stop = 0; + host->mrq = mrq; + + if (mmc_gpio_get_cd(mmc) == 0) { + dbg(host, dbg_err, "%s: no medium present\n", __func__); + host->mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + } else + s3cmci_send_request(mmc); +} + +static void s3cmci_set_clk(struct s3cmci_host *host, struct mmc_ios *ios) +{ + u32 mci_psc; + + /* Set clock */ + for (mci_psc = 0; mci_psc < 255; mci_psc++) { + host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1)); + + if (host->real_rate <= ios->clock) + break; + } + + if (mci_psc > 255) + mci_psc = 255; + + host->prescaler = mci_psc; + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + /* If requested clock is 0, real_rate will be 0, too */ + if (ios->clock == 0) + host->real_rate = 0; 
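A worked example of the divider search above, using assumed numbers rather than driver defaults: with clk_rate = 50 MHz and clk_div = 2 (the S3C2410 case), a request for 400 kHz stops the loop at mci_psc = 62.

	/* assumed numbers, not driver defaults:
	 * 50000000 / (2 * (62 + 1)) = ~396.8 kHz, the first rate <= 400 kHz;
	 * mci_psc = 61 would still give ~403.2 kHz, which is too fast.
	 */
	host->real_rate = host->clk_rate / (host->clk_div * (mci_psc + 1));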
+} + +static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct s3cmci_host *host = mmc_priv(mmc); + u32 mci_con; + + /* Set the power state */ + + mci_con = readl(host->base + S3C2410_SDICON); + + switch (ios->power_mode) { + case MMC_POWER_ON: + case MMC_POWER_UP: + if (!host->is2440) + mci_con |= S3C2410_SDICON_FIFORESET; + break; + + case MMC_POWER_OFF: + default: + if (host->is2440) + mci_con |= S3C2440_SDICON_SDRESET; + break; + } + + if (host->pdata->set_power) + host->pdata->set_power(ios->power_mode, ios->vdd); + + s3cmci_set_clk(host, ios); + + /* Set CLOCK_ENABLE */ + if (ios->clock) + mci_con |= S3C2410_SDICON_CLOCKTYPE; + else + mci_con &= ~S3C2410_SDICON_CLOCKTYPE; + + writel(mci_con, host->base + S3C2410_SDICON); + + if ((ios->power_mode == MMC_POWER_ON) || + (ios->power_mode == MMC_POWER_UP)) { + dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n", + host->real_rate/1000, ios->clock/1000); + } else { + dbg(host, dbg_conf, "powered down.\n"); + } + + host->bus_width = ios->bus_width; +} + +static void s3cmci_reset(struct s3cmci_host *host) +{ + u32 con = readl(host->base + S3C2410_SDICON); + + con |= S3C2440_SDICON_SDRESET; + writel(con, host->base + S3C2410_SDICON); +} + +static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct s3cmci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 con; + + local_irq_save(flags); + + con = readl(host->base + S3C2410_SDICON); + host->sdio_irqen = enable; + + if (enable == host->sdio_irqen) + goto same_state; + + if (enable) { + con |= S3C2410_SDICON_SDIOIRQ; + enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ); + + if (!host->irq_state && !host->irq_disabled) { + host->irq_state = true; + enable_irq(host->irq); + } + } else { + disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ); + con &= ~S3C2410_SDICON_SDIOIRQ; + + if (!host->irq_enabled && host->irq_state) { + disable_irq_nosync(host->irq); + host->irq_state = false; + } + } + + writel(con, host->base + S3C2410_SDICON); + + same_state: + local_irq_restore(flags); + + s3cmci_check_sdio_irq(host); +} + +static const struct mmc_host_ops s3cmci_ops = { + .request = s3cmci_request, + .set_ios = s3cmci_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, + .enable_sdio_irq = s3cmci_enable_sdio_irq, +}; + +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ + +static int s3cmci_cpufreq_transition(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct s3cmci_host *host; + struct mmc_host *mmc; + unsigned long newclk; + unsigned long flags; + + host = container_of(nb, struct s3cmci_host, freq_transition); + newclk = clk_get_rate(host->clk); + mmc = host->mmc; + + if ((val == CPUFREQ_PRECHANGE && newclk > host->clk_rate) || + (val == CPUFREQ_POSTCHANGE && newclk < host->clk_rate)) { + spin_lock_irqsave(&mmc->lock, flags); + + host->clk_rate = newclk; + + if (mmc->ios.power_mode != MMC_POWER_OFF && + mmc->ios.clock != 0) + s3cmci_set_clk(host, &mmc->ios); + + spin_unlock_irqrestore(&mmc->lock, flags); + } + + return 0; +} + +static inline int s3cmci_cpufreq_register(struct s3cmci_host *host) +{ + host->freq_transition.notifier_call = s3cmci_cpufreq_transition; + + return cpufreq_register_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host) +{ + cpufreq_unregister_notifier(&host->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +#else +static inline int s3cmci_cpufreq_register(struct s3cmci_host *host) +{ + return 0; +} + +static inline void 
s3cmci_cpufreq_deregister(struct s3cmci_host *host) +{ +} +#endif + + +#ifdef CONFIG_DEBUG_FS + +static int s3cmci_state_show(struct seq_file *seq, void *v) +{ + struct s3cmci_host *host = seq->private; + + seq_printf(seq, "Register base = 0x%p\n", host->base); + seq_printf(seq, "Clock rate = %ld\n", host->clk_rate); + seq_printf(seq, "Prescale = %d\n", host->prescaler); + seq_printf(seq, "is2440 = %d\n", host->is2440); + seq_printf(seq, "IRQ = %d\n", host->irq); + seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled); + seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled); + seq_printf(seq, "IRQ state = %d\n", host->irq_state); + seq_printf(seq, "CD IRQ = %d\n", host->irq_cd); + seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host)); + seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk); + seq_printf(seq, "SDIDATA at %d\n", host->sdidata); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(s3cmci_state); + +#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r } + +struct s3cmci_reg { + unsigned short addr; + unsigned char *name; +}; + +static const struct s3cmci_reg debug_regs[] = { + DBG_REG(CON), + DBG_REG(PRE), + DBG_REG(CMDARG), + DBG_REG(CMDCON), + DBG_REG(CMDSTAT), + DBG_REG(RSP0), + DBG_REG(RSP1), + DBG_REG(RSP2), + DBG_REG(RSP3), + DBG_REG(TIMER), + DBG_REG(BSIZE), + DBG_REG(DCON), + DBG_REG(DCNT), + DBG_REG(DSTA), + DBG_REG(FSTA), + {} +}; + +static int s3cmci_regs_show(struct seq_file *seq, void *v) +{ + struct s3cmci_host *host = seq->private; + const struct s3cmci_reg *rptr = debug_regs; + + for (; rptr->name; rptr++) + seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, + readl(host->base + rptr->addr)); + + seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(s3cmci_regs); + +static void s3cmci_debugfs_attach(struct s3cmci_host *host) +{ + struct device *dev = &host->pdev->dev; + struct dentry *root; + + root = debugfs_create_dir(dev_name(dev), NULL); + host->debug_root = root; + + debugfs_create_file("state", 0444, root, host, &s3cmci_state_fops); + debugfs_create_file("regs", 0444, root, host, &s3cmci_regs_fops); +} + +static void s3cmci_debugfs_remove(struct s3cmci_host *host) +{ + debugfs_remove_recursive(host->debug_root); +} + +#else +static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { } +static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { } + +#endif /* CONFIG_DEBUG_FS */ + +static int s3cmci_probe_pdata(struct s3cmci_host *host) +{ + struct platform_device *pdev = host->pdev; + struct mmc_host *mmc = host->mmc; + struct s3c24xx_mci_pdata *pdata; + int i, ret; + + host->is2440 = platform_get_device_id(pdev)->driver_data; + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "need platform data"); + return -ENXIO; + } + + for (i = 0; i < 6; i++) { + pdata->bus[i] = devm_gpiod_get_index(&pdev->dev, "bus", i, + GPIOD_OUT_LOW); + if (IS_ERR(pdata->bus[i])) { + dev_err(&pdev->dev, "failed to get gpio %d\n", i); + return PTR_ERR(pdata->bus[i]); + } + } + + if (pdata->no_wprotect) + mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + + if (pdata->no_detect) + mmc->caps |= MMC_CAP_NEEDS_POLL; + + if (pdata->wprotect_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + /* If we get -ENOENT we have no card detect GPIO line */ + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); + if (ret != -ENOENT) { + dev_err(&pdev->dev, "error requesting GPIO for CD %d\n", + ret); + return ret; + } + + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); + if (ret != -ENOENT) { + 
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n", + ret); + return ret; + } + + return 0; +} + +static int s3cmci_probe_dt(struct s3cmci_host *host) +{ + struct platform_device *pdev = host->pdev; + struct s3c24xx_mci_pdata *pdata; + struct mmc_host *mmc = host->mmc; + int ret; + + host->is2440 = (long) of_device_get_match_data(&pdev->dev); + + ret = mmc_of_parse(mmc); + if (ret) + return ret; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdev->dev.platform_data = pdata; + + return 0; +} + +static int s3cmci_probe(struct platform_device *pdev) +{ + struct s3cmci_host *host; + struct mmc_host *mmc; + int ret; + + mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto probe_out; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->pdev = pdev; + + if (pdev->dev.of_node) + ret = s3cmci_probe_dt(host); + else + ret = s3cmci_probe_pdata(host); + + if (ret) + goto probe_free_host; + + host->pdata = pdev->dev.platform_data; + + spin_lock_init(&host->complete_lock); + tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host); + + if (host->is2440) { + host->sdiimsk = S3C2440_SDIIMSK; + host->sdidata = S3C2440_SDIDATA; + host->clk_div = 1; + } else { + host->sdiimsk = S3C2410_SDIIMSK; + host->sdidata = S3C2410_SDIDATA; + host->clk_div = 2; + } + + host->complete_what = COMPLETION_NONE; + host->pio_active = XFER_NONE; + + host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!host->mem) { + dev_err(&pdev->dev, + "failed to get io memory region resource.\n"); + + ret = -ENOENT; + goto probe_free_host; + } + + host->mem = request_mem_region(host->mem->start, + resource_size(host->mem), pdev->name); + + if (!host->mem) { + dev_err(&pdev->dev, "failed to request io memory region.\n"); + ret = -ENOENT; + goto probe_free_host; + } + + host->base = ioremap(host->mem->start, resource_size(host->mem)); + if (!host->base) { + dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); + ret = -EINVAL; + goto probe_free_mem_region; + } + + host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { + ret = -EINVAL; + goto probe_iounmap; + } + + if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) { + dev_err(&pdev->dev, "failed to request mci interrupt.\n"); + ret = -ENOENT; + goto probe_iounmap; + } + + /* We get spurious interrupts even when we have set the IMSK + * register to ignore everything, so use disable_irq() to make + * ensure we don't lock the system with un-serviceable requests. */ + + disable_irq(host->irq); + host->irq_state = false; + + /* Depending on the dma state, get a DMA channel to use. 
*/ + + if (s3cmci_host_usedma(host)) { + host->dma = dma_request_chan(&pdev->dev, "rx-tx"); + ret = PTR_ERR_OR_ZERO(host->dma); + if (ret) { + dev_err(&pdev->dev, "cannot get DMA channel.\n"); + goto probe_free_irq; + } + } + + host->clk = clk_get(&pdev->dev, "sdi"); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "failed to find clock source.\n"); + ret = PTR_ERR(host->clk); + host->clk = NULL; + goto probe_free_dma; + } + + ret = clk_prepare_enable(host->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock source.\n"); + goto clk_free; + } + + host->clk_rate = clk_get_rate(host->clk); + + mmc->ops = &s3cmci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; +#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; +#else + mmc->caps = MMC_CAP_4_BIT_DATA; +#endif + mmc->f_min = host->clk_rate / (host->clk_div * 256); + mmc->f_max = host->clk_rate / host->clk_div; + + if (host->pdata->ocr_avail) + mmc->ocr_avail = host->pdata->ocr_avail; + + mmc->max_blk_count = 4095; + mmc->max_blk_size = 4095; + mmc->max_req_size = 4095 * 512; + mmc->max_seg_size = mmc->max_req_size; + + mmc->max_segs = 128; + + dbg(host, dbg_debug, + "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%p.\n", + (host->is2440?"2440":""), + host->base, host->irq, host->irq_cd, host->dma); + + ret = s3cmci_cpufreq_register(host); + if (ret) { + dev_err(&pdev->dev, "failed to register cpufreq\n"); + goto free_dmabuf; + } + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "failed to add mmc host.\n"); + goto free_cpufreq; + } + + s3cmci_debugfs_attach(host); + + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc), + s3cmci_host_usedma(host) ? "dma" : "pio", + mmc->caps & MMC_CAP_SDIO_IRQ ? 
"hw" : "sw"); + + return 0; + + free_cpufreq: + s3cmci_cpufreq_deregister(host); + + free_dmabuf: + clk_disable_unprepare(host->clk); + + clk_free: + clk_put(host->clk); + + probe_free_dma: + if (s3cmci_host_usedma(host)) + dma_release_channel(host->dma); + + probe_free_irq: + free_irq(host->irq, host); + + probe_iounmap: + iounmap(host->base); + + probe_free_mem_region: + release_mem_region(host->mem->start, resource_size(host->mem)); + + probe_free_host: + mmc_free_host(mmc); + + probe_out: + return ret; +} + +static void s3cmci_shutdown(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct s3cmci_host *host = mmc_priv(mmc); + + if (host->irq_cd >= 0) + free_irq(host->irq_cd, host); + + s3cmci_debugfs_remove(host); + s3cmci_cpufreq_deregister(host); + mmc_remove_host(mmc); + clk_disable_unprepare(host->clk); +} + +static int s3cmci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct s3cmci_host *host = mmc_priv(mmc); + + s3cmci_shutdown(pdev); + + clk_put(host->clk); + + tasklet_disable(&host->pio_tasklet); + + if (s3cmci_host_usedma(host)) + dma_release_channel(host->dma); + + free_irq(host->irq, host); + + iounmap(host->base); + release_mem_region(host->mem->start, resource_size(host->mem)); + + mmc_free_host(mmc); + return 0; +} + +static const struct of_device_id s3cmci_dt_match[] = { + { + .compatible = "samsung,s3c2410-sdi", + .data = (void *)0, + }, + { + .compatible = "samsung,s3c2412-sdi", + .data = (void *)1, + }, + { + .compatible = "samsung,s3c2440-sdi", + .data = (void *)1, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, s3cmci_dt_match); + +static const struct platform_device_id s3cmci_driver_ids[] = { + { + .name = "s3c2410-sdi", + .driver_data = 0, + }, { + .name = "s3c2412-sdi", + .driver_data = 1, + }, { + .name = "s3c2440-sdi", + .driver_data = 1, + }, + { } +}; + +MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); + +static struct platform_driver s3cmci_driver = { + .driver = { + .name = "s3c-sdi", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = s3cmci_dt_match, + }, + .id_table = s3cmci_driver_ids, + .probe = s3cmci_probe, + .remove = s3cmci_remove, + .shutdown = s3cmci_shutdown, +}; + +module_platform_driver(s3cmci_driver); + +MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thomas Kleffel , Ben Dooks "); diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h new file mode 100644 index 000000000..8b65d7ad9 --- /dev/null +++ b/drivers/mmc/host/s3cmci.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver + * + * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved. 
+ */ + +enum s3cmci_waitfor { + COMPLETION_NONE, + COMPLETION_FINALIZE, + COMPLETION_CMDSENT, + COMPLETION_RSPFIN, + COMPLETION_XFERFINISH, + COMPLETION_XFERFINISH_RSPFIN, +}; + +struct s3cmci_host { + struct platform_device *pdev; + struct s3c24xx_mci_pdata *pdata; + struct mmc_host *mmc; + struct resource *mem; + struct clk *clk; + void __iomem *base; + int irq; + int irq_cd; + struct dma_chan *dma; + + unsigned long clk_rate; + unsigned long clk_div; + unsigned long real_rate; + u8 prescaler; + + int is2440; + unsigned sdiimsk; + unsigned sdidata; + + bool irq_disabled; + bool irq_enabled; + bool irq_state; + int sdio_irqen; + + struct mmc_request *mrq; + int cmd_is_stop; + + spinlock_t complete_lock; + enum s3cmci_waitfor complete_what; + + int dma_complete; + + u32 pio_sgptr; + u32 pio_bytes; + u32 pio_count; + u32 *pio_ptr; +#define XFER_NONE 0 +#define XFER_READ 1 +#define XFER_WRITE 2 + u32 pio_active; + + int bus_width; + + char dbgmsg_cmd[301]; + char dbgmsg_dat[301]; + char *status; + + unsigned int ccnt, dcnt; + struct tasklet_struct pio_tasklet; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_root; +#endif + +#ifdef CONFIG_ARM_S3C24XX_CPUFREQ + struct notifier_block freq_transition; +#endif +}; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c new file mode 100644 index 000000000..2a2810177 --- /dev/null +++ b/drivers/mmc/host/sdhci-acpi.c @@ -0,0 +1,1090 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Secure Digital Host Controller Interface ACPI driver. + * + * Copyright (c) 2012, Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_X86 +#include +#include +#include +#include +#endif + +#include "sdhci.h" + +enum { + SDHCI_ACPI_SD_CD = BIT(0), + SDHCI_ACPI_RUNTIME_PM = BIT(1), + SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2), +}; + +struct sdhci_acpi_chip { + const struct sdhci_ops *ops; + unsigned int quirks; + unsigned int quirks2; + unsigned long caps; + unsigned int caps2; + mmc_pm_flag_t pm_caps; +}; + +struct sdhci_acpi_slot { + const struct sdhci_acpi_chip *chip; + unsigned int quirks; + unsigned int quirks2; + unsigned long caps; + unsigned int caps2; + mmc_pm_flag_t pm_caps; + unsigned int flags; + size_t priv_size; + int (*probe_slot)(struct platform_device *, struct acpi_device *); + int (*remove_slot)(struct platform_device *); + int (*free_slot)(struct platform_device *pdev); + int (*setup_host)(struct platform_device *pdev); +}; + +struct sdhci_acpi_host { + struct sdhci_host *host; + const struct sdhci_acpi_slot *slot; + struct platform_device *pdev; + bool use_runtime_pm; + bool is_intel; + bool reset_signal_volt_on_suspend; + unsigned long private[] ____cacheline_aligned; +}; + +enum { + DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0), + DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1), +}; + +static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c) +{ + return (void *)c->private; +} + +static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) +{ + return c->slot && (c->slot->flags & flag); +} + +#define INTEL_DSM_HS_CAPS_SDR25 BIT(0) +#define INTEL_DSM_HS_CAPS_DDR50 BIT(1) +#define INTEL_DSM_HS_CAPS_SDR50 BIT(2) +#define INTEL_DSM_HS_CAPS_SDR104 BIT(3) + +enum { + INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, + INTEL_DSM_V33_SWITCH = 4, + INTEL_DSM_HS_CAPS = 8, +}; + +struct intel_host { + u32 dsm_fns; + 
u32 hs_caps; +}; + +static const guid_t intel_dsm_guid = + GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, + 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); + +static int __intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + union acpi_object *obj; + int err = 0; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); + if (!obj) + return -EOPNOTSUPP; + + if (obj->type == ACPI_TYPE_INTEGER) { + *result = obj->integer.value; + } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) { + size_t len = min_t(size_t, obj->buffer.length, 4); + + *result = 0; + memcpy(result, obj->buffer.pointer, len); + } else { + dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n", + __func__, fn, obj->type, obj->buffer.length); + err = -EINVAL; + } + + ACPI_FREE(obj); + + return err; +} + +static int intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) + return -EOPNOTSUPP; + + return __intel_dsm(intel_host, dev, fn, result); +} + +static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, + struct mmc_host *mmc) +{ + int err; + + intel_host->hs_caps = ~0; + + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); + if (err) { + pr_debug("%s: DSM not supported, error %d\n", + mmc_hostname(mmc), err); + return; + } + + pr_debug("%s: DSM function mask %#x\n", + mmc_hostname(mmc), intel_host->dsm_fns); + + intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps); +} + +static int intel_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct intel_host *intel_host = sdhci_acpi_priv(c); + unsigned int fn; + u32 result = 0; + int err; + + err = sdhci_start_signal_voltage_switch(mmc, ios); + if (err) + return err; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + fn = INTEL_DSM_V33_SWITCH; + break; + case MMC_SIGNAL_VOLTAGE_180: + fn = INTEL_DSM_V18_SWITCH; + break; + default: + return 0; + } + + err = intel_dsm(intel_host, dev, fn, &result); + pr_debug("%s: %s DSM fn %u error %d result %u\n", + mmc_hostname(mmc), __func__, fn, err, result); + + return 0; +} + +static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) +{ + u8 reg; + + reg = sdhci_readb(host, SDHCI_POWER_CONTROL); + reg |= 0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 1us but give it 9us for good measure */ + udelay(9); + reg &= ~0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 1000); +} + +static const struct sdhci_ops sdhci_acpi_ops_dflt = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_ops sdhci_acpi_ops_int = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .hw_reset = sdhci_acpi_int_hw_reset, +}; + +static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { + .ops = &sdhci_acpi_ops_int, +}; + +#ifdef CONFIG_X86 + +static bool sdhci_acpi_byt(void) +{ + static const struct x86_cpu_id byt[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), + {} + }; + + return x86_match_cpu(byt); +} + +static bool sdhci_acpi_cht(void) +{ + static 
const struct x86_cpu_id cht[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL), + {} + }; + + return x86_match_cpu(cht); +} + +#define BYT_IOSF_SCCEP 0x63 +#define BYT_IOSF_OCP_NETCTRL0 0x1078 +#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) + +static void sdhci_acpi_byt_setting(struct device *dev) +{ + u32 val = 0; + + if (!sdhci_acpi_byt()) + return; + + if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, + &val)) { + dev_err(dev, "%s read error\n", __func__); + return; + } + + if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) + return; + + val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; + + if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, + val)) { + dev_err(dev, "%s write error\n", __func__); + return; + } + + dev_dbg(dev, "%s completed\n", __func__); +} + +static bool sdhci_acpi_byt_defer(struct device *dev) +{ + if (!sdhci_acpi_byt()) + return false; + + if (!iosf_mbi_available()) + return true; + + sdhci_acpi_byt_setting(dev); + + return false; +} + +static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device, + unsigned int slot, unsigned int parent_slot) +{ + struct pci_dev *dev, *parent, *from = NULL; + + while (1) { + dev = pci_get_device(vendor, device, from); + pci_dev_put(from); + if (!dev) + break; + parent = pci_upstream_bridge(dev); + if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot && + parent && PCI_SLOT(parent->devfn) == parent_slot && + !pci_upstream_bridge(parent)) { + pci_dev_put(dev); + return true; + } + from = dev; + } + + return false; +} + +/* + * GPDwin uses PCI wifi which conflicts with SDIO's use of + * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is + * problematic, but since SDIO is only used for wifi, the presence of the PCI + * wifi card in the expected slot with an ACPI companion node, is used to + * indicate that acpi_device_fix_up_power() should be avoided. + */ +static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev) +{ + return sdhci_acpi_cht() && + acpi_dev_hid_uid_match(adev, "80860F14", "2") && + sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28); +} + +#else + +static inline void sdhci_acpi_byt_setting(struct device *dev) +{ +} + +static inline bool sdhci_acpi_byt_defer(struct device *dev) +{ + return false; +} + +static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev) +{ + return false; +} + +#endif + +static int bxt_get_cd(struct mmc_host *mmc) +{ + int gpio_cd = mmc_gpio_get_cd(mmc); + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + int ret = 0; + + if (!gpio_cd) + return 0; + + spin_lock_irqsave(&host->lock, flags); + + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + + ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +out: + spin_unlock_irqrestore(&host->lock, flags); + + return ret; +} + +static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct intel_host *intel_host = sdhci_acpi_priv(c); + struct sdhci_host *host = c->host; + + if (acpi_dev_hid_uid_match(adev, "80860F14", "1") && + sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 && + sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807) + host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ + + if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL)) + host->mmc_host_ops.get_cd = bxt_get_cd; + + intel_dsm_init(intel_host, &pdev->dev, host->mmc); + + host->mmc_host_ops.start_signal_voltage_switch = + intel_start_signal_voltage_switch; + + c->is_intel = true; + + return 0; +} + +static int intel_setup_host(struct platform_device *pdev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct intel_host *intel_host = sdhci_acpi_priv(c); + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25)) + c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25; + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50)) + c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50; + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50)) + c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50; + + if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104)) + c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104; + + return 0; +} + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { + .chip = &sdhci_acpi_chip_int, + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | + MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY, + .flags = SDHCI_ACPI_RUNTIME_PM, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_STOP_WITH_TC | + SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400, + .probe_slot = intel_probe_slot, + .setup_host = intel_setup_host, + .priv_size = sizeof(struct intel_host), +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_NO_LED | + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD | + MMC_CAP_WAIT_WHILE_BUSY, + .flags = SDHCI_ACPI_RUNTIME_PM, + .pm_caps = MMC_PM_KEEP_POWER, + .probe_slot = intel_probe_slot, + .setup_host = intel_setup_host, + .priv_size = sizeof(struct intel_host), +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { + .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL | + SDHCI_ACPI_RUNTIME_PM, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | + SDHCI_QUIRK2_STOP_WITH_TC, + .caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM, + .probe_slot = intel_probe_slot, + .setup_host = intel_setup_host, + .priv_size = sizeof(struct intel_host), +}; + +#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG 0x1a8 +#define VENDOR_SPECIFIC_PWRCTL_CTL_REG 0x1ac +static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr) +{ + struct sdhci_host *host = ptr; + + sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG); + sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG); + + return IRQ_HANDLED; +} + +static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct sdhci_host *host = c->host; + int *irq = sdhci_acpi_priv(c); + + *irq = -EINVAL; + + if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL)) + return 0; + + *irq = platform_get_irq(pdev, 1); + if (*irq < 0) + return 0; + + return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + "sdhci_qcom", host); +} + +static int qcom_free_slot(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct sdhci_host *host = c->host; + struct acpi_device *adev; + int *irq = sdhci_acpi_priv(c); + + adev = 
ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; + + if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL)) + return 0; + + if (*irq < 0) + return 0; + + free_irq(*irq, host); + return 0; +} + +static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, + .quirks2 = SDHCI_QUIRK2_NO_1_8_V, + .caps = MMC_CAP_NONREMOVABLE, + .priv_size = sizeof(int), + .probe_slot = qcom_probe_slot, + .free_slot = qcom_free_slot, +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, + .caps = MMC_CAP_NONREMOVABLE, +}; + +struct amd_sdhci_host { + bool tuned_clock; + bool dll_enabled; +}; + +/* AMD sdhci reset dll register. */ +#define SDHCI_AMD_RESET_DLL_REGISTER 0x908 + +static int amd_select_drive_strength(struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type) +{ + *drv_type = MMC_SET_DRIVER_TYPE_A; + return MMC_SET_DRIVER_TYPE_A; +} + +static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable) +{ + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + /* AMD Platform requires dll setting */ + sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); + usleep_range(10, 20); + if (enable) + sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + + amd_host->dll_enabled = enable; +} + +/* + * The initialization sequence for HS400 is: + * HS->HS200->Perform Tuning->HS->HS400 + * + * The re-tuning sequence is: + * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400 + * + * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400 + * mode. If we switch to a different mode, we need to disable the tuned clock. + * If we have previously performed tuning and switch back to HS200 or + * HS400, we can re-enable the tuned clock. 
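The re-enable described here reduces to toggling SDHCI_CTRL_TUNED_CLK in HOST_CONTROL2, which amd_set_ios() below performs; a condensed sketch:

	u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (host->timing == MMC_TIMING_MMC_HS200 ||
	    host->timing == MMC_TIMING_MMC_HS400)
		ctrl2 |= SDHCI_CTRL_TUNED_CLK;	/* tuning results are valid again */
	else
		ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;	/* other modes must not use the tuned clock */
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);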
+ * + */ +static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + unsigned int old_timing = host->timing; + u16 val; + + sdhci_set_ios(mmc, ios); + + if (old_timing != host->timing && amd_host->tuned_clock) { + if (host->timing == MMC_TIMING_MMC_HS400 || + host->timing == MMC_TIMING_MMC_HS200) { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val |= SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } else { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } + + /* DLL is only required for HS400 */ + if (host->timing == MMC_TIMING_MMC_HS400 && + !amd_host->dll_enabled) + sdhci_acpi_amd_hs400_dll(host, true); + } +} + +static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + int err; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + amd_host->tuned_clock = false; + + err = sdhci_execute_tuning(mmc, opcode); + + if (!err && !host->tuning_err) + amd_host->tuned_clock = true; + + return err; +} + +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + if (mask & SDHCI_RESET_ALL) { + amd_host->tuned_clock = false; + sdhci_acpi_amd_hs400_dll(host, false); + } + + sdhci_reset(host, mask); +} + +static const struct sdhci_ops sdhci_acpi_ops_amd = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = amd_sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = { + .ops = &sdhci_acpi_ops_amd, +}; + +static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, + struct acpi_device *adev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct sdhci_host *host = c->host; + + sdhci_read_caps(host); + if (host->caps1 & SDHCI_SUPPORT_DDR50) + host->mmc->caps = MMC_CAP_1_8V_DDR; + + if ((host->caps1 & SDHCI_SUPPORT_SDR104) && + (host->mmc->caps & MMC_CAP_1_8V_DDR)) + host->mmc->caps2 = MMC_CAP2_HS400_1_8V; + + /* + * There are two types of presets out in the wild: + * 1) Default/broken presets. + * These presets have two sets of problems: + * a) The clock divisor for SDR12, SDR25, and SDR50 is too small. + * This results in clock frequencies that are 2x higher than + * acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 = + * 100 MHz.x + * b) The HS200 and HS400 driver strengths don't match. + * By default, the SDR104 preset register has a driver strength of + * A, but the (internal) HS400 preset register has a driver + * strength of B. As part of initializing HS400, HS200 tuning + * needs to be performed. Having different driver strengths + * between tuning and operation is wrong. It results in different + * rise/fall times that lead to incorrect sampling. + * 2) Firmware with properly initialized presets. + * These presets have proper clock divisors. i.e., SDR12 => 12MHz, + * SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and + * HS400 preset driver strengths match. 
+static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+					  struct acpi_device *adev)
+{
+	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+	struct sdhci_host *host   = c->host;
+
+	sdhci_read_caps(host);
+	if (host->caps1 & SDHCI_SUPPORT_DDR50)
+		host->mmc->caps = MMC_CAP_1_8V_DDR;
+
+	if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
+	    (host->mmc->caps & MMC_CAP_1_8V_DDR))
+		host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+
+	/*
+	 * There are two types of presets out in the wild:
+	 * 1) Default/broken presets.
+	 *    These presets have two sets of problems:
+	 *    a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
+	 *       This results in clock frequencies that are 2x higher than
+	 *       acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
+	 *       100 MHz.
+	 *    b) The HS200 and HS400 driver strengths don't match.
+	 *       By default, the SDR104 preset register has a driver strength of
+	 *       A, but the (internal) HS400 preset register has a driver
+	 *       strength of B. As part of initializing HS400, HS200 tuning
+	 *       needs to be performed. Having different driver strengths
+	 *       between tuning and operation is wrong. It results in different
+	 *       rise/fall times that lead to incorrect sampling.
+	 * 2) Firmware with properly initialized presets.
+	 *    These presets have proper clock divisors. i.e., SDR12 => 12MHz,
+	 *    SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and
+	 *    HS400 preset driver strengths match.
+	 *
+	 * Enabling presets for HS400 doesn't work for the following reasons:
+	 * 1) sdhci_set_ios has a hard coded list of timings that are used
+	 *    to determine if presets should be enabled.
+	 * 2) sdhci_get_preset_value is using a non-standard register to
+	 *    read out HS400 presets. The AMD controller doesn't support this
+	 *    non-standard register. In fact, it doesn't expose the HS400
+	 *    preset register anywhere in the SDHCI memory map. This results
+	 *    in reading a garbage value and using the wrong presets.
+	 *
+	 * Since HS400 and HS200 presets must be identical, we could
+	 * instead use the SDR104 preset register.
+	 *
+	 * If the above issues are resolved we could remove this quirk for
+	 * firmware that has valid presets (i.e., SDR12 <= 12 MHz).
+	 */
+	host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+	host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+	host->mmc_host_ops.set_ios = amd_set_ios;
+	host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
+	return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+	.chip		= &sdhci_acpi_chip_amd,
+	.caps		= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
+			  SDHCI_QUIRK_32BIT_DMA_SIZE |
+			  SDHCI_QUIRK_32BIT_ADMA_SIZE,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+	.probe_slot	= sdhci_acpi_emmc_amd_probe_slot,
+	.priv_size	= sizeof(struct amd_sdhci_host),
+};
+
+struct sdhci_acpi_uid_slot {
+	const char *hid;
+	const char *uid;
+	const struct sdhci_acpi_slot *slot;
+};
+
+static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+	{ "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
+	{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
+	{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
+	{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+	{ "80860F14" , "2" , &sdhci_acpi_slot_int_sdio },
+	{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
+	{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
+	{ "INT33BB"  , "2" , &sdhci_acpi_slot_int_sdio },
+	{ "INT33BB"  , "3" , &sdhci_acpi_slot_int_sd },
+	{ "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
+	{ "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
+	{ "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
+	{ "PNP0FFF"  , "3" , &sdhci_acpi_slot_int_sd },
+	{ "PNP0D40"  },
+	{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
+	{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
+	{ "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
+	{ },
+};
+
+static const struct acpi_device_id sdhci_acpi_ids[] = {
+	{ "80865ACA" },
+	{ "80865ACC" },
+	{ "80865AD0" },
+	{ "80860F14" },
+	{ "80860F16" },
+	{ "INT33BB"  },
+	{ "INT33C6"  },
+	{ "INT3436"  },
+	{ "INT344D"  },
+	{ "PNP0D40"  },
+	{ "QCOM8051" },
+	{ "QCOM8052" },
+	{ "AMDI0040" },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+
+static const struct dmi_system_id sdhci_acpi_quirks[] = {
+	{
+		/*
+		 * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
+		 * the SHC1 ACPI device, this bug causes it to reprogram the
+		 * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
+		 * card is (runtime) suspended + resumed. DLDO3 is used for
+		 * the LCD and setting it to 1.8V causes the LCD to go black.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+		},
+		.driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP,
+	},
+	{
+		/*
+		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+		 * reports the card being write-protected even though microSD
+		 * cards do not have a write-protect switch at all.
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), + }, + .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, + }, + { + /* + * The Toshiba WT8-B's microSD slot always reports the card being + * write-protected. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"), + }, + .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, + }, + {} /* Terminating entry */ +}; + +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev) +{ + const struct sdhci_acpi_uid_slot *u; + + for (u = sdhci_acpi_uids; u->hid; u++) { + if (acpi_dev_hid_uid_match(adev, u->hid, u->uid)) + return u->slot; + } + return NULL; +} + +static int sdhci_acpi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct sdhci_acpi_slot *slot; + struct acpi_device *device, *child; + const struct dmi_system_id *id; + struct sdhci_acpi_host *c; + struct sdhci_host *host; + struct resource *iomem; + resource_size_t len; + size_t priv_size; + int quirks = 0; + int err; + + device = ACPI_COMPANION(dev); + if (!device) + return -ENODEV; + + id = dmi_first_match(sdhci_acpi_quirks); + if (id) + quirks = (long)id->driver_data; + + slot = sdhci_acpi_get_slot(device); + + /* Power on the SDHCI controller and its children */ + acpi_device_fix_up_power(device); + if (!sdhci_acpi_no_fixup_child_power(device)) { + list_for_each_entry(child, &device->children, node) + if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); + } + + if (sdhci_acpi_byt_defer(dev)) + return -EPROBE_DEFER; + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem) + return -ENOMEM; + + len = resource_size(iomem); + if (len < 0x100) + dev_err(dev, "Invalid iomem size!\n"); + + if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) + return -ENOMEM; + + priv_size = slot ? 
slot->priv_size : 0; + host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size); + if (IS_ERR(host)) + return PTR_ERR(host); + + c = sdhci_priv(host); + c->host = host; + c->slot = slot; + c->pdev = pdev; + c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); + + platform_set_drvdata(pdev, c); + + host->hw_name = "ACPI"; + host->ops = &sdhci_acpi_ops_dflt; + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + err = host->irq; + goto err_free; + } + + host->ioaddr = devm_ioremap(dev, iomem->start, + resource_size(iomem)); + if (host->ioaddr == NULL) { + err = -ENOMEM; + goto err_free; + } + + if (c->slot) { + if (c->slot->probe_slot) { + err = c->slot->probe_slot(pdev, device); + if (err) + goto err_free; + } + if (c->slot->chip) { + host->ops = c->slot->chip->ops; + host->quirks |= c->slot->chip->quirks; + host->quirks2 |= c->slot->chip->quirks2; + host->mmc->caps |= c->slot->chip->caps; + host->mmc->caps2 |= c->slot->chip->caps2; + host->mmc->pm_caps |= c->slot->chip->pm_caps; + } + host->quirks |= c->slot->quirks; + host->quirks2 |= c->slot->quirks2; + host->mmc->caps |= c->slot->caps; + host->mmc->caps2 |= c->slot->caps2; + host->mmc->pm_caps |= c->slot->pm_caps; + } + + host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; + + if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { + bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); + + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); + if (err) { + if (err == -EPROBE_DEFER) + goto err_free; + dev_warn(dev, "failed to setup card detect gpio\n"); + c->use_runtime_pm = false; + } + + if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP) + c->reset_signal_volt_on_suspend = true; + + if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT) + host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + } + + err = sdhci_setup_host(host); + if (err) + goto err_free; + + if (c->slot && c->slot->setup_host) { + err = c->slot->setup_host(pdev); + if (err) + goto err_cleanup; + } + + err = __sdhci_add_host(host); + if (err) + goto err_cleanup; + + if (c->use_runtime_pm) { + pm_runtime_set_active(dev); + pm_suspend_ignore_children(dev, 1); + pm_runtime_set_autosuspend_delay(dev, 50); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + } + + device_enable_async_suspend(dev); + + return 0; + +err_cleanup: + sdhci_cleanup_host(c->host); +err_free: + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + + sdhci_free_host(c->host); + return err; +} + +static int sdhci_acpi_remove(struct platform_device *pdev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int dead; + + if (c->use_runtime_pm) { + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + } + + if (c->slot && c->slot->remove_slot) + c->slot->remove_slot(pdev); + + dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); + sdhci_remove_host(c->host, dead); + + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + + sdhci_free_host(c->host); + + return 0; +} + +static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed( + struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct sdhci_host *host = c->host; + + if (c->is_intel && c->reset_signal_volt_on_suspend && + host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) { + struct intel_host *intel_host = sdhci_acpi_priv(c); + unsigned int fn = INTEL_DSM_V33_SWITCH; + u32 result = 0; + + intel_dsm(intel_host, dev, fn, &result); + } +} + +#ifdef CONFIG_PM_SLEEP + +static int 
sdhci_acpi_suspend(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct sdhci_host *host = c->host; + int ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + sdhci_acpi_reset_signal_voltage_if_needed(dev); + return 0; +} + +static int sdhci_acpi_resume(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + + sdhci_acpi_byt_setting(&c->pdev->dev); + + return sdhci_resume_host(c->host); +} + +#endif + +#ifdef CONFIG_PM + +static int sdhci_acpi_runtime_suspend(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct sdhci_host *host = c->host; + int ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + sdhci_acpi_reset_signal_voltage_if_needed(dev); + return 0; +} + +static int sdhci_acpi_runtime_resume(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + + sdhci_acpi_byt_setting(&c->pdev->dev); + + return sdhci_runtime_resume_host(c->host, 0); +} + +#endif + +static const struct dev_pm_ops sdhci_acpi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume) + SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend, + sdhci_acpi_runtime_resume, NULL) +}; + +static struct platform_driver sdhci_acpi_driver = { + .driver = { + .name = "sdhci-acpi", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .acpi_match_table = sdhci_acpi_ids, + .pm = &sdhci_acpi_pm_ops, + }, + .probe = sdhci_acpi_probe, + .remove = sdhci_acpi_remove, +}; + +module_platform_driver(sdhci_acpi_driver); + +MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); +MODULE_AUTHOR("Adrian Hunter"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c new file mode 100644 index 000000000..4d4aac85c --- /dev/null +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -0,0 +1,338 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "sdhci-pltfm.h"
+#include "sdhci.h"
+
+#define SDHCI_SOFT_RESET			0x01000000
+#define KONA_SDHOST_CORECTRL			0x8000
+#define KONA_SDHOST_CD_PINCTRL			0x00000008
+#define KONA_SDHOST_STOP_HCLK			0x00000004
+#define KONA_SDHOST_RESET			0x00000002
+#define KONA_SDHOST_EN				0x00000001
+
+#define KONA_SDHOST_CORESTAT			0x8004
+#define KONA_SDHOST_WP				0x00000002
+#define KONA_SDHOST_CD_SW			0x00000001
+
+#define KONA_SDHOST_COREIMR			0x8008
+#define KONA_SDHOST_IP				0x00000001
+
+#define KONA_SDHOST_COREISR			0x800C
+#define KONA_SDHOST_COREIMSR			0x8010
+#define KONA_SDHOST_COREDBG1			0x8014
+#define KONA_SDHOST_COREGPO_MASK		0x8018
+
+#define SD_DETECT_GPIO_DEBOUNCE_128MS		128
+
+#define KONA_MMC_AUTOSUSPEND_DELAY		(50)
+
+struct sdhci_bcm_kona_dev {
+	struct mutex	write_lock; /* protect back to back writes */
+};
+
+
+static int sdhci_bcm_kona_sd_reset(struct sdhci_host *host)
+{
+	unsigned int val;
+	unsigned long timeout;
+
+	/* This timeout should be sufficient for the core to reset */
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	/* reset the host using the top level reset */
+	val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
+	val |= KONA_SDHOST_RESET;
+	sdhci_writel(host, val, KONA_SDHOST_CORECTRL);
+
+	while (!(sdhci_readl(host, KONA_SDHOST_CORECTRL) & KONA_SDHOST_RESET)) {
+		if (time_is_before_jiffies(timeout)) {
+			pr_err("Error: sd host is stuck in reset!!!\n");
+			return -EFAULT;
+		}
+	}
+
+	/* bring the host out of reset */
+	val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
+	val &= ~KONA_SDHOST_RESET;
+
+	/*
+	 * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS)
+	 * Back-to-Back writes to same register needs delay when SD bus clock
+	 * is very low w.r.t AHB clock, mainly during boot-time and during card
+	 * insert-removal.
+	 */
+	usleep_range(1000, 5000);
+	sdhci_writel(host, val, KONA_SDHOST_CORECTRL);
+
+	return 0;
+}
+
+static void sdhci_bcm_kona_sd_init(struct sdhci_host *host)
+{
+	unsigned int val;
+
+	/* enable the interrupt from the IP core */
+	val = sdhci_readl(host, KONA_SDHOST_COREIMR);
+	val |= KONA_SDHOST_IP;
+	sdhci_writel(host, val, KONA_SDHOST_COREIMR);
+
+	/* Enable the AHB clock gating module to the host */
+	val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
+	val |= KONA_SDHOST_EN;
+
+	/*
+	 * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS)
+	 * Back-to-Back writes to same register needs delay when SD bus clock
+	 * is very low w.r.t AHB clock, mainly during boot-time and during card
+	 * insert-removal.
+	 */
+	usleep_range(1000, 5000);
+	sdhci_writel(host, val, KONA_SDHOST_CORECTRL);
+}
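
sdhci_bcm_kona_sd_reset() above is a classic poll-until-latched loop with a bail-out. The stand-alone sketch below mirrors its shape; it is illustrative only and not part of the patch. The mock register read and the poll budget are invented, whereas the driver bounds the loop with a 100ms jiffies deadline and returns -EFAULT on expiry.

#include <stdio.h>
#include <stdint.h>

#define KONA_SDHOST_RESET 0x00000002u	/* same bit value as the driver */

/* Stand-in for the CORECTRL read; latches the bit after a few polls. */
static uint32_t mock_read_corectrl(void)
{
	static int polls;

	return (++polls >= 4) ? KONA_SDHOST_RESET : 0;
}

/*
 * Keep polling until the reset bit reads back, but give up after a
 * budget (the driver uses a jiffies deadline instead of a poll count).
 */
static int wait_reset_latched(int budget)
{
	while (!(mock_read_corectrl() & KONA_SDHOST_RESET)) {
		if (--budget <= 0)
			return -1;	/* driver returns -EFAULT here */
	}
	return 0;
}

int main(void)
{
	printf("reset %s\n", wait_reset_latched(100) ? "timed out" : "latched");
	return 0;
}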
+
+/*
+ * Software emulation of the SD card insertion/removal. Set insert=1 for insert
+ * and insert=0 for removal. The card detection is done by GPIO. For Broadcom
+ * IP to function properly the bit 0 of CORESTAT register needs to be set/reset
+ * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet.
+ */
+static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert)
+{
+	struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host);
+	struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv);
+	u32 val;
+
+	/*
+	 * Back-to-Back register write needs a delay of min 10uS.
+	 * Back-to-Back writes to same register needs delay when SD bus clock
+	 * is very low w.r.t AHB clock, mainly during boot-time and during card
+	 * insert-removal.
+	 * We keep 20uS
+	 */
+	mutex_lock(&kona_dev->write_lock);
+	udelay(20);
+	val = sdhci_readl(host, KONA_SDHOST_CORESTAT);
+
+	if (insert) {
+		int ret;
+
+		ret = mmc_gpio_get_ro(host->mmc);
+		if (ret >= 0)
+			val = (val & ~KONA_SDHOST_WP) |
+			      ((ret) ? KONA_SDHOST_WP : 0);
+
+		val |= KONA_SDHOST_CD_SW;
+		sdhci_writel(host, val, KONA_SDHOST_CORESTAT);
+	} else {
+		val &= ~KONA_SDHOST_CD_SW;
+		sdhci_writel(host, val, KONA_SDHOST_CORESTAT);
+	}
+	mutex_unlock(&kona_dev->write_lock);
+
+	return 0;
+}
+
+/*
+ * SD card interrupt event callback
+ */
+static void sdhci_bcm_kona_card_event(struct sdhci_host *host)
+{
+	if (mmc_gpio_get_cd(host->mmc) > 0) {
+		dev_dbg(mmc_dev(host->mmc),
+			"card inserted\n");
+		sdhci_bcm_kona_sd_card_emulate(host, 1);
+	} else {
+		dev_dbg(mmc_dev(host->mmc),
+			"card removed\n");
+		sdhci_bcm_kona_sd_card_emulate(host, 0);
+	}
+}
+
+static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host,
+				u8 power_mode)
+{
+	/*
+	 * JEDEC and SD spec specify supplying 74 continuous clocks to
+	 * device after power up. With minimum bus (100KHz) that
+	 * translates to 740us
+	 */
+	if (power_mode != MMC_POWER_OFF)
+		udelay(740);
+}
+
+static const struct sdhci_ops sdhci_bcm_kona_ops = {
+	.set_clock = sdhci_set_clock,
+	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+	.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+	.platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+	.card_event = sdhci_bcm_kona_card_event,
+};
+
+static const struct sdhci_pltfm_data sdhci_pltfm_data_kona = {
+	.ops	= &sdhci_bcm_kona_ops,
+	.quirks = SDHCI_QUIRK_NO_CARD_NO_RESET |
+		SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_32BIT_DMA_ADDR |
+		SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE |
+		SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
+		SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+};
+
+static const struct of_device_id sdhci_bcm_kona_of_match[] = {
+	{ .compatible = "brcm,kona-sdhci"},
+	{ .compatible = "bcm,kona-sdhci"}, /* deprecated name */
+	{}
+};
+MODULE_DEVICE_TABLE(of, sdhci_bcm_kona_of_match);
+
+static int sdhci_bcm_kona_probe(struct platform_device *pdev)
+{
+	struct sdhci_bcm_kona_dev *kona_dev = NULL;
+	struct sdhci_pltfm_host *pltfm_priv;
+	struct device *dev = &pdev->dev;
+	struct sdhci_host *host;
+	int ret;
+
+	ret = 0;
+
+	host = sdhci_pltfm_init(pdev, &sdhci_pltfm_data_kona,
+				sizeof(*kona_dev));
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	dev_dbg(dev, "%s: inited. IOADDR=%p\n", __func__, host->ioaddr);
+
+	pltfm_priv = sdhci_priv(host);
+
+	kona_dev = sdhci_pltfm_priv(pltfm_priv);
+	mutex_init(&kona_dev->write_lock);
+
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto err_pltfm_free;
+
+	if (!host->mmc->f_max) {
+		dev_err(&pdev->dev, "Missing max-freq for SDHCI cfg\n");
+		ret = -ENXIO;
+		goto err_pltfm_free;
+	}
+
+	/* Get and enable the core clock */
+	pltfm_priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pltfm_priv->clk)) {
+		dev_err(dev, "Failed to get core clock\n");
+		ret = PTR_ERR(pltfm_priv->clk);
+		goto err_pltfm_free;
+	}
+
+	ret = clk_set_rate(pltfm_priv->clk, host->mmc->f_max);
+	if (ret) {
+		dev_err(dev, "Failed to set rate core clock\n");
+		goto err_pltfm_free;
+	}
+
+	ret = clk_prepare_enable(pltfm_priv->clk);
+	if (ret) {
+		dev_err(dev, "Failed to enable core clock\n");
+		goto err_pltfm_free;
+	}
+
+	dev_dbg(dev, "non-removable=%c\n",
+		mmc_card_is_removable(host->mmc) ?
'N' : 'Y'); + dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n", + (mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 'Y' : 'N', + (mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N'); + + if (!mmc_card_is_removable(host->mmc)) + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + dev_dbg(dev, "is_8bit=%c\n", + (host->mmc->caps & MMC_CAP_8_BIT_DATA) ? 'Y' : 'N'); + + ret = sdhci_bcm_kona_sd_reset(host); + if (ret) + goto err_clk_disable; + + sdhci_bcm_kona_sd_init(host); + + ret = sdhci_add_host(host); + if (ret) + goto err_reset; + + /* if device is eMMC, emulate card insert right here */ + if (!mmc_card_is_removable(host->mmc)) { + ret = sdhci_bcm_kona_sd_card_emulate(host, 1); + if (ret) { + dev_err(dev, + "unable to emulate card insertion\n"); + goto err_remove_host; + } + } + /* + * Since the card detection GPIO interrupt is configured to be + * edge sensitive, check the initial GPIO value here, emulate + * only if the card is present + */ + if (mmc_gpio_get_cd(host->mmc) > 0) + sdhci_bcm_kona_sd_card_emulate(host, 1); + + dev_dbg(dev, "initialized properly\n"); + return 0; + +err_remove_host: + sdhci_remove_host(host, 0); + +err_reset: + sdhci_bcm_kona_sd_reset(host); + +err_clk_disable: + clk_disable_unprepare(pltfm_priv->clk); + +err_pltfm_free: + sdhci_pltfm_free(pdev); + + dev_err(dev, "Probing of sdhci-pltfm failed: %d\n", ret); + return ret; +} + +static struct platform_driver sdhci_bcm_kona_driver = { + .driver = { + .name = "sdhci-kona", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_pltfm_pmops, + .of_match_table = sdhci_bcm_kona_of_match, + }, + .probe = sdhci_bcm_kona_probe, + .remove = sdhci_pltfm_unregister, +}; +module_platform_driver(sdhci_bcm_kona_driver); + +MODULE_DESCRIPTION("SDHCI driver for Broadcom Kona platform"); +MODULE_AUTHOR("Broadcom"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c new file mode 100644 index 000000000..4d42b1810 --- /dev/null +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sdhci-brcmstb.c Support for SDHCI on Broadcom BRCMSTB SoC's + * + * Copyright (C) 2015 Broadcom Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "sdhci-cqhci.h" +#include "sdhci-pltfm.h" +#include "cqhci.h" + +#define SDHCI_VENDOR 0x78 +#define SDHCI_VENDOR_ENHANCED_STRB 0x1 +#define SDHCI_VENDOR_GATE_SDCLK_EN 0x2 + +#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1) +#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2) + +#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0) +#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1) + +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 + +struct sdhci_brcmstb_priv { + void __iomem *cfg_regs; + unsigned int flags; +}; + +struct brcmstb_match_priv { + void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); + struct sdhci_ops *ops; + const unsigned int flags; +}; + +static inline void enable_clock_gating(struct sdhci_host *host) +{ + u32 reg; + + reg = sdhci_readl(host, SDHCI_VENDOR); + reg |= SDHCI_VENDOR_GATE_SDCLK_EN; + sdhci_writel(host, reg, SDHCI_VENDOR); +} + +void brcmstb_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); + + sdhci_and_cqhci_reset(host, mask); + + /* Reset will clear this, so re-enable it */ + if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) + enable_clock_gating(host); +} + +static void sdhci_brcmstb_hs400es(struct 
mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + + u32 reg; + + dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n", + __func__); + reg = readl(host->ioaddr + SDHCI_VENDOR); + if (ios->enhanced_strobe) + reg |= SDHCI_VENDOR_ENHANCED_STRB; + else + reg &= ~SDHCI_VENDOR_ENHANCED_STRB; + writel(reg, host->ioaddr + SDHCI_VENDOR); +} + +static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + sdhci_enable_clk(host, clk); +} + +static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n", + __func__, timing); + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = { + .enable = sdhci_brcmstb_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_brcmstb_dumpregs, +}; + +static struct sdhci_ops sdhci_brcmstb_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_ops sdhci_brcmstb_ops_7216 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = brcmstb_reset, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + +static struct brcmstb_match_priv match_priv_7425 = { + .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | + BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, + .ops = &sdhci_brcmstb_ops, +}; + +static struct brcmstb_match_priv match_priv_7445 = { + .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, + .ops = &sdhci_brcmstb_ops, +}; + +static const struct brcmstb_match_priv match_priv_7216 = { + .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE, + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_7216, +}; + +static const struct of_device_id sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, + { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, + { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + {}, +}; + +static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 
intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static int sdhci_brcmstb_add_host(struct sdhci_host *host, + struct sdhci_brcmstb_priv *priv) +{ + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if ((priv->flags & BRCMSTB_PRIV_FLAGS_HAS_CQE) == 0) + return sdhci_add_host(host); + + dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(mmc_dev(host->mmc), + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_brcmstb_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) { + dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n"); + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + } + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + +static int sdhci_brcmstb_probe(struct platform_device *pdev) +{ + const struct brcmstb_match_priv *match_priv; + struct sdhci_pltfm_data brcmstb_pdata; + struct sdhci_pltfm_host *pltfm_host; + const struct of_device_id *match; + struct sdhci_brcmstb_priv *priv; + struct sdhci_host *host; + struct resource *iomem; + struct clk *clk; + int res; + + match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node); + match_priv = match->data; + + dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible); + + clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk), + "Failed to get clock from Device Tree\n"); + + res = clk_prepare_enable(clk); + if (res) + return res; + + memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); + brcmstb_pdata.ops = match_priv->ops; + host = sdhci_pltfm_init(pdev, &brcmstb_pdata, + sizeof(struct sdhci_brcmstb_priv)); + if (IS_ERR(host)) { + res = PTR_ERR(host); + goto err_clk; + } + + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + priv->flags |= BRCMSTB_PRIV_FLAGS_HAS_CQE; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } + + /* Map in the non-standard CFG registers */ + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem); + if (IS_ERR(priv->cfg_regs)) { + res = PTR_ERR(priv->cfg_regs); + goto err; + } + + sdhci_get_of_property(pdev); + res = mmc_of_parse(host->mmc); + if (res) + goto err; + + /* + * Automatic clock gating does not work for SD cards that may + * voltage switch so only enable it for non-removable devices. + */ + if ((match_priv->flags & BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE) && + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) + priv->flags |= BRCMSTB_PRIV_FLAGS_GATE_CLOCK; + + /* + * If the chip has enhanced strobe and it's enabled, add + * callback + */ + if (match_priv->hs400es && + (host->mmc->caps2 & MMC_CAP2_HS400_ES)) + host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es; + + /* + * Supply the existing CAPS, but clear the UHS modes. This + * will allow these modes to be specified by device tree + * properties through mmc_of_parse(). 
+ */ + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + if (match_priv->flags & BRCMSTB_MATCH_FLAGS_NO_64BIT) + host->caps &= ~SDHCI_CAN_64BIT; + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + host->quirks |= SDHCI_QUIRK_MISSING_CAPS; + + if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + + res = sdhci_brcmstb_add_host(host, priv); + if (res) + goto err; + + pltfm_host->clk = clk; + return res; + +err: + sdhci_pltfm_free(pdev); +err_clk: + clk_disable_unprepare(clk); + return res; +} + +static void sdhci_brcmstb_shutdown(struct platform_device *pdev) +{ + sdhci_pltfm_suspend(&pdev->dev); +} + +MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); + +static struct platform_driver sdhci_brcmstb_driver = { + .driver = { + .name = "sdhci-brcmstb", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_pltfm_pmops, + .of_match_table = of_match_ptr(sdhci_brcm_of_match), + }, + .probe = sdhci_brcmstb_probe, + .remove = sdhci_pltfm_unregister, + .shutdown = sdhci_brcmstb_shutdown, +}; + +module_platform_driver(sdhci_brcmstb_driver); + +MODULE_DESCRIPTION("SDHCI driver for Broadcom BRCMSTB SoCs"); +MODULE_AUTHOR("Broadcom"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c new file mode 100644 index 000000000..6f2de54a5 --- /dev/null +++ b/drivers/mmc/host/sdhci-cadence.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 Socionext Inc. + * Author: Masahiro Yamada + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" + +/* HRS - Host Register Set (specific to Cadence) */ +#define SDHCI_CDNS_HRS04 0x10 /* PHY access port */ +#define SDHCI_CDNS_HRS04_ACK BIT(26) +#define SDHCI_CDNS_HRS04_RD BIT(25) +#define SDHCI_CDNS_HRS04_WR BIT(24) +#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16) +#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8) +#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0) + +#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */ +#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15) +#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8) +#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0) +#define SDHCI_CDNS_HRS06_MODE_SD 0x0 +#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2 +#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3 +#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4 +#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5 +#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6 + +/* SRS - Slot Register Set (SDHCI-compatible) */ +#define SDHCI_CDNS_SRS_BASE 0x200 + +/* PHY */ +#define SDHCI_CDNS_PHY_DLY_SD_HS 0x00 +#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT 0x01 +#define SDHCI_CDNS_PHY_DLY_UHS_SDR12 0x02 +#define SDHCI_CDNS_PHY_DLY_UHS_SDR25 0x03 +#define SDHCI_CDNS_PHY_DLY_UHS_SDR50 0x04 +#define SDHCI_CDNS_PHY_DLY_UHS_DDR50 0x05 +#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06 +#define SDHCI_CDNS_PHY_DLY_EMMC_SDR 0x07 +#define SDHCI_CDNS_PHY_DLY_EMMC_DDR 0x08 +#define SDHCI_CDNS_PHY_DLY_SDCLK 0x0b +#define SDHCI_CDNS_PHY_DLY_HSMMC 0x0c +#define SDHCI_CDNS_PHY_DLY_STROBE 0x0d + +/* + * The tuned val register is 6 bit-wide, but not the whole of the range is + * available. The range 0-42 seems to be available (then 43 wraps around to 0) + * but I am not quite sure if it is official. Use only 0 to 39 for safety. 
+ */ +#define SDHCI_CDNS_MAX_TUNING_LOOP 40 + +struct sdhci_cdns_phy_param { + u8 addr; + u8 data; +}; + +struct sdhci_cdns_priv { + void __iomem *hrs_addr; + bool enhanced_strobe; + unsigned int nr_phy_params; + struct sdhci_cdns_phy_param phy_params[]; +}; + +struct sdhci_cdns_phy_cfg { + const char *property; + u8 addr; +}; + +static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = { + { "cdns,phy-input-delay-sd-highspeed", SDHCI_CDNS_PHY_DLY_SD_HS, }, + { "cdns,phy-input-delay-legacy", SDHCI_CDNS_PHY_DLY_SD_DEFAULT, }, + { "cdns,phy-input-delay-sd-uhs-sdr12", SDHCI_CDNS_PHY_DLY_UHS_SDR12, }, + { "cdns,phy-input-delay-sd-uhs-sdr25", SDHCI_CDNS_PHY_DLY_UHS_SDR25, }, + { "cdns,phy-input-delay-sd-uhs-sdr50", SDHCI_CDNS_PHY_DLY_UHS_SDR50, }, + { "cdns,phy-input-delay-sd-uhs-ddr50", SDHCI_CDNS_PHY_DLY_UHS_DDR50, }, + { "cdns,phy-input-delay-mmc-highspeed", SDHCI_CDNS_PHY_DLY_EMMC_SDR, }, + { "cdns,phy-input-delay-mmc-ddr", SDHCI_CDNS_PHY_DLY_EMMC_DDR, }, + { "cdns,phy-dll-delay-sdclk", SDHCI_CDNS_PHY_DLY_SDCLK, }, + { "cdns,phy-dll-delay-sdclk-hsmmc", SDHCI_CDNS_PHY_DLY_HSMMC, }, + { "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, }, +}; + +static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, + u8 addr, u8 data) +{ + void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04; + u32 tmp; + int ret; + + ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK), + 0, 10); + if (ret) + return ret; + + tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) | + FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr); + writel(tmp, reg); + + tmp |= SDHCI_CDNS_HRS04_WR; + writel(tmp, reg); + + ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10); + if (ret) + return ret; + + tmp &= ~SDHCI_CDNS_HRS04_WR; + writel(tmp, reg); + + ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK), + 0, 10); + + return ret; +} + +static unsigned int sdhci_cdns_phy_param_count(struct device_node *np) +{ + unsigned int count = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) + if (of_property_read_bool(np, sdhci_cdns_phy_cfgs[i].property)) + count++; + + return count; +} + +static void sdhci_cdns_phy_param_parse(struct device_node *np, + struct sdhci_cdns_priv *priv) +{ + struct sdhci_cdns_phy_param *p = priv->phy_params; + u32 val; + int ret, i; + + for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) { + ret = of_property_read_u32(np, sdhci_cdns_phy_cfgs[i].property, + &val); + if (ret) + continue; + + p->addr = sdhci_cdns_phy_cfgs[i].addr; + p->data = val; + p++; + } +} + +static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) +{ + int ret, i; + + for (i = 0; i < priv->nr_phy_params; i++) { + ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr, + priv->phy_params[i].data); + if (ret) + return ret; + } + + return 0; +} + +static void *sdhci_cdns_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + +static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host) +{ + /* + * Cadence's spec says the Timeout Clock Frequency is the same as the + * Base Clock Frequency. 
+ */ + return host->max_clk; +} + +static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode) +{ + u32 tmp; + + /* The speed mode for eMMC is selected by HRS06 register */ + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06); + tmp &= ~SDHCI_CDNS_HRS06_MODE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode); + writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06); +} + +static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv) +{ + u32 tmp; + + tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06); + return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp); +} + +static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) +{ + struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); + void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; + u32 tmp; + int i, ret; + + if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) + return -EINVAL; + + tmp = readl(reg); + tmp &= ~SDHCI_CDNS_HRS06_TUNE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); + + /* + * Workaround for IP errata: + * The IP6116 SD/eMMC PHY design has a timing issue on receive data + * path. Send tune request twice. + */ + for (i = 0; i < 2; i++) { + tmp |= SDHCI_CDNS_HRS06_TUNE_UP; + writel(tmp, reg); + + ret = readl_poll_timeout(reg, tmp, + !(tmp & SDHCI_CDNS_HRS06_TUNE_UP), + 0, 1); + if (ret) + return ret; + } + + return 0; +} + +/* + * In SD mode, software must not use the hardware tuning and instead perform + * an almost identical procedure to eMMC. + */ +static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + int cur_streak = 0; + int max_streak = 0; + int end_of_streak = 0; + int i; + + /* + * Do not execute tuning for UHS_SDR50 or UHS_DDR50. + * The delay is set by probe, based on the DT properties. + */ + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + return 0; + + for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) { + if (sdhci_cdns_set_tune_val(host, i) || + mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */ + cur_streak = 0; + } else { /* good */ + cur_streak++; + if (cur_streak > max_streak) { + max_streak = cur_streak; + end_of_streak = i; + } + } + } + + if (!max_streak) { + dev_err(mmc_dev(host->mmc), "no tuning point found\n"); + return -EIO; + } + + return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2); +} + +static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); + u32 mode; + + switch (timing) { + case MMC_TIMING_MMC_HS: + mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR; + break; + case MMC_TIMING_MMC_DDR52: + mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR; + break; + case MMC_TIMING_MMC_HS200: + mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200; + break; + case MMC_TIMING_MMC_HS400: + if (priv->enhanced_strobe) + mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES; + else + mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400; + break; + default: + mode = SDHCI_CDNS_HRS06_MODE_SD; + break; + } + + sdhci_cdns_set_emmc_mode(priv, mode); + + /* For SD, fall back to the default handler */ + if (mode == SDHCI_CDNS_HRS06_MODE_SD) + sdhci_set_uhs_signaling(host, timing); +} + +static const struct sdhci_ops sdhci_cdns_ops = { + .set_clock = sdhci_set_clock, + .get_timeout_clock = sdhci_cdns_get_timeout_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .platform_execute_tuning = sdhci_cdns_execute_tuning, + .set_uhs_signaling = sdhci_cdns_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 
= SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + +static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = { + .ops = &sdhci_cdns_ops, +}; + +static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); + u32 mode; + + priv->enhanced_strobe = ios->enhanced_strobe; + + mode = sdhci_cdns_get_emmc_mode(priv); + + if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400 && ios->enhanced_strobe) + sdhci_cdns_set_emmc_mode(priv, + SDHCI_CDNS_HRS06_MODE_MMC_HS400ES); + + if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400ES && !ios->enhanced_strobe) + sdhci_cdns_set_emmc_mode(priv, + SDHCI_CDNS_HRS06_MODE_MMC_HS400); +} + +static int sdhci_cdns_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + const struct sdhci_pltfm_data *data; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_cdns_priv *priv; + struct clk *clk; + unsigned int nr_phy_params; + int ret; + struct device *dev = &pdev->dev; + static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + + data = of_device_get_match_data(dev); + if (!data) + data = &sdhci_cdns_pltfm_data; + + nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node); + host = sdhci_pltfm_init(pdev, data, + struct_size(priv, phy_params, nr_phy_params)); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + goto disable_clk; + } + + pltfm_host = sdhci_priv(host); + pltfm_host->clk = clk; + + priv = sdhci_pltfm_priv(pltfm_host); + priv->nr_phy_params = nr_phy_params; + priv->hrs_addr = host->ioaddr; + priv->enhanced_strobe = false; + host->ioaddr += SDHCI_CDNS_SRS_BASE; + host->mmc_host_ops.hs400_enhanced_strobe = + sdhci_cdns_hs400_enhanced_strobe; + sdhci_enable_v4_mode(host); + __sdhci_read_caps(host, &version, NULL, NULL); + + sdhci_get_of_property(pdev); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto free; + + sdhci_cdns_phy_param_parse(dev->of_node, priv); + + ret = sdhci_cdns_phy_init(priv); + if (ret) + goto free; + + ret = sdhci_add_host(host); + if (ret) + goto free; + + return 0; +free: + sdhci_pltfm_free(pdev); +disable_clk: + clk_disable_unprepare(clk); + + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_cdns_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = sdhci_cdns_phy_init(priv); + if (ret) + goto disable_clk; + + ret = sdhci_resume_host(host); + if (ret) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(pltfm_host->clk); + + return ret; +} +#endif + +static const struct dev_pm_ops sdhci_cdns_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_cdns_resume) +}; + +static const struct of_device_id sdhci_cdns_match[] = { + { + .compatible = "socionext,uniphier-sd4hc", + .data = &sdhci_cdns_uniphier_pltfm_data, + }, + { .compatible = "cdns,sd4hc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sdhci_cdns_match); + +static struct platform_driver sdhci_cdns_driver = { + .driver = { + .name = "sdhci-cdns", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_cdns_pm_ops, + .of_match_table = sdhci_cdns_match, + }, + .probe = sdhci_cdns_probe, + .remove = sdhci_pltfm_unregister, +}; 
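
Stepping back from the driver glue: sdhci_cdns_execute_tuning() earlier in this file selects the tap in the middle of the longest run of passing tuning values. The pure-function sketch below reproduces that streak search; it is illustrative and not part of the patch, and pick_tuning_tap plus the sample pass/fail map are invented names.

#include <stdio.h>

/*
 * ok[i] != 0 means tuning tap i passed (i.e. mmc_send_tuning() succeeded).
 * Returns the tap in the middle of the longest passing streak, or -1 if
 * no tap passed, mirroring the loop in sdhci_cdns_execute_tuning().
 */
static int pick_tuning_tap(const int *ok, int n)
{
	int cur_streak = 0, max_streak = 0, end_of_streak = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!ok[i]) {
			cur_streak = 0;
		} else if (++cur_streak > max_streak) {
			max_streak = cur_streak;
			end_of_streak = i;
		}
	}
	return max_streak ? end_of_streak - max_streak / 2 : -1;
}

int main(void)
{
	/* hypothetical pass/fail map over 10 of the 40 taps */
	int ok[10] = { 0, 1, 1, 1, 1, 1, 0, 1, 1, 0 };

	printf("chosen tap: %d\n", pick_tuning_tap(ok, 10)); /* prints 3 */
	return 0;
}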
+module_platform_driver(sdhci_cdns_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("Cadence SD/SDIO/eMMC Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
new file mode 100644
index 000000000..2a29c7a4f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SDHCI support for CNS3xxx SoC
+ *
+ * Copyright 2008 Cavium Networks
+ * Copyright 2010 MontaVista Software, LLC.
+ *
+ * Authors: Scott Shu
+ *	    Anton Vorontsov <avorontsov@mvista.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include "sdhci-pltfm.h"
+
+static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
+{
+	return 150000000;
+}
+
+static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct device *dev = mmc_dev(host->mmc);
+	int div = 1;
+	u16 clk;
+	unsigned long timeout;
+
+	host->mmc->actual_clock = 0;
+
+	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+	if (clock == 0)
+		return;
+
+	while (host->max_clk / div > clock) {
+		/*
+		 * On CNS3xxx divider grows linearly up to 4, and then
+		 * exponentially up to 256.
+		 */
+		if (div < 4)
+			div += 1;
+		else if (div < 256)
+			div *= 2;
+		else
+			break;
+	}
+
+	dev_dbg(dev, "desired SD clock: %d, actual: %d\n",
+		clock, host->max_clk / div);
+
+	/* Divide by 3 is special. */
+	if (div != 3)
+		div >>= 1;
+
+	clk = div << SDHCI_DIVIDER_SHIFT;
+	clk |= SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+	timeout = 20;
+	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+			& SDHCI_CLOCK_INT_STABLE)) {
+		if (timeout == 0) {
+			dev_warn(dev, "clock is unstable");
+			break;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	clk |= SDHCI_CLOCK_CARD_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
+static const struct sdhci_ops sdhci_cns3xxx_ops = {
+	.get_max_clock	= sdhci_cns3xxx_get_max_clk,
+	.set_clock	= sdhci_cns3xxx_set_clock,
+	.set_bus_width	= sdhci_set_bus_width,
+	.reset		= sdhci_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+	.ops = &sdhci_cns3xxx_ops,
+	.quirks = SDHCI_QUIRK_BROKEN_DMA |
+		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+		  SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+};
+
+static int sdhci_cns3xxx_probe(struct platform_device *pdev)
+{
+	return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata, 0);
+}
+
+static struct platform_driver sdhci_cns3xxx_driver = {
+	.driver		= {
+		.name	= "sdhci-cns3xxx",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm	= &sdhci_pltfm_pmops,
+	},
+	.probe		= sdhci_cns3xxx_probe,
+	.remove		= sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_cns3xxx_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
+MODULE_AUTHOR("Scott Shu, "
+	      "Anton Vorontsov <avorontsov@mvista.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h
new file mode 100644
index 000000000..cf8e7ba71
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cqhci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022 The Chromium OS Authors
+ *
+ * Support that applies to the combination of SDHCI and CQHCI, while not
+ * expressing a dependency between the two modules.
+ */
+
+#ifndef __MMC_HOST_SDHCI_CQHCI_H__
+#define __MMC_HOST_SDHCI_CQHCI_H__
+
+#include "cqhci.h"
+#include "sdhci.h"
+
+static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+	    host->mmc->cqe_private)
+		cqhci_deactivate(host->mmc);
+
+	sdhci_reset(host, mask);
+}
+
+#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */
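
The helper above exists so that drivers combining SDHCI with CQHCI deactivate the command queue before a full controller reset; brcmstb_reset() earlier in this patch is one caller. The plain-C model below shows the ordering in isolation; it is illustrative only and not part of the patch, and all mock_* names are invented.

#include <stdio.h>
#include <stdbool.h>

#define RESET_ALL 0x01	/* mirrors SDHCI_RESET_ALL */

struct mock_host {
	bool cqe_supported;	/* MMC_CAP2_CQE analogue */
	bool cqe_active;	/* cqe_private analogue */
};

static void mock_cqhci_deactivate(struct mock_host *h)
{
	h->cqe_active = false;
	puts("CQE deactivated");
}

static void mock_sdhci_reset(struct mock_host *h, unsigned mask)
{
	printf("SDHCI reset, mask 0x%x\n", mask);
}

/* same guard as sdhci_and_cqhci_reset(): quiesce the queue first */
static void mock_and_cqhci_reset(struct mock_host *h, unsigned mask)
{
	if (h->cqe_supported && (mask & RESET_ALL) && h->cqe_active)
		mock_cqhci_deactivate(h);
	mock_sdhci_reset(h, mask);
}

int main(void)
{
	struct mock_host h = { .cqe_supported = true, .cqe_active = true };

	mock_and_cqhci_reset(&h, RESET_ALL);
	return 0;
}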
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000..5e5bf82e5
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ *	   Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+	u16 ret;
+
+	switch (reg) {
+	case SDHCI_HOST_VERSION:
+	case SDHCI_SLOT_INT_STATUS:
+		/* those registers don't exist */
+		return 0;
+	default:
+		ret = readw(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+	u32 ret;
+
+	ret = readl(host->ioaddr + reg);
+
+	switch (reg) {
+	case SDHCI_CAPABILITIES:
+		/* Mask the support for 3.0V */
+		ret &= ~SDHCI_CAN_VDD_300;
+		break;
+	}
+	return ret;
+}
+
+static const struct sdhci_ops sdhci_dove_ops = {
+	.read_w	= sdhci_dove_readw,
+	.read_l	= sdhci_dove_readl,
+	.set_clock = sdhci_set_clock,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_dove_pdata = {
+	.ops	= &sdhci_dove_ops,
+	.quirks	= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+		  SDHCI_QUIRK_NO_BUSY_IRQ |
+		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_FORCE_DMA |
+		  SDHCI_QUIRK_NO_HISPD_BIT,
+};
+
+static int sdhci_dove_probe(struct platform_device *pdev)
+{
+	struct sdhci_host *host;
+	struct sdhci_pltfm_host *pltfm_host;
+	int ret;
+
+	host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0);
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	pltfm_host = sdhci_priv(host);
+	pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+
+	if (!IS_ERR(pltfm_host->clk))
+		clk_prepare_enable(pltfm_host->clk);
+
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto err_sdhci_add;
+
+	ret = sdhci_add_host(host);
+	if (ret)
+		goto err_sdhci_add;
+
+	return 0;
+
+err_sdhci_add:
+	clk_disable_unprepare(pltfm_host->clk);
+	sdhci_pltfm_free(pdev);
+	return ret;
+}
+
+static const struct of_device_id sdhci_dove_of_match_table[] = {
+	{ .compatible = "marvell,dove-sdhci", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
+
+static struct platform_driver sdhci_dove_driver = {
+	.driver		= {
+		.name	= "sdhci-dove",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm	= &sdhci_pltfm_pmops,
+		.of_match_table = sdhci_dove_of_match_table,
+	},
+	.probe		= sdhci_dove_probe,
+	.remove		= sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_dove_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Dove");
+MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
+	      "Mike Rapoport <mike@compulab.co.il>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
new file mode 100644
index 000000000..b030f657e
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -0,0 +1,1881 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale eSDHC i.MX controller driver for the platform bus.
+ *
+ * derived from the OF-version.
+ * + * Copyright (c) 2010 Pengutronix e.K. + * Author: Wolfram Sang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdhci-cqhci.h" +#include "sdhci-pltfm.h" +#include "sdhci-esdhc.h" +#include "cqhci.h" + +#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f +#define ESDHC_CTRL_D3CD 0x08 +#define ESDHC_BURST_LEN_EN_INCR (1 << 27) +/* VENDOR SPEC register */ +#define ESDHC_VENDOR_SPEC 0xc0 +#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1) +#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1) +#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8) +#define ESDHC_DEBUG_SEL_AND_STATUS_REG 0xc2 +#define ESDHC_DEBUG_SEL_REG 0xc3 +#define ESDHC_DEBUG_SEL_MASK 0xf +#define ESDHC_DEBUG_SEL_CMD_STATE 1 +#define ESDHC_DEBUG_SEL_DATA_STATE 2 +#define ESDHC_DEBUG_SEL_TRANS_STATE 3 +#define ESDHC_DEBUG_SEL_DMA_STATE 4 +#define ESDHC_DEBUG_SEL_ADMA_STATE 5 +#define ESDHC_DEBUG_SEL_FIFO_STATE 6 +#define ESDHC_DEBUG_SEL_ASYNC_FIFO_STATE 7 +#define ESDHC_WTMK_LVL 0x44 +#define ESDHC_WTMK_DEFAULT_VAL 0x10401040 +#define ESDHC_WTMK_LVL_RD_WML_MASK 0x000000FF +#define ESDHC_WTMK_LVL_RD_WML_SHIFT 0 +#define ESDHC_WTMK_LVL_WR_WML_MASK 0x00FF0000 +#define ESDHC_WTMK_LVL_WR_WML_SHIFT 16 +#define ESDHC_WTMK_LVL_WML_VAL_DEF 64 +#define ESDHC_WTMK_LVL_WML_VAL_MAX 128 +#define ESDHC_MIX_CTRL 0x48 +#define ESDHC_MIX_CTRL_DDREN (1 << 3) +#define ESDHC_MIX_CTRL_AC23EN (1 << 7) +#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22) +#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23) +#define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24) +#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) +#define ESDHC_MIX_CTRL_HS400_EN (1 << 26) +#define ESDHC_MIX_CTRL_HS400_ES_EN (1 << 27) +/* Bits 3 and 6 are not SDHCI standard definitions */ +#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 +/* Tuning bits */ +#define ESDHC_MIX_CTRL_TUNING_MASK 0x03c00000 + +/* dll control register */ +#define ESDHC_DLL_CTRL 0x60 +#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9 +#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8 + +/* tune control register */ +#define ESDHC_TUNE_CTRL_STATUS 0x68 +#define ESDHC_TUNE_CTRL_STEP 1 +#define ESDHC_TUNE_CTRL_MIN 0 +#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1) + +/* strobe dll register */ +#define ESDHC_STROBE_DLL_CTRL 0x70 +#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0) +#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1) +#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT 0x7 +#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3 +#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT (4 << 20) + +#define ESDHC_STROBE_DLL_STATUS 0x74 +#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1) +#define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1 + +#define ESDHC_VEND_SPEC2 0xc8 +#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8) + +#define ESDHC_TUNING_CTRL 0xcc +#define ESDHC_STD_TUNING_EN (1 << 24) +/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ +#define ESDHC_TUNING_START_TAP_DEFAULT 0x1 +#define ESDHC_TUNING_START_TAP_MASK 0x7f +#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7) +#define ESDHC_TUNING_STEP_DEFAULT 0x1 +#define ESDHC_TUNING_STEP_MASK 0x00070000 +#define ESDHC_TUNING_STEP_SHIFT 16 + +/* pinctrl state */ +#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz" +#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz" + +/* + * Our interpretation of the SDHCI_HOST_CONTROL register + */ +#define ESDHC_CTRL_4BITBUS (0x1 << 1) +#define ESDHC_CTRL_8BITBUS (0x2 << 1) +#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) + +/* + * There is an INT DMA ERR mismatch between eSDHC and STD SDHC 
SPEC:
+ * Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design,
+ * but bit28 is used as the INT DMA ERR in fsl eSDHC design.
+ * Define this macro for the fsl eSDHC DMA error INT.
+ */
+#define ESDHC_INT_VENDOR_SPEC_DMA_ERR	(1 << 28)
+
+/* the address offset of CQHCI */
+#define ESDHC_CQHCI_ADDR_OFFSET		0x100
+
+/*
+ * The CMDTYPE of the CMD register (offset 0xE) should be set to
+ * "11" when the STOP CMD12 is issued on imx53 to abort one
+ * open ended multi-blk IO. Otherwise the TC INT wouldn't
+ * be generated.
+ * In exact block transfer, the controller doesn't complete the
+ * operations automatically as required at the end of the
+ * transfer and remains on hold if the abort command is not sent.
+ * As a result, the TC flag is not asserted and SW receives a timeout
+ * exception. Bit1 of Vendor Spec register is used to fix it.
+ */
+#define ESDHC_FLAG_MULTIBLK_NO_INT	BIT(1)
+/*
+ * This flag tells that the ESDHC controller is a USDHC block that is
+ * integrated on the i.MX6 series.
+ */
+#define ESDHC_FLAG_USDHC		BIT(3)
+/* The IP supports manual tuning process */
+#define ESDHC_FLAG_MAN_TUNING		BIT(4)
+/* The IP supports standard tuning process */
+#define ESDHC_FLAG_STD_TUNING		BIT(5)
+/* The IP has SDHCI_CAPABILITIES_1 register */
+#define ESDHC_FLAG_HAVE_CAP1		BIT(6)
+/*
+ * The IP has erratum ERR004536:
+ * uSDHC: an ADMA Length Mismatch Error occurs if the AHB read access is slow
+ * when reading data from the card.
+ * This flag is also set for i.MX25 and i.MX35 in order to get
+ * SDHCI_QUIRK_BROKEN_ADMA, but for different reasons (ADMA capability bits).
+ */
+#define ESDHC_FLAG_ERR004536		BIT(7)
+/* The IP supports HS200 mode */
+#define ESDHC_FLAG_HS200		BIT(8)
+/* The IP supports HS400 mode */
+#define ESDHC_FLAG_HS400		BIT(9)
+/*
+ * The IP has erratum ERR010450:
+ * uSDHC: at 1.8V, due to the I/O timing limit, the SD card clock can't exceed
+ * 150MHz in SDR mode or 45MHz in DDR mode.
+ */
+#define ESDHC_FLAG_ERR010450		BIT(10)
+/* The IP supports HS400ES mode */
+#define ESDHC_FLAG_HS400_ES		BIT(11)
+/* The IP has a Host Controller Interface for Command Queuing */
+#define ESDHC_FLAG_CQHCI		BIT(12)
+/* need to request pmqos during low power */
+#define ESDHC_FLAG_PMQOS		BIT(13)
+/* The IP state gets lost in low power mode */
+#define ESDHC_FLAG_STATE_LOST_IN_LPMODE		BIT(14)
+/* The IP loses the clock rate in PM_RUNTIME */
+#define ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME	BIT(15)
+/*
+ * The IP does not fully support the ACMD23 feature when ADMA mode is used.
+ * In ADMA mode, it only uses the 16-bit block count of register 0x4
+ * (BLOCK_ATT) as the CMD23 argument for ACMD23, which means it ignores
+ * the upper 16 bits of the CMD23 argument. This breaks reliable writes
+ * to RPMB, because an RPMB reliable write needs to set bit31 of the
+ * CMD23 argument.
+ * imx6qpdl/imx6sx/imx6sl/imx7d have this limitation only in ADMA mode;
+ * SDMA does not have it. So when these SoCs use ADMA mode, the ACMD23
+ * feature needs to be disabled.
+ */ +#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16) + +struct esdhc_soc_data { + u32 flags; +}; + +static const struct esdhc_soc_data esdhc_imx25_data = { + .flags = ESDHC_FLAG_ERR004536, +}; + +static const struct esdhc_soc_data esdhc_imx35_data = { + .flags = ESDHC_FLAG_ERR004536, +}; + +static const struct esdhc_soc_data esdhc_imx51_data = { + .flags = 0, +}; + +static const struct esdhc_soc_data esdhc_imx53_data = { + .flags = ESDHC_FLAG_MULTIBLK_NO_INT, +}; + +static const struct esdhc_soc_data usdhc_imx6q_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING + | ESDHC_FLAG_BROKEN_AUTO_CMD23, +}; + +static const struct esdhc_soc_data usdhc_imx6sl_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536 + | ESDHC_FLAG_HS200 + | ESDHC_FLAG_BROKEN_AUTO_CMD23, +}; + +static const struct esdhc_soc_data usdhc_imx6sll_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_HS400 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, +}; + +static const struct esdhc_soc_data usdhc_imx6sx_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE + | ESDHC_FLAG_BROKEN_AUTO_CMD23, +}; + +static const struct esdhc_soc_data usdhc_imx6ull_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_ERR010450 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, +}; + +static const struct esdhc_soc_data usdhc_imx7d_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_HS400 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE + | ESDHC_FLAG_BROKEN_AUTO_CMD23, +}; + +static struct esdhc_soc_data usdhc_imx7ulp_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, +}; + +static struct esdhc_soc_data usdhc_imx8qxp_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES + | ESDHC_FLAG_STATE_LOST_IN_LPMODE + | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME, +}; + +static struct esdhc_soc_data usdhc_imx8mm_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES + | ESDHC_FLAG_STATE_LOST_IN_LPMODE, +}; + +struct pltfm_imx_data { + u32 scratchpad; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_100mhz; + struct pinctrl_state *pins_200mhz; + const struct esdhc_soc_data *socdata; + struct esdhc_platform_data boarddata; + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_per; + unsigned int actual_clock; + enum { + NO_CMD_PENDING, /* no multiblock command pending */ + MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ + WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ + } multiblock_status; + u32 is_ddr; + struct pm_qos_request pm_qos_req; +}; + +static const struct of_device_id imx_esdhc_dt_ids[] = { + { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, + { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, + { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, }, + { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, + { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, }, + { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, + { 
.compatible = "fsl,imx6sll-usdhc", .data = &usdhc_imx6sll_data, }, + { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, + { .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, }, + { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, }, + { .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, }, + { .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, }, + { .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); + +static inline int is_imx25_esdhc(struct pltfm_imx_data *data) +{ + return data->socdata == &esdhc_imx25_data; +} + +static inline int is_imx53_esdhc(struct pltfm_imx_data *data) +{ + return data->socdata == &esdhc_imx53_data; +} + +static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) +{ + return data->socdata == &usdhc_imx6q_data; +} + +static inline int esdhc_is_usdhc(struct pltfm_imx_data *data) +{ + return !!(data->socdata->flags & ESDHC_FLAG_USDHC); +} + +static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) +{ + void __iomem *base = host->ioaddr + (reg & ~0x3); + u32 shift = (reg & 0x3) * 8; + + writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); +} + +#define DRIVER_NAME "sdhci-esdhc-imx" +#define ESDHC_IMX_DUMP(f, x...) \ + pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) +static void esdhc_dump_debug_regs(struct sdhci_host *host) +{ + int i; + char *debug_status[7] = { + "cmd debug status", + "data debug status", + "trans debug status", + "dma debug status", + "adma debug status", + "fifo debug status", + "async fifo debug status" + }; + + ESDHC_IMX_DUMP("========= ESDHC IMX DEBUG STATUS DUMP =========\n"); + for (i = 0; i < 7; i++) { + esdhc_clrset_le(host, ESDHC_DEBUG_SEL_MASK, + ESDHC_DEBUG_SEL_CMD_STATE + i, ESDHC_DEBUG_SEL_REG); + ESDHC_IMX_DUMP("%s: 0x%04x\n", debug_status[i], + readw(host->ioaddr + ESDHC_DEBUG_SEL_AND_STATUS_REG)); + } + + esdhc_clrset_le(host, ESDHC_DEBUG_SEL_MASK, 0, ESDHC_DEBUG_SEL_REG); + +} + +static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host) +{ + u32 present_state; + int ret; + + ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, present_state, + (present_state & ESDHC_CLOCK_GATE_OFF), 2, 100); + if (ret == -ETIMEDOUT) + dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__); +} + +static u32 esdhc_readl_le(struct sdhci_host *host, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u32 val = readl(host->ioaddr + reg); + + if (unlikely(reg == SDHCI_PRESENT_STATE)) { + u32 fsl_prss = val; + /* save the least 20 bits */ + val = fsl_prss & 0x000FFFFF; + /* move dat[0-3] bits */ + val |= (fsl_prss & 0x0F000000) >> 4; + /* move cmd line bit */ + val |= (fsl_prss & 0x00800000) << 1; + } + + if (unlikely(reg == SDHCI_CAPABILITIES)) { + /* ignore bit[0-15] as it stores cap_1 register val for mx6sl */ + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val &= 0xffff0000; + + /* In FSL esdhc IC module, only bit20 is used to indicate the + * ADMA2 capability of esdhc, but this bit is messed up on + * some SOCs (e.g. on MX25, MX35 this bit is set, but they + * don't actually support ADMA2). So set the BROKEN_ADMA + * quirk on MX25/35 platforms. 
+ */ + + if (val & SDHCI_CAN_DO_ADMA1) { + val &= ~SDHCI_CAN_DO_ADMA1; + val |= SDHCI_CAN_DO_ADMA2; + } + } + + if (unlikely(reg == SDHCI_CAPABILITIES_1)) { + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF; + else + /* imx6q/dl does not have cap_1 register, fake one */ + val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 + | SDHCI_SUPPORT_SDR50 + | SDHCI_USE_SDR50_TUNING + | FIELD_PREP(SDHCI_RETUNING_MODE_MASK, + SDHCI_TUNING_MODE_3); + + if (imx_data->socdata->flags & ESDHC_FLAG_HS400) + val |= SDHCI_SUPPORT_HS400; + + /* + * Do not advertise faster UHS modes if there are no + * pinctrl states for 100MHz/200MHz. + */ + if (IS_ERR_OR_NULL(imx_data->pins_100mhz) || + IS_ERR_OR_NULL(imx_data->pins_200mhz)) + val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50 + | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400); + } + } + + if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) { + val = 0; + val |= FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, 0xFF); + val |= FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, 0xFF); + val |= FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, 0xFF); + } + + if (unlikely(reg == SDHCI_INT_STATUS)) { + if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { + val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; + val |= SDHCI_INT_ADMA_ERROR; + } + + /* + * mask off the interrupt we get in response to the manually + * sent CMD12 + */ + if ((imx_data->multiblock_status == WAIT_FOR_INT) && + ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) { + val &= ~SDHCI_INT_RESPONSE; + writel(SDHCI_INT_RESPONSE, host->ioaddr + + SDHCI_INT_STATUS); + imx_data->multiblock_status = NO_CMD_PENDING; + } + } + + return val; +} + +static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u32 data; + + if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE || + reg == SDHCI_INT_STATUS)) { + if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) { + /* + * Clear and then set D3CD bit to avoid missing the + * card interrupt. This is an eSDHC controller problem + * so we need to apply the following workaround: clear + * and set D3CD bit will make eSDHC re-sample the card + * interrupt. In case a card interrupt was lost, + * re-sample it by the following steps. 
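The SDHCI_INT_STATUS handling above translates the vendor-specific DMA error bit into the bit the SDHCI core expects. A self-contained sketch of that translation (both bit positions are taken from the definitions earlier in this file):

```c
#include <stdint.h>
#include <stdio.h>

#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1u << 28) /* FSL vendor bit */
#define SDHCI_INT_ADMA_ERROR          (1u << 25) /* standard SDHCI bit */

/* Translate the vendor DMA-error bit into the standard ADMA-error bit on
 * the read path, as esdhc_readl_le() does for SDHCI_INT_STATUS above. */
static uint32_t int_status_to_std(uint32_t val)
{
	if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
		val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
		val |= SDHCI_INT_ADMA_ERROR;
	}
	return val;
}

int main(void)
{
	/* prints 0x02000000: bit 28 remapped to bit 25 */
	printf("0x%08x\n", (unsigned)int_status_to_std(1u << 28));
	return 0;
}
```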
+ */ + data = readl(host->ioaddr + SDHCI_HOST_CONTROL); + data &= ~ESDHC_CTRL_D3CD; + writel(data, host->ioaddr + SDHCI_HOST_CONTROL); + data |= ESDHC_CTRL_D3CD; + writel(data, host->ioaddr + SDHCI_HOST_CONTROL); + } + + if (val & SDHCI_INT_ADMA_ERROR) { + val &= ~SDHCI_INT_ADMA_ERROR; + val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR; + } + } + + if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + && (reg == SDHCI_INT_STATUS) + && (val & SDHCI_INT_DATA_END))) { + u32 v; + v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; + writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); + + if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) + { + /* send a manual CMD12 with RESPTYP=none */ + data = MMC_STOP_TRANSMISSION << 24 | + SDHCI_CMD_ABORTCMD << 16; + writel(data, host->ioaddr + SDHCI_TRANSFER_MODE); + imx_data->multiblock_status = WAIT_FOR_INT; + } + } + + writel(val, host->ioaddr + reg); +} + +static u16 esdhc_readw_le(struct sdhci_host *host, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u16 ret = 0; + u32 val; + + if (unlikely(reg == SDHCI_HOST_VERSION)) { + reg ^= 2; + if (esdhc_is_usdhc(imx_data)) { + /* + * The usdhc register returns a wrong host version. + * Correct it here. + */ + return SDHCI_SPEC_300; + } + } + + if (unlikely(reg == SDHCI_HOST_CONTROL2)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & ESDHC_VENDOR_SPEC_VSELECT) + ret |= SDHCI_CTRL_VDD_180; + + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + val = readl(host->ioaddr + ESDHC_MIX_CTRL); + else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) + /* the std tuning bits is in ACMD12_ERR for imx6sl */ + val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS); + } + + if (val & ESDHC_MIX_CTRL_EXE_TUNE) + ret |= SDHCI_CTRL_EXEC_TUNING; + if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) + ret |= SDHCI_CTRL_TUNED_CLK; + + ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + + return ret; + } + + if (unlikely(reg == SDHCI_TRANSFER_MODE)) { + if (esdhc_is_usdhc(imx_data)) { + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); + ret = m & ESDHC_MIX_CTRL_SDHCI_MASK; + /* Swap AC23 bit */ + if (m & ESDHC_MIX_CTRL_AC23EN) { + ret &= ~ESDHC_MIX_CTRL_AC23EN; + ret |= SDHCI_TRNS_AUTO_CMD23; + } + } else { + ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE); + } + + return ret; + } + + return readw(host->ioaddr + reg); +} + +static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u32 new_val = 0; + + switch (reg) { + case SDHCI_CLOCK_CONTROL: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CLOCK_CARD_EN) + new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + else + new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + if (!(new_val & ESDHC_VENDOR_SPEC_FRC_SDCLK_ON)) + esdhc_wait_for_card_clock_gate_off(host); + return; + case SDHCI_HOST_CONTROL2: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CTRL_VDD_180) + new_val |= ESDHC_VENDOR_SPEC_VSELECT; + else + new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (val & SDHCI_CTRL_TUNED_CLK) { + new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; + new_val |= 
ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+			} else {
+				new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+				new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+			}
+			writel(new_val, host->ioaddr + ESDHC_MIX_CTRL);
+		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+			u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+			if (val & SDHCI_CTRL_TUNED_CLK) {
+				v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+			} else {
+				v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+				m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+				m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+			}
+
+			if (val & SDHCI_CTRL_EXEC_TUNING) {
+				v |= ESDHC_MIX_CTRL_EXE_TUNE;
+				m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+				m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+			} else {
+				v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+			}
+
+			writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+			writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+		}
+		return;
+	case SDHCI_TRANSFER_MODE:
+		if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+				&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
+				&& (host->cmd->data->blocks > 1)
+				&& (host->cmd->data->flags & MMC_DATA_READ)) {
+			u32 v;
+			v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+			v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+			writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+		}
+
+		if (esdhc_is_usdhc(imx_data)) {
+			u32 wml;
+			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+			/* Swap AC23 bit */
+			if (val & SDHCI_TRNS_AUTO_CMD23) {
+				val &= ~SDHCI_TRNS_AUTO_CMD23;
+				val |= ESDHC_MIX_CTRL_AC23EN;
+			}
+			m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK);
+			writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+
+			/*
+			 * Set the watermark levels for PIO access to the
+			 * maximum value (128 words) to accommodate the full
+			 * 512-byte buffer. For DMA access, restore the levels
+			 * to their default value.
+			 */
+			m = readl(host->ioaddr + ESDHC_WTMK_LVL);
+			if (val & SDHCI_TRNS_DMA) {
+				wml = ESDHC_WTMK_LVL_WML_VAL_DEF;
+			} else {
+				u8 ctrl;
+				wml = ESDHC_WTMK_LVL_WML_VAL_MAX;
+
+				/*
+				 * DMA mode was just disabled above, so DMASEL
+				 * needs to be cleared as well. Otherwise, for
+				 * standard tuning, the uSDHC would still
+				 * prefetch the ADMA script from a stale DMA
+				 * address when the tuning command is sent,
+				 * and the IOMMU would then report errors
+				 * about a missing TLB mapping.
+				 */
+				ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+				ctrl &= ~SDHCI_CTRL_DMA_MASK;
+				sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+			}
+			m &= ~(ESDHC_WTMK_LVL_RD_WML_MASK |
+			       ESDHC_WTMK_LVL_WR_WML_MASK);
+			m |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) |
+			     (wml << ESDHC_WTMK_LVL_WR_WML_SHIFT);
+			writel(m, host->ioaddr + ESDHC_WTMK_LVL);
+		} else {
+			/*
+			 * Postpone this write; it must be done together with
+			 * the command write that is down below.
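The watermark programming above packs the same level into the read and write fields of ESDHC_WTMK_LVL. Below is a standalone sketch of the composition; the mask and shift values are assumptions matching this driver's register layout (read WML in bits 7:0, write WML in bits 23:16):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed field layout of ESDHC_WTMK_LVL, mirroring the driver defines. */
#define ESDHC_WTMK_LVL_RD_WML_MASK  0x000000ff
#define ESDHC_WTMK_LVL_RD_WML_SHIFT 0
#define ESDHC_WTMK_LVL_WR_WML_MASK  0x00ff0000
#define ESDHC_WTMK_LVL_WR_WML_SHIFT 16

static uint32_t set_watermarks(uint32_t reg, uint32_t wml)
{
	reg &= ~(ESDHC_WTMK_LVL_RD_WML_MASK | ESDHC_WTMK_LVL_WR_WML_MASK);
	reg |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) |
	       (wml << ESDHC_WTMK_LVL_WR_WML_SHIFT);
	return reg;
}

int main(void)
{
	/* PIO: 128 words (0x80) in both directions -> prints 0x00800080 */
	printf("0x%08x\n", (unsigned)set_watermarks(0, 0x80));
	return 0;
}
```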
+ */ + imx_data->scratchpad = val; + } + return; + case SDHCI_COMMAND: + if (host->cmd->opcode == MMC_STOP_TRANSMISSION) + val |= SDHCI_CMD_ABORTCMD; + + if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && + (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + imx_data->multiblock_status = MULTIBLK_IN_PROCESS; + + if (esdhc_is_usdhc(imx_data)) + writel(val << 16, + host->ioaddr + SDHCI_TRANSFER_MODE); + else + writel(val << 16 | imx_data->scratchpad, + host->ioaddr + SDHCI_TRANSFER_MODE); + return; + case SDHCI_BLOCK_SIZE: + val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); + break; + } + esdhc_clrset_le(host, 0xffff, val, reg); +} + +static u8 esdhc_readb_le(struct sdhci_host *host, int reg) +{ + u8 ret; + u32 val; + + switch (reg) { + case SDHCI_HOST_CONTROL: + val = readl(host->ioaddr + reg); + + ret = val & SDHCI_CTRL_LED; + ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK; + ret |= (val & ESDHC_CTRL_4BITBUS); + ret |= (val & ESDHC_CTRL_8BITBUS) << 3; + return ret; + } + + return readb(host->ioaddr + reg); +} + +static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u32 new_val = 0; + u32 mask; + + switch (reg) { + case SDHCI_POWER_CONTROL: + /* + * FSL put some DMA bits here + * If your board has a regulator, code should be here + */ + return; + case SDHCI_HOST_CONTROL: + /* FSL messed up here, so we need to manually compose it. */ + new_val = val & SDHCI_CTRL_LED; + /* ensure the endianness */ + new_val |= ESDHC_HOST_CONTROL_LE; + /* bits 8&9 are reserved on mx25 */ + if (!is_imx25_esdhc(imx_data)) { + /* DMA mode bits are shifted */ + new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; + } + + /* + * Do not touch buswidth bits here. This is done in + * esdhc_pltfm_bus_width. + * Do not touch the D3CD bit either which is used for the + * SDIO interrupt erratum workaround. + */ + mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD); + + esdhc_clrset_le(host, mask, new_val, reg); + return; + case SDHCI_SOFTWARE_RESET: + if (val & SDHCI_RESET_DATA) + new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL); + break; + } + esdhc_clrset_le(host, 0xff, val, reg); + + if (reg == SDHCI_SOFTWARE_RESET) { + if (val & SDHCI_RESET_ALL) { + /* + * The esdhc has a design violation to SDHC spec which + * tells that software reset should not affect card + * detection circuit. But esdhc clears its SYSCTL + * register bits [0..2] during the software reset. This + * will stop those clocks that card detection circuit + * relies on. To work around it, we turn the clocks on + * back to keep card detection circuit functional. + */ + esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL); + /* + * The reset on usdhc fails to clear MIX_CTRL register. + * Do it manually here. + */ + if (esdhc_is_usdhc(imx_data)) { + /* + * the tuning bits should be kept during reset + */ + new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); + writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK, + host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 0; + } + } else if (val & SDHCI_RESET_DATA) { + /* + * The eSDHC DAT line software reset clears at least the + * data transfer width on i.MX25, so make sure that the + * Host Control register is unaffected. 
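The SDHCI_COMMAND case above completes the deferred-write scheme: the 16-bit transfer mode is stashed in a scratchpad and emitted together with the command in a single 32-bit write. A minimal model of that pairing:

```c
#include <stdint.h>
#include <stdio.h>

/* Model of the deferred-write trick above: on eSDHC the 16-bit
 * TRANSFER_MODE (0x0c) and COMMAND (0x0e) registers share one 32-bit
 * word, so the transfer mode is buffered until the command write. */
static uint32_t scratchpad; /* stands in for imx_data->scratchpad */

static void write_transfer_mode(uint16_t mode)
{
	scratchpad = mode; /* postponed, no bus access yet */
}

static uint32_t write_command(uint16_t cmd)
{
	return (uint32_t)cmd << 16 | scratchpad; /* one combined write */
}

int main(void)
{
	write_transfer_mode(0x0023);
	/* prints 0x1a3a0023: command in the high half, mode in the low */
	printf("0x%08x\n", (unsigned)write_command(0x1a3a));
	return 0;
}
```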
+ */ + esdhc_clrset_le(host, 0xff, new_val, + SDHCI_HOST_CONTROL); + } + } +} + +static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return pltfm_host->clock; +} + +static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return pltfm_host->clock / 256 / 16; +} + +static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, + unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + unsigned int host_clock = pltfm_host->clock; + int ddr_pre_div = imx_data->is_ddr ? 2 : 1; + int pre_div = 1; + int div = 1; + int ret; + u32 temp, val; + + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + esdhc_wait_for_card_clock_gate_off(host); + } + + if (clock == 0) { + host->mmc->actual_clock = 0; + return; + } + + /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */ + if (is_imx53_esdhc(imx_data)) { + /* + * According to the i.MX53 reference manual, if DLLCTRL[10] can + * be set, then the controller is eSDHCv3, else it is eSDHCv2. + */ + val = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL); + temp = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val, host->ioaddr + ESDHC_DLL_CTRL); + if (temp & BIT(10)) + pre_div = 2; + } + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) && + (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) { + unsigned int max_clock; + + max_clock = imx_data->is_ddr ? 
45000000 : 150000000;
+
+		clock = min(clock, max_clock);
+	}
+
+	while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
+			pre_div < 256)
+		pre_div *= 2;
+
+	while (host_clock / (div * pre_div * ddr_pre_div) > clock && div < 16)
+		div++;
+
+	host->mmc->actual_clock = host_clock / (div * pre_div * ddr_pre_div);
+	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+		clock, host->mmc->actual_clock);
+
+	pre_div >>= 1;
+	div--;
+
+	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+		| (div << ESDHC_DIVIDER_SHIFT)
+		| (pre_div << ESDHC_PREDIV_SHIFT));
+	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+	/* wait for bit 3 of PRSSTAT to be set to make sure the card clock is stable */
+	ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, temp,
+				 (temp & ESDHC_CLOCK_STABLE), 2, 100);
+	if (ret == -ETIMEDOUT)
+		dev_warn(mmc_dev(host->mmc), "card clock not stable within 100us!\n");
+
+	if (esdhc_is_usdhc(imx_data)) {
+		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+		writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+			host->ioaddr + ESDHC_VENDOR_SPEC);
+	}
+}
+
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+	struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+	switch (boarddata->wp_type) {
+	case ESDHC_WP_GPIO:
+		return mmc_gpio_get_ro(host->mmc);
+	case ESDHC_WP_CONTROLLER:
+		return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
+			 SDHCI_WRITE_PROTECT);
+	case ESDHC_WP_NONE:
+		break;
+	}
+
+	return -ENOSYS;
+}
+
+static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+{
+	u32 ctrl;
+
+	switch (width) {
+	case MMC_BUS_WIDTH_8:
+		ctrl = ESDHC_CTRL_8BITBUS;
+		break;
+	case MMC_BUS_WIDTH_4:
+		ctrl = ESDHC_CTRL_4BITBUS;
+		break;
+	default:
+		ctrl = 0;
+		break;
+	}
+
+	esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
+			SDHCI_HOST_CONTROL);
+}
+
+static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	/*
+	 * The i.MX uSDHC internally already uses a fixed, optimized timing
+	 * for DDR50 and normally does not require tuning for DDR50 mode.
+	 */
+	if (host->timing == MMC_TIMING_UHS_DDR50)
+		return 0;
+
+	return sdhci_execute_tuning(mmc, opcode);
+}
+
+static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
+{
+	u32 reg;
+	u8 sw_rst;
+	int ret;
+
+	/* FIXME: delay a bit for the card to be ready for the next tuning due to errors */
+	mdelay(1);
+
+	/* the IC vendor suggests resetting the uSDHC before every tuning command */
+	esdhc_clrset_le(host, 0xff, SDHCI_RESET_ALL, SDHCI_SOFTWARE_RESET);
+	ret = readb_poll_timeout(host->ioaddr + SDHCI_SOFTWARE_RESET, sw_rst,
+				 !(sw_rst & SDHCI_RESET_ALL), 10, 100);
+	if (ret == -ETIMEDOUT)
+		dev_warn(mmc_dev(host->mmc),
+			 "warning! RESET_ALL did not complete before sending tuning command\n");
+
+	reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+	reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
+			ESDHC_MIX_CTRL_FBCLK_SEL;
+	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+	writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+	dev_dbg(mmc_dev(host->mmc),
+		"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
+			val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+}
+
+static void esdhc_post_tuning(struct sdhci_host *host)
+{
+	u32 reg;
+
+	reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+	reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+	reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
+static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+{
+	int min, max, avg, ret;
+
+	/* find the minimum delay that passes tuning first */
+	min = ESDHC_TUNE_CTRL_MIN;
+	while (min < ESDHC_TUNE_CTRL_MAX) {
+		esdhc_prepare_tuning(host, min);
+		if (!mmc_send_tuning(host->mmc, opcode, NULL))
+			break;
+		min += ESDHC_TUNE_CTRL_STEP;
+	}
+
+	/* then find the maximum delay that still passes tuning */
+	max = min + ESDHC_TUNE_CTRL_STEP;
+	while (max < ESDHC_TUNE_CTRL_MAX) {
+		esdhc_prepare_tuning(host, max);
+		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
+			max -= ESDHC_TUNE_CTRL_STEP;
+			break;
+		}
+		max += ESDHC_TUNE_CTRL_STEP;
+	}
+
+	/* use the average delay to get the best timing */
+	avg = (min + max) / 2;
+	esdhc_prepare_tuning(host, avg);
+	ret = mmc_send_tuning(host->mmc, opcode, NULL);
+	esdhc_post_tuning(host);
+
+	dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
+		ret ? "failed" : "passed", avg, ret);
+
+	return ret;
+}
+
+static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u32 m;
+
+	m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+	if (ios->enhanced_strobe)
+		m |= ESDHC_MIX_CTRL_HS400_ES_EN;
+	else
+		m &= ~ESDHC_MIX_CTRL_HS400_ES_EN;
+	writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
+static int esdhc_change_pinstate(struct sdhci_host *host,
+						unsigned int uhs)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+	struct pinctrl_state *pinctrl;
+
+	dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
+
+	if (IS_ERR(imx_data->pinctrl) ||
+		IS_ERR(imx_data->pins_100mhz) ||
+		IS_ERR(imx_data->pins_200mhz))
+		return -EINVAL;
+
+	switch (uhs) {
+	case MMC_TIMING_UHS_SDR50:
+	case MMC_TIMING_UHS_DDR50:
+		pinctrl = imx_data->pins_100mhz;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+	case MMC_TIMING_MMC_HS400:
+		pinctrl = imx_data->pins_200mhz;
+		break;
+	default:
+		/* back to the default state for other legacy timings */
+		return pinctrl_select_default_state(mmc_dev(host->mmc));
+	}
+
+	return pinctrl_select_state(imx_data->pinctrl, pinctrl);
+}
+
+/*
+ * For HS400 eMMC, there is a data_strobe line. This signal is generated
+ * by the device and used for data output and CRC status response output
+ * in HS400 mode. The frequency of this signal follows the frequency of
+ * CLK generated by the host. The host receives data aligned to the edge
+ * of the data_strobe line. Due to the time delay between the CLK and
+ * data_strobe lines, if the delay is larger than one clock cycle, the
+ * two lines become misaligned and read errors show up.
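The esdhc_executing_tuning() routine above is a classic window search. The standalone sketch below reproduces its shape, slightly simplified, with a stubbed pass/fail predicate standing in for mmc_send_tuning():

```c
#include <stdio.h>

#define TUNE_MIN  0
#define TUNE_MAX  128
#define TUNE_STEP 1

/* Stand-in for mmc_send_tuning(): pretend delays 40..90 pass. */
static int tuning_passes(int delay)
{
	return delay >= 40 && delay <= 90;
}

/* Same window search as esdhc_executing_tuning() above: walk up to the
 * first passing delay, keep walking to the last passing delay, then pick
 * the midpoint of the passing window. */
static int find_tuning_delay(void)
{
	int min = TUNE_MIN, max;

	while (min < TUNE_MAX && !tuning_passes(min))
		min += TUNE_STEP;

	max = min + TUNE_STEP;
	while (max < TUNE_MAX && tuning_passes(max))
		max += TUNE_STEP;
	max -= TUNE_STEP;

	return (min + max) / 2;
}

int main(void)
{
	printf("best delay: %d\n", find_tuning_delay()); /* prints 65 */
	return 0;
}
```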
+ */ +static void esdhc_set_strobe_dll(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u32 strobe_delay; + u32 v; + int ret; + + /* disable clock before enabling strobe dll */ + writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & + ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + esdhc_wait_for_card_clock_gate_off(host); + + /* force a reset on strobe dll */ + writel(ESDHC_STROBE_DLL_CTRL_RESET, + host->ioaddr + ESDHC_STROBE_DLL_CTRL); + /* clear the reset bit on strobe dll before any setting */ + writel(0, host->ioaddr + ESDHC_STROBE_DLL_CTRL); + + /* + * enable strobe dll ctrl and adjust the delay target + * for the uSDHC loopback read clock + */ + if (imx_data->boarddata.strobe_dll_delay_target) + strobe_delay = imx_data->boarddata.strobe_dll_delay_target; + else + strobe_delay = ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT; + v = ESDHC_STROBE_DLL_CTRL_ENABLE | + ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT | + (strobe_delay << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); + writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); + + /* wait max 50us to get the REF/SLV lock */ + ret = readl_poll_timeout(host->ioaddr + ESDHC_STROBE_DLL_STATUS, v, + ((v & ESDHC_STROBE_DLL_STS_REF_LOCK) && (v & ESDHC_STROBE_DLL_STS_SLV_LOCK)), 1, 50); + if (ret == -ETIMEDOUT) + dev_warn(mmc_dev(host->mmc), + "warning! HS400 strobe DLL status REF/SLV not lock in 50us, STROBE DLL status is %x!\n", v); +} + +static void esdhc_reset_tuning(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + u32 ctrl; + int ret; + + /* Reset the tuning circuit */ + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL); + ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + ctrl &= ~ESDHC_MIX_CTRL_FBCLK_SEL; + writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL); + writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS); + ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE; + writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS); + /* Make sure ESDHC_MIX_CTRL_EXE_TUNE cleared */ + ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS, + ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50); + if (ret == -ETIMEDOUT) + dev_warn(mmc_dev(host->mmc), + "Warning! clear execute tuning bit failed\n"); + /* + * SDHCI_INT_DATA_AVAIL is W1C bit, set this bit will clear the + * usdhc IP internal logic flag execute_tuning_with_clr_buf, which + * will finally make sure the normal data transfer logic correct. 
+ */ + ctrl = readl(host->ioaddr + SDHCI_INT_STATUS); + ctrl |= SDHCI_INT_DATA_AVAIL; + writel(ctrl, host->ioaddr + SDHCI_INT_STATUS); + } + } +} + +static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) +{ + u32 m; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + + /* disable ddr mode and disable HS400 mode */ + m = readl(host->ioaddr + ESDHC_MIX_CTRL); + m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN); + imx_data->is_ddr = 0; + + switch (timing) { + case MMC_TIMING_UHS_SDR12: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_SDR50: + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS: + case MMC_TIMING_MMC_HS200: + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + m |= ESDHC_MIX_CTRL_DDREN; + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 1; + if (boarddata->delay_line) { + u32 v; + v = boarddata->delay_line << + ESDHC_DLL_OVERRIDE_VAL_SHIFT | + (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT); + if (is_imx53_esdhc(imx_data)) + v <<= 1; + writel(v, host->ioaddr + ESDHC_DLL_CTRL); + } + break; + case MMC_TIMING_MMC_HS400: + m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN; + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 1; + /* update clock after enable DDR for strobe DLL lock */ + host->ops->set_clock(host, host->clock); + esdhc_set_strobe_dll(host); + break; + case MMC_TIMING_LEGACY: + default: + esdhc_reset_tuning(host); + break; + } + + esdhc_change_pinstate(host, timing); +} + +static void esdhc_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_and_cqhci_reset(host, mask); + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} + +static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + + /* Doc Erratum: the uSDHC actual maximum timeout count is 1 << 29 */ + return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27; +} + +static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + + /* use maximum timeout counter */ + esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK, + esdhc_is_usdhc(imx_data) ? 
0xF : 0xE, + SDHCI_TIMEOUT_CONTROL); +} + +static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static struct sdhci_ops sdhci_esdhc_ops = { + .read_l = esdhc_readl_le, + .read_w = esdhc_readw_le, + .read_b = esdhc_readb_le, + .write_l = esdhc_writel_le, + .write_w = esdhc_writew_le, + .write_b = esdhc_writeb_le, + .set_clock = esdhc_pltfm_set_clock, + .get_max_clock = esdhc_pltfm_get_max_clock, + .get_min_clock = esdhc_pltfm_get_min_clock, + .get_max_timeout_count = esdhc_get_max_timeout_count, + .get_ro = esdhc_pltfm_get_ro, + .set_timeout = esdhc_set_timeout, + .set_bus_width = esdhc_pltfm_set_bus_width, + .set_uhs_signaling = esdhc_set_uhs_signaling, + .reset = esdhc_reset, + .irq = esdhc_cqhci_irq, + .dump_vendor_regs = esdhc_dump_debug_regs, +}; + +static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC + | SDHCI_QUIRK_BROKEN_CARD_DETECTION, + .ops = &sdhci_esdhc_ops, +}; + +static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host = host->mmc->cqe_private; + u32 tmp; + + if (esdhc_is_usdhc(imx_data)) { + /* + * The imx6q ROM code will change the default watermark + * level setting to something insane. Change it back here. + */ + writel(ESDHC_WTMK_DEFAULT_VAL, host->ioaddr + ESDHC_WTMK_LVL); + + /* + * ROM code will change the bit burst_length_enable setting + * to zero if this usdhc is chosen to boot system. Change + * it back here, otherwise it will impact the performance a + * lot. This bit is used to enable/disable the burst length + * for the external AHB2AXI bridge. It's useful especially + * for INCR transfer because without burst length indicator, + * the AHB2AXI bridge does not know the burst length in + * advance. And without burst length indicator, AHB INCR + * transfer can only be converted to singles on the AXI side. + */ + writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) + | ESDHC_BURST_LEN_EN_INCR, + host->ioaddr + SDHCI_HOST_CONTROL); + + /* + * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) & ~BIT(7), + host->ioaddr + 0x6c); + + /* disable DLL_CTRL delay line settings */ + writel(0x0, host->ioaddr + ESDHC_DLL_CTRL); + + /* + * For the case of command with busy, if set the bit + * ESDHC_VEND_SPEC2_EN_BUSY_IRQ, USDHC will generate a + * transfer complete interrupt when busy is deasserted. + * When CQHCI use DCMD to send a CMD need R1b respons, + * CQHCI require to set ESDHC_VEND_SPEC2_EN_BUSY_IRQ, + * otherwise DCMD will always meet timeout waiting for + * hardware interrupt issue. + */ + if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) { + tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2); + tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ; + writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2); + + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + } + + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); + tmp |= ESDHC_STD_TUNING_EN; + + /* + * ROM code or bootloader may config the start tap + * and step, unmask them first. 
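The hwinit code just below packs the tuning start tap and step into ESDHC_TUNING_CTRL. A standalone sketch of that packing; the mask and shift values are assumptions matching this driver's defines (start tap in bits 6:0, step in bits 18:16):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed ESDHC_TUNING_CTRL field layout, mirroring the driver defines. */
#define TUNING_START_TAP_MASK 0x7f
#define TUNING_STEP_MASK      0x00070000
#define TUNING_STEP_SHIFT     16

static uint32_t pack_tuning_ctrl(uint32_t reg, uint32_t start_tap,
				 uint32_t step)
{
	/* unmask whatever the ROM code or bootloader left behind */
	reg &= ~(TUNING_START_TAP_MASK | TUNING_STEP_MASK);
	reg |= start_tap;
	reg |= step << TUNING_STEP_SHIFT;
	return reg;
}

int main(void)
{
	/* start at tap 20, step by 2 -> prints 0x00020014 */
	printf("0x%08x\n", (unsigned)pack_tuning_ctrl(0, 20, 2));
	return 0;
}
```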
+			 */
+			tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
+			if (imx_data->boarddata.tuning_start_tap)
+				tmp |= imx_data->boarddata.tuning_start_tap;
+			else
+				tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
+
+			if (imx_data->boarddata.tuning_step) {
+				tmp |= imx_data->boarddata.tuning_step
+					<< ESDHC_TUNING_STEP_SHIFT;
+			} else {
+				tmp |= ESDHC_TUNING_STEP_DEFAULT
+					<< ESDHC_TUNING_STEP_SHIFT;
+			}
+
+			/*
+			 * Disable the CMD CRC check for tuning; otherwise a
+			 * delay would have to be added after every tuning
+			 * command. The hardware standard-tuning logic moves
+			 * straight to the next step once it detects a CMD CRC
+			 * error: it does not wait for the card to finish
+			 * sending the tuning data and raises the buffer read
+			 * ready interrupt immediately. If the uSDHC then sends
+			 * the next tuning command, some eMMC cards get stuck
+			 * and stop responding, which either blocks the tuning
+			 * procedure or leaves the first command after tuning
+			 * without any response.
+			 */
+			tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
+			writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+		} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+			/*
+			 * ESDHC_STD_TUNING_EN may have been configured by the
+			 * bootloader or ROM code, so clear this bit here to
+			 * make sure manual tuning can work.
+			 */
+			tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+			tmp &= ~ESDHC_STD_TUNING_EN;
+			writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+		}
+
+		/*
+		 * On i.MX8MM we run a dual-Linux setup: the first Linux uses
+		 * the SD card as rootfs storage and the second uses the eMMC,
+		 * with the first Linux configuring power and clocks for the
+		 * second. When the first Linux killed the second while it was
+		 * booting into its rootfs stage and then restarted it, an
+		 * SDHCI dump was observed. Clearing the pending interrupts
+		 * and halting CQCTL here makes the issue go away.
+		 */
+		if (cq_host) {
+			tmp = cqhci_readl(cq_host, CQHCI_IS);
+			cqhci_writel(cq_host, tmp, CQHCI_IS);
+			cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+		}
+	}
+}
+
+static void esdhc_cqe_enable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	struct cqhci_host *cq_host = mmc->cqe_private;
+	u32 reg;
+	u16 mode;
+	int count = 10;
+
+	/*
+	 * CQE gets stuck if it sees the Buffer Read Enable bit set, which can
+	 * be the case after tuning, so ensure the buffer is drained.
+	 */
+	reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+	while (reg & SDHCI_DATA_AVAILABLE) {
+		sdhci_readl(host, SDHCI_BUFFER);
+		reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+		if (count-- == 0) {
+			dev_warn(mmc_dev(host->mmc),
+				 "CQE may get stuck because the Buffer Read Enable bit is set\n");
+			break;
+		}
+		mdelay(1);
+	}
+
+	/*
+	 * Runtime resume resets the entire host controller, which also clears
+	 * DMAEN/BCEN in the ESDHC_MIX_CTRL register, so set DMAEN and BCEN
+	 * again here when enabling CMDQ.
+	 */
+	mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+	if (host->flags & SDHCI_REQ_USE_DMA)
+		mode |= SDHCI_TRNS_DMA;
+	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
+		mode |= SDHCI_TRNS_BLK_CNT_EN;
+	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+
+	/*
+	 * Although runtime resume resets the entire host controller, it does
+	 * not touch the CQHCI side, so the HALT bit has to be cleared here to
+	 * keep CQHCI from getting stuck on the first request after system
+	 * resume.
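The buffer drain at the top of esdhc_cqe_enable() above bounds its loop so a stuck Buffer Read Enable flag cannot hang the caller. The same shape on plain data, as a standalone sketch:

```c
#include <stdio.h>

/* Shape of the bounded drain loop in esdhc_cqe_enable() above: keep
 * consuming while the "data available" flag is up, but cap the number of
 * iterations so a flag that never clears cannot hang the caller. */
static int drain(const int *available, int n)
{
	int count = 10;
	int i = 0;

	while (i < n && available[i]) {
		i++;                /* consume one word (sdhci_readl(BUFFER)) */
		if (count-- == 0)
			return -1;  /* give up: warn that CQE may get stuck */
	}
	return i;
}

int main(void)
{
	int fifo[] = { 1, 1, 1, 0 };

	printf("drained %d words\n", drain(fifo, 4)); /* drained 3 words */
	return 0;
}
```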
+ */ + cqhci_writel(cq_host, 0, CQHCI_CTL); + if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) + dev_err(mmc_dev(host->mmc), + "failed to exit halt state when enable CQE\n"); + + + sdhci_cqe_enable(mmc); +} + +static void esdhc_sdhci_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static const struct cqhci_host_ops esdhc_cqhci_ops = { + .enable = esdhc_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = esdhc_sdhci_dumpregs, +}; + +#ifdef CONFIG_OF +static int +sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, + struct sdhci_host *host, + struct pltfm_imx_data *imx_data) +{ + struct device_node *np = pdev->dev.of_node; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int ret; + + if (of_get_property(np, "fsl,wp-controller", NULL)) + boarddata->wp_type = ESDHC_WP_CONTROLLER; + + /* + * If we have this property, then activate WP check. + * Retrieveing and requesting the actual WP GPIO will happen + * in the call to mmc_of_parse(). + */ + if (of_property_read_bool(np, "wp-gpios")) + boarddata->wp_type = ESDHC_WP_GPIO; + + of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step); + of_property_read_u32(np, "fsl,tuning-start-tap", + &boarddata->tuning_start_tap); + + of_property_read_u32(np, "fsl,strobe-dll-delay-target", + &boarddata->strobe_dll_delay_target); + if (of_find_property(np, "no-1-8-v", NULL)) + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + + if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line)) + boarddata->delay_line = 0; + + mmc_of_parse_voltage(np, &host->ocr_mask); + + if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + } + + /* call to generic mmc_of_parse to support additional capabilities */ + ret = mmc_of_parse(host->mmc); + if (ret) + return ret; + + if (mmc_gpio_get_cd(host->mmc) >= 0) + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + return 0; +} +#else +static inline int +sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, + struct sdhci_host *host, + struct pltfm_imx_data *imx_data) +{ + return -ENODEV; +} +#endif + +static int sdhci_esdhc_imx_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(imx_esdhc_dt_ids, &pdev->dev); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_host *host; + struct cqhci_host *cq_host; + int err; + struct pltfm_imx_data *imx_data; + + host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, + sizeof(*imx_data)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + + imx_data = sdhci_pltfm_priv(pltfm_host); + + imx_data->socdata = of_id->data; + + if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) + cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); + + imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imx_data->clk_ipg)) { + err = PTR_ERR(imx_data->clk_ipg); + goto free_sdhci; + } + + imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(imx_data->clk_ahb)) { + err = PTR_ERR(imx_data->clk_ahb); + goto free_sdhci; + } + + imx_data->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(imx_data->clk_per)) { + err = PTR_ERR(imx_data->clk_per); + goto free_sdhci; + } + + pltfm_host->clk = imx_data->clk_per; + pltfm_host->clock = clk_get_rate(pltfm_host->clk); + err = clk_prepare_enable(imx_data->clk_per); + if (err) + goto free_sdhci; + err = 
clk_prepare_enable(imx_data->clk_ipg); + if (err) + goto disable_per_clk; + err = clk_prepare_enable(imx_data->clk_ahb); + if (err) + goto disable_ipg_clk; + + imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(imx_data->pinctrl)) + dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n"); + + if (esdhc_is_usdhc(imx_data)) { + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; + + /* GPIO CD can be set as a wakeup source */ + host->mmc->caps |= MMC_CAP_CD_WAKE; + + if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200)) + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; + + /* clear tuning bits in case ROM has set it already */ + writel(0x0, host->ioaddr + ESDHC_MIX_CTRL); + writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS); + writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + + /* + * Link usdhc specific mmc_host_ops execute_tuning function, + * to replace the standard one in sdhci_ops. + */ + host->mmc_host_ops.execute_tuning = usdhc_execute_tuning; + } + + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + if (err) + goto disable_ahb_clk; + + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + sdhci_esdhc_ops.platform_execute_tuning = + esdhc_executing_tuning; + + if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400) + host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; + + if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) + host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; + + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + host->mmc->caps2 |= MMC_CAP2_HS400_ES; + host->mmc_host_ops.hs400_enhanced_strobe = + esdhc_hs400_enhanced_strobe; + } + + if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) { + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + err = -ENOMEM; + goto disable_ahb_clk; + } + + cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET; + cq_host->ops = &esdhc_cqhci_ops; + + err = cqhci_init(cq_host, host->mmc, false); + if (err) + goto disable_ahb_clk; + } + + sdhci_esdhc_imx_hwinit(host); + + err = sdhci_add_host(host); + if (err) + goto disable_ahb_clk; + + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + pm_runtime_enable(&pdev->dev); + + return 0; + +disable_ahb_clk: + clk_disable_unprepare(imx_data->clk_ahb); +disable_ipg_clk: + clk_disable_unprepare(imx_data->clk_ipg); +disable_per_clk: + clk_disable_unprepare(imx_data->clk_per); +free_sdhci: + if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); + sdhci_pltfm_free(pdev); + return err; +} + +static int sdhci_esdhc_imx_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + int dead; + + pm_runtime_get_sync(&pdev->dev); + dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + sdhci_remove_host(host, dead); + + clk_disable_unprepare(imx_data->clk_per); + clk_disable_unprepare(imx_data->clk_ipg); + clk_disable_unprepare(imx_data->clk_ahb); + + if 
(imx_data->socdata->flags & ESDHC_FLAG_PMQOS) + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); + + sdhci_pltfm_free(pdev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_esdhc_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + int ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + + if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) && + (host->tuning_mode != SDHCI_TUNING_MODE_1)) { + mmc_retune_timer_stop(host->mmc); + mmc_retune_needed(host->mmc); + } + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + ret = pinctrl_pm_select_sleep_state(dev); + if (ret) + return ret; + + ret = mmc_gpio_set_cd_wake(host->mmc, true); + + return ret; +} + +static int sdhci_esdhc_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; + + /* re-initialize hw state in case it's lost in low power mode */ + sdhci_esdhc_imx_hwinit(host); + + ret = sdhci_resume_host(host); + if (ret) + return ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) + ret = cqhci_resume(host->mmc); + + if (!ret) + ret = mmc_gpio_set_cd_wake(host->mmc, false); + + return ret; +} +#endif + +#ifdef CONFIG_PM +static int sdhci_esdhc_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + int ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + imx_data->actual_clock = host->mmc->actual_clock; + esdhc_pltfm_set_clock(host, 0); + clk_disable_unprepare(imx_data->clk_per); + clk_disable_unprepare(imx_data->clk_ipg); + clk_disable_unprepare(imx_data->clk_ahb); + + if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); + + return ret; +} + +static int sdhci_esdhc_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); + int err; + + if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) + cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); + + if (imx_data->socdata->flags & ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME) + clk_set_rate(imx_data->clk_per, pltfm_host->clock); + + err = clk_prepare_enable(imx_data->clk_ahb); + if (err) + goto remove_pm_qos_request; + + err = clk_prepare_enable(imx_data->clk_per); + if (err) + goto disable_ahb_clk; + + err = clk_prepare_enable(imx_data->clk_ipg); + if (err) + goto disable_per_clk; + + esdhc_pltfm_set_clock(host, imx_data->actual_clock); + + err = sdhci_runtime_resume_host(host, 0); + if (err) + goto disable_ipg_clk; + + if (host->mmc->caps2 & MMC_CAP2_CQE) + err = cqhci_resume(host->mmc); + + return err; + +disable_ipg_clk: + clk_disable_unprepare(imx_data->clk_ipg); +disable_per_clk: + clk_disable_unprepare(imx_data->clk_per); +disable_ahb_clk: + clk_disable_unprepare(imx_data->clk_ahb); 
+remove_pm_qos_request: + if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) + cpu_latency_qos_remove_request(&imx_data->pm_qos_req); + return err; +} +#endif + +static const struct dev_pm_ops sdhci_esdhc_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume) + SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend, + sdhci_esdhc_runtime_resume, NULL) +}; + +static struct platform_driver sdhci_esdhc_imx_driver = { + .driver = { + .name = "sdhci-esdhc-imx", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = imx_esdhc_dt_ids, + .pm = &sdhci_esdhc_pmops, + }, + .probe = sdhci_esdhc_imx_probe, + .remove = sdhci_esdhc_imx_remove, +}; + +module_platform_driver(sdhci_esdhc_imx_driver); + +MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); +MODULE_AUTHOR("Wolfram Sang "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c new file mode 100644 index 000000000..ca7a1690b --- /dev/null +++ b/drivers/mmc/host/sdhci-esdhc-mcf.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Freescale eSDHC ColdFire family controller driver, platform bus. + * + * Copyright (c) 2020 Timesys Corporation + * Author: Angelo Dureghello + */ + +#include +#include +#include +#include +#include "sdhci-pltfm.h" +#include "sdhci-esdhc.h" + +#define ESDHC_PROCTL_D3CD 0x08 +#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f +#define ESDHC_DEFAULT_HOST_CONTROL 0x28 + +/* + * Freescale eSDHC has DMA ERR flag at bit 28, not as std spec says, bit 25. + */ +#define ESDHC_INT_VENDOR_SPEC_DMA_ERR BIT(28) + +struct pltfm_mcf_data { + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_per; + int aside; + int current_bus_width; +}; + +static inline void esdhc_mcf_buffer_swap32(u32 *buf, int len) +{ + int i; + u32 temp; + + len = (len + 3) >> 2; + + for (i = 0; i < len; i++) { + temp = swab32(*buf); + *buf++ = temp; + } +} + +static inline void esdhc_clrset_be(struct sdhci_host *host, + u32 mask, u32 val, int reg) +{ + void __iomem *base = host->ioaddr + (reg & ~3); + u8 shift = (reg & 3) << 3; + + mask <<= shift; + val <<= shift; + + if (reg == SDHCI_HOST_CONTROL) + val |= ESDHC_PROCTL_D3CD; + + writel((readl(base) & ~mask) | val, base); +} + +/* + * Note: mcf is big-endian, single bytes need to be accessed at big endian + * offsets. + */ +static void esdhc_mcf_writeb_be(struct sdhci_host *host, u8 val, int reg) +{ + void __iomem *base = host->ioaddr + (reg & ~3); + u8 shift = (reg & 3) << 3; + u32 mask = ~(0xff << shift); + + if (reg == SDHCI_HOST_CONTROL) { + u32 host_ctrl = ESDHC_DEFAULT_HOST_CONTROL; + u8 dma_bits = (val & SDHCI_CTRL_DMA_MASK) >> 3; + u8 tmp = readb(host->ioaddr + SDHCI_HOST_CONTROL + 1); + + tmp &= ~0x03; + tmp |= dma_bits; + + /* + * Recomposition needed, restore always endianness and + * keep D3CD and AI, just setting bus width. 
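esdhc_mcf_buffer_swap32() above compensates for the SDMA engine's fixed endianness. A standalone version with sample data (swab32() is re-implemented here so the example builds outside the kernel):

```c
#include <stdint.h>
#include <stdio.h>

/* Local re-implementation of the kernel's swab32() for a standalone build. */
static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* Standalone version of esdhc_mcf_buffer_swap32() above: the ColdFire SDMA
 * engine has no endianness control, so every 32-bit word of a data buffer
 * must be byte-swapped around the transfer. */
static void buffer_swap32(uint32_t *buf, int len)
{
	int words = (len + 3) >> 2; /* round the byte length up to whole words */
	int i;

	for (i = 0; i < words; i++)
		buf[i] = swab32(buf[i]);
}

int main(void)
{
	uint32_t buf[2] = { 0x11223344, 0xa1b2c3d4 };

	buffer_swap32(buf, sizeof(buf));
	/* prints 0x44332211 0xd4c3b2a1 */
	printf("0x%08x 0x%08x\n", (unsigned)buf[0], (unsigned)buf[1]);
	return 0;
}
```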
+ */ + host_ctrl |= val; + host_ctrl |= (dma_bits << 8); + writel(host_ctrl, host->ioaddr + SDHCI_HOST_CONTROL); + + return; + } + + writel((readl(base) & mask) | (val << shift), base); +} + +static void esdhc_mcf_writew_be(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host); + void __iomem *base = host->ioaddr + (reg & ~3); + u8 shift = (reg & 3) << 3; + u32 mask = ~(0xffff << shift); + + switch (reg) { + case SDHCI_TRANSFER_MODE: + mcf_data->aside = val; + return; + case SDHCI_COMMAND: + if (host->cmd->opcode == MMC_STOP_TRANSMISSION) + val |= SDHCI_CMD_ABORTCMD; + + /* + * As for the fsl driver, + * we have to set the mode in a single write here. + */ + writel(val << 16 | mcf_data->aside, + host->ioaddr + SDHCI_TRANSFER_MODE); + return; + } + + writel((readl(base) & mask) | (val << shift), base); +} + +static void esdhc_mcf_writel_be(struct sdhci_host *host, u32 val, int reg) +{ + writel(val, host->ioaddr + reg); +} + +static u8 esdhc_mcf_readb_be(struct sdhci_host *host, int reg) +{ + if (reg == SDHCI_HOST_CONTROL) { + u8 __iomem *base = host->ioaddr + (reg & ~3); + u16 val = readw(base + 2); + u8 dma_bits = (val >> 5) & SDHCI_CTRL_DMA_MASK; + u8 host_ctrl = val & 0xff; + + host_ctrl &= ~SDHCI_CTRL_DMA_MASK; + host_ctrl |= dma_bits; + + return host_ctrl; + } + + return readb(host->ioaddr + (reg ^ 0x3)); +} + +static u16 esdhc_mcf_readw_be(struct sdhci_host *host, int reg) +{ + /* + * For SDHCI_HOST_VERSION, sdhci specs defines 0xFE, + * a wrong offset for us, we are at 0xFC. + */ + if (reg == SDHCI_HOST_VERSION) + reg -= 2; + + return readw(host->ioaddr + (reg ^ 0x2)); +} + +static u32 esdhc_mcf_readl_be(struct sdhci_host *host, int reg) +{ + u32 val; + + val = readl(host->ioaddr + reg); + + /* + * RM (25.3.9) sd pin clock must never exceed 25Mhz. + * So forcing legacy mode at 25Mhz. 
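The read accessors above fold big-endian byte placement into the register offset with an XOR rather than plain addition. A small standalone demonstration of that offset mapping:

```c
#include <stdio.h>

/* Model of the big-endian offset fixup in esdhc_mcf_readb_be() and
 * esdhc_mcf_readw_be() above: on the big-endian bus, byte 0 of a 32-bit
 * register word sits at the highest byte address within the word, so
 * sub-word accesses XOR the offset instead of using it directly. */
int main(void)
{
	int reg;

	for (reg = 0x28; reg < 0x2c; reg++)
		printf("byte reg 0x%02x -> bus offset 0x%02x\n",
		       reg, reg ^ 0x3);
	printf("word reg 0x%02x -> bus offset 0x%02x\n", 0x2a, 0x2a ^ 0x2);
	return 0;
}
```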
+ */ + if (unlikely(reg == SDHCI_CAPABILITIES)) + val &= ~SDHCI_CAN_DO_HISPD; + + if (unlikely(reg == SDHCI_INT_STATUS)) { + if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { + val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; + val |= SDHCI_INT_ADMA_ERROR; + } + } + + return val; +} + +static unsigned int esdhc_mcf_get_max_timeout_count(struct sdhci_host *host) +{ + return 1 << 27; +} + +static void esdhc_mcf_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + /* Use maximum timeout counter */ + esdhc_clrset_be(host, ESDHC_SYS_CTRL_DTOCV_MASK, 0xE, + SDHCI_TIMEOUT_CONTROL); +} + +static void esdhc_mcf_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host); + + sdhci_reset(host, mask); + + esdhc_clrset_be(host, ESDHC_CTRL_BUSWIDTH_MASK, + mcf_data->current_bus_width, SDHCI_HOST_CONTROL); + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} + +static unsigned int esdhc_mcf_pltfm_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return pltfm_host->clock; +} + +static unsigned int esdhc_mcf_pltfm_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return pltfm_host->clock / 256 / 16; +} + +static void esdhc_mcf_pltfm_set_clock(struct sdhci_host *host, + unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + unsigned long *pll_dr = (unsigned long *)MCF_PLL_DR; + u32 fvco, fsys, fesdhc, temp; + const int sdclkfs[] = {2, 4, 8, 16, 32, 64, 128, 256}; + int delta, old_delta = clock; + int i, q, ri, rq; + + if (clock == 0) { + host->mmc->actual_clock = 0; + return; + } + + /* + * ColdFire eSDHC clock.s + * + * pll -+-> / outdiv1 --> fsys + * +-> / outdiv3 --> eSDHC clock ---> / SDCCLKFS / DVS + * + * mcf5441x datasheet says: + * (8.1.2) eSDHC should be 40 MHz max + * (25.3.9) eSDHC input is, as example, 96 Mhz ... 
+ * (25.3.9) sd pin clock must never exceed 25Mhz + * + * fvco = fsys * outdvi1 + 1 + * fshdc = fvco / outdiv3 + 1 + */ + temp = readl(pll_dr); + fsys = pltfm_host->clock; + fvco = fsys * ((temp & 0x1f) + 1); + fesdhc = fvco / (((temp >> 10) & 0x1f) + 1); + + for (i = 0; i < 8; ++i) { + int result = fesdhc / sdclkfs[i]; + + for (q = 1; q < 17; ++q) { + int finale = result / q; + + delta = abs(clock - finale); + + if (delta < old_delta) { + old_delta = delta; + ri = i; + rq = q; + } + } + } + + /* + * Apply divisors and re-enable all the clocks + */ + temp = ((sdclkfs[ri] >> 1) << 8) | ((rq - 1) << 4) | + (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN); + esdhc_clrset_be(host, 0x0000fff7, temp, SDHCI_CLOCK_CONTROL); + + host->mmc->actual_clock = clock; + + mdelay(1); +} + +static void esdhc_mcf_pltfm_set_bus_width(struct sdhci_host *host, int width) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host); + + switch (width) { + case MMC_BUS_WIDTH_4: + mcf_data->current_bus_width = ESDHC_CTRL_4BITBUS; + break; + default: + mcf_data->current_bus_width = 0; + break; + } + + esdhc_clrset_be(host, ESDHC_CTRL_BUSWIDTH_MASK, + mcf_data->current_bus_width, SDHCI_HOST_CONTROL); +} + +static void esdhc_mcf_request_done(struct sdhci_host *host, + struct mmc_request *mrq) +{ + struct scatterlist *sg; + u32 *buffer; + int i; + + if (!mrq->data || !mrq->data->bytes_xfered) + goto exit_done; + + if (mmc_get_dma_dir(mrq->data) != DMA_FROM_DEVICE) + goto exit_done; + + /* + * On mcf5441x there is no hw sdma option/flag to select the dma + * transfer endiannes. A swap after the transfer is needed. + */ + for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) { + buffer = (u32 *)sg_virt(sg); + esdhc_mcf_buffer_swap32(buffer, sg->length); + } + +exit_done: + mmc_request_done(host->mmc, mrq); +} + +static void esdhc_mcf_copy_to_bounce_buffer(struct sdhci_host *host, + struct mmc_data *data, + unsigned int length) +{ + sg_copy_to_buffer(data->sg, data->sg_len, + host->bounce_buffer, length); + + esdhc_mcf_buffer_swap32((u32 *)host->bounce_buffer, + data->blksz * data->blocks); +} + +static struct sdhci_ops sdhci_esdhc_ops = { + .reset = esdhc_mcf_reset, + .set_clock = esdhc_mcf_pltfm_set_clock, + .get_max_clock = esdhc_mcf_pltfm_get_max_clock, + .get_min_clock = esdhc_mcf_pltfm_get_min_clock, + .set_bus_width = esdhc_mcf_pltfm_set_bus_width, + .get_max_timeout_count = esdhc_mcf_get_max_timeout_count, + .set_timeout = esdhc_mcf_set_timeout, + .write_b = esdhc_mcf_writeb_be, + .write_w = esdhc_mcf_writew_be, + .write_l = esdhc_mcf_writel_be, + .read_b = esdhc_mcf_readb_be, + .read_w = esdhc_mcf_readw_be, + .read_l = esdhc_mcf_readl_be, + .copy_to_bounce_buffer = esdhc_mcf_copy_to_bounce_buffer, + .request_done = esdhc_mcf_request_done, +}; + +static const struct sdhci_pltfm_data sdhci_esdhc_mcf_pdata = { + .ops = &sdhci_esdhc_ops, + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_FORCE_DMA, + /* + * Mandatory quirk, + * controller does not support cmd23, + * without, on > 8G cards cmd23 is used, and + * driver times out. 
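The divisor search in esdhc_mcf_pltfm_set_clock() above scans every SDCLKFS/DVS pair for the output frequency closest to the request. A standalone version with the 96 MHz example input mentioned in the comment (unlike the driver, it initializes the best pair before the scan):

```c
#include <stdio.h>
#include <stdlib.h>

/* Standalone model of the divisor search above: try every SDCLKFS
 * prescaler and DVS divisor, keeping the pair whose output frequency is
 * closest to the requested clock. */
int main(void)
{
	static const int sdclkfs[] = { 2, 4, 8, 16, 32, 64, 128, 256 };
	int fesdhc = 96000000;   /* example eSDHC input clock, per the comment */
	int clock = 25000000;    /* requested SD pin clock */
	int best = 0, ri = 0, rq = 1, i, q;
	int old_delta = clock;

	for (i = 0; i < 8; i++)
		for (q = 1; q < 17; q++) {
			int f = fesdhc / sdclkfs[i] / q;
			int delta = abs(clock - f);

			if (delta < old_delta) {
				old_delta = delta;
				ri = i;
				rq = q;
				best = f;
			}
		}

	/* prints SDCLKFS=2 DVS=2 -> 24000000 Hz */
	printf("SDCLKFS=%d DVS=%d -> %d Hz\n", sdclkfs[ri], rq, best);
	return 0;
}
```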
+ */ + SDHCI_QUIRK2_HOST_NO_CMD23, +}; + +static int esdhc_mcf_plat_init(struct sdhci_host *host, + struct pltfm_mcf_data *mcf_data) +{ + struct mcf_esdhc_platform_data *plat_data; + + if (!host->mmc->parent->platform_data) { + dev_err(mmc_dev(host->mmc), "no platform data!\n"); + return -EINVAL; + } + + plat_data = (struct mcf_esdhc_platform_data *) + host->mmc->parent->platform_data; + + /* Card_detect */ + switch (plat_data->cd_type) { + default: + case ESDHC_CD_CONTROLLER: + /* We have a working card_detect back */ + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + break; + case ESDHC_CD_PERMANENT: + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + break; + case ESDHC_CD_NONE: + break; + } + + switch (plat_data->max_bus_width) { + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + case 1: + default: + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + break; + } + + return 0; +} + +static int sdhci_esdhc_mcf_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct pltfm_mcf_data *mcf_data; + int err; + + host = sdhci_pltfm_init(pdev, &sdhci_esdhc_mcf_pdata, + sizeof(*mcf_data)); + + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + mcf_data = sdhci_pltfm_priv(pltfm_host); + + host->sdma_boundary = 0; + + host->flags |= SDHCI_AUTO_CMD12; + + mcf_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(mcf_data->clk_ipg)) { + err = PTR_ERR(mcf_data->clk_ipg); + goto err_exit; + } + + mcf_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(mcf_data->clk_ahb)) { + err = PTR_ERR(mcf_data->clk_ahb); + goto err_exit; + } + + mcf_data->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(mcf_data->clk_per)) { + err = PTR_ERR(mcf_data->clk_per); + goto err_exit; + } + + pltfm_host->clk = mcf_data->clk_per; + pltfm_host->clock = clk_get_rate(pltfm_host->clk); + err = clk_prepare_enable(mcf_data->clk_per); + if (err) + goto err_exit; + + err = clk_prepare_enable(mcf_data->clk_ipg); + if (err) + goto unprep_per; + + err = clk_prepare_enable(mcf_data->clk_ahb); + if (err) + goto unprep_ipg; + + err = esdhc_mcf_plat_init(host, mcf_data); + if (err) + goto unprep_ahb; + + err = sdhci_setup_host(host); + if (err) + goto unprep_ahb; + + if (!host->bounce_buffer) { + dev_err(&pdev->dev, "bounce buffer not allocated"); + err = -ENOMEM; + goto cleanup; + } + + err = __sdhci_add_host(host); + if (err) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); +unprep_ahb: + clk_disable_unprepare(mcf_data->clk_ahb); +unprep_ipg: + clk_disable_unprepare(mcf_data->clk_ipg); +unprep_per: + clk_disable_unprepare(mcf_data->clk_per); +err_exit: + sdhci_pltfm_free(pdev); + + return err; +} + +static int sdhci_esdhc_mcf_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host); + + sdhci_remove_host(host, 0); + + clk_disable_unprepare(mcf_data->clk_ipg); + clk_disable_unprepare(mcf_data->clk_ahb); + clk_disable_unprepare(mcf_data->clk_per); + + sdhci_pltfm_free(pdev); + + return 0; +} + +static struct platform_driver sdhci_esdhc_mcf_driver = { + .driver = { + .name = "sdhci-esdhc-mcf", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .probe = sdhci_esdhc_mcf_probe, + .remove = sdhci_esdhc_mcf_remove, +}; + +module_platform_driver(sdhci_esdhc_mcf_driver); + +MODULE_DESCRIPTION("SDHCI driver for Freescale ColdFire eSDHC"); 
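The probe above acquires its three clocks in order and unwinds them in reverse through goto labels on failure. A compilable sketch of that ladder, with stubs in place of the clk API:

```c
#include <stdio.h>

/* Sketch of the goto-based unwind ladder in the probe function above:
 * acquire in order, release in exact reverse order on failure. enable()
 * and disable() stand in for clk_prepare_enable() and its counterpart. */
static int enable(const char *name, int should_fail)
{
	printf("enable  %s\n", name);
	return should_fail ? -1 : 0;
}

static void disable(const char *name)
{
	printf("disable %s\n", name);
}

static int probe(void)
{
	int err;

	err = enable("per", 0);
	if (err)
		goto err_exit;
	err = enable("ipg", 0);
	if (err)
		goto unprep_per;
	err = enable("ahb", 1); /* simulate a failure on the third clock */
	if (err)
		goto unprep_ipg;

	return 0;

unprep_ipg:
	disable("ipg");
unprep_per:
	disable("per");
err_exit:
	return err;
}

int main(void)
{
	printf("probe -> %d\n", probe());
	return 0;
}
```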
+MODULE_AUTHOR("Angelo Dureghello "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h new file mode 100644 index 000000000..6de02f09c --- /dev/null +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Freescale eSDHC controller driver generics for OF and pltfm. + * + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. + * Copyright (c) 2010 Pengutronix e.K. + * Copyright 2020 NXP + * Author: Wolfram Sang + */ + +#ifndef _DRIVERS_MMC_SDHCI_ESDHC_H +#define _DRIVERS_MMC_SDHCI_ESDHC_H + +/* + * Ops and quirks for the Freescale eSDHC controller. + */ + +#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ + SDHCI_QUIRK_32BIT_DMA_ADDR | \ + SDHCI_QUIRK_NO_BUSY_IRQ | \ + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ + SDHCI_QUIRK_PIO_NEEDS_DELAY | \ + SDHCI_QUIRK_NO_HISPD_BIT) + +/* pltfm-specific */ +#define ESDHC_HOST_CONTROL_LE 0x20 + +/* + * eSDHC register definition + */ + +/* Present State Register */ +#define ESDHC_PRSSTAT 0x24 +#define ESDHC_CLOCK_GATE_OFF 0x00000080 +#define ESDHC_CLOCK_STABLE 0x00000008 + +/* Protocol Control Register */ +#define ESDHC_PROCTL 0x28 +#define ESDHC_VOLT_SEL 0x00000400 +#define ESDHC_CTRL_4BITBUS (0x1 << 1) +#define ESDHC_CTRL_8BITBUS (0x2 << 1) +#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) +#define ESDHC_HOST_CONTROL_RES 0x01 + +/* System Control Register */ +#define ESDHC_SYSTEM_CONTROL 0x2c +#define ESDHC_CLOCK_MASK 0x0000fff0 +#define ESDHC_PREDIV_SHIFT 8 +#define ESDHC_DIVIDER_SHIFT 4 +#define ESDHC_CLOCK_SDCLKEN 0x00000008 +#define ESDHC_CLOCK_PEREN 0x00000004 +#define ESDHC_CLOCK_HCKEN 0x00000002 +#define ESDHC_CLOCK_IPGEN 0x00000001 + +/* System Control 2 Register */ +#define ESDHC_SYSTEM_CONTROL_2 0x3c +#define ESDHC_SMPCLKSEL 0x00800000 +#define ESDHC_EXTN 0x00400000 + +/* Host Controller Capabilities Register 2 */ +#define ESDHC_CAPABILITIES_1 0x114 + +/* Tuning Block Control Register */ +#define ESDHC_TBCTL 0x120 +#define ESDHC_HS400_WNDW_ADJUST 0x00000040 +#define ESDHC_HS400_MODE 0x00000010 +#define ESDHC_TB_EN 0x00000004 +#define ESDHC_TB_MODE_MASK 0x00000003 +#define ESDHC_TB_MODE_SW 0x00000003 +#define ESDHC_TB_MODE_3 0x00000002 + +#define ESDHC_TBSTAT 0x124 + +#define ESDHC_TBPTR 0x128 +#define ESDHC_WNDW_STRT_PTR_SHIFT 8 +#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8) +#define ESDHC_WNDW_END_PTR_MASK 0x7f + +/* SD Clock Control Register */ +#define ESDHC_SDCLKCTL 0x144 +#define ESDHC_LPBK_CLK_SEL 0x80000000 +#define ESDHC_CMD_CLK_CTL 0x00008000 + +/* SD Timing Control Register */ +#define ESDHC_SDTIMNGCTL 0x148 +#define ESDHC_FLW_CTL_BG 0x00008000 + +/* DLL Config 0 Register */ +#define ESDHC_DLLCFG0 0x160 +#define ESDHC_DLL_ENABLE 0x80000000 +#define ESDHC_DLL_RESET 0x40000000 +#define ESDHC_DLL_FREQ_SEL 0x08000000 + +/* DLL Config 1 Register */ +#define ESDHC_DLLCFG1 0x164 +#define ESDHC_DLL_PD_PULSE_STRETCH_SEL 0x80000000 + +/* DLL Status 0 Register */ +#define ESDHC_DLLSTAT0 0x170 +#define ESDHC_DLL_STS_SLV_LOCK 0x08000000 + +/* Control Register for DMA transfer */ +#define ESDHC_DMA_SYSCTL 0x40c +#define ESDHC_PERIPHERAL_CLK_SEL 0x00080000 +#define ESDHC_FLUSH_ASYNC_FIFO 0x00040000 +#define ESDHC_DMA_SNOOP 0x00000040 + +#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c new file mode 100644 index 000000000..b9eb2ec61 --- /dev/null +++ b/drivers/mmc/host/sdhci-iproc.c @@ -0,0 +1,420 @@ +/* + * Copyright (C) 
2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * iProc SDHCI platform driver + */ + +#include +#include +#include +#include +#include +#include +#include "sdhci-pltfm.h" + +struct sdhci_iproc_data { + const struct sdhci_pltfm_data *pdata; + u32 caps; + u32 caps1; + u32 mmc_caps; +}; + +struct sdhci_iproc_host { + const struct sdhci_iproc_data *data; + u32 shadow_cmd; + u32 shadow_blk; + bool is_cmd_shadowed; + bool is_blk_shadowed; +}; + +#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) + +static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg) +{ + u32 val = readl(host->ioaddr + reg); + + pr_debug("%s: readl [0x%02x] 0x%08x\n", + mmc_hostname(host->mmc), reg, val); + return val; +} + +static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host); + u32 val; + u16 word; + + if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) { + /* Get the saved transfer mode */ + val = iproc_host->shadow_cmd; + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && + iproc_host->is_blk_shadowed) { + /* Get the saved block info */ + val = iproc_host->shadow_blk; + } else { + val = sdhci_iproc_readl(host, (reg & ~3)); + } + word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; + return word; +} + +static u8 sdhci_iproc_readb(struct sdhci_host *host, int reg) +{ + u32 val = sdhci_iproc_readl(host, (reg & ~3)); + u8 byte = val >> REG_OFFSET_IN_BITS(reg) & 0xff; + return byte; +} + +static inline void sdhci_iproc_writel(struct sdhci_host *host, u32 val, int reg) +{ + pr_debug("%s: writel [0x%02x] 0x%08x\n", + mmc_hostname(host->mmc), reg, val); + + writel(val, host->ioaddr + reg); + + if (host->clock <= 400000) { + /* Round up to micro-second four SD clock delay */ + if (host->clock) + udelay((4 * 1000000 + host->clock - 1) / host->clock); + else + udelay(10); + } +} + +/* + * The Arasan has a bugette whereby it may lose the content of successive + * writes to the same register that are within two SD-card clock cycles of + * each other (a clock domain crossing problem). The data + * register does not have this problem, which is just as well - otherwise we'd + * have to nobble the DMA engine too. + * + * This wouldn't be a problem with the code except that we can only write the + * controller with 32-bit writes. So two different 16-bit registers are + * written back to back creates the problem. + * + * In reality, this only happens when SDHCI_BLOCK_SIZE and SDHCI_BLOCK_COUNT + * are written followed by SDHCI_TRANSFER_MODE and SDHCI_COMMAND. + * The BLOCK_SIZE and BLOCK_COUNT are meaningless until a command issued so + * the work around can be further optimized. We can keep shadow values of + * BLOCK_SIZE, BLOCK_COUNT, and TRANSFER_MODE until a COMMAND is issued. + * Then, write the BLOCK_SIZE+BLOCK_COUNT in a single 32-bit write followed + * by the TRANSFER+COMMAND in another 32-bit write. 
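
All three sub-word readers above reduce to one aligned 32-bit read plus a shift: REG_OFFSET_IN_BITS() is just (reg & 3) * 8 written with a mask. A toy model of the 16-bit case, self-contained:

#include <stdint.h>

#define REG_OFFSET_IN_BITS(reg)	(((reg) << 3) & 0x18)

/* serve a 16-bit register read from its containing 32-bit word, e.g.
 * offset 0x06 (SDHCI_BLOCK_COUNT) comes from bits 31:16 of word 0x04 */
static uint16_t read_word(uint32_t word32, int reg)
{
	return (word32 >> REG_OFFSET_IN_BITS(reg)) & 0xffff;
}
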
+ */ +static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host); + u32 word_shift = REG_OFFSET_IN_BITS(reg); + u32 mask = 0xffff << word_shift; + u32 oldval, newval; + + if (reg == SDHCI_COMMAND) { + /* Write the block now as we are issuing a command */ + if (iproc_host->is_blk_shadowed) { + sdhci_iproc_writel(host, iproc_host->shadow_blk, + SDHCI_BLOCK_SIZE); + iproc_host->is_blk_shadowed = false; + } + oldval = iproc_host->shadow_cmd; + iproc_host->is_cmd_shadowed = false; + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && + iproc_host->is_blk_shadowed) { + /* Block size and count are stored in shadow reg */ + oldval = iproc_host->shadow_blk; + } else { + /* Read reg, all other registers are not shadowed */ + oldval = sdhci_iproc_readl(host, (reg & ~3)); + } + newval = (oldval & ~mask) | (val << word_shift); + + if (reg == SDHCI_TRANSFER_MODE) { + /* Save the transfer mode until the command is issued */ + iproc_host->shadow_cmd = newval; + iproc_host->is_cmd_shadowed = true; + } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { + /* Save the block info until the command is issued */ + iproc_host->shadow_blk = newval; + iproc_host->is_blk_shadowed = true; + } else { + /* Command or other regular 32-bit write */ + sdhci_iproc_writel(host, newval, reg & ~3); + } +} + +static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg) +{ + u32 oldval = sdhci_iproc_readl(host, (reg & ~3)); + u32 byte_shift = REG_OFFSET_IN_BITS(reg); + u32 mask = 0xff << byte_shift; + u32 newval = (oldval & ~mask) | (val << byte_shift); + + sdhci_iproc_writel(host, newval, reg & ~3); +} + +static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + if (pltfm_host->clk) + return sdhci_pltfm_clk_get_max_clock(host); + else + return pltfm_host->clock; +} + +/* + * There is a known bug on BCM2711's SDHCI core integration where the + * controller will hang when the difference between the core clock and the bus + * clock is too great. Specifically this can be reproduced under the following + * conditions: + * + * - No SD card plugged in, polling thread is running, probing cards at + * 100 kHz. + * - BCM2711's core clock configured at 500MHz or more + * + * So we set 200kHz as the minimum clock frequency available for that SoC. 
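
Here is a condensed model of the state machine that sdhci_iproc_writew() implements above: 16-bit writes to TRANSFER_MODE and BLOCK_SIZE/COUNT are parked in two 32-bit shadow words, and only the COMMAND write flushes them to the hardware as two whole-register writes, which sidesteps the back-to-back write erratum. The struct and function names are invented; non-shadowed registers and the read-back path are omitted:

#include <stdint.h>
#include <stdbool.h>

#define SDHCI_BLOCK_SIZE	0x04
#define SDHCI_BLOCK_COUNT	0x06
#define SDHCI_TRANSFER_MODE	0x0c
#define SDHCI_COMMAND		0x0e

struct shadow {
	uint32_t cmd, blk;	/* shadow words at 0x0c and 0x04 */
	bool blk_valid;
};

extern void mmio_write32(uint32_t val, int reg);	/* stand-in */

static void shadowed_write16(struct shadow *s, uint16_t val, int reg)
{
	int shift = (reg & 3) << 3;
	uint32_t *word = ((reg & ~3) == SDHCI_BLOCK_SIZE) ? &s->blk : &s->cmd;

	*word = (*word & ~(0xffffu << shift)) | ((uint32_t)val << shift);

	if (reg == SDHCI_COMMAND) {
		/* issue time: block info first, then mode + command */
		if (s->blk_valid) {
			mmio_write32(s->blk, SDHCI_BLOCK_SIZE);
			s->blk_valid = false;
		}
		mmio_write32(s->cmd, SDHCI_TRANSFER_MODE);
	} else if (reg != SDHCI_TRANSFER_MODE) {
		s->blk_valid = true;	/* BLOCK_SIZE or BLOCK_COUNT */
	}
}
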
+ */ +static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host) +{ + return 200000; +} + +static const struct sdhci_ops sdhci_iproc_ops = { + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_iproc_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_ops sdhci_iproc_32only_ops = { + .read_l = sdhci_iproc_readl, + .read_w = sdhci_iproc_readw, + .read_b = sdhci_iproc_readb, + .write_l = sdhci_iproc_writel, + .write_w = sdhci_iproc_writew, + .write_b = sdhci_iproc_writeb, + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_iproc_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_NO_HISPD_BIT, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .ops = &sdhci_iproc_32only_ops, +}; + +static const struct sdhci_iproc_data iproc_cygnus_data = { + .pdata = &sdhci_iproc_cygnus_pltfm_data, + .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT) + & SDHCI_MAX_BLOCK_MASK) | + SDHCI_CAN_VDD_330 | + SDHCI_CAN_VDD_180 | + SDHCI_CAN_DO_SUSPEND | + SDHCI_CAN_DO_HISPD | + SDHCI_CAN_DO_ADMA2 | + SDHCI_CAN_DO_SDMA, + .caps1 = SDHCI_DRIVER_TYPE_C | + SDHCI_DRIVER_TYPE_D | + SDHCI_SUPPORT_DDR50, + .mmc_caps = MMC_CAP_1_8V_DDR, +}; + +static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 | + SDHCI_QUIRK_NO_HISPD_BIT, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, + .ops = &sdhci_iproc_ops, +}; + +static const struct sdhci_iproc_data iproc_data = { + .pdata = &sdhci_iproc_pltfm_data, + .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT) + & SDHCI_MAX_BLOCK_MASK) | + SDHCI_CAN_VDD_330 | + SDHCI_CAN_VDD_180 | + SDHCI_CAN_DO_SUSPEND | + SDHCI_CAN_DO_HISPD | + SDHCI_CAN_DO_ADMA2 | + SDHCI_CAN_DO_SDMA, + .caps1 = SDHCI_DRIVER_TYPE_C | + SDHCI_DRIVER_TYPE_D | + SDHCI_SUPPORT_DDR50, +}; + +static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_MISSING_CAPS | + SDHCI_QUIRK_NO_HISPD_BIT, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &sdhci_iproc_32only_ops, +}; + +static const struct sdhci_iproc_data bcm2835_data = { + .pdata = &sdhci_bcm2835_pltfm_data, + .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT) + & SDHCI_MAX_BLOCK_MASK) | + SDHCI_CAN_VDD_330 | + SDHCI_CAN_DO_HISPD, + .caps1 = SDHCI_DRIVER_TYPE_A | + SDHCI_DRIVER_TYPE_C, + .mmc_caps = 0x00000000, +}; + +static const struct sdhci_ops sdhci_iproc_bcm2711_ops = { + .read_l = sdhci_iproc_readl, + .read_w = sdhci_iproc_readw, + .read_b = sdhci_iproc_readb, + .write_l = sdhci_iproc_writel, + .write_w = sdhci_iproc_writew, + .write_b = sdhci_iproc_writeb, + .set_clock = sdhci_set_clock, + .set_power = sdhci_set_power_and_bus_voltage, + .get_max_clock = sdhci_iproc_get_max_clock, + .get_min_clock = sdhci_iproc_bcm2711_get_min_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = { + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .ops = &sdhci_iproc_bcm2711_ops, +}; + +static const struct sdhci_iproc_data bcm2711_data = { + .pdata = &sdhci_bcm2711_pltfm_data, + .mmc_caps = MMC_CAP_3_3V_DDR, 
+}; + +static const struct of_device_id sdhci_iproc_of_match[] = { + { .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data }, + { .compatible = "brcm,bcm2711-emmc2", .data = &bcm2711_data }, + { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data}, + { .compatible = "brcm,sdhci-iproc", .data = &iproc_data }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match); + +#ifdef CONFIG_ACPI +/* + * This is a duplicate of bcm2835_(pltfrm_)data without caps quirks + * which are provided by the ACPI table. + */ +static const struct sdhci_pltfm_data sdhci_bcm_arasan_data = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_NO_HISPD_BIT, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &sdhci_iproc_32only_ops, +}; + +static const struct sdhci_iproc_data bcm_arasan_data = { + .pdata = &sdhci_bcm_arasan_data, +}; + +static const struct acpi_device_id sdhci_iproc_acpi_ids[] = { + { .id = "BRCM5871", .driver_data = (kernel_ulong_t)&iproc_cygnus_data }, + { .id = "BRCM5872", .driver_data = (kernel_ulong_t)&iproc_data }, + { .id = "BCM2847", .driver_data = (kernel_ulong_t)&bcm_arasan_data }, + { .id = "BRCME88C", .driver_data = (kernel_ulong_t)&bcm2711_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, sdhci_iproc_acpi_ids); +#endif + +static int sdhci_iproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct sdhci_iproc_data *iproc_data = NULL; + struct sdhci_host *host; + struct sdhci_iproc_host *iproc_host; + struct sdhci_pltfm_host *pltfm_host; + int ret; + + iproc_data = device_get_match_data(dev); + if (!iproc_data) + return -ENODEV; + + host = sdhci_pltfm_init(pdev, iproc_data->pdata, sizeof(*iproc_host)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + iproc_host = sdhci_pltfm_priv(pltfm_host); + + iproc_host->data = iproc_data; + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + + sdhci_get_property(pdev); + + host->mmc->caps |= iproc_host->data->mmc_caps; + + if (dev->of_node) { + pltfm_host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + goto err; + } + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(dev, "failed to enable host clk\n"); + goto err; + } + } + + if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) { + host->caps = iproc_host->data->caps; + host->caps1 = iproc_host->data->caps1; + } + + ret = sdhci_add_host(host); + if (ret) + goto err_clk; + + return 0; + +err_clk: + if (dev->of_node) + clk_disable_unprepare(pltfm_host->clk); +err: + sdhci_pltfm_free(pdev); + return ret; +} + +static struct platform_driver sdhci_iproc_driver = { + .driver = { + .name = "sdhci-iproc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_iproc_of_match, + .acpi_match_table = ACPI_PTR(sdhci_iproc_acpi_ids), + .pm = &sdhci_pltfm_pmops, + }, + .probe = sdhci_iproc_probe, + .remove = sdhci_pltfm_unregister, +}; +module_platform_driver(sdhci_iproc_driver); + +MODULE_AUTHOR("Broadcom"); +MODULE_DESCRIPTION("IPROC SDHCI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c new file mode 100644 index 000000000..148b37ac6 --- /dev/null +++ b/drivers/mmc/host/sdhci-milbeaut.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd + * Vincent Yang + * Copyright (C) 2015 Linaro Ltd Andy Green + * Copyright (C) 2019 
Socionext Inc. + * Takao Orito + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" +#include "sdhci_f_sdh30.h" + +/* milbeaut bridge controller register */ +#define MLB_SOFT_RESET 0x0200 +#define MLB_SOFT_RESET_RSTX BIT(0) + +#define MLB_WP_CD_LED_SET 0x0210 +#define MLB_WP_CD_LED_SET_LED_INV BIT(2) + +#define MLB_CR_SET 0x0220 +#define MLB_CR_SET_CR_TOCLKUNIT BIT(24) +#define MLB_CR_SET_CR_TOCLKFREQ_SFT (16) +#define MLB_CR_SET_CR_TOCLKFREQ_MASK (0x3F << MLB_CR_SET_CR_TOCLKFREQ_SFT) +#define MLB_CR_SET_CR_BCLKFREQ_SFT (8) +#define MLB_CR_SET_CR_BCLKFREQ_MASK (0xFF << MLB_CR_SET_CR_BCLKFREQ_SFT) +#define MLB_CR_SET_CR_RTUNTIMER_SFT (4) +#define MLB_CR_SET_CR_RTUNTIMER_MASK (0xF << MLB_CR_SET_CR_RTUNTIMER_SFT) + +#define MLB_SD_TOCLK_I_DIV 16 +#define MLB_TOCLKFREQ_UNIT_THRES 16000000 +#define MLB_CAL_TOCLKFREQ_MHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000000) +#define MLB_CAL_TOCLKFREQ_KHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000) +#define MLB_TOCLKFREQ_MAX 63 +#define MLB_TOCLKFREQ_MIN 1 + +#define MLB_SD_BCLK_I_DIV 4 +#define MLB_CAL_BCLKFREQ(rate) (rate / MLB_SD_BCLK_I_DIV / 1000000) +#define MLB_BCLKFREQ_MAX 255 +#define MLB_BCLKFREQ_MIN 1 + +#define MLB_CDR_SET 0x0230 +#define MLB_CDR_SET_CLK2POW16 3 + +struct f_sdhost_priv { + struct clk *clk_iface; + struct clk *clk; + struct device *dev; + bool enable_cmd_dat_delay; +}; + +static void sdhci_milbeaut_soft_voltage_switch(struct sdhci_host *host) +{ + u32 ctrl = 0; + + usleep_range(2500, 3000); + ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2); + ctrl |= F_SDH30_CRES_O_DN; + sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); + ctrl |= F_SDH30_MSEL_O_1_8; + sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); + + ctrl &= ~F_SDH30_CRES_O_DN; + sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); + usleep_range(2500, 3000); + + ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING); + ctrl |= F_SDH30_CMD_CHK_DIS; + sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING); +} + +static unsigned int sdhci_milbeaut_get_min_clock(struct sdhci_host *host) +{ + return F_SDH30_MIN_CLOCK; +} + +static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask) +{ + struct f_sdhost_priv *priv = sdhci_priv(host); + u16 clk; + u32 ctl; + ktime_t timeout; + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk = (clk & ~SDHCI_CLOCK_CARD_EN) | SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + sdhci_reset(host, mask); + + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + timeout = ktime_add_ms(ktime_get(), 10); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + return; + } + udelay(10); + } + + if (priv->enable_cmd_dat_delay) { + ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL); + ctl |= F_SDH30_CMD_DAT_DELAY; + sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL); + } +} + +static const struct sdhci_ops sdhci_milbeaut_ops = { + .voltage_switch = sdhci_milbeaut_soft_voltage_switch, + .get_min_clock = sdhci_milbeaut_get_min_clock, + .reset = sdhci_milbeaut_reset, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_power = sdhci_set_power_and_bus_voltage, +}; + +static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host, + int reset_flag) +{ + if (reset_flag) + 
sdhci_writel(host, 0, MLB_SOFT_RESET); + else + sdhci_writel(host, MLB_SOFT_RESET_RSTX, MLB_SOFT_RESET); +} + +static void sdhci_milbeaut_bridge_init(struct sdhci_host *host, + int rate) +{ + u32 val, clk; + + /* IO_SDIO_CR_SET should be set while reset */ + val = sdhci_readl(host, MLB_CR_SET); + val &= ~(MLB_CR_SET_CR_TOCLKFREQ_MASK | MLB_CR_SET_CR_TOCLKUNIT | + MLB_CR_SET_CR_BCLKFREQ_MASK); + if (rate >= MLB_TOCLKFREQ_UNIT_THRES) { + clk = MLB_CAL_TOCLKFREQ_MHZ(rate); + clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk); + val |= MLB_CR_SET_CR_TOCLKUNIT | + (clk << MLB_CR_SET_CR_TOCLKFREQ_SFT); + } else { + clk = MLB_CAL_TOCLKFREQ_KHZ(rate); + clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk); + clk = max_t(u32, MLB_TOCLKFREQ_MIN, clk); + val |= clk << MLB_CR_SET_CR_TOCLKFREQ_SFT; + } + + clk = MLB_CAL_BCLKFREQ(rate); + clk = min_t(u32, MLB_BCLKFREQ_MAX, clk); + clk = max_t(u32, MLB_BCLKFREQ_MIN, clk); + val |= clk << MLB_CR_SET_CR_BCLKFREQ_SFT; + val &= ~MLB_CR_SET_CR_RTUNTIMER_MASK; + sdhci_writel(host, val, MLB_CR_SET); + + sdhci_writel(host, MLB_CDR_SET_CLK2POW16, MLB_CDR_SET); + + sdhci_writel(host, MLB_WP_CD_LED_SET_LED_INV, MLB_WP_CD_LED_SET); +} + +static void sdhci_milbeaut_vendor_init(struct sdhci_host *host) +{ + struct f_sdhost_priv *priv = sdhci_priv(host); + u32 ctl; + + ctl = sdhci_readl(host, F_SDH30_IO_CONTROL2); + ctl |= F_SDH30_CRES_O_DN; + sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2); + ctl &= ~F_SDH30_MSEL_O_1_8; + sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2); + ctl &= ~F_SDH30_CRES_O_DN; + sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2); + + ctl = sdhci_readw(host, F_SDH30_AHB_CONFIG); + ctl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 | + F_SDH30_AHB_INCR_4; + ctl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN); + sdhci_writew(host, ctl, F_SDH30_AHB_CONFIG); + + if (priv->enable_cmd_dat_delay) { + ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL); + ctl |= F_SDH30_CMD_DAT_DELAY; + sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL); + } +} + +static const struct of_device_id mlb_dt_ids[] = { + { + .compatible = "socionext,milbeaut-m10v-sdhci-3.0", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mlb_dt_ids); + +static void sdhci_milbeaut_init(struct sdhci_host *host) +{ + struct f_sdhost_priv *priv = sdhci_priv(host); + int rate = clk_get_rate(priv->clk); + u16 ctl; + + sdhci_milbeaut_bridge_reset(host, 0); + + ctl = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + ctl &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN); + sdhci_writew(host, ctl, SDHCI_CLOCK_CONTROL); + + sdhci_milbeaut_bridge_reset(host, 1); + + sdhci_milbeaut_bridge_init(host, rate); + sdhci_milbeaut_bridge_reset(host, 0); + + sdhci_milbeaut_vendor_init(host); +} + +static int sdhci_milbeaut_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct device *dev = &pdev->dev; + int irq, ret = 0; + struct f_sdhost_priv *priv; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + priv = sdhci_priv(host); + priv->dev = dev; + + host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_INVERTED_WRITE_PROTECT | + SDHCI_QUIRK_CLOCK_BEFORE_RESET | + SDHCI_QUIRK_DELAY_AFTER_POWER; + host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | + SDHCI_QUIRK2_TUNING_WORK_AROUND | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + + priv->enable_cmd_dat_delay = device_property_read_bool(dev, + "fujitsu,cmd-dat-delay-select"); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + + platform_set_drvdata(pdev, host); 
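
The MLB_CR_SET timeout-clock field written by sdhci_milbeaut_bridge_init() above holds the rate either in MHz (with the unit bit set) or in kHz, after the fixed /16 pre-divider and a clamp to the 6-bit field. Just that encoding, as a self-contained sketch with the driver's constants inlined:

#include <stdint.h>

#define TOCLK_DIV	16		/* MLB_SD_TOCLK_I_DIV */
#define TOCLK_UNIT_MHZ	(1u << 24)	/* MLB_CR_SET_CR_TOCLKUNIT */
#define TOCLK_SHIFT	16
#define TOCLK_MAX	63
#define TOCLK_MIN	1
#define UNIT_THRESHOLD	16000000	/* MLB_TOCLKFREQ_UNIT_THRES */

static uint32_t encode_toclk(unsigned long rate)
{
	unsigned long clk;
	uint32_t val = 0;

	if (rate >= UNIT_THRESHOLD) {
		clk = rate / TOCLK_DIV / 1000000;	/* whole MHz */
		if (clk > TOCLK_MAX)
			clk = TOCLK_MAX;
		val |= TOCLK_UNIT_MHZ;
	} else {
		clk = rate / TOCLK_DIV / 1000;		/* whole kHz */
		if (clk > TOCLK_MAX)
			clk = TOCLK_MAX;
		if (clk < TOCLK_MIN)
			clk = TOCLK_MIN;
	}
	return val | ((uint32_t)clk << TOCLK_SHIFT);
}
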
+ + host->hw_name = "f_sdh30"; + host->ops = &sdhci_milbeaut_ops; + host->irq = irq; + + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->ioaddr)) { + ret = PTR_ERR(host->ioaddr); + goto err; + } + + if (dev_of_node(dev)) { + sdhci_get_of_property(pdev); + + priv->clk_iface = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(priv->clk_iface)) { + ret = PTR_ERR(priv->clk_iface); + goto err; + } + + ret = clk_prepare_enable(priv->clk_iface); + if (ret) + goto err; + + priv->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto err_clk; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + goto err_clk; + } + + sdhci_milbeaut_init(host); + + ret = sdhci_add_host(host); + if (ret) + goto err_add_host; + + return 0; + +err_add_host: + clk_disable_unprepare(priv->clk); +err_clk: + clk_disable_unprepare(priv->clk_iface); +err: + sdhci_free_host(host); + return ret; +} + +static int sdhci_milbeaut_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct f_sdhost_priv *priv = sdhci_priv(host); + + sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == + 0xffffffff); + + clk_disable_unprepare(priv->clk_iface); + clk_disable_unprepare(priv->clk); + + sdhci_free_host(host); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver sdhci_milbeaut_driver = { + .driver = { + .name = "sdhci-milbeaut", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(mlb_dt_ids), + }, + .probe = sdhci_milbeaut_probe, + .remove = sdhci_milbeaut_remove, +}; + +module_platform_driver(sdhci_milbeaut_driver); + +MODULE_DESCRIPTION("MILBEAUT SD Card Controller driver"); +MODULE_AUTHOR("Takao Orito "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci-milbeaut"); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c new file mode 100644 index 000000000..3366956a4 --- /dev/null +++ b/drivers/mmc/host/sdhci-msm.c @@ -0,0 +1,2628 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver + * + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 
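
A side note on the remove path just above: the second argument to sdhci_remove_host() is a "dead" flag. An MMIO register that reads back as all ones usually means the device has dropped off the bus (unpowered, unclocked, or unplugged), so the core skips handshaking with it:

	/* same idiom as in sdhci_milbeaut_remove() above */
	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);

	sdhci_remove_host(host, dead);
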
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" +#include "cqhci.h" + +#define CORE_MCI_VERSION 0x50 +#define CORE_VERSION_MAJOR_SHIFT 28 +#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT) +#define CORE_VERSION_MINOR_MASK 0xff + +#define CORE_MCI_GENERICS 0x70 +#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29) + +#define HC_MODE_EN 0x1 +#define CORE_POWER 0x0 +#define CORE_SW_RST BIT(7) +#define FF_CLK_SW_RST_DIS BIT(13) + +#define CORE_PWRCTL_BUS_OFF BIT(0) +#define CORE_PWRCTL_BUS_ON BIT(1) +#define CORE_PWRCTL_IO_LOW BIT(2) +#define CORE_PWRCTL_IO_HIGH BIT(3) +#define CORE_PWRCTL_BUS_SUCCESS BIT(0) +#define CORE_PWRCTL_BUS_FAIL BIT(1) +#define CORE_PWRCTL_IO_SUCCESS BIT(2) +#define CORE_PWRCTL_IO_FAIL BIT(3) +#define REQ_BUS_OFF BIT(0) +#define REQ_BUS_ON BIT(1) +#define REQ_IO_LOW BIT(2) +#define REQ_IO_HIGH BIT(3) +#define INT_MASK 0xf +#define MAX_PHASES 16 +#define CORE_DLL_LOCK BIT(7) +#define CORE_DDR_DLL_LOCK BIT(11) +#define CORE_DLL_EN BIT(16) +#define CORE_CDR_EN BIT(17) +#define CORE_CK_OUT_EN BIT(18) +#define CORE_CDR_EXT_EN BIT(19) +#define CORE_DLL_PDN BIT(29) +#define CORE_DLL_RST BIT(30) +#define CORE_CMD_DAT_TRACK_SEL BIT(0) + +#define CORE_DDR_CAL_EN BIT(0) +#define CORE_FLL_CYCLE_CNT BIT(18) +#define CORE_DLL_CLOCK_DISABLE BIT(21) + +#define DLL_USR_CTL_POR_VAL 0x10800 +#define ENABLE_DLL_LOCK_STATUS BIT(26) +#define FINE_TUNE_MODE_EN BIT(27) +#define BIAS_OK_SIGNAL BIT(29) + +#define DLL_CONFIG_3_LOW_FREQ_VAL 0x08 +#define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10 + +#define CORE_VENDOR_SPEC_POR_VAL 0xa9c +#define CORE_CLK_PWRSAVE BIT(1) +#define CORE_HC_MCLK_SEL_DFLT (2 << 8) +#define CORE_HC_MCLK_SEL_HS400 (3 << 8) +#define CORE_HC_MCLK_SEL_MASK (3 << 8) +#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15) +#define CORE_IO_PAD_PWR_SWITCH BIT(16) +#define CORE_HC_SELECT_IN_EN BIT(18) +#define CORE_HC_SELECT_IN_HS400 (6 << 19) +#define CORE_HC_SELECT_IN_MASK (7 << 19) + +#define CORE_3_0V_SUPPORT BIT(25) +#define CORE_1_8V_SUPPORT BIT(26) +#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT) + +#define CORE_CSR_CDC_CTLR_CFG0 0x130 +#define CORE_SW_TRIG_FULL_CALIB BIT(16) +#define CORE_HW_AUTOCAL_ENA BIT(17) + +#define CORE_CSR_CDC_CTLR_CFG1 0x134 +#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138 +#define CORE_TIMER_ENA BIT(16) + +#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C +#define CORE_CSR_CDC_REFCOUNT_CFG 0x140 +#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144 +#define CORE_CDC_OFFSET_CFG 0x14C +#define CORE_CSR_CDC_DELAY_CFG 0x150 +#define CORE_CDC_SLAVE_DDA_CFG 0x160 +#define CORE_CSR_CDC_STATUS0 0x164 +#define CORE_CALIBRATION_DONE BIT(0) + +#define CORE_CDC_ERROR_CODE_MASK 0x7000000 + +#define CORE_CSR_CDC_GEN_CFG 0x178 +#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0) +#define CORE_CDC_SWITCH_RC_EN BIT(1) + +#define CORE_CDC_T4_DLY_SEL BIT(0) +#define CORE_CMDIN_RCLK_EN BIT(1) +#define CORE_START_CDC_TRAFFIC BIT(6) + +#define CORE_PWRSAVE_DLL BIT(3) + +#define DDR_CONFIG_POR_VAL 0x80040873 + + +#define INVALID_TUNING_PHASE -1 +#define SDHCI_MSM_MIN_CLOCK 400000 +#define CORE_FREQ_100MHZ (100 * 1000 * 1000) + +#define CDR_SELEXT_SHIFT 20 +#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT) +#define CMUX_SHIFT_PHASE_SHIFT 24 +#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT) + +#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50 + +/* Timeout value to avoid infinite waiting for pwr_irq */ +#define MSM_PWR_IRQ_TIMEOUT_MS 5000 + +/* Max load for eMMC Vdd-io supply */ +#define 
MMC_VQMMC_MAX_LOAD_UA 325000 + +#define msm_host_readl(msm_host, host, offset) \ + msm_host->var_ops->msm_readl_relaxed(host, offset) + +#define msm_host_writel(msm_host, val, host, offset) \ + msm_host->var_ops->msm_writel_relaxed(val, host, offset) + +/* CQHCI vendor specific registers */ +#define CQHCI_VENDOR_CFG1 0xA00 +#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13) + +struct sdhci_msm_offset { + u32 core_hc_mode; + u32 core_mci_data_cnt; + u32 core_mci_status; + u32 core_mci_fifo_cnt; + u32 core_mci_version; + u32 core_generics; + u32 core_testbus_config; + u32 core_testbus_sel2_bit; + u32 core_testbus_ena; + u32 core_testbus_sel2; + u32 core_pwrctl_status; + u32 core_pwrctl_mask; + u32 core_pwrctl_clear; + u32 core_pwrctl_ctl; + u32 core_sdcc_debug_reg; + u32 core_dll_config; + u32 core_dll_status; + u32 core_vendor_spec; + u32 core_vendor_spec_adma_err_addr0; + u32 core_vendor_spec_adma_err_addr1; + u32 core_vendor_spec_func2; + u32 core_vendor_spec_capabilities0; + u32 core_ddr_200_cfg; + u32 core_vendor_spec3; + u32 core_dll_config_2; + u32 core_dll_config_3; + u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */ + u32 core_ddr_config; + u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */ +}; + +static const struct sdhci_msm_offset sdhci_msm_v5_offset = { + .core_mci_data_cnt = 0x35c, + .core_mci_status = 0x324, + .core_mci_fifo_cnt = 0x308, + .core_mci_version = 0x318, + .core_generics = 0x320, + .core_testbus_config = 0x32c, + .core_testbus_sel2_bit = 3, + .core_testbus_ena = (1 << 31), + .core_testbus_sel2 = (1 << 3), + .core_pwrctl_status = 0x240, + .core_pwrctl_mask = 0x244, + .core_pwrctl_clear = 0x248, + .core_pwrctl_ctl = 0x24c, + .core_sdcc_debug_reg = 0x358, + .core_dll_config = 0x200, + .core_dll_status = 0x208, + .core_vendor_spec = 0x20c, + .core_vendor_spec_adma_err_addr0 = 0x214, + .core_vendor_spec_adma_err_addr1 = 0x218, + .core_vendor_spec_func2 = 0x210, + .core_vendor_spec_capabilities0 = 0x21c, + .core_ddr_200_cfg = 0x224, + .core_vendor_spec3 = 0x250, + .core_dll_config_2 = 0x254, + .core_dll_config_3 = 0x258, + .core_ddr_config = 0x25c, + .core_dll_usr_ctl = 0x388, +}; + +static const struct sdhci_msm_offset sdhci_msm_mci_offset = { + .core_hc_mode = 0x78, + .core_mci_data_cnt = 0x30, + .core_mci_status = 0x34, + .core_mci_fifo_cnt = 0x44, + .core_mci_version = 0x050, + .core_generics = 0x70, + .core_testbus_config = 0x0cc, + .core_testbus_sel2_bit = 4, + .core_testbus_ena = (1 << 3), + .core_testbus_sel2 = (1 << 4), + .core_pwrctl_status = 0xdc, + .core_pwrctl_mask = 0xe0, + .core_pwrctl_clear = 0xe4, + .core_pwrctl_ctl = 0xe8, + .core_sdcc_debug_reg = 0x124, + .core_dll_config = 0x100, + .core_dll_status = 0x108, + .core_vendor_spec = 0x10c, + .core_vendor_spec_adma_err_addr0 = 0x114, + .core_vendor_spec_adma_err_addr1 = 0x118, + .core_vendor_spec_func2 = 0x110, + .core_vendor_spec_capabilities0 = 0x11c, + .core_ddr_200_cfg = 0x184, + .core_vendor_spec3 = 0x1b0, + .core_dll_config_2 = 0x1b4, + .core_ddr_config_old = 0x1b8, + .core_ddr_config = 0x1bc, +}; + +struct sdhci_msm_variant_ops { + u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset); + void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host, + u32 offset); +}; + +/* + * From V5, register spaces have changed. Wrap this info in a structure + * and choose the data_structure based on version info mentioned in DT. 
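
The msm_host_readl/msm_host_writel macros above route every vendor-register access through sdhci_msm_variant_ops, so one driver serves both layouts: pre-V5 controllers keep these registers in a separate core_mem window, while V5 and later fold them into the main SDHCI region. The indirection, boiled down to a self-contained sketch with simplified names:

#include <stdint.h>

struct host;

struct variant_ops {
	uint32_t (*read)(struct host *h, uint32_t offset);
};

struct host {
	volatile uint32_t *hc_base;	/* main SDHCI window (V5+) */
	volatile uint32_t *core_mem;	/* legacy MCI window */
	const struct variant_ops *ops;
};

static uint32_t v5_read(struct host *h, uint32_t off)
{
	return h->hc_base[off / 4];
}

static uint32_t mci_read(struct host *h, uint32_t off)
{
	return h->core_mem[off / 4];
}

static const struct variant_ops v5_ops = { .read = v5_read };
static const struct variant_ops mci_ops = { .read = mci_read };

/* callers never learn which window a vendor register lives in */
static inline uint32_t host_readl(struct host *h, uint32_t off)
{
	return h->ops->read(h, off);
}
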
+ */ +struct sdhci_msm_variant_info { + bool mci_removed; + bool restore_dll_config; + bool uses_tassadar_dll; + const struct sdhci_msm_variant_ops *var_ops; + const struct sdhci_msm_offset *offset; +}; + +struct sdhci_msm_host { + struct platform_device *pdev; + void __iomem *core_mem; /* MSM SDCC mapped address */ + int pwr_irq; /* power irq */ + struct clk *bus_clk; /* SDHC bus voter clock */ + struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/ + struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */ + unsigned long clk_rate; + struct mmc_host *mmc; + struct opp_table *opp_table; + bool use_14lpp_dll_reset; + bool tuning_done; + bool calibration_done; + u8 saved_tuning_phase; + bool use_cdclp533; + u32 curr_pwr_state; + u32 curr_io_level; + wait_queue_head_t pwr_irq_wait; + bool pwr_irq_flag; + u32 caps_0; + bool mci_removed; + bool restore_dll_config; + const struct sdhci_msm_variant_ops *var_ops; + const struct sdhci_msm_offset *offset; + bool use_cdr; + u32 transfer_mode; + bool updated_ddr_cfg; + bool uses_tassadar_dll; + u32 dll_config; + u32 ddr_config; + bool vqmmc_enabled; +}; + +static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + return msm_host->offset; +} + +/* + * APIs to read/write to vendor specific registers which were there in the + * core_mem region before MCI was removed. + */ +static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host, + u32 offset) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + return readl_relaxed(msm_host->core_mem + offset); +} + +static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host, + u32 offset) +{ + return readl_relaxed(host->ioaddr + offset); +} + +static void sdhci_msm_mci_variant_writel_relaxed(u32 val, + struct sdhci_host *host, u32 offset) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + writel_relaxed(val, msm_host->core_mem + offset); +} + +static void sdhci_msm_v5_variant_writel_relaxed(u32 val, + struct sdhci_host *host, u32 offset) +{ + writel_relaxed(val, host->ioaddr + offset); +} + +static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host, + unsigned int clock) +{ + struct mmc_ios ios = host->mmc->ios; + /* + * The SDHC requires internal clock frequency to be double the + * actual clock that will be set for DDR mode. The controller + * uses the faster clock(100/400MHz) for some of its parts and + * send the actual required clock (50/200MHz) to the card. 
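
Restated compactly: for the DDR-derived timings the GCC clock must run at twice the rate sdhci asks for, because the controller halves it on the way to the card. A sketch mirroring msm_get_clock_rate_for_bus_mode(), with the bool type and the MMC_TIMING_* constants assumed from the kernel's headers:

static unsigned int bus_mode_rate(unsigned int clock, unsigned char timing,
				  bool hs400_tuning)
{
	if (timing == MMC_TIMING_UHS_DDR50 ||
	    timing == MMC_TIMING_MMC_DDR52 ||
	    timing == MMC_TIMING_MMC_HS400 || hs400_tuning)
		return clock * 2;
	return clock;
}
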
+ */ + if (ios.timing == MMC_TIMING_UHS_DDR50 || + ios.timing == MMC_TIMING_MMC_DDR52 || + ios.timing == MMC_TIMING_MMC_HS400 || + host->flags & SDHCI_HS400_TUNING) + clock *= 2; + return clock; +} + +static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, + unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct mmc_ios curr_ios = host->mmc->ios; + struct clk *core_clk = msm_host->bulk_clks[0].clk; + int rc; + + clock = msm_get_clock_rate_for_bus_mode(host, clock); + rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock); + if (rc) { + pr_err("%s: Failed to set clock at rate %u at timing %d\n", + mmc_hostname(host->mmc), clock, + curr_ios.timing); + return; + } + msm_host->clk_rate = clock; + pr_debug("%s: Setting clock at rate %lu at timing %d\n", + mmc_hostname(host->mmc), clk_get_rate(core_clk), + curr_ios.timing); +} + +/* Platform specific tuning */ +static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll) +{ + u32 wait_cnt = 50; + u8 ck_out_en; + struct mmc_host *mmc = host->mmc; + const struct sdhci_msm_offset *msm_offset = + sdhci_priv_msm_offset(host); + + /* Poll for CK_OUT_EN bit. max. poll time = 50us */ + ck_out_en = !!(readl_relaxed(host->ioaddr + + msm_offset->core_dll_config) & CORE_CK_OUT_EN); + + while (ck_out_en != poll) { + if (--wait_cnt == 0) { + dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n", + mmc_hostname(mmc), poll); + return -ETIMEDOUT; + } + udelay(1); + + ck_out_en = !!(readl_relaxed(host->ioaddr + + msm_offset->core_dll_config) & CORE_CK_OUT_EN); + } + + return 0; +} + +static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase) +{ + int rc; + static const u8 grey_coded_phase_table[] = { + 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4, + 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8 + }; + unsigned long flags; + u32 config; + struct mmc_host *mmc = host->mmc; + const struct sdhci_msm_offset *msm_offset = + sdhci_priv_msm_offset(host); + + if (phase > 0xf) + return -EINVAL; + + spin_lock_irqsave(&host->lock, flags); + + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); + config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN); + config |= (CORE_CDR_EXT_EN | CORE_DLL_EN); + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + + /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */ + rc = msm_dll_poll_ck_out_en(host, 0); + if (rc) + goto err_out; + + /* + * Write the selected DLL clock output phase (0 ... 15) + * to CDR_SELEXT bit field of DLL_CONFIG register. 
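
The grey_coded_phase_table[] above is exactly the 4-bit binary-to-Gray mapping, so stepping through phases 0..15 changes only one CDR_SELEXT bit at a time. A quick standalone check that n ^ (n >> 1) reproduces the table:

#include <assert.h>

static unsigned int to_gray(unsigned int n)
{
	return n ^ (n >> 1);
}

int main(void)
{
	static const unsigned char table[16] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned int n;

	for (n = 0; n < 16; n++)
		assert(to_gray(n) == table[n]);
	return 0;
}
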
+ */ + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); + config &= ~CDR_SELEXT_MASK; + config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT; + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); + config |= CORE_CK_OUT_EN; + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + + /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */ + rc = msm_dll_poll_ck_out_en(host, 1); + if (rc) + goto err_out; + + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); + config |= CORE_CDR_EN; + config &= ~CORE_CDR_EXT_EN; + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + goto out; + +err_out: + dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n", + mmc_hostname(mmc), phase); +out: + spin_unlock_irqrestore(&host->lock, flags); + return rc; +} + +/* + * Find out the greatest range of consecuitive selected + * DLL clock output phases that can be used as sampling + * setting for SD3.0 UHS-I card read operation (in SDR104 + * timing mode) or for eMMC4.5 card read operation (in + * HS400/HS200 timing mode). + * Select the 3/4 of the range and configure the DLL with the + * selected DLL clock output phase. + */ + +static int msm_find_most_appropriate_phase(struct sdhci_host *host, + u8 *phase_table, u8 total_phases) +{ + int ret; + u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} }; + u8 phases_per_row[MAX_PHASES] = { 0 }; + int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0; + int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0; + bool phase_0_found = false, phase_15_found = false; + struct mmc_host *mmc = host->mmc; + + if (!total_phases || (total_phases > MAX_PHASES)) { + dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n", + mmc_hostname(mmc), total_phases); + return -EINVAL; + } + + for (cnt = 0; cnt < total_phases; cnt++) { + ranges[row_index][col_index] = phase_table[cnt]; + phases_per_row[row_index] += 1; + col_index++; + + if ((cnt + 1) == total_phases) { + continue; + /* check if next phase in phase_table is consecutive or not */ + } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) { + row_index++; + col_index = 0; + } + } + + if (row_index >= MAX_PHASES) + return -EINVAL; + + /* Check if phase-0 is present in first valid window? */ + if (!ranges[0][0]) { + phase_0_found = true; + phase_0_raw_index = 0; + /* Check if cycle exist between 2 valid windows */ + for (cnt = 1; cnt <= row_index; cnt++) { + if (phases_per_row[cnt]) { + for (i = 0; i < phases_per_row[cnt]; i++) { + if (ranges[cnt][i] == 15) { + phase_15_found = true; + phase_15_raw_index = cnt; + break; + } + } + } + } + } + + /* If 2 valid windows form cycle then merge them as single window */ + if (phase_0_found && phase_15_found) { + /* number of phases in raw where phase 0 is present */ + u8 phases_0 = phases_per_row[phase_0_raw_index]; + /* number of phases in raw where phase 15 is present */ + u8 phases_15 = phases_per_row[phase_15_raw_index]; + + if (phases_0 + phases_15 >= MAX_PHASES) + /* + * If there are more than 1 phase windows then total + * number of phases in both the windows should not be + * more than or equal to MAX_PHASES. 
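
Stripped of the 0/15 wrap-around merge handled above, the phase selection reduces to: group the sorted passing phases into runs of consecutive values, take the longest run, and return the entry three quarters of the way into it. A simplified sketch of that core (wrap handling deliberately left out):

/* phase_table is sorted ascending, total >= 1 */
static int pick_phase(const unsigned char *phase_table, int total)
{
	int i, start = 0, best_start = 0, len = 1, best_len = 1;

	for (i = 1; i < total; i++) {
		if (phase_table[i] == phase_table[i - 1] + 1) {
			len++;
		} else {
			start = i;
			len = 1;
		}
		if (len > best_len) {
			best_len = len;
			best_start = start;
		}
	}

	i = (best_len * 3) / 4;		/* 3/4 point, as in the driver */
	if (i)
		i--;
	return phase_table[best_start + i];
}
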
+ */ + return -EINVAL; + + /* Merge 2 cyclic windows */ + i = phases_15; + for (cnt = 0; cnt < phases_0; cnt++) { + ranges[phase_15_raw_index][i] = + ranges[phase_0_raw_index][cnt]; + if (++i >= MAX_PHASES) + break; + } + + phases_per_row[phase_0_raw_index] = 0; + phases_per_row[phase_15_raw_index] = phases_15 + phases_0; + } + + for (cnt = 0; cnt <= row_index; cnt++) { + if (phases_per_row[cnt] > curr_max) { + curr_max = phases_per_row[cnt]; + selected_row_index = cnt; + } + } + + i = (curr_max * 3) / 4; + if (i) + i--; + + ret = ranges[selected_row_index][i]; + + if (ret >= MAX_PHASES) { + ret = -EINVAL; + dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n", + mmc_hostname(mmc), ret); + } + + return ret; +} + +static inline void msm_cm_dll_set_freq(struct sdhci_host *host) +{ + u32 mclk_freq = 0, config; + const struct sdhci_msm_offset *msm_offset = + sdhci_priv_msm_offset(host); + + /* Program the MCLK value to MCLK_FREQ bit field */ + if (host->clock <= 112000000) + mclk_freq = 0; + else if (host->clock <= 125000000) + mclk_freq = 1; + else if (host->clock <= 137000000) + mclk_freq = 2; + else if (host->clock <= 150000000) + mclk_freq = 3; + else if (host->clock <= 162000000) + mclk_freq = 4; + else if (host->clock <= 175000000) + mclk_freq = 5; + else if (host->clock <= 187000000) + mclk_freq = 6; + else if (host->clock <= 200000000) + mclk_freq = 7; + + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); + config &= ~CMUX_SHIFT_PHASE_MASK; + config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT; + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); +} + +/* Initialize the DLL (Programmable Delay Line) */ +static int msm_init_cm_dll(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + int wait_cnt = 50; + unsigned long flags, xo_clk = 0; + u32 config; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk)) + xo_clk = clk_get_rate(msm_host->xo_clk); + + spin_lock_irqsave(&host->lock, flags); + + /* + * Make sure that clock is always enabled when DLL + * tuning is in progress. Keeping PWRSAVE ON may + * turn off the clock. 
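
The if/else ladder in msm_cm_dll_set_freq() above quantises the clock into eight bins roughly 12.5 MHz apart, topping out at 200 MHz. The same mapping as a table walk, purely illustrative:

static unsigned int mclk_code(unsigned int hz)
{
	static const unsigned int bound[8] = {
		112000000, 125000000, 137000000, 150000000,
		162000000, 175000000, 187000000, 200000000,
	};
	unsigned int i;

	for (i = 0; i < 8; i++)
		if (hz <= bound[i])
			return i;
	/* above 200 MHz no branch matches and the field stays 0,
	 * mirroring the driver's mclk_freq = 0 initialisation */
	return 0;
}
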
+ */ + config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); + config &= ~CORE_CLK_PWRSAVE; + writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); + + if (msm_host->dll_config) + writel_relaxed(msm_host->dll_config, + host->ioaddr + msm_offset->core_dll_config); + + if (msm_host->use_14lpp_dll_reset) { + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config &= ~CORE_CK_OUT_EN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config_2); + config |= CORE_DLL_CLOCK_DISABLE; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config_2); + } + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_DLL_RST; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_DLL_PDN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + if (!msm_host->dll_config) + msm_cm_dll_set_freq(host); + + if (msm_host->use_14lpp_dll_reset && + !IS_ERR_OR_NULL(msm_host->xo_clk)) { + u32 mclk_freq = 0; + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config_2); + config &= CORE_FLL_CYCLE_CNT; + if (config) + mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8), + xo_clk); + else + mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4), + xo_clk); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config_2); + config &= ~(0xFF << 10); + config |= mclk_freq << 10; + + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config_2); + /* wait for 5us before enabling DLL clock */ + udelay(5); + } + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config &= ~CORE_DLL_RST; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config &= ~CORE_DLL_PDN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + if (msm_host->use_14lpp_dll_reset) { + if (!msm_host->dll_config) + msm_cm_dll_set_freq(host); + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config_2); + config &= ~CORE_DLL_CLOCK_DISABLE; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config_2); + } + + /* + * Configure DLL user control register to enable DLL status. + * This setting is applicable to SDCC v5.1 onwards only. + */ + if (msm_host->uses_tassadar_dll) { + config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN | + ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_usr_ctl); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config_3); + config &= ~0xFF; + if (msm_host->clk_rate < 150000000) + config |= DLL_CONFIG_3_LOW_FREQ_VAL; + else + config |= DLL_CONFIG_3_HIGH_FREQ_VAL; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config_3); + } + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_DLL_EN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_CK_OUT_EN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */ + while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) & + CORE_DLL_LOCK)) { + /* max. 
wait for 50us sec for LOCK bit to be set */ + if (--wait_cnt == 0) { + dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n", + mmc_hostname(mmc)); + spin_unlock_irqrestore(&host->lock, flags); + return -ETIMEDOUT; + } + udelay(1); + } + + spin_unlock_irqrestore(&host->lock, flags); + return 0; +} + +static void msm_hc_select_default(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u32 config; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + if (!msm_host->use_cdclp533) { + config = readl_relaxed(host->ioaddr + + msm_offset->core_vendor_spec3); + config &= ~CORE_PWRSAVE_DLL; + writel_relaxed(config, host->ioaddr + + msm_offset->core_vendor_spec3); + } + + config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); + config &= ~CORE_HC_MCLK_SEL_MASK; + config |= CORE_HC_MCLK_SEL_DFLT; + writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); + + /* + * Disable HC_SELECT_IN to be able to use the UHS mode select + * configuration from Host Control2 register for all other + * modes. + * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field + * in VENDOR_SPEC_FUNC + */ + config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); + config &= ~CORE_HC_SELECT_IN_EN; + config &= ~CORE_HC_SELECT_IN_MASK; + writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); + + /* + * Make sure above writes impacting free running MCLK are completed + * before changing the clk_rate at GCC. + */ + wmb(); +} + +static void msm_hc_select_hs400(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct mmc_ios ios = host->mmc->ios; + u32 config, dll_lock; + int rc; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + /* Select the divided clock (free running MCLK/2) */ + config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec); + config &= ~CORE_HC_MCLK_SEL_MASK; + config |= CORE_HC_MCLK_SEL_HS400; + + writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); + /* + * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC + * register + */ + if ((msm_host->tuning_done || ios.enhanced_strobe) && + !msm_host->calibration_done) { + config = readl_relaxed(host->ioaddr + + msm_offset->core_vendor_spec); + config |= CORE_HC_SELECT_IN_HS400; + config |= CORE_HC_SELECT_IN_EN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_vendor_spec); + } + if (!msm_host->clk_rate && !msm_host->use_cdclp533) { + /* + * Poll on DLL_LOCK or DDR_DLL_LOCK bits in + * core_dll_status to be set. This should get set + * within 15 us at 200 MHz. + */ + rc = readl_relaxed_poll_timeout(host->ioaddr + + msm_offset->core_dll_status, + dll_lock, + (dll_lock & + (CORE_DLL_LOCK | + CORE_DDR_DLL_LOCK)), 10, + 1000); + if (rc == -ETIMEDOUT) + pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n", + mmc_hostname(host->mmc), dll_lock); + } + /* + * Make sure above writes impacting free running MCLK are completed + * before changing the clk_rate at GCC. + */ + wmb(); +} + +/* + * sdhci_msm_hc_select_mode :- In general all timing modes are + * controlled via UHS mode select in Host Control2 register. + * eMMC specific HS200/HS400 doesn't have their respective modes + * defined here, hence we use these values. 
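
Both DLL-lock waits in this area are bounded busy-polls, one hand-rolled with a 50 x 1 µs budget and one via readl_relaxed_poll_timeout(). The hand-rolled shape, reduced to a skeleton (the delay is schematic; the driver uses udelay(1)):

static int poll_bit(volatile const unsigned int *reg, unsigned int mask,
		    int max_tries)
{
	while (!(*reg & mask)) {
		if (--max_tries == 0)
			return -1;	/* driver logs and returns -ETIMEDOUT */
		/* udelay(1) here in the driver */
	}
	return 0;
}
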
+ * + * HS200 - SDR104 (Since they both are equivalent in functionality) + * HS400 - This involves multiple configurations + * Initially SDR104 - when tuning is required as HS200 + * Then when switching to DDR @ 400MHz (HS400) we use + * the vendor specific HC_SELECT_IN to control the mode. + * + * In addition to controlling the modes we also need to select the + * correct input clock for DLL depending on the mode. + * + * HS400 - divided clock (free running MCLK/2) + * All other modes - default (free running MCLK) + */ +static void sdhci_msm_hc_select_mode(struct sdhci_host *host) +{ + struct mmc_ios ios = host->mmc->ios; + + if (ios.timing == MMC_TIMING_MMC_HS400 || + host->flags & SDHCI_HS400_TUNING) + msm_hc_select_hs400(host); + else + msm_hc_select_default(host); +} + +static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u32 config, calib_done; + int ret; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); + + /* + * Retuning in HS400 (DDR mode) will fail, just reset the + * tuning block and restore the saved tuning phase. + */ + ret = msm_init_cm_dll(host); + if (ret) + goto out; + + /* Set the selected phase in delay line hw block */ + ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); + if (ret) + goto out; + + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config); + config |= CORE_CMD_DAT_TRACK_SEL; + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); + config &= ~CORE_CDC_T4_DLY_SEL; + writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); + + config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); + config &= ~CORE_CDC_SWITCH_BYPASS_OFF; + writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); + + config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG); + config |= CORE_CDC_SWITCH_RC_EN; + writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG); + + config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); + config &= ~CORE_START_CDC_TRAFFIC; + writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); + + /* Perform CDC Register Initialization Sequence */ + + writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1); + writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); + writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1); + writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG); + writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG); + writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG); + writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG); + writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG); + + /* CDC HW Calibration */ + + config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + config |= CORE_SW_TRIG_FULL_CALIB; + writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + + config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + config &= ~CORE_SW_TRIG_FULL_CALIB; + writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + + config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + config |= CORE_HW_AUTOCAL_ENA; + writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0); + + config = 
readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); + config |= CORE_TIMER_ENA; + writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0); + + ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0, + calib_done, + (calib_done & CORE_CALIBRATION_DONE), + 1, 50); + + if (ret == -ETIMEDOUT) { + pr_err("%s: %s: CDC calibration was not completed\n", + mmc_hostname(host->mmc), __func__); + goto out; + } + + ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0) + & CORE_CDC_ERROR_CODE_MASK; + if (ret) { + pr_err("%s: %s: CDC error code %d\n", + mmc_hostname(host->mmc), __func__, ret); + ret = -EINVAL; + goto out; + } + + config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg); + config |= CORE_START_CDC_TRAFFIC; + writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg); +out: + pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), + __func__, ret); + return ret; +} + +static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + u32 dll_status, config, ddr_cfg_offset; + int ret; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_msm_offset *msm_offset = + sdhci_priv_msm_offset(host); + + pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); + + /* + * Currently the core_ddr_config register defaults to desired + * configuration on reset. Currently reprogramming the power on + * reset (POR) value in case it might have been modified by + * bootloaders. In the future, if this changes, then the desired + * values will need to be programmed appropriately. + */ + if (msm_host->updated_ddr_cfg) + ddr_cfg_offset = msm_offset->core_ddr_config; + else + ddr_cfg_offset = msm_offset->core_ddr_config_old; + writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset); + + if (mmc->ios.enhanced_strobe) { + config = readl_relaxed(host->ioaddr + + msm_offset->core_ddr_200_cfg); + config |= CORE_CMDIN_RCLK_EN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_ddr_200_cfg); + } + + config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2); + config |= CORE_DDR_CAL_EN; + writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2); + + ret = readl_relaxed_poll_timeout(host->ioaddr + + msm_offset->core_dll_status, + dll_status, + (dll_status & CORE_DDR_DLL_LOCK), + 10, 1000); + + if (ret == -ETIMEDOUT) { + pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n", + mmc_hostname(host->mmc), __func__); + goto out; + } + + /* + * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3. + * When MCLK is gated OFF, it is not gated for less than 0.5us + * and MCLK must be switched on for at-least 1us before DATA + * starts coming. Controllers with 14lpp and later tech DLL cannot + * guarantee above requirement. So PWRSAVE_DLL should not be + * turned on for host controllers using this DLL. + */ + if (!msm_host->use_14lpp_dll_reset) { + config = readl_relaxed(host->ioaddr + + msm_offset->core_vendor_spec3); + config |= CORE_PWRSAVE_DLL; + writel_relaxed(config, host->ioaddr + + msm_offset->core_vendor_spec3); + } + + /* + * Drain writebuffer to ensure above DLL calibration + * and PWRSAVE DLL is enabled. 
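
Two details of the completion check above are easy to miss: CALIBRATION_DONE is bit 0 of CSR_CDC_STATUS0 and is polled with a 50 µs budget, and the error field in bits 26:24 is then re-read, with the masked value itself doubling as the failure indicator. The decode in isolation:

#define CORE_CALIBRATION_DONE		0x1
#define CORE_CDC_ERROR_CODE_MASK	0x7000000

/* returns 0 when calibration finished cleanly, non-zero otherwise */
static unsigned int cdc_check(unsigned int status0)
{
	if (!(status0 & CORE_CALIBRATION_DONE))
		return ~0u;		/* not done (or the poll timed out) */
	return status0 & CORE_CDC_ERROR_CODE_MASK;
}
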
+ +static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct mmc_host *mmc = host->mmc; + int ret; + u32 config; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__); + + /* + * Retuning in HS400 (DDR mode) will fail, just reset the + * tuning block and restore the saved tuning phase. + */ + ret = msm_init_cm_dll(host); + if (ret) + goto out; + + if (!mmc->ios.enhanced_strobe) { + /* Set the selected phase in delay line hw block */ + ret = msm_config_cm_dll_phase(host, + msm_host->saved_tuning_phase); + if (ret) + goto out; + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_CMD_DAT_TRACK_SEL; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + } + + if (msm_host->use_cdclp533) + ret = sdhci_msm_cdclp533_calibration(host); + else + ret = sdhci_msm_cm_dll_sdc4_calibration(host); +out: + pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc), + __func__, ret); + return ret; +} + +static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host) +{ + struct mmc_ios *ios = &host->mmc->ios; + + /* + * Tuning is required for SDR104, HS200 and HS400 modes, and only + * if the clock frequency is greater than 100MHz in these modes. + */ + if (host->clock <= CORE_FREQ_100MHZ || + !(ios->timing == MMC_TIMING_MMC_HS400 || + ios->timing == MMC_TIMING_MMC_HS200 || + ios->timing == MMC_TIMING_UHS_SDR104) || + ios->enhanced_strobe) + return false; + + return true; +} + +static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + int ret; + + /* + * The SDR DLL comes into the picture only for timing modes which + * need tuning. + */ + if (!sdhci_msm_is_tuning_needed(host)) + return 0; + + /* Reset the tuning block */ + ret = msm_init_cm_dll(host); + if (ret) + return ret; + + /* Restore the tuning block */ + ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); + + return ret; +} + +static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) +{ + const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); + u32 config, oldconfig = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + + config = oldconfig; + if (enable) { + config |= CORE_CDR_EN; + config &= ~CORE_CDR_EXT_EN; + } else { + config &= ~CORE_CDR_EN; + config |= CORE_CDR_EXT_EN; + } + + if (config != oldconfig) { + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + } +} + +static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int tuning_seq_cnt = 10; + u8 phase, tuned_phases[16], tuned_phase_cnt = 0; + int rc; + struct mmc_ios ios = host->mmc->ios; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + if (!sdhci_msm_is_tuning_needed(host)) { + msm_host->use_cdr = false; + sdhci_msm_set_cdr(host, false); + return 0; + } + + /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ + msm_host->use_cdr = true; + + /* + * Clear the tuning_done flag before tuning to ensure proper + * HS400 settings.
+ */ + msm_host->tuning_done = 0; + + /* + * HS400 tuning in HS200 timing requires: + * - selecting MCLK/2 in VENDOR_SPEC + * - programming MCLK to 400MHz (or nearest supported) in GCC + */ + if (host->flags & SDHCI_HS400_TUNING) { + sdhci_msm_hc_select_mode(host); + msm_set_clock_rate_for_bus_mode(host, ios.clock); + host->flags &= ~SDHCI_HS400_TUNING; + } + +retry: + /* First of all, reset the tuning block */ + rc = msm_init_cm_dll(host); + if (rc) + return rc; + + phase = 0; + do { + /* Set the phase in delay line hw block */ + rc = msm_config_cm_dll_phase(host, phase); + if (rc) + return rc; + + rc = mmc_send_tuning(mmc, opcode, NULL); + if (!rc) { + /* Tuning is successful at this tuning point */ + tuned_phases[tuned_phase_cnt++] = phase; + dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", + mmc_hostname(mmc), phase); + } + } while (++phase < ARRAY_SIZE(tuned_phases)); + + if (tuned_phase_cnt) { + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { + /* + * All phases valid is _almost_ as bad as no phases + * valid. Probably not all phases are really reliable, + * but we didn't detect where the unreliable ones are. + * That means we'll essentially be guessing and hoping + * we get a good phase. Better to try a few times. + */ + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", + mmc_hostname(mmc)); + if (--tuning_seq_cnt) { + tuned_phase_cnt = 0; + goto retry; + } + } + + rc = msm_find_most_appropriate_phase(host, tuned_phases, + tuned_phase_cnt); + if (rc < 0) + return rc; + else + phase = rc; + + /* + * Finally, set the selected phase in the delay + * line hw block. + */ + rc = msm_config_cm_dll_phase(host, phase); + if (rc) + return rc; + msm_host->saved_tuning_phase = phase; + dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", + mmc_hostname(mmc), phase); + } else { + if (--tuning_seq_cnt) + goto retry; + /* Tuning failed */ + dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n", + mmc_hostname(mmc)); + rc = -EIO; + } + + if (!rc) + msm_host->tuning_done = true; + return rc; +} + +/* + * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation. + * This needs to be done for both tuning and enhanced_strobe mode. + * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz + * a fixed feedback clock is used.
+ */ +static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + int ret; + + if (host->clock > CORE_FREQ_100MHZ && + (msm_host->tuning_done || ios->enhanced_strobe) && + !msm_host->calibration_done) { + ret = sdhci_msm_hs400_dll_calibration(host); + if (!ret) + msm_host->calibration_done = true; + else + pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n", + mmc_hostname(host->mmc), ret); + } +} + +static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host, + unsigned int uhs) +{ + struct mmc_host *mmc = host->mmc; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u16 ctrl_2; + u32 config; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + break; + case MMC_TIMING_MMC_HS400: + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + break; + } + + /* + * When the clock frequency is less than 100MHz, the feedback clock + * must be provided and the DLL must not be used so that tuning can + * be skipped. To provide the feedback clock, the mode selection can + * be any value less than 3'b011 in bits [2:0] of the HOST CONTROL2 + * register. + */ + if (host->clock <= CORE_FREQ_100MHZ) { + if (uhs == MMC_TIMING_MMC_HS400 || + uhs == MMC_TIMING_MMC_HS200 || + uhs == MMC_TIMING_UHS_SDR104) + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + /* + * The DLL is not required for clock <= 100MHz. + * Thus, make sure it is disabled when not required. + */ + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_DLL_RST; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + config = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + config |= CORE_DLL_PDN; + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); + + /* + * The DLL needs to be restored and CDCLP533 recalibrated + * when the clock frequency is set back to 400MHz. + */ + msm_host->calibration_done = false; + } + + dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n", + mmc_hostname(host->mmc), host->clock, uhs, ctrl_2); + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + + if (mmc->ios.timing == MMC_TIMING_MMC_HS400) + sdhci_msm_hs400(host, &mmc->ios); +}
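The low-clock branch of sdhci_msm_set_uhs_signaling() relies on one detail of the UHS mode field: any encoding below 3'b011 in bits [2:0] of HOST CONTROL2 keeps the controller on the feedback clock, so clearing the field (back to SDR12 = 3'b000) is enough to skip tuning. A small sketch of that rule follows, using the standard SDHCI encodings (SDR12=0 through SDR104=3) as assumed values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standard SDHCI UHS mode encodings in HOST_CONTROL2 bits [2:0]. */
enum uhs_mode { UHS_SDR12 = 0, UHS_SDR25 = 1, UHS_SDR50 = 2, UHS_SDR104 = 3 };

#define UHS_MASK 0x7u

/* Encodings below 3'b011 keep the feedback clock, so tuning can be skipped. */
static bool uses_feedback_clock(uint16_t ctrl_2)
{
	return (ctrl_2 & UHS_MASK) < UHS_SDR104;
}

static uint16_t select_mode(uint16_t ctrl_2, enum uhs_mode mode,
			    unsigned int clock_hz)
{
	ctrl_2 = (ctrl_2 & ~UHS_MASK) | mode;
	/* Mirror the driver: at <= 100MHz, fall back to an encoding < 3'b011. */
	if (clock_hz <= 100000000 && mode == UHS_SDR104)
		ctrl_2 &= ~UHS_MASK;
	return ctrl_2;
}

int main(void)
{
	uint16_t c = select_mode(0, UHS_SDR104, 50000000);

	printf("ctrl_2=0x%x feedback=%d\n", c, uses_feedback_clock(c));
	return 0;
}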
+ +static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level) +{ + struct platform_device *pdev = msm_host->pdev; + int ret; + + if (level) + ret = pinctrl_pm_select_default_state(&pdev->dev); + else + ret = pinctrl_pm_select_sleep_state(&pdev->dev); + + return ret; +} + +static int sdhci_msm_set_vmmc(struct mmc_host *mmc) +{ + if (IS_ERR(mmc->supply.vmmc)) + return 0; + + return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd); +} + +static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host, + struct mmc_host *mmc, bool level) +{ + int ret; + struct mmc_ios ios; + + if (msm_host->vqmmc_enabled == level) + return 0; + + if (level) { + /* Set the IO voltage regulator to default voltage level */ + if (msm_host->caps_0 & CORE_3_0V_SUPPORT) + ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330; + else if (msm_host->caps_0 & CORE_1_8V_SUPPORT) + ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180; + + if (msm_host->caps_0 & CORE_VOLT_SUPPORT) { + ret = mmc_regulator_set_vqmmc(mmc, &ios); + if (ret < 0) { + dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n", + mmc_hostname(mmc), ret); + goto out; + } + } + ret = regulator_enable(mmc->supply.vqmmc); + } else { + ret = regulator_disable(mmc->supply.vqmmc); + } + + if (ret) + dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n", + mmc_hostname(mmc), level ? "en":"dis", ret); + else + msm_host->vqmmc_enabled = level; +out: + return ret; +} + +static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host, + struct mmc_host *mmc, bool hpm) +{ + int load, ret; + + load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0; + ret = regulator_set_load(mmc->supply.vqmmc, load); + if (ret) + dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n", + mmc_hostname(mmc), ret); + return ret; +} + +static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host, + struct mmc_host *mmc, bool level) +{ + int ret; + bool always_on; + + if (IS_ERR(mmc->supply.vqmmc) || + (mmc->ios.power_mode == MMC_POWER_UNDEFINED)) + return 0; + /* + * For eMMC, don't turn off Vqmmc. Instead, just configure it in LPM + * and HPM modes by setting the corresponding load. + * + * Until eMMC is initialized (i.e. always_on == 0), just turn on/off + * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off + * gets invoked. Once eMMC is initialized (i.e. always_on == 1), + * Vqmmc should remain ON, so just set the load instead of turning it + * off/on. + */ + always_on = !mmc_card_is_removable(mmc) && + mmc->card && mmc_card_mmc(mmc->card); + + if (always_on) + ret = msm_config_vqmmc_mode(msm_host, mmc, level); + else + ret = msm_toggle_vqmmc(msm_host, mmc, level); + + return ret; +} + +static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host) +{ + init_waitqueue_head(&msm_host->pwr_irq_wait); +} + +static inline void sdhci_msm_complete_pwr_irq_wait( + struct sdhci_msm_host *msm_host) +{ + wake_up(&msm_host->pwr_irq_wait); +}
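The two inline helpers above are one half of a flag-plus-waitqueue handshake: the register-write path clears pwr_irq_flag, pokes the hardware, then sleeps on pwr_irq_wait until the power IRQ sets the flag and calls wake_up(). Below is a rough userspace model of that handshake using a condition variable; the kernel version additionally bounds the wait with wait_event_timeout(), and the thread and delay here are invented.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace model of the pwr_irq_wait handshake. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pwr_irq_flag;

static void *pwr_irq_thread(void *arg)
{
	(void)arg;
	usleep(1000);			/* the hardware takes a moment */
	pthread_mutex_lock(&lock);
	pwr_irq_flag = 1;		/* the IRQ handler sets the flag */
	pthread_cond_signal(&cond);	/* ... and wakes the waiter */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pwr_irq_flag = 0;		/* cleared before the register write */
	pthread_create(&t, NULL, pwr_irq_thread, NULL);

	pthread_mutex_lock(&lock);
	while (!pwr_irq_flag)		/* wait_event_timeout() analogue */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("power IRQ handled\n");
	return pthread_join(t, NULL);
}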
+/* + * sdhci_msm_check_power_status should be called on register writes that can + * toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW. The state + * to which the register write will change the IO lines should be passed as + * the req_type argument. This function checks whether the IO lines are + * already in the expected state and waits for the power irq only if a power + * irq is expected to be triggered, based on the current and expected IO line + * states. + */ +static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + bool done = false; + u32 val = SWITCHABLE_SIGNALING_VOLTAGE; + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n", + mmc_hostname(host->mmc), __func__, req_type, + msm_host->curr_pwr_state, msm_host->curr_io_level); + + /* + * The power interrupt will not be generated for signal voltage + * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set. + * Since sdhci-msm-v5, this bit has been removed and SW must consider + * it as always set. + */ + if (!msm_host->mci_removed) + val = msm_host_readl(msm_host, host, + msm_offset->core_generics); + if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) && + !(val & SWITCHABLE_SIGNALING_VOLTAGE)) { + return; + } + + /* + * The IRQ for request type IO High/Low will be generated when there + * is a state change in the 1.8V enable bit (bit 3) of the + * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0, + * which indicates 3.3V IO voltage. So, when the MMC core layer tries + * to set it to 3.3V before card detection happens, the + * IRQ doesn't get triggered as there is no state change in this bit. + * The driver already handles this case by changing the IO voltage + * level to high as part of the controller power up sequence. Hence, + * check for host->pwr to handle a case where an IO voltage high + * request is issued even before controller power up. + */ + if ((req_type & REQ_IO_HIGH) && !host->pwr) { + pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n", + mmc_hostname(host->mmc), req_type); + return; + } + if ((req_type & msm_host->curr_pwr_state) || + (req_type & msm_host->curr_io_level)) + done = true; + /* + * This is needed here to handle cases where register writes will + * not change the current bus state or io level of the controller. + * In this case, no power irq will be triggered and we should + * not wait. + */ + if (!done) { + if (!wait_event_timeout(msm_host->pwr_irq_wait, + msm_host->pwr_irq_flag, + msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) + dev_warn(&msm_host->pdev->dev, + "%s: pwr_irq for req: (%d) timed out\n", + mmc_hostname(host->mmc), req_type); + } + pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), + __func__, req_type); +}
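The core of sdhci_msm_check_power_status() is a cheap predicate: if the requested state is already reflected in curr_pwr_state or curr_io_level, no power IRQ will ever fire and waiting would only time out. A sketch of that predicate, with illustrative REQ_* bit values (the driver's actual values are internal defines):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative request/state bits; the driver's values may differ. */
#define REQ_BUS_OFF	(1u << 0)
#define REQ_BUS_ON	(1u << 1)
#define REQ_IO_LOW	(1u << 2)
#define REQ_IO_HIGH	(1u << 3)

/* True when the controller is already in the requested state, i.e. no
 * power IRQ is expected and the caller must not block waiting for one. */
static bool already_done(uint32_t req_type, uint32_t curr_pwr_state,
			 uint32_t curr_io_level)
{
	return (req_type & curr_pwr_state) || (req_type & curr_io_level);
}

int main(void)
{
	/* Bus already on, IO already high: a REQ_IO_HIGH must not wait. */
	printf("%d\n", already_done(REQ_IO_HIGH, REQ_BUS_ON, REQ_IO_HIGH));
	/* IO currently low: a REQ_IO_HIGH should wait for the IRQ. */
	printf("%d\n", already_done(REQ_IO_HIGH, REQ_BUS_ON, REQ_IO_LOW));
	return 0;
}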
+ +static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_msm_offset *msm_offset = + msm_host->offset; + + pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n", + mmc_hostname(host->mmc), + msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status), + msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask), + msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl)); +} + +static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct mmc_host *mmc = host->mmc; + u32 irq_status, irq_ack = 0; + int retry = 10, ret; + u32 pwr_state = 0, io_level = 0; + u32 config; + const struct sdhci_msm_offset *msm_offset = msm_host->offset; + + irq_status = msm_host_readl(msm_host, host, + msm_offset->core_pwrctl_status); + irq_status &= INT_MASK; + + msm_host_writel(msm_host, irq_status, host, + msm_offset->core_pwrctl_clear); + + /* + * There is a rare HW scenario where the first clear pulse could be + * lost when the actual reset and the clear/read of the status register + * happen at the same time. Hence, retry at least 10 times to make + * sure the status register is cleared. Otherwise, this will result in + * a spurious power IRQ, resulting in system instability. + */ + while (irq_status & msm_host_readl(msm_host, host, + msm_offset->core_pwrctl_status)) { + if (retry == 0) { + pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n", + mmc_hostname(host->mmc), irq_status); + sdhci_msm_dump_pwr_ctrl_regs(host); + WARN_ON(1); + break; + } + msm_host_writel(msm_host, irq_status, host, + msm_offset->core_pwrctl_clear); + retry--; + udelay(10); + } + + /* Handle BUS ON/OFF */ + if (irq_status & CORE_PWRCTL_BUS_ON) { + pwr_state = REQ_BUS_ON; + io_level = REQ_IO_HIGH; + } + if (irq_status & CORE_PWRCTL_BUS_OFF) { + pwr_state = REQ_BUS_OFF; + io_level = REQ_IO_LOW; + } + + if (pwr_state) { + ret = sdhci_msm_set_vmmc(mmc); + if (!ret) + ret = sdhci_msm_set_vqmmc(msm_host, mmc, + pwr_state & REQ_BUS_ON); + if (!ret) + ret = sdhci_msm_set_pincfg(msm_host, + pwr_state & REQ_BUS_ON); + if (!ret) + irq_ack |= CORE_PWRCTL_BUS_SUCCESS; + else + irq_ack |= CORE_PWRCTL_BUS_FAIL; + } + + /* Handle IO LOW/HIGH */ + if (irq_status & CORE_PWRCTL_IO_LOW) + io_level = REQ_IO_LOW; + + if (irq_status & CORE_PWRCTL_IO_HIGH) + io_level = REQ_IO_HIGH; + + if (io_level) + irq_ack |= CORE_PWRCTL_IO_SUCCESS; + + if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) { + ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios); + if (ret < 0) { + dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n", + mmc_hostname(mmc), ret, + mmc->ios.signal_voltage, mmc->ios.vdd, + irq_status); + irq_ack |= CORE_PWRCTL_IO_FAIL; + } + } + + /* + * The driver has to acknowledge the interrupt, switch voltages and + * report back to this register if it succeeded or not. The voltage + * switches are handled by the sdhci core, so just report success. + */ + msm_host_writel(msm_host, irq_ack, host, + msm_offset->core_pwrctl_ctl); + + /* + * If we don't have info regarding the voltage levels supported by + * regulators, don't change the IO PAD PWR SWITCH. + */ + if (msm_host->caps_0 & CORE_VOLT_SUPPORT) { + u32 new_config; + /* + * We should unset the IO PAD PWR switch only if the register + * write can set the IO lines high and the regulator also + * switches to 3 V. Else, we should keep the IO PAD PWR switch + * set. + * This is applicable to certain targets where the eMMC vccq + * supply is only 1.8V. In such targets, even during + * REQ_IO_HIGH, the IO PAD PWR switch must be kept set to + * reflect the actual regulator voltage. This way, during + * initialization of controllers with only 1.8V, we will set + * the IO PAD bit without waiting for a REQ_IO_LOW. + */ + config = readl_relaxed(host->ioaddr + + msm_offset->core_vendor_spec); + new_config = config; + + if ((io_level & REQ_IO_HIGH) && + (msm_host->caps_0 & CORE_3_0V_SUPPORT)) + new_config &= ~CORE_IO_PAD_PWR_SWITCH; + else if ((io_level & REQ_IO_LOW) || + (msm_host->caps_0 & CORE_1_8V_SUPPORT)) + new_config |= CORE_IO_PAD_PWR_SWITCH; + + if (config ^ new_config) + writel_relaxed(new_config, host->ioaddr + + msm_offset->core_vendor_spec); + } + + if (pwr_state) + msm_host->curr_pwr_state = pwr_state; + if (io_level) + msm_host->curr_io_level = io_level; + + dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n", + mmc_hostname(msm_host->mmc), __func__, irq, irq_status, + irq_ack); +}
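The clear-and-reread loop in sdhci_msm_handle_pwr_irq() guards against a lost clear pulse. The same bounded-retry shape can be exercised in isolation; in the model below the fake register deliberately drops the first clear (the 10-try budget mirrors the driver, the register behaviour is simulated):

#include <stdint.h>
#include <stdio.h>

static uint32_t status = 0x3;	/* latched bits pending */
static int dropped_clears = 1;	/* simulate one lost clear pulse */

static void write_clear(uint32_t bits)
{
	if (dropped_clears > 0) {
		dropped_clears--;	/* hardware "loses" this pulse */
		return;
	}
	status &= ~bits;
}

int main(void)
{
	uint32_t irq_status = status;
	int retry = 10;

	write_clear(irq_status);
	while (irq_status & status) {	/* still latched? clear again */
		if (retry-- == 0) {
			fprintf(stderr, "timed out clearing status\n");
			return 1;
		}
		write_clear(irq_status);
	}
	printf("status cleared, %d retries left\n", retry);
	return 0;
}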
+ +static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) +{ + struct sdhci_host *host = (struct sdhci_host *)data; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_msm_handle_pwr_irq(host, irq); + msm_host->pwr_irq_flag = 1; + sdhci_msm_complete_pwr_irq_wait(msm_host); + + return IRQ_HANDLED; +} + +static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct clk *core_clk = msm_host->bulk_clks[0].clk; + + return clk_round_rate(core_clk, ULONG_MAX); +} + +static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) +{ + return SDHCI_MSM_MIN_CLOCK; +} + +/* + * __sdhci_msm_set_clock - sdhci_msm clock control. + * + * Description: + * The MSM controller does not use an internal divider and instead + * directly controls the GCC clock as per the HW recommendation. + */ +static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + /* + * Keep actual_clock as zero: + * - there is no divider used, so there is no need for actual_clock. + * - the MSM controller uses SDCLK for the data timeout calculation; if + * actual_clock is zero, host->clock is used for the calculation. + */ + host->mmc->actual_clock = 0; + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + /* + * The MSM controller does not use a clock divider. + * Thus read SDHCI_CLOCK_CONTROL and only enable the + * clock with no divider value programmed. + */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + sdhci_enable_clk(host, clk); +} + +/* sdhci_msm_set_clock - Called with (host->lock) spinlock held.
*/ +static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + if (!clock) { + msm_host->clk_rate = clock; + goto out; + } + + sdhci_msm_hc_select_mode(host); + + msm_set_clock_rate_for_bus_mode(host, clock); +out: + __sdhci_msm_set_clock(host, clock); +} + +/*****************************************************************************\ + * * + * MSM Command Queue Engine (CQE) * + * * +\*****************************************************************************/ + +static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + return 0; +} + +static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 ctrl; + + /* + * When CQE is halted, the legacy SDHCI path operates only + * on 16-byte descriptors in 64-bit mode. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 16; + + spin_lock_irqsave(&host->lock, flags); + + /* + * During CQE command transfers, the command complete bit gets + * latched. So s/w should clear the command complete interrupt status + * when CQE is either halted or disabled. Otherwise an unexpected + * SDHCI legacy interrupt gets triggered when CQE is halted/disabled. + */ + ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); + ctrl |= SDHCI_INT_RESPONSE; + sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); + + spin_unlock_irqrestore(&host->lock, flags); + + sdhci_cqe_disable(mmc, recovery); +} + +static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + u32 count, start = 15; + + __sdhci_set_timeout(host, cmd); + count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL); + /* + * Update the software timeout value if it is less than the hardware + * data timeout value. The Qcom SoC hardware data timeout value is + * calculated as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock. + */ + if (cmd && cmd->data && host->clock > 400000 && + host->clock <= 50000000 && + ((1 << (count + start)) > (10 * host->clock))) + host->data_timeout = 22LL * NSEC_PER_SEC; +} + +static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_msm_cqe_disable, +};
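sdhci_msm_set_timeout() above is easiest to sanity-check with concrete numbers: with start = 15, the check (1 << (count + start)) > 10 * host->clock is exactly "the hardware timeout of 4 * 2^(count + 13) clock periods exceeds 10 seconds". A worked example with an assumed count of 14 and a 50MHz SDCLK:

#include <stdio.h>

int main(void)
{
	unsigned int start = 15;
	unsigned int count = 14;		/* example TIMEOUT_CONTROL value */
	unsigned long clock = 50000000;		/* 50MHz SDCLK */

	/* HW data timeout in clock cycles: 4 * 2^(count + 13) == 2^(count + 15) */
	unsigned long long cycles = 1ULL << (count + start);
	double seconds = (double)cycles / clock;

	printf("hw timeout: %llu cycles = %.2f s\n", cycles, seconds);

	/* The driver's check: widen the software timeout past 10 s of SDCLK. */
	if (cycles > 10ULL * clock)
		printf("software timeout widened to 22 s\n");
	return 0;
}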
+ +static int sdhci_msm_cqe_add_host(struct sdhci_host *host, + struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + u32 cqcfg; + int ret; + + /* + * When CQE is halted, the SDHC operates only on 16-byte ADMA + * descriptors. So ensure the ADMA table is allocated for 16-byte + * descriptors. + */ + if (host->caps & SDHCI_CAN_64BIT) + host->alloc_desc_sz = 16; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = cqhci_pltfm_init(pdev); + if (IS_ERR(cq_host)) { + ret = PTR_ERR(cq_host); + dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); + goto cleanup; + } + + msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host->ops = &sdhci_msm_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) { + dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", + mmc_hostname(host->mmc), ret); + goto cleanup; + } + + /* Disable cqe reset due to cqe enable signal */ + cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); + cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; + cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); + + /* + * The SDHC expects 12-byte ADMA descriptors until CQE is enabled. + * So limit desc_sz to 12 so that the data commands that are sent + * during card initialization (before CQE gets enabled) get executed + * without any issues. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 12; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + dev_info(&pdev->dev, "%s: CQE init: success\n", + mmc_hostname(host->mmc)); + return ret; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + +/* + * Platform specific register write functions. This is so that, if any + * register write needs to be followed up by platform specific actions, + * they can be added here. These functions can go to sleep when writes + * to certain registers are done. + * These functions rely on sdhci_set_ios not using a spinlock. + */ +static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u32 req_type = 0; + + switch (reg) { + case SDHCI_HOST_CONTROL2: + req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW : + REQ_IO_HIGH; + break; + case SDHCI_SOFTWARE_RESET: + if (host->pwr && (val & SDHCI_RESET_ALL)) + req_type = REQ_BUS_OFF; + break; + case SDHCI_POWER_CONTROL: + req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; + break; + case SDHCI_TRANSFER_MODE: + msm_host->transfer_mode = val; + break; + case SDHCI_COMMAND: + if (!msm_host->use_cdr) + break; + if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 && + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK) + sdhci_msm_set_cdr(host, true); + else + sdhci_msm_set_cdr(host, false); + break; + } + + if (req_type) { + msm_host->pwr_irq_flag = 0; + /* + * Since this register write may trigger a power irq, ensure + * all previous register writes are complete by this point. + */ + mb(); + } + return req_type; +}
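__sdhci_msm_check_write() above is, at its core, a pure mapping from (register, value) to the power-IRQ request type expected from the write, which makes it easy to test in isolation. A condensed sketch with stand-in register offsets and flag values (the CDR bookkeeping is omitted; the real offsets live in sdhci.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the SDHCI register offsets and REQ_* values. */
enum { HOST_CONTROL2 = 0x3e, SOFTWARE_RESET = 0x2f, POWER_CONTROL = 0x29 };
enum { REQ_NONE = 0, REQ_BUS_OFF, REQ_BUS_ON, REQ_IO_LOW, REQ_IO_HIGH };
#define CTRL_VDD_180	0x0008
#define RESET_ALL	0x01

static int check_write(int reg, uint16_t val, int host_pwr)
{
	switch (reg) {
	case HOST_CONTROL2:	/* 1.8V enable bit selects the IO level */
		return (val & CTRL_VDD_180) ? REQ_IO_LOW : REQ_IO_HIGH;
	case SOFTWARE_RESET:	/* a full reset powers the bus off */
		return (host_pwr && (val & RESET_ALL)) ? REQ_BUS_OFF : REQ_NONE;
	case POWER_CONTROL:	/* zero turns the bus off, non-zero on */
		return val ? REQ_BUS_ON : REQ_BUS_OFF;
	}
	return REQ_NONE;
}

int main(void)
{
	assert(check_write(HOST_CONTROL2, CTRL_VDD_180, 1) == REQ_IO_LOW);
	assert(check_write(POWER_CONTROL, 0, 1) == REQ_BUS_OFF);
	assert(check_write(SOFTWARE_RESET, RESET_ALL, 0) == REQ_NONE);
	printf("req_type mapping ok\n");
	return 0;
}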
+ +/* This function may sleep */ +static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) +{ + u32 req_type = 0; + + req_type = __sdhci_msm_check_write(host, val, reg); + writew_relaxed(val, host->ioaddr + reg); + + if (req_type) + sdhci_msm_check_power_status(host, req_type); +} + +/* This function may sleep */ +static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) +{ + u32 req_type = 0; + + req_type = __sdhci_msm_check_write(host, val, reg); + + writeb_relaxed(val, host->ioaddr + reg); + + if (req_type) + sdhci_msm_check_power_status(host, req_type); +} + +static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host) +{ + struct mmc_host *mmc = msm_host->mmc; + struct regulator *supply = mmc->supply.vqmmc; + u32 caps = 0, config; + struct sdhci_host *host = mmc_priv(mmc); + const struct sdhci_msm_offset *msm_offset = msm_host->offset; + + if (!IS_ERR(mmc->supply.vqmmc)) { + if (regulator_is_supported_voltage(supply, 1700000, 1950000)) + caps |= CORE_1_8V_SUPPORT; + if (regulator_is_supported_voltage(supply, 2700000, 3600000)) + caps |= CORE_3_0V_SUPPORT; + + if (!caps) + pr_warn("%s: 1.8/3V not supported for vqmmc\n", + mmc_hostname(mmc)); + } + + if (caps) { + /* + * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH + * bit can be used as required later on. + */ + u32 io_level = msm_host->curr_io_level; + + config = readl_relaxed(host->ioaddr + + msm_offset->core_vendor_spec); + config |= CORE_IO_PAD_PWR_SWITCH_EN; + + if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT)) + config &= ~CORE_IO_PAD_PWR_SWITCH; + else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT)) + config |= CORE_IO_PAD_PWR_SWITCH; + + writel_relaxed(config, + host->ioaddr + msm_offset->core_vendor_spec); + } + msm_host->caps_0 |= caps; + pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps); +} + +static void sdhci_msm_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL)) + cqhci_deactivate(host->mmc); + sdhci_reset(host, mask); +} + +static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host) +{ + int ret; + + ret = mmc_regulator_get_supply(msm_host->mmc); + if (ret) + return ret; + + sdhci_msm_set_regulator_caps(msm_host); + + return 0; +}
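sdhci_msm_set_regulator_caps() above derives the pad capabilities purely from which voltage windows vqmmc can serve. The mapping can be shown as a standalone function; the windows (1.7-1.95V and 2.7-3.6V) match the code above, while the cap bit values and the overlap test standing in for regulator_is_supported_voltage() are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_1_8V (1u << 0)	/* illustrative cap bits */
#define CAP_3_0V (1u << 1)

/* True if the supply can output some voltage inside [min_uv, max_uv]. */
static bool supports_window(int reg_min_uv, int reg_max_uv, int min_uv, int max_uv)
{
	return reg_max_uv >= min_uv && reg_min_uv <= max_uv;
}

static uint32_t regulator_caps(int reg_min_uv, int reg_max_uv)
{
	uint32_t caps = 0;

	if (supports_window(reg_min_uv, reg_max_uv, 1700000, 1950000))
		caps |= CAP_1_8V;
	if (supports_window(reg_min_uv, reg_max_uv, 2700000, 3600000))
		caps |= CAP_3_0V;
	return caps;
}

int main(void)
{
	/* A fixed 1.8V-only vqmmc supply gets only the 1.8V cap. */
	printf("caps=0x%x\n", regulator_caps(1700000, 1950000));
	return 0;
}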
+ +static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u16 ctrl, status; + + /* + * Signal Voltage Switching is only applicable for Host Controllers + * v3.00 and above. + */ + if (host->version < SDHCI_SPEC_300) + return 0; + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + if (!(host->flags & SDHCI_SIGNALING_330)) + return -EINVAL; + + /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ + ctrl &= ~SDHCI_CTRL_VDD_180; + break; + case MMC_SIGNAL_VOLTAGE_180: + if (!(host->flags & SDHCI_SIGNALING_180)) + return -EINVAL; + + /* Enable 1.8V Signal Enable in the Host Control2 register */ + ctrl |= SDHCI_CTRL_VDD_180; + break; + + default: + return -EINVAL; + } + + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* Wait for 5ms */ + usleep_range(5000, 5500); + + /* regulator output should be stable within 5 ms */ + status = ctrl & SDHCI_CTRL_VDD_180; + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if ((ctrl & SDHCI_CTRL_VDD_180) == status) + return 0; + + dev_warn(mmc_dev(mmc), "%s: Regulator output did not become stable\n", + mmc_hostname(mmc)); + + return -EAGAIN; +} + +#define DRIVER_NAME "sdhci_msm" +#define SDHCI_MSM_DUMP(f, x...) \ + pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) + +static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_msm_offset *msm_offset = msm_host->offset; + + SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n"); + + SDHCI_MSM_DUMP( + "DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n", + readl_relaxed(host->ioaddr + msm_offset->core_dll_status), + readl_relaxed(host->ioaddr + msm_offset->core_dll_config), + readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2)); + SDHCI_MSM_DUMP( + "DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n", + readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3), + readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl), + readl_relaxed(host->ioaddr + msm_offset->core_ddr_config)); + SDHCI_MSM_DUMP( + "Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n", + readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec), + readl_relaxed(host->ioaddr + + msm_offset->core_vendor_spec_func2), + readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3)); +} + +static const struct sdhci_msm_variant_ops mci_var_ops = { + .msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed, + .msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed, +}; + +static const struct sdhci_msm_variant_ops v5_var_ops = { + .msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed, + .msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed, +}; + +static const struct sdhci_msm_variant_info sdhci_msm_mci_var = { + .var_ops = &mci_var_ops, + .offset = &sdhci_msm_mci_offset, +}; + +static const struct sdhci_msm_variant_info sdhci_msm_v5_var = { + .mci_removed = true, + .var_ops = &v5_var_ops, + .offset = &sdhci_msm_v5_offset, +}; + +static const struct sdhci_msm_variant_info sdm845_sdhci_var = { + .mci_removed = true, + .restore_dll_config = true, + .var_ops = &v5_var_ops, + .offset = &sdhci_msm_v5_offset, +}; + +static const struct sdhci_msm_variant_info sm8250_sdhci_var = { + .mci_removed = true, + .uses_tassadar_dll = true, + .var_ops = &v5_var_ops, + .offset = &sdhci_msm_v5_offset, +}; + +static const struct of_device_id sdhci_msm_dt_match[] = { + {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var}, + {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var}, + {.compatible = "qcom,sdm670-sdhci", .data =
&sdm845_sdhci_var}, + {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var}, + {.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var}, + {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var}, + {}, +}; + +MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); + +static const struct sdhci_ops sdhci_msm_ops = { + .reset = sdhci_msm_reset, + .set_clock = sdhci_msm_set_clock, + .get_min_clock = sdhci_msm_get_min_clock, + .get_max_clock = sdhci_msm_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = sdhci_msm_set_uhs_signaling, + .write_w = sdhci_msm_writew, + .write_b = sdhci_msm_writeb, + .irq = sdhci_msm_cqe_irq, + .dump_vendor_regs = sdhci_msm_dump_vendor_regs, + .set_power = sdhci_set_power_noreg, + .set_timeout = sdhci_msm_set_timeout, +}; + +static const struct sdhci_pltfm_data sdhci_msm_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &sdhci_msm_ops, +}; + +static inline void sdhci_msm_get_of_property(struct platform_device *pdev, + struct sdhci_host *host) +{ + struct device_node *node = pdev->dev.of_node; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + if (of_property_read_u32(node, "qcom,ddr-config", + &msm_host->ddr_config)) + msm_host->ddr_config = DDR_CONFIG_POR_VAL; + + of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); + + if (of_device_is_compatible(node, "qcom,msm8916-sdhci")) + host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; +} + +static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) +{ + struct reset_control *reset; + int ret = 0; + + reset = reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(reset)) + return dev_err_probe(dev, PTR_ERR(reset), + "unable to acquire core_reset\n"); + + if (!reset) + return ret; + + ret = reset_control_assert(reset); + if (ret) { + reset_control_put(reset); + return dev_err_probe(dev, ret, "core_reset assert failed\n"); + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. + */ + usleep_range(200, 210); + + ret = reset_control_deassert(reset); + if (ret) { + reset_control_put(reset); + return dev_err_probe(dev, ret, "core_reset deassert failed\n"); + } + + usleep_range(200, 210); + reset_control_put(reset); + + return ret; +} + +static int sdhci_msm_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_msm_host *msm_host; + struct clk *clk; + int ret; + u16 host_version, core_minor; + u32 core_version, config; + u8 core_major; + const struct sdhci_msm_offset *msm_offset; + const struct sdhci_msm_variant_info *var_info; + struct device_node *node = pdev->dev.of_node; + + host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); + if (IS_ERR(host)) + return PTR_ERR(host); + + host->sdma_boundary = 0; + pltfm_host = sdhci_priv(host); + msm_host = sdhci_pltfm_priv(pltfm_host); + msm_host->mmc = host->mmc; + msm_host->pdev = pdev; + + ret = mmc_of_parse(host->mmc); + if (ret) + goto pltfm_free; + + /* + * Based on the compatible string, load the required msm host info from + * the data associated with the version info. 
+ */ + var_info = of_device_get_match_data(&pdev->dev); + + msm_host->mci_removed = var_info->mci_removed; + msm_host->restore_dll_config = var_info->restore_dll_config; + msm_host->var_ops = var_info->var_ops; + msm_host->offset = var_info->offset; + msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll; + + msm_offset = msm_host->offset; + + sdhci_get_of_property(pdev); + sdhci_msm_get_of_property(pdev, host); + + msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; + + ret = sdhci_msm_gcc_reset(&pdev->dev, host); + if (ret) + goto pltfm_free; + + /* Setup SDCC bus voter clock. */ + msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(msm_host->bus_clk)) { + /* Vote for max. clk rate for max. performance */ + ret = clk_set_rate(msm_host->bus_clk, INT_MAX); + if (ret) + goto pltfm_free; + ret = clk_prepare_enable(msm_host->bus_clk); + if (ret) + goto pltfm_free; + } + + /* Setup main peripheral bus clock */ + clk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); + goto bus_clk_disable; + } + msm_host->bulk_clks[1].clk = clk; + + /* Setup SDC MMC clock */ + clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); + goto bus_clk_disable; + } + msm_host->bulk_clks[0].clk = clk; + + /* Check for optional interconnect paths */ + ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL); + if (ret) + goto bus_clk_disable; + + msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core"); + if (IS_ERR(msm_host->opp_table)) { + ret = PTR_ERR(msm_host->opp_table); + goto bus_clk_disable; + } + + /* OPP table is optional */ + ret = dev_pm_opp_of_add_table(&pdev->dev); + if (ret && ret != -ENODEV) { + dev_err(&pdev->dev, "Invalid OPP table in Device tree\n"); + goto opp_put_clkname; + } + + /* Vote for maximum clock rate for maximum performance */ + ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX); + if (ret) + dev_warn(&pdev->dev, "core clock boost failed\n"); + + clk = devm_clk_get(&pdev->dev, "cal"); + if (IS_ERR(clk)) + clk = NULL; + msm_host->bulk_clks[2].clk = clk; + + clk = devm_clk_get(&pdev->dev, "sleep"); + if (IS_ERR(clk)) + clk = NULL; + msm_host->bulk_clks[3].clk = clk; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); + if (ret) + goto opp_cleanup; + + /* + * The xo clock is needed for the FLL feature of cm_dll. + * If the xo clock is not specified in DT, warn and proceed.
+ */ + msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo"); + if (IS_ERR(msm_host->xo_clk)) { + ret = PTR_ERR(msm_host->xo_clk); + dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); + } + + if (!msm_host->mci_removed) { + msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(msm_host->core_mem)) { + ret = PTR_ERR(msm_host->core_mem); + goto clk_disable; + } + } + + /* Reset the vendor spec register to power on reset state */ + writel_relaxed(CORE_VENDOR_SPEC_POR_VAL, + host->ioaddr + msm_offset->core_vendor_spec); + + if (!msm_host->mci_removed) { + /* Set HC_MODE_EN bit in HC_MODE register */ + msm_host_writel(msm_host, HC_MODE_EN, host, + msm_offset->core_hc_mode); + config = msm_host_readl(msm_host, host, + msm_offset->core_hc_mode); + config |= FF_CLK_SW_RST_DIS; + msm_host_writel(msm_host, config, host, + msm_offset->core_hc_mode); + } + + host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); + dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", + host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT)); + + core_version = msm_host_readl(msm_host, host, + msm_offset->core_mci_version); + core_major = (core_version & CORE_VERSION_MAJOR_MASK) >> + CORE_VERSION_MAJOR_SHIFT; + core_minor = core_version & CORE_VERSION_MINOR_MASK; + dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n", + core_version, core_major, core_minor); + + if (core_major == 1 && core_minor >= 0x42) + msm_host->use_14lpp_dll_reset = true; + + /* + * SDCC 5 controllers with major version 1, minor version 0x34 and + * later with HS 400 mode support will use CM DLL instead of CDC LP + * 533 DLL. + */ + if (core_major == 1 && core_minor < 0x34) + msm_host->use_cdclp533 = true; + + /* + * Support for some capabilities is not advertised by newer + * controller versions and must be explicitly enabled. + */ + if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) { + config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); + config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT; + writel_relaxed(config, host->ioaddr + + msm_offset->core_vendor_spec_capabilities0); + } + + if (core_major == 1 && core_minor >= 0x49) + msm_host->updated_ddr_cfg = true; + + ret = sdhci_msm_register_vreg(msm_host); + if (ret) + goto clk_disable; + + /* + * The power on reset state may trigger a power irq if the previous + * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling + * the pwr irq interrupt in GIC, any pending power irq interrupt should + * be acknowledged. Otherwise the power irq interrupt handler would + * fire prematurely. + */ + sdhci_msm_handle_pwr_irq(host, 0); + + /* + * Ensure that the above writes are propagated before interrupt + * enablement in GIC.
+ */ + mb(); + + /* Setup IRQ for handling power/voltage tasks with PMIC */ + msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); + if (msm_host->pwr_irq < 0) { + ret = msm_host->pwr_irq; + goto clk_disable; + } + + sdhci_msm_init_pwr_irq_wait(msm_host); + /* Enable pwr irq interrupts */ + msm_host_writel(msm_host, INT_MASK, host, + msm_offset->core_pwrctl_mask); + + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, + sdhci_msm_pwr_irq, IRQF_ONESHOT, + dev_name(&pdev->dev), host); + if (ret) { + dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); + goto clk_disable; + } + + msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, + MSM_MMC_AUTOSUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(&pdev->dev); + + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_msm_start_signal_voltage_switch; + host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; + if (of_property_read_bool(node, "supports-cqe")) + ret = sdhci_msm_cqe_add_host(host, pdev); + else + ret = sdhci_add_host(host); + if (ret) + goto pm_runtime_disable; + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +pm_runtime_disable: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); +clk_disable: + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); +opp_cleanup: + dev_pm_opp_of_remove_table(&pdev->dev); +opp_put_clkname: + dev_pm_opp_put_clkname(msm_host->opp_table); +bus_clk_disable: + if (!IS_ERR(msm_host->bus_clk)) + clk_disable_unprepare(msm_host->bus_clk); +pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_msm_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == + 0xffffffff); + + sdhci_remove_host(host, dead); + + dev_pm_opp_of_remove_table(&pdev->dev); + dev_pm_opp_put_clkname(msm_host->opp_table); + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); + if (!IS_ERR(msm_host->bus_clk)) + clk_disable_unprepare(msm_host->bus_clk); + sdhci_pltfm_free(pdev); + return 0; +} + +static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + /* Drop the performance vote */ + dev_pm_opp_set_rate(dev, 0); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); + + return 0; +} + +static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); + if (ret) + return ret; + /* + * Whenever the core clock is gated dynamically, the SDR DLL settings + * need to be restored when the clock is ungated.
+ */ + if (msm_host->restore_dll_config && msm_host->clk_rate) + ret = sdhci_msm_restore_sdr_dll_config(host); + + dev_pm_opp_set_rate(dev, msm_host->clk_rate); + + return ret; +} + +static const struct dev_pm_ops sdhci_msm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, + sdhci_msm_runtime_resume, + NULL) +}; + +static struct platform_driver sdhci_msm_driver = { + .probe = sdhci_msm_probe, + .remove = sdhci_msm_remove, + .driver = { + .name = "sdhci_msm", + .of_match_table = sdhci_msm_dt_match, + .pm = &sdhci_msm_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +module_platform_driver(sdhci_msm_driver); + +MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c new file mode 100644 index 000000000..9da49dc15 --- /dev/null +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -0,0 +1,1729 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Arasan Secure Digital Host Controller Interface. + * Copyright (C) 2011 - 2012 Michal Simek + * Copyright (c) 2012 Wind River Systems, Inc. + * Copyright (C) 2013 Pengutronix e.K. + * Copyright (C) 2013 Xilinx Inc. + * + * Based on sdhci-of-esdhc.c + * + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. + * + * Authors: Xiaobo Xie + * Anton Vorontsov + */ + +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> +#include <linux/of.h> +#include <linux/firmware/xlnx-zynqmp.h> + +#include "cqhci.h" +#include "sdhci-cqhci.h" +#include "sdhci-pltfm.h" + +#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 + +#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8 +#define SDHCI_ARASAN_ITAPDLY_SEL_MASK 0xFF + +#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC +#define SDHCI_ARASAN_OTAPDLY_SEL_MASK 0x3F + +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 +#define VENDOR_ENHANCED_STROBE BIT(0) + +#define PHY_CLK_TOO_SLOW_HZ 400000 + +#define SDHCI_ITAPDLY_CHGWIN 0x200 +#define SDHCI_ITAPDLY_ENABLE 0x100 +#define SDHCI_OTAPDLY_ENABLE 0x40 + +/* Default settings for ZynqMP Clock Phases */ +#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0} +#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0} + +#define VERSAL_ICLK_PHASE {0, 132, 132, 0, 132, 0, 0, 162, 90, 0, 0} +#define VERSAL_OCLK_PHASE {0, 60, 48, 0, 48, 72, 90, 36, 60, 90, 0} +
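HIWORD_UPDATE() below packs both a new field value and its write mask into one 32-bit word; the syscon hardware then applies the low half only where the high half is set, which is what makes the update atomic without a read-modify-write. A standalone demonstration of those semantics, where apply_hiword() models what the SoC's register block does in hardware:

#include <stdint.h>
#include <stdio.h>

#define HIWORD_UPDATE(val, mask, shift) \
	((val) << (shift) | (mask) << ((shift) + 16))

/* Model of the hardware: low 16 bits are data, high 16 bits a write mask. */
static uint32_t apply_hiword(uint32_t reg, uint32_t update)
{
	uint32_t wrmask = update >> 16;

	return (reg & ~wrmask) | (update & wrmask & 0xffff);
}

int main(void)
{
	uint32_t reg = 0xabcd;	/* current register contents */

	/* Set an 8-bit field at bit 8 to 0x5a without touching bits 0-7. */
	reg = apply_hiword(reg, HIWORD_UPDATE(0x5au, 0xffu, 8));
	printf("reg=0x%04x\n", (unsigned int)reg);	/* -> 0x5acd */
	return 0;
}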
+/* + * On some SoCs the syscon area has a feature where the upper 16-bits of + * each 32-bit register act as a write mask for the lower 16-bits. This allows + * atomic updates of the register without locking. This macro is used on SoCs + * that have that feature. + */ +#define HIWORD_UPDATE(val, mask, shift) \ + ((val) << (shift) | (mask) << ((shift) + 16)) + +/** + * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map + * + * @reg: Offset within the syscon of the register containing this field + * @width: Number of bits for this field + * @shift: Bit offset within @reg of this field (or -1 if not avail) + */ +struct sdhci_arasan_soc_ctl_field { + u32 reg; + u16 width; + s16 shift; +}; + +/** + * struct sdhci_arasan_soc_ctl_map - Map in syscon to corecfg registers + * + * @baseclkfreq: Where to find corecfg_baseclkfreq + * @clockmultiplier: Where to find corecfg_clockmultiplier + * @support64b: Where to find SUPPORT64B bit + * @hiword_update: If true, use HIWORD_UPDATE to access the syscon + * + * It's up to the licensee of the Arasan IP block to make these available + * somewhere if needed. Presumably these will be scattered somewhere that's + * accessible via the syscon API. + */ +struct sdhci_arasan_soc_ctl_map { + struct sdhci_arasan_soc_ctl_field baseclkfreq; + struct sdhci_arasan_soc_ctl_field clockmultiplier; + struct sdhci_arasan_soc_ctl_field support64b; + bool hiword_update; +}; + +/** + * struct sdhci_arasan_clk_ops - Clock Operations for Arasan SD controller + * + * @sdcardclk_ops: The output clock related operations + * @sampleclk_ops: The sample clock related operations + */ +struct sdhci_arasan_clk_ops { + const struct clk_ops *sdcardclk_ops; + const struct clk_ops *sampleclk_ops; +}; + +/** + * struct sdhci_arasan_clk_data - Arasan Controller Clock Data. + * + * @sdcardclk_hw: Struct for the clock we might provide to a PHY. + * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw. + * @sampleclk_hw: Struct for the clock we might provide to a PHY. + * @sampleclk: Pointer to normal 'struct clock' for sampleclk_hw. + * @clk_phase_in: Array of Input Clock Phase Delays for all speed modes + * @clk_phase_out: Array of Output Clock Phase Delays for all speed modes + * @set_clk_delays: Function pointer for setting Clock Delays + * @clk_of_data: Platform specific runtime clock data storage pointer + */ +struct sdhci_arasan_clk_data { + struct clk_hw sdcardclk_hw; + struct clk *sdcardclk; + struct clk_hw sampleclk_hw; + struct clk *sampleclk; + int clk_phase_in[MMC_TIMING_MMC_HS400 + 1]; + int clk_phase_out[MMC_TIMING_MMC_HS400 + 1]; + void (*set_clk_delays)(struct sdhci_host *host); + void *clk_of_data; +}; + +/** + * struct sdhci_arasan_data - Arasan Controller Data + * + * @host: Pointer to the main SDHCI host structure. + * @clk_ahb: Pointer to the AHB clock + * @phy: Pointer to the generic phy + * @is_phy_on: True if the PHY is on; false if not. + * @has_cqe: True if controller has command queuing engine. + * @clk_data: Struct for the Arasan Controller Clock Data. + * @clk_ops: Struct for the Arasan Controller Clock Operations. + * @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers. + * @soc_ctl_map: Map to get offsets into soc_ctl registers. + * @quirks: Arasan deviations from spec.
+ */ +struct sdhci_arasan_data { + struct sdhci_host *host; + struct clk *clk_ahb; + struct phy *phy; + bool is_phy_on; + + bool has_cqe; + struct sdhci_arasan_clk_data clk_data; + const struct sdhci_arasan_clk_ops *clk_ops; + + struct regmap *soc_ctl_base; + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; + unsigned int quirks; + +/* Controller does not have CD wired and will not function normally without */ +#define SDHCI_ARASAN_QUIRK_FORCE_CDTEST BIT(0) +/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the + * internal clock even when the clock isn't stable */ +#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1) +/* + * Some of the Arasan variations might not have timing requirements + * met at 25MHz for Default Speed mode; those controllers work at + * 19MHz instead. + */ +#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2) +}; + +struct sdhci_arasan_of_data { + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; + const struct sdhci_pltfm_data *pdata; + const struct sdhci_arasan_clk_ops *clk_ops; +}; + +static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = { + .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 }, + .clockmultiplier = { .reg = 0xf02c, .width = 8, .shift = 0}, + .hiword_update = true, +}; + +static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = { + .baseclkfreq = { .reg = 0xa0, .width = 8, .shift = 2 }, + .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 }, + .hiword_update = false, +}; + +static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = { + .baseclkfreq = { .reg = 0x80, .width = 8, .shift = 2 }, + .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 }, + .hiword_update = false, +}; + +static const struct sdhci_arasan_soc_ctl_map intel_keembay_soc_ctl_map = { + .baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 }, + .clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 }, + .support64b = { .reg = 0x4, .width = 1, .shift = 24 }, + .hiword_update = false, +}; + +/** + * sdhci_arasan_syscon_write - Write to a field in soc_ctl registers + * + * @host: The sdhci_host + * @fld: The field to write to + * @val: The value to write + * + * This function allows writing to fields in sdhci_arasan_soc_ctl_map. + * Note that if a field is specified as not available (shift < 0) then + * this function will silently return an error code; it will be noisy + * and print errors for any other (unexpected) failures. + * + * Return: 0 on success and error value on error + */ +static int sdhci_arasan_syscon_write(struct sdhci_host *host, + const struct sdhci_arasan_soc_ctl_field *fld, + u32 val) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + struct regmap *soc_ctl_base = sdhci_arasan->soc_ctl_base; + u32 reg = fld->reg; + u16 width = fld->width; + s16 shift = fld->shift; + int ret; + + /* + * Silently return errors for shift < 0 so the caller doesn't have + * to check for fields which are optional. For required fields + * the caller needs to do something special anyway.
+ */ + if (shift < 0) + return -EINVAL; + + if (sdhci_arasan->soc_ctl_map->hiword_update) + ret = regmap_write(soc_ctl_base, reg, + HIWORD_UPDATE(val, GENMASK(width, 0), + shift)); + else + ret = regmap_update_bits(soc_ctl_base, reg, + GENMASK(shift + width, shift), + val << shift); + + /* Yell about (unexpected) regmap errors */ + if (ret) + pr_warn("%s: Regmap write fail: %d\n", + mmc_hostname(host->mmc), ret); + + return ret; +} + +static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data; + bool ctrl_phy = false; + + if (!IS_ERR(sdhci_arasan->phy)) { + if (!sdhci_arasan->is_phy_on && clock <= PHY_CLK_TOO_SLOW_HZ) { + /* + * If the PHY is off, set the clock to max speed and + * power the PHY on. + * + * Although the PHY docs apparently suggest power + * cycling when changing the clock, the PHY doesn't + * like to be powered on while at low speeds like those + * used in ID mode. Even worse is powering the PHY on + * while the clock is off. + * + * To work around the PHY limitations, the best we can + * do is to power it on at a faster speed and then slam + * through low speeds without power cycling. + */ + sdhci_set_clock(host, host->max_clk); + if (phy_power_on(sdhci_arasan->phy)) { + pr_err("%s: Cannot power on phy.\n", + mmc_hostname(host->mmc)); + return; + } + + sdhci_arasan->is_phy_on = true; + + /* + * We'll now fall through to the below case with + * ctrl_phy = false (so we won't turn off/on). The + * sdhci_set_clock() will set the real clock. + */ + } else if (clock > PHY_CLK_TOO_SLOW_HZ) { + /* + * At higher clock speeds the PHY is fine being power + * cycled and docs say you _should_ power cycle when + * changing clock speeds. + */ + ctrl_phy = true; + } + } + + if (ctrl_phy && sdhci_arasan->is_phy_on) { + phy_power_off(sdhci_arasan->phy); + sdhci_arasan->is_phy_on = false; + } + + if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) { + /* + * Some of the Arasan variations might not have timing + * requirements met at 25MHz for Default Speed mode; + * those controllers work at 19MHz instead. + */ + if (clock == DEFAULT_SPEED_MAX_DTR) + clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25; + } + + /* Set the Input and Output Clock Phase Delays */ + if (clk_data->set_clk_delays) + clk_data->set_clk_delays(host); + + sdhci_set_clock(host, clock); + + if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE) + /* + * Some controllers immediately report SDHCI_CLOCK_INT_STABLE + * after enabling the clock even though the clock is not + * stable. Trying to use a clock without waiting here results + * in EILSEQ while detecting some older/slower cards. The + * chosen delay is the maximum delay from sdhci_set_clock.
+		 */
+		msleep(20);
+
+	if (ctrl_phy) {
+		if (phy_power_on(sdhci_arasan->phy)) {
+			pr_err("%s: Cannot power on phy.\n",
+			       mmc_hostname(host->mmc));
+			return;
+		}
+
+		sdhci_arasan->is_phy_on = true;
+	}
+}
+
+static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
+					       struct mmc_ios *ios)
+{
+	u32 vendor;
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	vendor = sdhci_readl(host, SDHCI_ARASAN_VENDOR_REGISTER);
+	if (ios->enhanced_strobe)
+		vendor |= VENDOR_ENHANCED_STROBE;
+	else
+		vendor &= ~VENDOR_ENHANCED_STROBE;
+
+	sdhci_writel(host, vendor, SDHCI_ARASAN_VENDOR_REGISTER);
+}
+
+static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+{
+	u8 ctrl;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+
+	sdhci_and_cqhci_reset(host, mask);
+
+	if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
+		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+		ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN;
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	}
+}
+
+static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+				       struct mmc_ios *ios)
+{
+	switch (ios->signal_voltage) {
+	case MMC_SIGNAL_VOLTAGE_180:
+		/*
+		 * Please don't switch to 1V8: arasan,5.1 doesn't actually
+		 * use this setting to indicate the signal voltage, and the
+		 * controller's state machine would break if we forced 1V8
+		 * here. Treat it as a broken quirk and work around it by
+		 * reporting success without touching the hardware.
+		 */
+		return 0;
+	case MMC_SIGNAL_VOLTAGE_330:
+	case MMC_SIGNAL_VOLTAGE_120:
+		/* We don't support 3V3 and 1V2 */
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct sdhci_ops sdhci_arasan_ops = {
+	.set_clock = sdhci_arasan_set_clock,
+	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+	.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_arasan_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+	.set_power = sdhci_set_power_and_bus_voltage,
+};
+
+static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+	int cmd_error = 0;
+	int data_error = 0;
+
+	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+		return intmask;
+
+	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+	return 0;
+}
+
+static void sdhci_arasan_dumpregs(struct mmc_host *mmc)
+{
+	sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_arasan_cqe_enable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u32 reg;
+
+	reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+	while (reg & SDHCI_DATA_AVAILABLE) {
+		sdhci_readl(host, SDHCI_BUFFER);
+		reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+	}
+
+	sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = {
+	.enable = sdhci_arasan_cqe_enable,
+	.disable = sdhci_cqe_disable,
+	.dumpregs = sdhci_arasan_dumpregs,
+};
+
+static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+	.set_clock = sdhci_arasan_set_clock,
+	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+	.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_arasan_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+	.set_power = sdhci_set_power_and_bus_voltage,
+	.irq = sdhci_arasan_cqhci_irq,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
+	.ops = &sdhci_arasan_cqe_ops,
+	.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+			SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+};
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * sdhci_arasan_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the device in a low power state.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_arasan_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+	int ret;
+
+	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+		mmc_retune_needed(host->mmc);
+
+	if (sdhci_arasan->has_cqe) {
+		ret = cqhci_suspend(host->mmc);
+		if (ret)
+			return ret;
+	}
+
+	ret = sdhci_suspend_host(host);
+	if (ret)
+		return ret;
+
+	if (!IS_ERR(sdhci_arasan->phy) && sdhci_arasan->is_phy_on) {
+		ret = phy_power_off(sdhci_arasan->phy);
+		if (ret) {
+			dev_err(dev, "Cannot power off phy.\n");
+			if (sdhci_resume_host(host))
+				dev_err(dev, "Cannot resume host.\n");
+
+			return ret;
+		}
+		sdhci_arasan->is_phy_on = false;
+	}
+
+	clk_disable(pltfm_host->clk);
+	clk_disable(sdhci_arasan->clk_ahb);
+
+	return 0;
+}
+
+/**
+ * sdhci_arasan_resume - Resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_arasan_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+	int ret;
+
+	ret = clk_enable(sdhci_arasan->clk_ahb);
+	if (ret) {
+		dev_err(dev, "Cannot enable AHB clock.\n");
+		return ret;
+	}
+
+	ret = clk_enable(pltfm_host->clk);
+	if (ret) {
+		dev_err(dev, "Cannot enable SD clock.\n");
+		return ret;
+	}
+
+	if (!IS_ERR(sdhci_arasan->phy) && host->mmc->actual_clock) {
+		ret = phy_power_on(sdhci_arasan->phy);
+		if (ret) {
+			dev_err(dev, "Cannot power on phy.\n");
+			return ret;
+		}
+		sdhci_arasan->is_phy_on = true;
+	}
+
+	ret = sdhci_resume_host(host);
+	if (ret) {
+		dev_err(dev, "Cannot resume host.\n");
+		return ret;
+	}
+
+	if (sdhci_arasan->has_cqe)
+		return cqhci_resume(host->mmc);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
+			 sdhci_arasan_resume);
+
+/**
+ * sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ *
+ * Return the current actual rate of the SD card clock. This can be used
+ * to communicate with our PHY.
+ *
+ * Return: The card clock rate.
+ */
+static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
+							unsigned long parent_rate)
+{
+	struct sdhci_arasan_clk_data *clk_data =
+		container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+	struct sdhci_arasan_data *sdhci_arasan =
+		container_of(clk_data, struct sdhci_arasan_data, clk_data);
+	struct sdhci_host *host = sdhci_arasan->host;
+
+	return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sdcardclk_ops = {
+	.recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+};
+
+/**
+ * sdhci_arasan_sampleclk_recalc_rate - Return the sampling clock rate
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ *
+ * Return the current actual rate of the sampling clock. This can be used
+ * to communicate with our PHY.
+ *
+ * Return: The sample clock rate.
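+ *
+ * (Like the sdcardclk above, this simply reports the host's current
+ * mmc->actual_clock; both provider clocks track the one programmed rate.)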
+ */ +static unsigned long sdhci_arasan_sampleclk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sdhci_arasan_clk_data *clk_data = + container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw); + struct sdhci_arasan_data *sdhci_arasan = + container_of(clk_data, struct sdhci_arasan_data, clk_data); + struct sdhci_host *host = sdhci_arasan->host; + + return host->mmc->actual_clock; +} + +static const struct clk_ops arasan_sampleclk_ops = { + .recalc_rate = sdhci_arasan_sampleclk_recalc_rate, +}; + +/** + * sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays + * + * @hw: Pointer to the hardware clock structure. + * @degrees: The clock phase shift between 0 - 359. + * + * Set the SD Output Clock Tap Delays for Output path + * + * Return: 0 on success and error value on error + */ +static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees) +{ + struct sdhci_arasan_clk_data *clk_data = + container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw); + struct sdhci_arasan_data *sdhci_arasan = + container_of(clk_data, struct sdhci_arasan_data, clk_data); + struct sdhci_host *host = sdhci_arasan->host; + const char *clk_name = clk_hw_get_name(hw); + u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1; + u8 tap_delay, tap_max = 0; + int ret; + + /* This is applicable for SDHCI_SPEC_300 and above */ + if (host->version < SDHCI_SPEC_300) + return 0; + + switch (host->timing) { + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + /* For 50MHz clock, 30 Taps are available */ + tap_max = 30; + break; + case MMC_TIMING_UHS_SDR50: + /* For 100MHz clock, 15 Taps are available */ + tap_max = 15; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* For 200MHz clock, 8 Taps are available */ + tap_max = 8; + default: + break; + } + + tap_delay = (degrees * tap_max) / 360; + + /* Set the Clock Phase */ + ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_OUTPUT, tap_delay); + if (ret) + pr_err("Error setting Output Tap Delay\n"); + + /* Release DLL Reset */ + zynqmp_pm_sd_dll_reset(node_id, PM_DLL_RESET_RELEASE); + + return ret; +} + +static const struct clk_ops zynqmp_sdcardclk_ops = { + .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate, + .set_phase = sdhci_zynqmp_sdcardclk_set_phase, +}; + +/** + * sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays + * + * @hw: Pointer to the hardware clock structure. + * @degrees: The clock phase shift between 0 - 359. + * + * Set the SD Input Clock Tap Delays for Input path + * + * Return: 0 on success and error value on error + */ +static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees) +{ + struct sdhci_arasan_clk_data *clk_data = + container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw); + struct sdhci_arasan_data *sdhci_arasan = + container_of(clk_data, struct sdhci_arasan_data, clk_data); + struct sdhci_host *host = sdhci_arasan->host; + const char *clk_name = clk_hw_get_name(hw); + u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? 
NODE_SD_0 : NODE_SD_1; + u8 tap_delay, tap_max = 0; + int ret; + + /* This is applicable for SDHCI_SPEC_300 and above */ + if (host->version < SDHCI_SPEC_300) + return 0; + + /* Assert DLL Reset */ + zynqmp_pm_sd_dll_reset(node_id, PM_DLL_RESET_ASSERT); + + switch (host->timing) { + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + /* For 50MHz clock, 120 Taps are available */ + tap_max = 120; + break; + case MMC_TIMING_UHS_SDR50: + /* For 100MHz clock, 60 Taps are available */ + tap_max = 60; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* For 200MHz clock, 30 Taps are available */ + tap_max = 30; + default: + break; + } + + tap_delay = (degrees * tap_max) / 360; + + /* Set the Clock Phase */ + ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_INPUT, tap_delay); + if (ret) + pr_err("Error setting Input Tap Delay\n"); + + return ret; +} + +static const struct clk_ops zynqmp_sampleclk_ops = { + .recalc_rate = sdhci_arasan_sampleclk_recalc_rate, + .set_phase = sdhci_zynqmp_sampleclk_set_phase, +}; + +/** + * sdhci_versal_sdcardclk_set_phase - Set the SD Output Clock Tap Delays + * + * @hw: Pointer to the hardware clock structure. + * @degrees: The clock phase shift between 0 - 359. + * + * Set the SD Output Clock Tap Delays for Output path + * + * Return: 0 on success and error value on error + */ +static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees) +{ + struct sdhci_arasan_clk_data *clk_data = + container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw); + struct sdhci_arasan_data *sdhci_arasan = + container_of(clk_data, struct sdhci_arasan_data, clk_data); + struct sdhci_host *host = sdhci_arasan->host; + u8 tap_delay, tap_max = 0; + + /* This is applicable for SDHCI_SPEC_300 and above */ + if (host->version < SDHCI_SPEC_300) + return 0; + + switch (host->timing) { + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + /* For 50MHz clock, 30 Taps are available */ + tap_max = 30; + break; + case MMC_TIMING_UHS_SDR50: + /* For 100MHz clock, 15 Taps are available */ + tap_max = 15; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* For 200MHz clock, 8 Taps are available */ + tap_max = 8; + default: + break; + } + + tap_delay = (degrees * tap_max) / 360; + + /* Set the Clock Phase */ + if (tap_delay) { + u32 regval; + + regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER); + regval |= SDHCI_OTAPDLY_ENABLE; + sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER); + regval &= ~SDHCI_ARASAN_OTAPDLY_SEL_MASK; + regval |= tap_delay; + sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER); + } + + return 0; +} + +static const struct clk_ops versal_sdcardclk_ops = { + .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate, + .set_phase = sdhci_versal_sdcardclk_set_phase, +}; + +/** + * sdhci_versal_sampleclk_set_phase - Set the SD Input Clock Tap Delays + * + * @hw: Pointer to the hardware clock structure. + * @degrees: The clock phase shift between 0 - 359. 
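+ *
+ * As a worked example (numbers illustrative): for the 50 MHz timing modes
+ * handled below, tap_max is 120, so a requested phase of 90 degrees
+ * selects tap_delay = (90 * 120) / 360 = 30 input taps.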
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+{
+	struct sdhci_arasan_clk_data *clk_data =
+		container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+	struct sdhci_arasan_data *sdhci_arasan =
+		container_of(clk_data, struct sdhci_arasan_data, clk_data);
+	struct sdhci_host *host = sdhci_arasan->host;
+	u8 tap_delay, tap_max = 0;
+
+	/* This is applicable for SDHCI_SPEC_300 and above */
+	if (host->version < SDHCI_SPEC_300)
+		return 0;
+
+	switch (host->timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 120 Taps are available */
+		tap_max = 120;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 60 Taps are available */
+		tap_max = 60;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 30 Taps are available */
+		tap_max = 30;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	/* Set the Clock Phase */
+	if (tap_delay) {
+		u32 regval;
+
+		regval = sdhci_readl(host, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= SDHCI_ITAPDLY_CHGWIN;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= SDHCI_ITAPDLY_ENABLE;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval &= ~SDHCI_ARASAN_ITAPDLY_SEL_MASK;
+		regval |= tap_delay;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval &= ~SDHCI_ITAPDLY_CHGWIN;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+	}
+
+	return 0;
+}
+
+static const struct clk_ops versal_sampleclk_ops = {
+	.recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+	.set_phase = sdhci_versal_sampleclk_set_phase,
+};
+
+static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
+{
+	u16 clk;
+
+	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+	clk &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+	/* Issue DLL Reset */
+	zynqmp_pm_sd_dll_reset(deviceid, PM_DLL_RESET_PULSE);
+
+	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+	sdhci_enable_clk(host, clk);
+}
+
+static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+	struct clk_hw *hw = &sdhci_arasan->clk_data.sdcardclk_hw;
+	const char *clk_name = clk_hw_get_name(hw);
+	u32 device_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 :
+							   NODE_SD_1;
+	int err;
+
+	arasan_zynqmp_dll_reset(host, device_id);
+
+	err = sdhci_execute_tuning(mmc, opcode);
+	if (err)
+		return err;
+
+	arasan_zynqmp_dll_reset(host, device_id);
+
+	return 0;
+}
+
+/**
+ * sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
+ *
+ * @host: The sdhci_host
+ * @value: The value to write
+ *
+ * The corecfg_clockmultiplier is supposed to contain the clock multiplier
+ * value of the programmable clock generator.
+ *
+ * NOTES:
+ * - Many existing devices don't seem to do this and work fine. To keep
+ *   compatibility for old hardware where the device tree doesn't provide a
+ *   register map, this function is a no-op if a soc_ctl_map hasn't been
+ *   provided for this platform.
+ * - The value of corecfg_clockmultiplier should stay in sync with the
+ *   corresponding value read back from the SDHCI capability register.
So this function is called + * once at probe time and never called again. + */ +static void sdhci_arasan_update_clockmultiplier(struct sdhci_host *host, + u32 value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map = + sdhci_arasan->soc_ctl_map; + + /* Having a map is optional */ + if (!soc_ctl_map) + return; + + /* If we have a map, we expect to have a syscon */ + if (!sdhci_arasan->soc_ctl_base) { + pr_warn("%s: Have regmap, but no soc-ctl-syscon\n", + mmc_hostname(host->mmc)); + return; + } + + sdhci_arasan_syscon_write(host, &soc_ctl_map->clockmultiplier, value); +} + +/** + * sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq + * + * @host: The sdhci_host + * + * The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This + * function can be used to make that happen. + * + * NOTES: + * - Many existing devices don't seem to do this and work fine. To keep + * compatibility for old hardware where the device tree doesn't provide a + * register map, this function is a noop if a soc_ctl_map hasn't been provided + * for this platform. + * - It's assumed that clk_xin is not dynamic and that we use the SDHCI divider + * to achieve lower clock rates. That means that this function is called once + * at probe time and never called again. + */ +static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map = + sdhci_arasan->soc_ctl_map; + u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000); + + /* Having a map is optional */ + if (!soc_ctl_map) + return; + + /* If we have a map, we expect to have a syscon */ + if (!sdhci_arasan->soc_ctl_base) { + pr_warn("%s: Have regmap, but no soc-ctl-syscon\n", + mmc_hostname(host->mmc)); + return; + } + + sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz); +} + +static void sdhci_arasan_set_clk_delays(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data; + + clk_set_phase(clk_data->sampleclk, + clk_data->clk_phase_in[host->timing]); + clk_set_phase(clk_data->sdcardclk, + clk_data->clk_phase_out[host->timing]); +} + +static void arasan_dt_read_clk_phase(struct device *dev, + struct sdhci_arasan_clk_data *clk_data, + unsigned int timing, const char *prop) +{ + struct device_node *np = dev->of_node; + + int clk_phase[2] = {0}; + + /* + * Read Tap Delay values from DT, if the DT does not contain the + * Tap Values then use the pre-defined values. + */ + if (of_property_read_variable_u32_array(np, prop, &clk_phase[0], + 2, 0)) { + dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n", + prop, clk_data->clk_phase_in[timing], + clk_data->clk_phase_out[timing]); + return; + } + + /* The values read are Input and Output Clock Delays in order */ + clk_data->clk_phase_in[timing] = clk_phase[0]; + clk_data->clk_phase_out[timing] = clk_phase[1]; +} + +/** + * arasan_dt_parse_clk_phases - Read Clock Delay values from DT + * + * @dev: Pointer to our struct device. + * @clk_data: Pointer to the Clock Data structure + * + * Called at initialization to parse the values of Clock Delays. 
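+ *
+ * Each clk-phase-* property holds an <input output> pair of degree values.
+ * For example, a (hypothetical) node could specify
+ * clk-phase-sd-hs = <63 72>; to request a 63 degree input and a 72 degree
+ * output clock phase for SD high-speed mode.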
+ */ +static void arasan_dt_parse_clk_phases(struct device *dev, + struct sdhci_arasan_clk_data *clk_data) +{ + u32 mio_bank = 0; + int i; + + /* + * This has been kept as a pointer and is assigned a function here. + * So that different controller variants can assign their own handling + * function. + */ + clk_data->set_clk_delays = sdhci_arasan_set_clk_delays; + + if (of_device_is_compatible(dev->of_node, "xlnx,zynqmp-8.9a")) { + u32 zynqmp_iclk_phase[MMC_TIMING_MMC_HS400 + 1] = + ZYNQMP_ICLK_PHASE; + u32 zynqmp_oclk_phase[MMC_TIMING_MMC_HS400 + 1] = + ZYNQMP_OCLK_PHASE; + + of_property_read_u32(dev->of_node, "xlnx,mio-bank", &mio_bank); + if (mio_bank == 2) { + zynqmp_oclk_phase[MMC_TIMING_UHS_SDR104] = 90; + zynqmp_oclk_phase[MMC_TIMING_MMC_HS200] = 90; + } + + for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) { + clk_data->clk_phase_in[i] = zynqmp_iclk_phase[i]; + clk_data->clk_phase_out[i] = zynqmp_oclk_phase[i]; + } + } + + if (of_device_is_compatible(dev->of_node, "xlnx,versal-8.9a")) { + u32 versal_iclk_phase[MMC_TIMING_MMC_HS400 + 1] = + VERSAL_ICLK_PHASE; + u32 versal_oclk_phase[MMC_TIMING_MMC_HS400 + 1] = + VERSAL_OCLK_PHASE; + + for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) { + clk_data->clk_phase_in[i] = versal_iclk_phase[i]; + clk_data->clk_phase_out[i] = versal_oclk_phase[i]; + } + } + + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY, + "clk-phase-legacy"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS, + "clk-phase-mmc-hs"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_SD_HS, + "clk-phase-sd-hs"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR12, + "clk-phase-uhs-sdr12"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR25, + "clk-phase-uhs-sdr25"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR50, + "clk-phase-uhs-sdr50"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR104, + "clk-phase-uhs-sdr104"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_DDR50, + "clk-phase-uhs-ddr50"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_DDR52, + "clk-phase-mmc-ddr52"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS200, + "clk-phase-mmc-hs200"); + arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS400, + "clk-phase-mmc-hs400"); +} + +static const struct sdhci_pltfm_data sdhci_arasan_pdata = { + .ops = &sdhci_arasan_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | + SDHCI_QUIRK2_STOP_WITH_TC, +}; + +static const struct sdhci_arasan_clk_ops arasan_clk_ops = { + .sdcardclk_ops = &arasan_sdcardclk_ops, + .sampleclk_ops = &arasan_sampleclk_ops, +}; + +static struct sdhci_arasan_of_data sdhci_arasan_generic_data = { + .pdata = &sdhci_arasan_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static const struct sdhci_pltfm_data sdhci_keembay_emmc_pdata = { + .ops = &sdhci_arasan_cqe_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED | + SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | + SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | + SDHCI_QUIRK2_STOP_WITH_TC | + SDHCI_QUIRK2_BROKEN_64_BIT_DMA, +}; + +static const struct sdhci_pltfm_data sdhci_keembay_sd_pdata = { + .ops = &sdhci_arasan_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED | + 
SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | + SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | + SDHCI_QUIRK2_STOP_WITH_TC | + SDHCI_QUIRK2_BROKEN_64_BIT_DMA, +}; + +static const struct sdhci_pltfm_data sdhci_keembay_sdio_pdata = { + .ops = &sdhci_arasan_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED | + SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | + SDHCI_QUIRK2_HOST_OFF_CARD_ON | + SDHCI_QUIRK2_BROKEN_64_BIT_DMA, +}; + +static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = { + .soc_ctl_map = &rk3399_soc_ctl_map, + .pdata = &sdhci_arasan_cqe_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static struct sdhci_arasan_of_data intel_lgm_emmc_data = { + .soc_ctl_map = &intel_lgm_emmc_soc_ctl_map, + .pdata = &sdhci_arasan_cqe_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static struct sdhci_arasan_of_data intel_lgm_sdxc_data = { + .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map, + .pdata = &sdhci_arasan_cqe_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static const struct sdhci_pltfm_data sdhci_arasan_zynqmp_pdata = { + .ops = &sdhci_arasan_ops, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN | + SDHCI_QUIRK2_STOP_WITH_TC, +}; + +static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = { + .sdcardclk_ops = &zynqmp_sdcardclk_ops, + .sampleclk_ops = &zynqmp_sampleclk_ops, +}; + +static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = { + .pdata = &sdhci_arasan_zynqmp_pdata, + .clk_ops = &zynqmp_clk_ops, +}; + +static const struct sdhci_arasan_clk_ops versal_clk_ops = { + .sdcardclk_ops = &versal_sdcardclk_ops, + .sampleclk_ops = &versal_sampleclk_ops, +}; + +static struct sdhci_arasan_of_data sdhci_arasan_versal_data = { + .pdata = &sdhci_arasan_zynqmp_pdata, + .clk_ops = &versal_clk_ops, +}; + +static struct sdhci_arasan_of_data intel_keembay_emmc_data = { + .soc_ctl_map = &intel_keembay_soc_ctl_map, + .pdata = &sdhci_keembay_emmc_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static struct sdhci_arasan_of_data intel_keembay_sd_data = { + .soc_ctl_map = &intel_keembay_soc_ctl_map, + .pdata = &sdhci_keembay_sd_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static struct sdhci_arasan_of_data intel_keembay_sdio_data = { + .soc_ctl_map = &intel_keembay_soc_ctl_map, + .pdata = &sdhci_keembay_sdio_pdata, + .clk_ops = &arasan_clk_ops, +}; + +static const struct of_device_id sdhci_arasan_of_match[] = { + /* SoC-specific compatible strings w/ soc_ctl_map */ + { + .compatible = "rockchip,rk3399-sdhci-5.1", + .data = &sdhci_arasan_rk3399_data, + }, + { + .compatible = "intel,lgm-sdhci-5.1-emmc", + .data = &intel_lgm_emmc_data, + }, + { + .compatible = "intel,lgm-sdhci-5.1-sdxc", + .data = &intel_lgm_sdxc_data, + }, + { + .compatible = "intel,keembay-sdhci-5.1-emmc", + .data = &intel_keembay_emmc_data, + }, + { + .compatible = "intel,keembay-sdhci-5.1-sd", + .data = &intel_keembay_sd_data, + }, + { + .compatible = "intel,keembay-sdhci-5.1-sdio", + .data = &intel_keembay_sdio_data, + }, + /* Generic compatible below here */ + { + .compatible = "arasan,sdhci-8.9a", + .data = &sdhci_arasan_generic_data, + }, + { + .compatible = "arasan,sdhci-5.1", + .data = &sdhci_arasan_generic_data, + }, + { + .compatible = "arasan,sdhci-4.9a", + .data = 
&sdhci_arasan_generic_data, + }, + { + .compatible = "xlnx,zynqmp-8.9a", + .data = &sdhci_arasan_zynqmp_data, + }, + { + .compatible = "xlnx,versal-8.9a", + .data = &sdhci_arasan_versal_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); + +/** + * sdhci_arasan_register_sdcardclk - Register the sdcardclk for a PHY to use + * + * @sdhci_arasan: Our private data structure. + * @clk_xin: Pointer to the functional clock + * @dev: Pointer to our struct device. + * + * Some PHY devices need to know what the actual card clock is. In order for + * them to find out, we'll provide a clock through the common clock framework + * for them to query. + * + * Return: 0 on success and error value on error + */ +static int +sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan, + struct clk *clk_xin, + struct device *dev) +{ + struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data; + struct device_node *np = dev->of_node; + struct clk_init_data sdcardclk_init; + const char *parent_clk_name; + int ret; + + ret = of_property_read_string_index(np, "clock-output-names", 0, + &sdcardclk_init.name); + if (ret) { + dev_err(dev, "DT has #clock-cells but no clock-output-names\n"); + return ret; + } + + parent_clk_name = __clk_get_name(clk_xin); + sdcardclk_init.parent_names = &parent_clk_name; + sdcardclk_init.num_parents = 1; + sdcardclk_init.flags = CLK_GET_RATE_NOCACHE; + sdcardclk_init.ops = sdhci_arasan->clk_ops->sdcardclk_ops; + + clk_data->sdcardclk_hw.init = &sdcardclk_init; + clk_data->sdcardclk = + devm_clk_register(dev, &clk_data->sdcardclk_hw); + if (IS_ERR(clk_data->sdcardclk)) + return PTR_ERR(clk_data->sdcardclk); + clk_data->sdcardclk_hw.init = NULL; + + ret = of_clk_add_provider(np, of_clk_src_simple_get, + clk_data->sdcardclk); + if (ret) + dev_err(dev, "Failed to add sdcard clock provider\n"); + + return ret; +} + +/** + * sdhci_arasan_register_sampleclk - Register the sampleclk for a PHY to use + * + * @sdhci_arasan: Our private data structure. + * @clk_xin: Pointer to the functional clock + * @dev: Pointer to our struct device. + * + * Some PHY devices need to know what the actual card clock is. In order for + * them to find out, we'll provide a clock through the common clock framework + * for them to query. 
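+ *
+ * The clock's name comes from the second entry of the node's
+ * clock-output-names property, e.g. a ZynqMP-style node might carry
+ * (illustrative): clock-output-names = "clk_out_sd0", "clk_in_sd0";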
+ * + * Return: 0 on success and error value on error + */ +static int +sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan, + struct clk *clk_xin, + struct device *dev) +{ + struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data; + struct device_node *np = dev->of_node; + struct clk_init_data sampleclk_init; + const char *parent_clk_name; + int ret; + + ret = of_property_read_string_index(np, "clock-output-names", 1, + &sampleclk_init.name); + if (ret) { + dev_err(dev, "DT has #clock-cells but no clock-output-names\n"); + return ret; + } + + parent_clk_name = __clk_get_name(clk_xin); + sampleclk_init.parent_names = &parent_clk_name; + sampleclk_init.num_parents = 1; + sampleclk_init.flags = CLK_GET_RATE_NOCACHE; + sampleclk_init.ops = sdhci_arasan->clk_ops->sampleclk_ops; + + clk_data->sampleclk_hw.init = &sampleclk_init; + clk_data->sampleclk = + devm_clk_register(dev, &clk_data->sampleclk_hw); + if (IS_ERR(clk_data->sampleclk)) + return PTR_ERR(clk_data->sampleclk); + clk_data->sampleclk_hw.init = NULL; + + ret = of_clk_add_provider(np, of_clk_src_simple_get, + clk_data->sampleclk); + if (ret) + dev_err(dev, "Failed to add sample clock provider\n"); + + return ret; +} + +/** + * sdhci_arasan_unregister_sdclk - Undoes sdhci_arasan_register_sdclk() + * + * @dev: Pointer to our struct device. + * + * Should be called any time we're exiting and sdhci_arasan_register_sdclk() + * returned success. + */ +static void sdhci_arasan_unregister_sdclk(struct device *dev) +{ + struct device_node *np = dev->of_node; + + if (!of_find_property(np, "#clock-cells", NULL)) + return; + + of_clk_del_provider(dev->of_node); +} + +/** + * sdhci_arasan_update_support64b - Set SUPPORT_64B (64-bit System Bus Support) + * + * This should be set based on the System Address Bus. + * 0: the Core supports only 32-bit System Address Bus. + * 1: the Core supports 64-bit System Address Bus. + * + * NOTES: + * - For Keem Bay, it is required to clear this bit. Its default value is 1'b1. + * Keem Bay does not support 64-bit access. + * + * @host: The sdhci_host + * @value: The value to write + */ +static void sdhci_arasan_update_support64b(struct sdhci_host *host, u32 value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_arasan_soc_ctl_map *soc_ctl_map = + sdhci_arasan->soc_ctl_map; + + /* Having a map is optional */ + if (!soc_ctl_map) + return; + + /* If we have a map, we expect to have a syscon */ + if (!sdhci_arasan->soc_ctl_base) { + pr_warn("%s: Have regmap, but no soc-ctl-syscon\n", + mmc_hostname(host->mmc)); + return; + } + + sdhci_arasan_syscon_write(host, &soc_ctl_map->support64b, value); +} + +/** + * sdhci_arasan_register_sdclk - Register the sdcardclk for a PHY to use + * + * @sdhci_arasan: Our private data structure. + * @clk_xin: Pointer to the functional clock + * @dev: Pointer to our struct device. + * + * Some PHY devices need to know what the actual card clock is. In order for + * them to find out, we'll provide a clock through the common clock framework + * for them to query. + * + * Note: without seriously re-architecting SDHCI's clock code and testing on + * all platforms, there's no way to create a totally beautiful clock here + * with all clock ops implemented. Instead, we'll just create a clock that can + * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock + * framework that we're doing things behind its back. 
This should be sufficient + * to create nice clean device tree bindings and later (if needed) we can try + * re-architecting SDHCI if we see some benefit to it. + * + * Return: 0 on success and error value on error + */ +static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan, + struct clk *clk_xin, + struct device *dev) +{ + struct device_node *np = dev->of_node; + u32 num_clks = 0; + int ret; + + /* Providing a clock to the PHY is optional; no error if missing */ + if (of_property_read_u32(np, "#clock-cells", &num_clks) < 0) + return 0; + + ret = sdhci_arasan_register_sdcardclk(sdhci_arasan, clk_xin, dev); + if (ret) + return ret; + + if (num_clks) { + ret = sdhci_arasan_register_sampleclk(sdhci_arasan, clk_xin, + dev); + if (ret) { + sdhci_arasan_unregister_sdclk(dev); + return ret; + } + } + + return 0; +} + +static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan) +{ + struct sdhci_host *host = sdhci_arasan->host; + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!sdhci_arasan->has_cqe) + return sdhci_add_host(host); + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(host->mmc->parent, + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_arasan_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + +static int sdhci_arasan_probe(struct platform_device *pdev) +{ + int ret; + const struct of_device_id *match; + struct device_node *node; + struct clk *clk_xin; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_arasan_data *sdhci_arasan; + struct device_node *np = pdev->dev.of_node; + const struct sdhci_arasan_of_data *data; + + match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node); + data = match->data; + host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan)); + + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + sdhci_arasan->host = host; + + sdhci_arasan->soc_ctl_map = data->soc_ctl_map; + sdhci_arasan->clk_ops = data->clk_ops; + + node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0); + if (node) { + sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node); + of_node_put(node); + + if (IS_ERR(sdhci_arasan->soc_ctl_base)) { + ret = dev_err_probe(&pdev->dev, + PTR_ERR(sdhci_arasan->soc_ctl_base), + "Can't get syscon\n"); + goto err_pltfm_free; + } + } + + sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb"); + if (IS_ERR(sdhci_arasan->clk_ahb)) { + dev_err(&pdev->dev, "clk_ahb clock not found.\n"); + ret = PTR_ERR(sdhci_arasan->clk_ahb); + goto err_pltfm_free; + } + + clk_xin = devm_clk_get(&pdev->dev, "clk_xin"); + if (IS_ERR(clk_xin)) { + dev_err(&pdev->dev, "clk_xin clock not found.\n"); + ret = PTR_ERR(clk_xin); + goto err_pltfm_free; + } + + ret = clk_prepare_enable(sdhci_arasan->clk_ahb); + if (ret) { + dev_err(&pdev->dev, "Unable to enable AHB clock.\n"); + goto err_pltfm_free; + } + + ret = clk_prepare_enable(clk_xin); + if (ret) { + dev_err(&pdev->dev, "Unable to enable SD clock.\n"); + goto clk_dis_ahb; + } + + sdhci_get_of_property(pdev); + + if 
(of_property_read_bool(np, "xlnx,fails-without-test-cd")) + sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_FORCE_CDTEST; + + if (of_property_read_bool(np, "xlnx,int-clock-stable-broken")) + sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE; + + pltfm_host->clk = clk_xin; + + if (of_device_is_compatible(pdev->dev.of_node, + "rockchip,rk3399-sdhci-5.1")) + sdhci_arasan_update_clockmultiplier(host, 0x0); + + if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") || + of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") || + of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) { + sdhci_arasan_update_clockmultiplier(host, 0x0); + sdhci_arasan_update_support64b(host, 0x0); + + host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; + } + + sdhci_arasan_update_baseclkfreq(host); + + ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev); + if (ret) + goto clk_disable_all; + + if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) { + host->mmc_host_ops.execute_tuning = + arasan_zynqmp_execute_tuning; + + sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN; + } + + arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data); + + ret = mmc_of_parse(host->mmc); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret); + goto unreg_clk; + } + + sdhci_arasan->phy = ERR_PTR(-ENODEV); + if (of_device_is_compatible(pdev->dev.of_node, + "arasan,sdhci-5.1")) { + sdhci_arasan->phy = devm_phy_get(&pdev->dev, + "phy_arasan"); + if (IS_ERR(sdhci_arasan->phy)) { + ret = PTR_ERR(sdhci_arasan->phy); + dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n"); + goto unreg_clk; + } + + ret = phy_init(sdhci_arasan->phy); + if (ret < 0) { + dev_err(&pdev->dev, "phy_init err.\n"); + goto unreg_clk; + } + + host->mmc_host_ops.hs400_enhanced_strobe = + sdhci_arasan_hs400_enhanced_strobe; + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_arasan_voltage_switch; + sdhci_arasan->has_cqe = true; + host->mmc->caps2 |= MMC_CAP2_CQE; + + if (!of_property_read_bool(np, "disable-cqe-dcmd")) + host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; + } + + ret = sdhci_arasan_add_host(sdhci_arasan); + if (ret) + goto err_add_host; + + return 0; + +err_add_host: + if (!IS_ERR(sdhci_arasan->phy)) + phy_exit(sdhci_arasan->phy); +unreg_clk: + sdhci_arasan_unregister_sdclk(&pdev->dev); +clk_disable_all: + clk_disable_unprepare(clk_xin); +clk_dis_ahb: + clk_disable_unprepare(sdhci_arasan->clk_ahb); +err_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_arasan_remove(struct platform_device *pdev) +{ + int ret; + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + struct clk *clk_ahb = sdhci_arasan->clk_ahb; + + if (!IS_ERR(sdhci_arasan->phy)) { + if (sdhci_arasan->is_phy_on) + phy_power_off(sdhci_arasan->phy); + phy_exit(sdhci_arasan->phy); + } + + sdhci_arasan_unregister_sdclk(&pdev->dev); + + ret = sdhci_pltfm_unregister(pdev); + + clk_disable_unprepare(clk_ahb); + + return ret; +} + +static struct platform_driver sdhci_arasan_driver = { + .driver = { + .name = "sdhci-arasan", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_arasan_of_match, + .pm = &sdhci_arasan_dev_pm_ops, + }, + .probe = sdhci_arasan_probe, + .remove = sdhci_arasan_remove, +}; + +module_platform_driver(sdhci_arasan_driver); + +MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller"); +MODULE_AUTHOR("Soeren Brinkmann "); 
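+
+/*
+ * A minimal, purely illustrative device tree node for the generic binding,
+ * showing the two clock names the probe above requests via devm_clk_get()
+ * (address, clock specifiers and interrupt are hypothetical):
+ *
+ *	mmc@e0100000 {
+ *		compatible = "arasan,sdhci-8.9a";
+ *		reg = <0xe0100000 0x1000>;
+ *		clock-names = "clk_xin", "clk_ahb";
+ *		clocks = <&clkc 21>, <&clkc 32>;
+ *		interrupts = <0 24 4>;
+ *	};
+ */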
+MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c new file mode 100644 index 000000000..4f008ba32 --- /dev/null +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (C) 2019 ASPEED Technology Inc. */ +/* Copyright (C) 2019 IBM Corp. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" + +#define ASPEED_SDC_INFO 0x00 +#define ASPEED_SDC_S1MMC8 BIT(25) +#define ASPEED_SDC_S0MMC8 BIT(24) + +struct aspeed_sdc { + struct clk *clk; + struct resource *res; + + spinlock_t lock; + void __iomem *regs; +}; + +struct aspeed_sdhci { + struct aspeed_sdc *parent; + u32 width_mask; +}; + +static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc, + struct aspeed_sdhci *sdhci, + bool bus8) +{ + u32 info; + + /* Set/clear 8 bit mode */ + spin_lock(&sdc->lock); + info = readl(sdc->regs + ASPEED_SDC_INFO); + if (bus8) + info |= sdhci->width_mask; + else + info &= ~sdhci->width_mask; + writel(info, sdc->regs + ASPEED_SDC_INFO); + spin_unlock(&sdc->lock); +} + +static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host; + unsigned long parent; + int div; + u16 clk; + + pltfm_host = sdhci_priv(host); + parent = clk_get_rate(pltfm_host->clk); + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + if (WARN_ON(clock > host->max_clk)) + clock = host->max_clk; + + for (div = 2; div < 256; div *= 2) { + if ((parent / div) <= clock) + break; + } + div >>= 1; + + clk = div << SDHCI_DIVIDER_SHIFT; + + sdhci_enable_clk(host, clk); +} + +static unsigned int aspeed_sdhci_get_max_clock(struct sdhci_host *host) +{ + if (host->mmc->f_max) + return host->mmc->f_max; + + return sdhci_pltfm_clk_get_max_clock(host); +} + +static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width) +{ + struct sdhci_pltfm_host *pltfm_priv; + struct aspeed_sdhci *aspeed_sdhci; + struct aspeed_sdc *aspeed_sdc; + u8 ctrl; + + pltfm_priv = sdhci_priv(host); + aspeed_sdhci = sdhci_pltfm_priv(pltfm_priv); + aspeed_sdc = aspeed_sdhci->parent; + + /* Set/clear 8-bit mode */ + aspeed_sdc_configure_8bit_mode(aspeed_sdc, aspeed_sdhci, + width == MMC_BUS_WIDTH_8); + + /* Set/clear 1 or 4 bit mode */ + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + if (width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + +static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg) +{ + u32 val = readl(host->ioaddr + reg); + + if (unlikely(reg == SDHCI_PRESENT_STATE) && + (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)) + val ^= SDHCI_CARD_PRESENT; + + return val; +} + +static const struct sdhci_ops aspeed_sdhci_ops = { + .read_l = aspeed_sdhci_readl, + .set_clock = aspeed_sdhci_set_clock, + .get_max_clock = aspeed_sdhci_get_max_clock, + .set_bus_width = aspeed_sdhci_set_bus_width, + .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data aspeed_sdhci_pdata = { + .ops = &aspeed_sdhci_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +}; + +static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev, + struct resource *res) +{ + resource_size_t delta; + + if (!res || resource_type(res) != IORESOURCE_MEM) + return -EINVAL; + + if (res->start < 
dev->parent->res->start) + return -EINVAL; + + delta = res->start - dev->parent->res->start; + if (delta & (0x100 - 1)) + return -EINVAL; + + return (delta / 0x100) - 1; +} + +static int aspeed_sdhci_probe(struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host; + struct aspeed_sdhci *dev; + struct sdhci_host *host; + struct resource *res; + int slot; + int ret; + + host = sdhci_pltfm_init(pdev, &aspeed_sdhci_pdata, sizeof(*dev)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + dev = sdhci_pltfm_priv(pltfm_host); + dev->parent = dev_get_drvdata(pdev->dev.parent); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + slot = aspeed_sdhci_calculate_slot(dev, res); + + if (slot < 0) + return slot; + else if (slot >= 2) + return -EINVAL; + + dev_info(&pdev->dev, "Configuring for slot %d\n", slot); + dev->width_mask = !slot ? ASPEED_SDC_S0MMC8 : ASPEED_SDC_S1MMC8; + + sdhci_get_of_property(pdev); + + pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pltfm_host->clk)) + return PTR_ERR(pltfm_host->clk); + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable SDIO clock\n"); + goto err_pltfm_free; + } + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_sdhci_add; + + ret = sdhci_add_host(host); + if (ret) + goto err_sdhci_add; + + return 0; + +err_sdhci_add: + clk_disable_unprepare(pltfm_host->clk); +err_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int aspeed_sdhci_remove(struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_host *host; + int dead = 0; + + host = platform_get_drvdata(pdev); + pltfm_host = sdhci_priv(host); + + sdhci_remove_host(host, dead); + + clk_disable_unprepare(pltfm_host->clk); + + sdhci_pltfm_free(pdev); + + return 0; +} + +static const struct of_device_id aspeed_sdhci_of_match[] = { + { .compatible = "aspeed,ast2400-sdhci", }, + { .compatible = "aspeed,ast2500-sdhci", }, + { .compatible = "aspeed,ast2600-sdhci", }, + { } +}; + +static struct platform_driver aspeed_sdhci_driver = { + .driver = { + .name = "sdhci-aspeed", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = aspeed_sdhci_of_match, + }, + .probe = aspeed_sdhci_probe, + .remove = aspeed_sdhci_remove, +}; + +static int aspeed_sdc_probe(struct platform_device *pdev) + +{ + struct device_node *parent, *child; + struct aspeed_sdc *sdc; + int ret; + + sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); + if (!sdc) + return -ENOMEM; + + spin_lock_init(&sdc->lock); + + sdc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sdc->clk)) + return PTR_ERR(sdc->clk); + + ret = clk_prepare_enable(sdc->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable SDCLK\n"); + return ret; + } + + sdc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sdc->regs = devm_ioremap_resource(&pdev->dev, sdc->res); + if (IS_ERR(sdc->regs)) { + ret = PTR_ERR(sdc->regs); + goto err_clk; + } + + dev_set_drvdata(&pdev->dev, sdc); + + parent = pdev->dev.of_node; + for_each_available_child_of_node(parent, child) { + struct platform_device *cpdev; + + cpdev = of_platform_device_create(child, NULL, &pdev->dev); + if (!cpdev) { + of_node_put(child); + ret = -ENODEV; + goto err_clk; + } + } + + return 0; + +err_clk: + clk_disable_unprepare(sdc->clk); + return ret; +} + +static int aspeed_sdc_remove(struct platform_device *pdev) +{ + struct aspeed_sdc *sdc = dev_get_drvdata(&pdev->dev); + + clk_disable_unprepare(sdc->clk); + + return 0; +} + +static const struct 
of_device_id aspeed_sdc_of_match[] = { + { .compatible = "aspeed,ast2400-sd-controller", }, + { .compatible = "aspeed,ast2500-sd-controller", }, + { .compatible = "aspeed,ast2600-sd-controller", }, + { } +}; + +MODULE_DEVICE_TABLE(of, aspeed_sdc_of_match); + +static struct platform_driver aspeed_sdc_driver = { + .driver = { + .name = "sd-controller-aspeed", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_pltfm_pmops, + .of_match_table = aspeed_sdc_of_match, + }, + .probe = aspeed_sdc_probe, + .remove = aspeed_sdc_remove, +}; + +static int __init aspeed_sdc_init(void) +{ + int rc; + + rc = platform_driver_register(&aspeed_sdhci_driver); + if (rc < 0) + return rc; + + rc = platform_driver_register(&aspeed_sdc_driver); + if (rc < 0) + platform_driver_unregister(&aspeed_sdhci_driver); + + return rc; +} +module_init(aspeed_sdc_init); + +static void __exit aspeed_sdc_exit(void) +{ + platform_driver_unregister(&aspeed_sdc_driver); + platform_driver_unregister(&aspeed_sdhci_driver); +} +module_exit(aspeed_sdc_exit); + +MODULE_DESCRIPTION("Driver for the ASPEED SD/SDIO/SDHCI Controllers"); +MODULE_AUTHOR("Ryan Chen "); +MODULE_AUTHOR("Andrew Jeffery "); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c new file mode 100644 index 000000000..0452c312b --- /dev/null +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Atmel SDMMC controller driver. + * + * Copyright (C) 2015 Atmel, + * 2015 Ludovic Desroches + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" + +#define SDMMC_MC1R 0x204 +#define SDMMC_MC1R_DDR BIT(3) +#define SDMMC_MC1R_FCD BIT(7) +#define SDMMC_CACR 0x230 +#define SDMMC_CACR_CAPWREN BIT(0) +#define SDMMC_CACR_KEY (0x46 << 8) +#define SDMMC_CALCR 0x240 +#define SDMMC_CALCR_EN BIT(0) +#define SDMMC_CALCR_ALWYSON BIT(4) + +#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */ + +struct sdhci_at91_soc_data { + const struct sdhci_pltfm_data *pdata; + bool baseclk_is_generated_internally; + unsigned int divider_for_baseclk; +}; + +struct sdhci_at91_priv { + const struct sdhci_at91_soc_data *soc_data; + struct clk *hclock; + struct clk *gck; + struct clk *mainck; + bool restore_needed; + bool cal_always_on; +}; + +static void sdhci_at91_set_force_card_detect(struct sdhci_host *host) +{ + u8 mc1r; + + mc1r = readb(host->ioaddr + SDMMC_MC1R); + mc1r |= SDMMC_MC1R_FCD; + writeb(mc1r, host->ioaddr + SDMMC_MC1R); +} + +static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + /* + * There is no requirement to disable the internal clock before + * changing the SD clock configuration. Moreover, disabling the + * internal clock, changing the configuration and re-enabling the + * internal clock causes some bugs. It can prevent to get the internal + * clock stable flag ready and an unexpected switch to the base clock + * when using presets. 
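+	 * For that reason, the code below only masks the register down to
+	 * SDHCI_CLOCK_INT_EN: the card-enable and divider bits are cleared
+	 * while the internal clock enable is left untouched.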
+ */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + + clk |= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Wait max 20 ms */ + if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE), + 1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + return; + } + + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); +} + +static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u8 mc1r; + + if (timing == MMC_TIMING_MMC_DDR52) { + mc1r = sdhci_readb(host, SDMMC_MC1R); + mc1r |= SDMMC_MC1R_DDR; + sdhci_writeb(host, mc1r, SDMMC_MC1R); + } + sdhci_set_uhs_signaling(host, timing); +} + +static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + unsigned int tmp; + + sdhci_reset(host, mask); + + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) + || mmc_gpio_get_cd(host->mmc) >= 0) + sdhci_at91_set_force_card_detect(host); + + if (priv->cal_always_on && (mask & SDHCI_RESET_ALL)) { + u32 calcr = sdhci_readl(host, SDMMC_CALCR); + + sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN, + SDMMC_CALCR); + + if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN), + 10, 20000, false, host, SDMMC_CALCR)) + dev_err(mmc_dev(host->mmc), "Failed to calibrate\n"); + } +} + +static const struct sdhci_ops sdhci_at91_sama5d2_ops = { + .set_clock = sdhci_at91_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_at91_reset, + .set_uhs_signaling = sdhci_at91_set_uhs_signaling, + .set_power = sdhci_set_power_and_bus_voltage, +}; + +static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = { + .ops = &sdhci_at91_sama5d2_ops, +}; + +static const struct sdhci_at91_soc_data soc_data_sama5d2 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = false, +}; + +static const struct sdhci_at91_soc_data soc_data_sam9x60 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = true, + .divider_for_baseclk = 2, +}; + +static const struct of_device_id sdhci_at91_dt_match[] = { + { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 }, + { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 }, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); + +static int sdhci_at91_set_clks_presets(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + unsigned int caps0, caps1; + unsigned int clk_base, clk_mul; + unsigned int gck_rate, clk_base_rate; + unsigned int preset_div; + + clk_prepare_enable(priv->hclock); + caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); + caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); + + gck_rate = clk_get_rate(priv->gck); + if (priv->soc_data->baseclk_is_generated_internally) + clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk; + else + clk_base_rate = clk_get_rate(priv->mainck); + + clk_base = clk_base_rate / 1000000; + clk_mul = gck_rate / clk_base_rate - 1; + + caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK; + caps0 |= FIELD_PREP(SDHCI_CLOCK_V3_BASE_MASK, clk_base); + caps1 &= ~SDHCI_CLOCK_MUL_MASK; 
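+	/*
+	 * Worked example (rates illustrative): on a sam9x60-style setup
+	 * with gck at 240 MHz and divider_for_baseclk = 2, clk_base_rate
+	 * is 120 MHz, so clk_base = 120 and clk_mul = 240 / 120 - 1 = 1.
+	 */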
+ caps1 |= FIELD_PREP(SDHCI_CLOCK_MUL_MASK, clk_mul); + /* Set capabilities in r/w mode. */ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); + writel(caps0, host->ioaddr + SDHCI_CAPABILITIES); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + + dev_dbg(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", + clk_mul, gck_rate, clk_base_rate); + + /* + * We have to set preset values because it depends on the clk_mul + * value. Moreover, SDR104 is supported in a degraded mode since the + * maximum sd clock value is 120 MHz instead of 208 MHz. For that + * reason, we need to use presets to support SDR104. + */ + preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR12); + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR25); + preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR50); + preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR104); + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_DDR50); + + clk_prepare_enable(priv->mainck); + clk_prepare_enable(priv->gck); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_at91_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = pm_runtime_force_suspend(dev); + + priv->restore_needed = true; + + return ret; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int sdhci_at91_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_runtime_suspend_host(host); + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + clk_disable_unprepare(priv->gck); + clk_disable_unprepare(priv->hclock); + clk_disable_unprepare(priv->mainck); + + return ret; +} + +static int sdhci_at91_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + if (priv->restore_needed) { + ret = sdhci_at91_set_clks_presets(dev); + if (ret) + return ret; + + priv->restore_needed = false; + goto out; + } + + ret = clk_prepare_enable(priv->mainck); + if (ret) { + dev_err(dev, "can't enable mainck\n"); + return ret; + } + + ret = clk_prepare_enable(priv->hclock); + if (ret) { + dev_err(dev, "can't enable hclock\n"); + return ret; + } + + ret = clk_prepare_enable(priv->gck); + if (ret) { + dev_err(dev, "can't enable gck\n"); + return ret; + } + +out: + return sdhci_runtime_resume_host(host, 0); +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend, + sdhci_at91_runtime_resume, + NULL) +}; + +static int 
sdhci_at91_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct sdhci_at91_soc_data *soc_data; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_at91_priv *priv; + int ret; + + match = of_match_device(sdhci_at91_dt_match, &pdev->dev); + if (!match) + return -EINVAL; + soc_data = match->data; + + host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->soc_data = soc_data; + + priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); + if (IS_ERR(priv->mainck)) { + if (soc_data->baseclk_is_generated_internally) { + priv->mainck = NULL; + } else { + dev_err(&pdev->dev, "failed to get baseclk\n"); + ret = PTR_ERR(priv->mainck); + goto sdhci_pltfm_free; + } + } + + priv->hclock = devm_clk_get(&pdev->dev, "hclock"); + if (IS_ERR(priv->hclock)) { + dev_err(&pdev->dev, "failed to get hclock\n"); + ret = PTR_ERR(priv->hclock); + goto sdhci_pltfm_free; + } + + priv->gck = devm_clk_get(&pdev->dev, "multclk"); + if (IS_ERR(priv->gck)) { + dev_err(&pdev->dev, "failed to get multclk\n"); + ret = PTR_ERR(priv->gck); + goto sdhci_pltfm_free; + } + + ret = sdhci_at91_set_clks_presets(&pdev->dev); + if (ret) + goto sdhci_pltfm_free; + + priv->restore_needed = false; + + /* + * If the SDCAL pin is wrongly connected, we must enable + * the analog calibration cell permanently. + */ + priv->cal_always_on = + device_property_read_bool(&pdev->dev, + "microchip,sdcal-inverted"); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto clocks_disable_unprepare; + + sdhci_get_of_property(pdev); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + + /* HS200 is broken at this moment */ + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; + + ret = sdhci_add_host(host); + if (ret) + goto pm_runtime_disable; + + /* + * When calling sdhci_runtime_suspend_host(), the sdhci layer makes + * the assumption that all the clocks of the controller are disabled. + * This means we can't get an irq from it while it is runtime + * suspended, so waking up on a card detect irq from the controller + * is not planned. + * If we want to use runtime PM and to be able to wake up on card + * insertion, we have to use a GPIO for the card detection or we can + * use polling. Be aware that using polling will resume/suspend the + * controller between each attempt. + * Disable SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries + * to enable polling via device tree with the broken-cd property. + */ + if (mmc_card_is_removable(host->mmc) && + mmc_gpio_get_cd(host->mmc) < 0) { + host->mmc->caps |= MMC_CAP_NEEDS_POLL; + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + } + + /* + * If the device attached to the MMC bus is not removable, it is safer + * to set the Force Card Detect bit. People often don't connect the + * card detect signal and use this pin for another purpose. If the card + * detect pin is not muxed to the SDHCI controller, a default value is + * used. This value can differ from one SoC revision to another. + * Problems arise when this default value does not report a card as + * present. To avoid this case, if the device is non-removable then the + * card detection procedure using the SDMCC_CD signal is bypassed.
+ * This bit is reset when a software reset for all commands is performed + * so we need to implement our own reset function to set this bit back. + * + * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line. + */ + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) + || mmc_gpio_get_cd(host->mmc) >= 0) + sdhci_at91_set_force_card_detect(host); + + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +pm_runtime_disable: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); +clocks_disable_unprepare: + clk_disable_unprepare(priv->gck); + clk_disable_unprepare(priv->mainck); + clk_disable_unprepare(priv->hclock); +sdhci_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_at91_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct clk *gck = priv->gck; + struct clk *hclock = priv->hclock; + struct clk *mainck = priv->mainck; + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + sdhci_pltfm_unregister(pdev); + + clk_disable_unprepare(gck); + clk_disable_unprepare(hclock); + clk_disable_unprepare(mainck); + + return 0; +} + +static struct platform_driver sdhci_at91_driver = { + .driver = { + .name = "sdhci-at91", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_at91_dt_match, + .pm = &sdhci_at91_dev_pm_ops, + }, + .probe = sdhci_at91_probe, + .remove = sdhci_at91_remove, +}; + +module_platform_driver(sdhci_at91_driver); + +MODULE_DESCRIPTION("SDHCI driver for at91"); +MODULE_AUTHOR("Ludovic Desroches "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c new file mode 100644 index 000000000..59d8d96ce --- /dev/null +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Synopsys DesignWare Cores Mobile Storage Host Controller + * + * Copyright (C) 2018 Synaptics Incorporated + * + * Author: Jisheng Zhang + */ + +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" + +#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16) + +/* DWCMSHC specific Mode Select value */ +#define DWCMSHC_CTRL_HS400 0x7 + +#define BOUNDARY_OK(addr, len) \ + ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) + +struct dwcmshc_priv { + struct clk *bus_clk; +}; + +/* + * If the DMA address spans a 128MB boundary, we split the DMA transfer into + * two so that each DMA transfer doesn't exceed the boundary. + */ +static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd) +{ + int tmplen, offset; + + if (likely(!len || BOUNDARY_OK(addr, len))) { + sdhci_adma_write_desc(host, desc, addr, len, cmd); + return; + } + + offset = addr & (SZ_128M - 1); + tmplen = SZ_128M - offset; + sdhci_adma_write_desc(host, desc, addr, tmplen, cmd); + + addr += tmplen; + len -= tmplen; + sdhci_adma_write_desc(host, desc, addr, len, cmd); +} + +static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* + * Whether or not V4 mode is enabled, the ARGUMENT2 register is a + * 32-bit block count register which doesn't support the stuff bits + * of the CMD23 argument on the dwcmshc host controller.
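+ * For example, a CMD23 argument of (1 << 31) | 8 (a reliable-write + * request for 8 blocks) has bits set in [31:16], so auto CMD23 is + * disabled below for that request and the core falls back to sending + * CMD23 itself.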
+ */ + if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF)) + host->flags &= ~SDHCI_AUTO_CMD23; + else + host->flags |= SDHCI_AUTO_CMD23; +} + +static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + dwcmshc_check_auto_cmd23(mmc, mrq); + + sdhci_request(mmc, mrq); +} + +static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if ((timing == MMC_TIMING_UHS_SDR25) || + (timing == MMC_TIMING_MMC_HS)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= DWCMSHC_CTRL_HS400; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static const struct sdhci_ops sdhci_dwcmshc_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = dwcmshc_set_uhs_signaling, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .reset = sdhci_reset, + .adma_write_desc = dwcmshc_adma_write_desc, +}; + +static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { + .ops = &sdhci_dwcmshc_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + +static int dwcmshc_probe(struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_host *host; + struct dwcmshc_priv *priv; + int err; + u32 extra; + + host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_pdata, + sizeof(struct dwcmshc_priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + /* + * extra adma table cnt for cross 128M boundary handling. 
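+ * For example, with a 32-bit DMA mask (dma_get_required_mask() + * returning 0xffffffff), extra = DIV_ROUND_UP_ULL(0xffffffff, SZ_128M) + * = 32 extra descriptors, capped at SDHCI_MAX_SEGS.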
+ */ + extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M); + if (extra > SDHCI_MAX_SEGS) + extra = SDHCI_MAX_SEGS; + host->adma_table_cnt += extra; + + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + + pltfm_host->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pltfm_host->clk)) { + err = PTR_ERR(pltfm_host->clk); + dev_err(&pdev->dev, "failed to get core clk: %d\n", err); + goto free_pltfm; + } + err = clk_prepare_enable(pltfm_host->clk); + if (err) + goto free_pltfm; + + priv->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(priv->bus_clk)) + clk_prepare_enable(priv->bus_clk); + + err = mmc_of_parse(host->mmc); + if (err) + goto err_clk; + + sdhci_get_of_property(pdev); + + host->mmc_host_ops.request = dwcmshc_request; + + err = sdhci_add_host(host); + if (err) + goto err_clk; + + return 0; + +err_clk: + clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(priv->bus_clk); +free_pltfm: + sdhci_pltfm_free(pdev); + return err; +} + +static int dwcmshc_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + + sdhci_remove_host(host, 0); + + clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(priv->bus_clk); + + sdhci_pltfm_free(pdev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dwcmshc_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + clk_disable_unprepare(pltfm_host->clk); + if (!IS_ERR(priv->bus_clk)) + clk_disable_unprepare(priv->bus_clk); + + return ret; +} + +static int dwcmshc_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + if (!IS_ERR(priv->bus_clk)) { + ret = clk_prepare_enable(priv->bus_clk); + if (ret) + return ret; + } + + return sdhci_resume_host(host); +} +#endif + +static SIMPLE_DEV_PM_OPS(dwcmshc_pmops, dwcmshc_suspend, dwcmshc_resume); + +static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { + { .compatible = "snps,dwcmshc-sdhci" }, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids); + +static struct platform_driver sdhci_dwcmshc_driver = { + .driver = { + .name = "sdhci-dwcmshc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_dwcmshc_dt_ids, + .pm = &dwcmshc_pmops, + }, + .probe = dwcmshc_probe, + .remove = dwcmshc_remove, +}; +module_platform_driver(sdhci_dwcmshc_driver); + +MODULE_DESCRIPTION("SDHCI platform driver for Synopsys DWC MSHC"); +MODULE_AUTHOR("Jisheng Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c new file mode 100644 index 000000000..5b853f651 --- /dev/null +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -0,0 +1,1523 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale eSDHC controller driver. + * + * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. 
+ * Copyright 2020 NXP + * + * Authors: Xiaobo Xie + * Anton Vorontsov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdhci-pltfm.h" +#include "sdhci-esdhc.h" + +#define VENDOR_V_22 0x12 +#define VENDOR_V_23 0x13 + +#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1) + +struct esdhc_clk_fixup { + const unsigned int sd_dflt_max_clk; + const unsigned int max_clk[MMC_TIMING_NUM]; +}; + +static const struct esdhc_clk_fixup ls1021a_esdhc_clk = { + .sd_dflt_max_clk = 25000000, + .max_clk[MMC_TIMING_MMC_HS] = 46500000, + .max_clk[MMC_TIMING_SD_HS] = 46500000, +}; + +static const struct esdhc_clk_fixup ls1046a_esdhc_clk = { + .sd_dflt_max_clk = 25000000, + .max_clk[MMC_TIMING_UHS_SDR104] = 167000000, + .max_clk[MMC_TIMING_MMC_HS200] = 167000000, +}; + +static const struct esdhc_clk_fixup ls1012a_esdhc_clk = { + .sd_dflt_max_clk = 25000000, + .max_clk[MMC_TIMING_UHS_SDR104] = 125000000, + .max_clk[MMC_TIMING_MMC_HS200] = 125000000, +}; + +static const struct esdhc_clk_fixup p1010_esdhc_clk = { + .sd_dflt_max_clk = 20000000, + .max_clk[MMC_TIMING_LEGACY] = 20000000, + .max_clk[MMC_TIMING_MMC_HS] = 42000000, + .max_clk[MMC_TIMING_SD_HS] = 40000000, +}; + +static const struct of_device_id sdhci_esdhc_of_match[] = { + { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk}, + { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk}, + { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk}, + { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk}, + { .compatible = "fsl,mpc8379-esdhc" }, + { .compatible = "fsl,mpc8536-esdhc" }, + { .compatible = "fsl,esdhc" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match); + +struct sdhci_esdhc { + u8 vendor_ver; + u8 spec_ver; + bool quirk_incorrect_hostver; + bool quirk_limited_clk_division; + bool quirk_unreliable_pulse_detection; + bool quirk_tuning_erratum_type1; + bool quirk_tuning_erratum_type2; + bool quirk_ignore_data_inhibit; + bool quirk_delay_before_data_reset; + bool quirk_trans_complete_erratum; + bool in_sw_tuning; + unsigned int peripheral_clock; + const struct esdhc_clk_fixup *clk_fixup; + u32 div_ratio; +}; + +/** + * esdhc_read*_fixup - Fixup the value read from an incompatible eSDHC + * register to make it compatible with the SD spec. + * + * @host: pointer to sdhci_host + * @spec_reg: SD spec register address + * @value: 32bit eSDHC register value on spec_reg address + * + * In the SD spec, there are 8/16/32/64-bit registers, while all eSDHC + * registers are 32 bits. There are differences in register size, register + * address, register function, bit position and function between the eSDHC + * spec and the SD spec. + * + * Return a fixed up register value + */ +static u32 esdhc_readl_fixup(struct sdhci_host *host, + int spec_reg, u32 value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u32 ret; + + /* + * The ADMA flag bit in eSDHC is not compatible with the standard + * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA + * is supported by eSDHC. + * And for many FSL eSDHC controllers, the reset value of the field + * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA; + * only those whose vendor version is greater than 2.2/0x12 support ADMA.
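+ * For example, a controller reporting vendor version 0x13 (2.3) with + * SDHCI_CAN_DO_ADMA1 set gets SDHCI_CAN_DO_ADMA2 ORed in below, so the + * SDHCI core will pick ADMA2.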
+ */ + if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) { + if (esdhc->vendor_ver > VENDOR_V_22) { + ret = value | SDHCI_CAN_DO_ADMA2; + return ret; + } + } + + /* + * The DAT[3:0] line signal levels and the CMD line signal level are + * not compatible with the standard SDHC register. The line signal + * levels DAT[7:0] are at bits 31:24 and the command line signal level + * is at bit 23. All other bits are the same as in the standard SDHC + * register. + */ + if (spec_reg == SDHCI_PRESENT_STATE) { + ret = value & 0x000fffff; + ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; + ret |= (value << 1) & SDHCI_CMD_LVL; + + /* + * Some controllers have an unreliable Data Line Active + * bit for commands with busy signal. This affects the + * Command Inhibit (data) bit. Just ignore it since the + * MMC core driver has already polled the card status + * with CMD13 after any command with busy signal. + */ + if (esdhc->quirk_ignore_data_inhibit) + ret &= ~SDHCI_DATA_INHIBIT; + return ret; + } + + /* + * DTS properties of the mmc host are used to enable each speed mode + * according to SoC and board capability. So clean up the + * SDR50/SDR104/DDR50 support bits here. + */ + if (spec_reg == SDHCI_CAPABILITIES_1) { + ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + return ret; + } + + ret = value; + return ret; +} + +static u16 esdhc_readw_fixup(struct sdhci_host *host, + int spec_reg, u32 value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u16 ret; + int shift = (spec_reg & 0x2) * 8; + + if (spec_reg == SDHCI_TRANSFER_MODE) + return pltfm_host->xfer_mode_shadow; + + if (spec_reg == SDHCI_HOST_VERSION) + ret = value & 0xffff; + else + ret = (value >> shift) & 0xffff; + /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect + * vendor version and spec version information. + */ + if ((spec_reg == SDHCI_HOST_VERSION) && + (esdhc->quirk_incorrect_hostver)) + ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200; + return ret; +} + +static u8 esdhc_readb_fixup(struct sdhci_host *host, + int spec_reg, u32 value) +{ + u8 ret; + u8 dma_bits; + int shift = (spec_reg & 0x3) * 8; + + ret = (value >> shift) & 0xff; + + /* + * "DMA select" is located at offset 0x28 in the SD specification, + * but on P5020 or P3041 it is located at 0x29. + */ + if (spec_reg == SDHCI_HOST_CONTROL) { + /* DMA select is 22,23 bits in Protocol Control Register */ + dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK; + /* fixup the result */ + ret &= ~SDHCI_CTRL_DMA_MASK; + ret |= dma_bits; + } + return ret; +} + +/** + * esdhc_write*_fixup - Fixup the SD spec register value so that it could be + * written into the eSDHC register. + * + * @host: pointer to sdhci_host + * @spec_reg: SD spec register address + * @value: 8/16/32bit SD spec register value that would be written + * @old_value: 32bit eSDHC register value on spec_reg address + * + * In the SD spec, there are 8/16/32/64-bit registers, while all eSDHC + * registers are 32 bits. There are differences in register size, register + * address, register function, bit position and function between the eSDHC + * spec and the SD spec. + * + * Return a fixed up register value + */ +static u32 esdhc_writel_fixup(struct sdhci_host *host, + int spec_reg, u32 value, u32 old_value) +{ + u32 ret; + + /* + * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] + * when SYSCTL[RSTD] is set for some special operations. + * It has no impact on other operations.
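+ * (The IRQSTAT[BGE] status raised this way is consumed, together with + * IRQSTAT[TC], by esdhc_of_adma_workaround() further down.)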
+ */ + if (spec_reg == SDHCI_INT_ENABLE) + ret = value | SDHCI_INT_BLK_GAP; + else + ret = value; + + return ret; +} + +static u32 esdhc_writew_fixup(struct sdhci_host *host, + int spec_reg, u16 value, u32 old_value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int shift = (spec_reg & 0x2) * 8; + u32 ret; + + switch (spec_reg) { + case SDHCI_TRANSFER_MODE: + /* + * Postpone this write, we must do it together with a + * command write that is down below. Return old value. + */ + pltfm_host->xfer_mode_shadow = value; + return old_value; + case SDHCI_COMMAND: + ret = (value << 16) | pltfm_host->xfer_mode_shadow; + return ret; + } + + ret = old_value & (~(0xffff << shift)); + ret |= (value << shift); + + if (spec_reg == SDHCI_BLOCK_SIZE) { + /* + * Two last DMA bits are reserved, and first one is used for + * non-standard blksz of 4096 bytes that we don't support + * yet. So clear the DMA boundary bits. + */ + ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0)); + } + return ret; +} + +static u32 esdhc_writeb_fixup(struct sdhci_host *host, + int spec_reg, u8 value, u32 old_value) +{ + u32 ret; + u32 dma_bits; + u8 tmp; + int shift = (spec_reg & 0x3) * 8; + + /* + * eSDHC doesn't have a standard power control register, so we do + * nothing here to avoid incorrect operation. + */ + if (spec_reg == SDHCI_POWER_CONTROL) + return old_value; + /* + * "DMA select" location is offset 0x28 in SD specification, but on + * P5020 or P3041, it's located at 0x29. + */ + if (spec_reg == SDHCI_HOST_CONTROL) { + /* + * If host control register is not standard, exit + * this function + */ + if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) + return old_value; + + /* DMA select is 22,23 bits in Protocol Control Register */ + dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5; + ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits; + tmp = (value & (~SDHCI_CTRL_DMA_MASK)) | + (old_value & SDHCI_CTRL_DMA_MASK); + ret = (ret & (~0xff)) | tmp; + + /* Prevent SDHCI core from writing reserved bits (e.g. 
HISPD) */ + ret &= ~ESDHC_HOST_CONTROL_RES; + return ret; + } + + ret = (old_value & (~(0xff << shift))) | (value << shift); + return ret; +} + +static u32 esdhc_be_readl(struct sdhci_host *host, int reg) +{ + u32 ret; + u32 value; + + if (reg == SDHCI_CAPABILITIES_1) + value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1); + else + value = ioread32be(host->ioaddr + reg); + + ret = esdhc_readl_fixup(host, reg, value); + + return ret; +} + +static u32 esdhc_le_readl(struct sdhci_host *host, int reg) +{ + u32 ret; + u32 value; + + if (reg == SDHCI_CAPABILITIES_1) + value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1); + else + value = ioread32(host->ioaddr + reg); + + ret = esdhc_readl_fixup(host, reg, value); + + return ret; +} + +static u16 esdhc_be_readw(struct sdhci_host *host, int reg) +{ + u16 ret; + u32 value; + int base = reg & ~0x3; + + value = ioread32be(host->ioaddr + base); + ret = esdhc_readw_fixup(host, reg, value); + return ret; +} + +static u16 esdhc_le_readw(struct sdhci_host *host, int reg) +{ + u16 ret; + u32 value; + int base = reg & ~0x3; + + value = ioread32(host->ioaddr + base); + ret = esdhc_readw_fixup(host, reg, value); + return ret; +} + +static u8 esdhc_be_readb(struct sdhci_host *host, int reg) +{ + u8 ret; + u32 value; + int base = reg & ~0x3; + + value = ioread32be(host->ioaddr + base); + ret = esdhc_readb_fixup(host, reg, value); + return ret; +} + +static u8 esdhc_le_readb(struct sdhci_host *host, int reg) +{ + u8 ret; + u32 value; + int base = reg & ~0x3; + + value = ioread32(host->ioaddr + base); + ret = esdhc_readb_fixup(host, reg, value); + return ret; +} + +static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg) +{ + u32 value; + + value = esdhc_writel_fixup(host, reg, val, 0); + iowrite32be(value, host->ioaddr + reg); +} + +static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg) +{ + u32 value; + + value = esdhc_writel_fixup(host, reg, val, 0); + iowrite32(value, host->ioaddr + reg); +} + +static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + int base = reg & ~0x3; + u32 value; + u32 ret; + + value = ioread32be(host->ioaddr + base); + ret = esdhc_writew_fixup(host, reg, val, value); + if (reg != SDHCI_TRANSFER_MODE) + iowrite32be(ret, host->ioaddr + base); + + /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set + * 1us later after ESDHC_EXTN is set. + */ + if (base == ESDHC_SYSTEM_CONTROL_2) { + if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) && + esdhc->in_sw_tuning) { + udelay(1); + ret |= ESDHC_SMPCLKSEL; + iowrite32be(ret, host->ioaddr + base); + } + } +} + +static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + int base = reg & ~0x3; + u32 value; + u32 ret; + + value = ioread32(host->ioaddr + base); + ret = esdhc_writew_fixup(host, reg, val, value); + if (reg != SDHCI_TRANSFER_MODE) + iowrite32(ret, host->ioaddr + base); + + /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set + * 1us later after ESDHC_EXTN is set. 
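+ * esdhc_execute_sw_tuning() relies on this sequencing when it sets + * esdhc->in_sw_tuning around sdhci_execute_tuning().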
+ */ + if (base == ESDHC_SYSTEM_CONTROL_2) { + if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) && + esdhc->in_sw_tuning) { + udelay(1); + ret |= ESDHC_SMPCLKSEL; + iowrite32(ret, host->ioaddr + base); + } + } +} + +static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg) +{ + int base = reg & ~0x3; + u32 value; + u32 ret; + + value = ioread32be(host->ioaddr + base); + ret = esdhc_writeb_fixup(host, reg, val, value); + iowrite32be(ret, host->ioaddr + base); +} + +static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg) +{ + int base = reg & ~0x3; + u32 value; + u32 ret; + + value = ioread32(host->ioaddr + base); + ret = esdhc_writeb_fixup(host, reg, val, value); + iowrite32(ret, host->ioaddr + base); +} + +/* + * For Abort or Suspend after Stop at Block Gap, ignore the ADMA + * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC]) + * and Block Gap Event(IRQSTAT[BGE]) are also set. + * For Continue, apply soft reset for data(SYSCTL[RSTD]); + * and re-issue the entire read transaction from beginning. + */ +static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + bool applicable; + dma_addr_t dmastart; + dma_addr_t dmanow; + + applicable = (intmask & SDHCI_INT_DATA_END) && + (intmask & SDHCI_INT_BLK_GAP) && + (esdhc->vendor_ver == VENDOR_V_23); + if (!applicable) + return; + + host->data->error = 0; + dmastart = sg_dma_address(host->data->sg); + dmanow = dmastart + host->data->bytes_xfered; + /* + * Force update to the next DMA block boundary. + */ + dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + + SDHCI_DEFAULT_BOUNDARY_SIZE; + host->data->bytes_xfered = dmanow - dmastart; + sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); +} + +static int esdhc_of_enable_dma(struct sdhci_host *host) +{ + int ret; + u32 value; + struct device *dev = mmc_dev(host->mmc); + + if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") || + of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) + return ret; + } + + value = sdhci_readl(host, ESDHC_DMA_SYSCTL); + + if (of_dma_is_coherent(dev->of_node)) + value |= ESDHC_DMA_SNOOP; + else + value &= ~ESDHC_DMA_SNOOP; + + sdhci_writel(host, value, ESDHC_DMA_SYSCTL); + return 0; +} + +static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + + if (esdhc->peripheral_clock) + return esdhc->peripheral_clock; + else + return pltfm_host->clock; +} + +static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + unsigned int clock; + + if (esdhc->peripheral_clock) + clock = esdhc->peripheral_clock; + else + clock = pltfm_host->clock; + return clock / 256 / 16; +} + +static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + ktime_t timeout; + u32 val, clk_en; + + clk_en = ESDHC_CLOCK_SDCLKEN; + + /* + * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version + * is 2.2 or lower. 
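+ * On those controllers the IPG, system and peripheral clock gates are + * toggled together with the card clock (SDCLKEN); newer versions only + * toggle SDCLKEN here.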
+ */ + if (esdhc->vendor_ver <= VENDOR_V_22) + clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | + ESDHC_CLOCK_PEREN); + + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + + if (enable) + val |= clk_en; + else + val &= ~clk_en; + + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); + + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ + timeout = ktime_add_ms(ktime_get(), 20); + while (esdhc->vendor_ver > VENDOR_V_22) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } +} + +static void esdhc_flush_async_fifo(struct sdhci_host *host) +{ + ktime_t timeout; + u32 val; + + val = sdhci_readl(host, ESDHC_DMA_SYSCTL); + val |= ESDHC_FLUSH_ASYNC_FIFO; + sdhci_writel(host, val, ESDHC_DMA_SYSCTL); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) & + ESDHC_FLUSH_ASYNC_FIFO)) + break; + if (timedout) { + pr_err("%s: flushing asynchronous FIFO timeout.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } +} + +static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + unsigned int pre_div = 1, div = 1; + unsigned int clock_fixup = 0; + ktime_t timeout; + u32 temp; + + if (clock == 0) { + host->mmc->actual_clock = 0; + esdhc_clock_enable(host, false); + return; + } + + /* Start pre_div at 2 for vendor version < 2.3. */ + if (esdhc->vendor_ver < VENDOR_V_23) + pre_div = 2; + + /* Fix clock value. */ + if (host->mmc->card && mmc_card_sd(host->mmc->card) && + esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) + clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk; + else if (esdhc->clk_fixup) + clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; + + if (clock_fixup == 0 || clock < clock_fixup) + clock_fixup = clock; + + /* Calculate pre_div and div. */ + while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256) + pre_div *= 2; + + while (host->max_clk / pre_div / div > clock_fixup && div < 16) + div++; + + esdhc->div_ratio = pre_div * div; + + /* Limit clock division for HS400 200MHz clock for quirk. */ + if (esdhc->quirk_limited_clk_division && + clock == MMC_HS200_MAX_DTR && + (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 || + host->flags & SDHCI_HS400_TUNING)) { + if (esdhc->div_ratio <= 4) { + pre_div = 4; + div = 1; + } else if (esdhc->div_ratio <= 8) { + pre_div = 4; + div = 2; + } else if (esdhc->div_ratio <= 12) { + pre_div = 4; + div = 3; + } else { + pr_warn("%s: using unsupported clock division.\n", + mmc_hostname(host->mmc)); + } + esdhc->div_ratio = pre_div * div; + } + + host->mmc->actual_clock = host->max_clk / esdhc->div_ratio; + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->mmc->actual_clock); + + /* Set clock division into register. */ + pre_div >>= 1; + div--; + + esdhc_clock_enable(host, false); + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~ESDHC_CLOCK_MASK; + temp |= ((div << ESDHC_DIVIDER_SHIFT) | + (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + /* + * Wait max 20 ms. 
If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ + timeout = ktime_add_ms(ktime_get(), 20); + while (esdhc->vendor_ver > VENDOR_V_22) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } + + /* Additional setting for HS400. */ + if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && + clock == MMC_HS200_MAX_DTR) { + temp = sdhci_readl(host, ESDHC_TBCTL); + sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL); + temp = sdhci_readl(host, ESDHC_SDCLKCTL); + sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL); + esdhc_clock_enable(host, true); + + temp = sdhci_readl(host, ESDHC_DLLCFG0); + temp |= ESDHC_DLL_ENABLE; + if (host->mmc->actual_clock == MMC_HS200_MAX_DTR) + temp |= ESDHC_DLL_FREQ_SEL; + sdhci_writel(host, temp, ESDHC_DLLCFG0); + + temp |= ESDHC_DLL_RESET; + sdhci_writel(host, temp, ESDHC_DLLCFG0); + udelay(1); + temp &= ~ESDHC_DLL_RESET; + sdhci_writel(host, temp, ESDHC_DLLCFG0); + + /* Wait max 20 ms */ + if (read_poll_timeout(sdhci_readl, temp, + temp & ESDHC_DLL_STS_SLV_LOCK, + 10, 20000, false, + host, ESDHC_DLLSTAT0)) + pr_err("%s: timeout for delay chain lock.\n", + mmc_hostname(host->mmc)); + + temp = sdhci_readl(host, ESDHC_TBCTL); + sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL); + + esdhc_clock_enable(host, false); + esdhc_flush_async_fifo(host); + } + esdhc_clock_enable(host, true); +} + +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) +{ + u32 ctrl; + + ctrl = sdhci_readl(host, ESDHC_PROCTL); + ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK); + switch (width) { + case MMC_BUS_WIDTH_8: + ctrl |= ESDHC_CTRL_8BITBUS; + break; + + case MMC_BUS_WIDTH_4: + ctrl |= ESDHC_CTRL_4BITBUS; + break; + + default: + break; + } + + sdhci_writel(host, ctrl, ESDHC_PROCTL); +} + +static void esdhc_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u32 val, bus_width = 0; + + /* + * Add delay to make sure all the DMA transfers are finished + * for quirk. + */ + if (esdhc->quirk_delay_before_data_reset && + (mask & SDHCI_RESET_DATA) && + (host->flags & SDHCI_REQ_USE_DMA)) + mdelay(5); + + /* + * Save bus-width for eSDHC whose vendor version is 2.2 + * or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; + } + + sdhci_reset(host, mask); + + /* + * Restore bus-width setting and interrupt registers for eSDHC + * whose vendor version is 2.2 or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + val &= ~ESDHC_CTRL_BUSWIDTH_MASK; + val |= bus_width; + sdhci_writel(host, val, ESDHC_PROCTL); + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + + /* + * Some bits have to be cleaned manually for eSDHC whose spec + * version is higher than 3.0 for all reset. 
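+ * Specifically, the tuning block enable (TBCTL[TB_EN]) is cleared + * below, and on parts with unreliable pulse detection the + * DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] bit is cleared as well.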
+ */ + if ((mask & SDHCI_RESET_ALL) && + (esdhc->spec_ver >= SDHCI_SPEC_300)) { + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + + /* + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to + * 0 for quirk. + */ + if (esdhc->quirk_unreliable_pulse_detection) { + val = sdhci_readl(host, ESDHC_DLLCFG1); + val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; + sdhci_writel(host, val, ESDHC_DLLCFG1); + } + } +} + +/* The SCFG, Supplemental Configuration Unit, provides SoC specific + * configuration and status registers for the device. There is a + * SDHC IO VSEL control register on SCFG for some platforms. It's + * used to support SDHC IO voltage switching. + */ +static const struct of_device_id scfg_device_ids[] = { + { .compatible = "fsl,t1040-scfg", }, + { .compatible = "fsl,ls1012a-scfg", }, + { .compatible = "fsl,ls1046a-scfg", }, + {} +}; + +/* SDHC IO VSEL control register definition */ +#define SCFG_SDHCIOVSELCR 0x408 +#define SDHCIOVSELCR_TGLEN 0x80000000 +#define SDHCIOVSELCR_VSELVAL 0x60000000 +#define SDHCIOVSELCR_SDHC_VS 0x00000001 + +static int esdhc_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct device_node *scfg_node; + void __iomem *scfg_base = NULL; + u32 sdhciovselcr; + u32 val; + + /* + * Signal Voltage Switching is only applicable for Host Controllers + * v3.00 and above. + */ + if (host->version < SDHCI_SPEC_300) + return 0; + + val = sdhci_readl(host, ESDHC_PROCTL); + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + val &= ~ESDHC_VOLT_SEL; + sdhci_writel(host, val, ESDHC_PROCTL); + return 0; + case MMC_SIGNAL_VOLTAGE_180: + scfg_node = of_find_matching_node(NULL, scfg_device_ids); + if (scfg_node) + scfg_base = of_iomap(scfg_node, 0); + of_node_put(scfg_node); + if (scfg_base) { + sdhciovselcr = SDHCIOVSELCR_TGLEN | + SDHCIOVSELCR_VSELVAL; + iowrite32be(sdhciovselcr, + scfg_base + SCFG_SDHCIOVSELCR); + + val |= ESDHC_VOLT_SEL; + sdhci_writel(host, val, ESDHC_PROCTL); + mdelay(5); + + sdhciovselcr = SDHCIOVSELCR_TGLEN | + SDHCIOVSELCR_SDHC_VS; + iowrite32be(sdhciovselcr, + scfg_base + SCFG_SDHCIOVSELCR); + iounmap(scfg_base); + } else { + val |= ESDHC_VOLT_SEL; + sdhci_writel(host, val, ESDHC_PROCTL); + } + return 0; + default: + return 0; + } +} + +static struct soc_device_attribute soc_tuning_erratum_type1[] = { + { .family = "QorIQ T1023", }, + { .family = "QorIQ T1040", }, + { .family = "QorIQ T2080", }, + { .family = "QorIQ LS1021A", }, + { }, +}; + +static struct soc_device_attribute soc_tuning_erratum_type2[] = { + { .family = "QorIQ LS1012A", }, + { .family = "QorIQ LS1043A", }, + { .family = "QorIQ LS1046A", }, + { .family = "QorIQ LS1080A", }, + { .family = "QorIQ LS2080A", }, + { .family = "QorIQ LA1575A", }, + { }, +}; + +static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable) +{ + u32 val; + + esdhc_clock_enable(host, false); + esdhc_flush_async_fifo(host); + + val = sdhci_readl(host, ESDHC_TBCTL); + if (enable) + val |= ESDHC_TB_EN; + else + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + + esdhc_clock_enable(host, true); +} + +static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + u32 val; + + /* Write TBCTL[11:8]=4'h8 */ + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~(0xf << 8); + val |= 8 << 8; + sdhci_writel(host, val, ESDHC_TBCTL); + + mdelay(1); + + /* Read TBCTL[31:0] register and rewrite again */ + val = sdhci_readl(host, 
ESDHC_TBCTL); + sdhci_writel(host, val, ESDHC_TBCTL); + + mdelay(1); + + /* Read the TBSTAT[31:0] register twice */ + val = sdhci_readl(host, ESDHC_TBSTAT); + val = sdhci_readl(host, ESDHC_TBSTAT); + + *window_end = val & 0xff; + *window_start = (val >> 8) & 0xff; +} + +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 start_ptr, end_ptr; + + if (esdhc->quirk_tuning_erratum_type1) { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + return; + } + + esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr); + + /* Reset data lines by setting ESDHCCTL[RSTD] */ + sdhci_reset(host, SDHCI_RESET_DATA); + /* Write 32'hFFFF_FFFF to IRQSTAT register */ + sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS); + + /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2 + * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2, + * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio + * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio. + */ + + if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) { + *window_start = 8 * esdhc->div_ratio; + *window_end = 4 * esdhc->div_ratio; + } else { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + } +} + +static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode, + u8 window_start, u8 window_end) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u32 val; + int ret; + + /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */ + val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) & + ESDHC_WNDW_STRT_PTR_MASK; + val |= window_end & ESDHC_WNDW_END_PTR_MASK; + sdhci_writel(host, val, ESDHC_TBPTR); + + /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */ + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_MODE_MASK; + val |= ESDHC_TB_MODE_SW; + sdhci_writel(host, val, ESDHC_TBCTL); + + esdhc->in_sw_tuning = true; + ret = sdhci_execute_tuning(mmc, opcode); + esdhc->in_sw_tuning = false; + return ret; +} + +static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 window_start, window_end; + int ret, retries = 1; + bool hs400_tuning; + unsigned int clk; + u32 val; + + /* For tuning mode, the sd clock divisor value + * must be larger than 3 according to reference manual. + */ + clk = esdhc->peripheral_clock / 3; + if (host->clock > clk) + esdhc_of_set_clock(host, clk); + + esdhc_tuning_block_enable(host, true); + + /* + * The eSDHC controller takes the data timeout value into account + * during tuning. If the SD card is too slow sending the response, the + * timer will expire and a "Buffer Read Ready" interrupt without data + * is triggered. This leads to tuning errors. + * + * Just set the timeout to the maximum value because the core will + * already take care of it in sdhci_send_tuning(). 
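+ * (0xe is the largest count the SDHCI timeout control register + * accepts, i.e. a data timeout of TMCLK * 2^27.)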
+ */ + sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL); + + hs400_tuning = host->flags & SDHCI_HS400_TUNING; + + do { + if (esdhc->quirk_limited_clk_division && + hs400_tuning) + esdhc_of_set_clock(host, host->clock); + + /* Do HW tuning */ + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_MODE_MASK; + val |= ESDHC_TB_MODE_3; + sdhci_writel(host, val, ESDHC_TBCTL); + + ret = sdhci_execute_tuning(mmc, opcode); + if (ret) + break; + + /* For type2 affected platforms of the tuning erratum, + * tuning may succeed although eSDHC might not have + * tuned properly. Need to check tuning window. + */ + if (esdhc->quirk_tuning_erratum_type2 && + !host->tuning_err) { + esdhc_tuning_window_ptr(host, &window_start, + &window_end); + if (abs(window_start - window_end) > + (4 * esdhc->div_ratio + 2)) + host->tuning_err = -EAGAIN; + } + + /* If HW tuning fails and triggers erratum, + * try workaround. + */ + ret = host->tuning_err; + if (ret == -EAGAIN && + (esdhc->quirk_tuning_erratum_type1 || + esdhc->quirk_tuning_erratum_type2)) { + /* Recover HS400 tuning flag */ + if (hs400_tuning) + host->flags |= SDHCI_HS400_TUNING; + pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n", + mmc_hostname(mmc)); + /* Do SW tuning */ + esdhc_prepare_sw_tuning(host, &window_start, + &window_end); + ret = esdhc_execute_sw_tuning(mmc, opcode, + window_start, + window_end); + if (ret) + break; + + /* Retry both HW/SW tuning with reduced clock. */ + ret = host->tuning_err; + if (ret == -EAGAIN && retries) { + /* Recover HS400 tuning flag */ + if (hs400_tuning) + host->flags |= SDHCI_HS400_TUNING; + + clk = host->max_clk / (esdhc->div_ratio + 1); + esdhc_of_set_clock(host, clk); + pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n", + mmc_hostname(mmc)); + } else { + break; + } + } else { + break; + } + } while (retries--); + + if (ret) { + esdhc_tuning_block_enable(host, false); + } else if (hs400_tuning) { + val = sdhci_readl(host, ESDHC_SDTIMNGCTL); + val |= ESDHC_FLW_CTL_BG; + sdhci_writel(host, val, ESDHC_SDTIMNGCTL); + } + + return ret; +} + +static void esdhc_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u32 val; + + /* + * There are specific registers setting for HS400 mode. + * Clean all of them if controller is in HS400 mode to + * exit HS400 mode before re-setting any speed mode. 
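+ * The teardown below mirrors the HS400 setup in esdhc_of_set_clock(): + * FLW_CTL_BG, CMD_CLK_CTL, the HS400 mode and window-adjust bits and + * the DLL configuration are all cleared, and the tuning block is + * disabled.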
+ */ + val = sdhci_readl(host, ESDHC_TBCTL); + if (val & ESDHC_HS400_MODE) { + val = sdhci_readl(host, ESDHC_SDTIMNGCTL); + val &= ~ESDHC_FLW_CTL_BG; + sdhci_writel(host, val, ESDHC_SDTIMNGCTL); + + val = sdhci_readl(host, ESDHC_SDCLKCTL); + val &= ~ESDHC_CMD_CLK_CTL; + sdhci_writel(host, val, ESDHC_SDCLKCTL); + + esdhc_clock_enable(host, false); + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_HS400_MODE; + sdhci_writel(host, val, ESDHC_TBCTL); + esdhc_clock_enable(host, true); + + val = sdhci_readl(host, ESDHC_DLLCFG0); + val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL); + sdhci_writel(host, val, ESDHC_DLLCFG0); + + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_HS400_WNDW_ADJUST; + sdhci_writel(host, val, ESDHC_TBCTL); + + esdhc_tuning_block_enable(host, false); + } + + if (timing == MMC_TIMING_MMC_HS400) + esdhc_tuning_block_enable(host, true); + else + sdhci_set_uhs_signaling(host, timing); +} + +static u32 esdhc_irq(struct sdhci_host *host, u32 intmask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u32 command; + + if (esdhc->quirk_trans_complete_erratum) { + command = SDHCI_GET_CMD(sdhci_readw(host, + SDHCI_COMMAND)); + if (command == MMC_WRITE_MULTIPLE_BLOCK && + sdhci_readw(host, SDHCI_BLOCK_COUNT) && + intmask & SDHCI_INT_DATA_END) { + intmask &= ~SDHCI_INT_DATA_END; + sdhci_writel(host, SDHCI_INT_DATA_END, + SDHCI_INT_STATUS); + } + } + return intmask; +} + +#ifdef CONFIG_PM_SLEEP +static u32 esdhc_proctl; +static int esdhc_of_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL); + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + return sdhci_suspend_host(host); +} + +static int esdhc_of_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + int ret = sdhci_resume_host(host); + + if (ret == 0) { + /* Isn't this already done by sdhci_resume_host() ? 
--rmk */ + esdhc_of_enable_dma(host); + sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); + } + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops, + esdhc_of_suspend, + esdhc_of_resume); + +static const struct sdhci_ops sdhci_esdhc_be_ops = { + .read_l = esdhc_be_readl, + .read_w = esdhc_be_readw, + .read_b = esdhc_be_readb, + .write_l = esdhc_be_writel, + .write_w = esdhc_be_writew, + .write_b = esdhc_be_writeb, + .set_clock = esdhc_of_set_clock, + .enable_dma = esdhc_of_enable_dma, + .get_max_clock = esdhc_of_get_max_clock, + .get_min_clock = esdhc_of_get_min_clock, + .adma_workaround = esdhc_of_adma_workaround, + .set_bus_width = esdhc_pltfm_set_bus_width, + .reset = esdhc_reset, + .set_uhs_signaling = esdhc_set_uhs_signaling, + .irq = esdhc_irq, +}; + +static const struct sdhci_ops sdhci_esdhc_le_ops = { + .read_l = esdhc_le_readl, + .read_w = esdhc_le_readw, + .read_b = esdhc_le_readb, + .write_l = esdhc_le_writel, + .write_w = esdhc_le_writew, + .write_b = esdhc_le_writeb, + .set_clock = esdhc_of_set_clock, + .enable_dma = esdhc_of_enable_dma, + .get_max_clock = esdhc_of_get_max_clock, + .get_min_clock = esdhc_of_get_min_clock, + .adma_workaround = esdhc_of_adma_workaround, + .set_bus_width = esdhc_pltfm_set_bus_width, + .reset = esdhc_reset, + .set_uhs_signaling = esdhc_set_uhs_signaling, + .irq = esdhc_irq, +}; + +static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = { + .quirks = ESDHC_DEFAULT_QUIRKS | +#ifdef CONFIG_PPC + SDHCI_QUIRK_BROKEN_CARD_DETECTION | +#endif + SDHCI_QUIRK_NO_CARD_NO_RESET | + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .ops = &sdhci_esdhc_be_ops, +}; + +static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = { + .quirks = ESDHC_DEFAULT_QUIRKS | + SDHCI_QUIRK_NO_CARD_NO_RESET | + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .ops = &sdhci_esdhc_le_ops, +}; + +static struct soc_device_attribute soc_incorrect_hostver[] = { + { .family = "QorIQ T4240", .revision = "1.0", }, + { .family = "QorIQ T4240", .revision = "2.0", }, + { }, +}; + +static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = { + { .family = "QorIQ LX2160A", .revision = "1.0", }, + { .family = "QorIQ LX2160A", .revision = "2.0", }, + { .family = "QorIQ LS1028A", .revision = "1.0", }, + { }, +}; + +static struct soc_device_attribute soc_unreliable_pulse_detection[] = { + { .family = "QorIQ LX2160A", .revision = "1.0", }, + { .family = "QorIQ LX2160A", .revision = "2.0", }, + { .family = "QorIQ LS1028A", .revision = "1.0", }, + { }, +}; + +static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) +{ + const struct of_device_id *match; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_esdhc *esdhc; + struct device_node *np; + struct clk *clk; + u32 val; + u16 host_ver; + + pltfm_host = sdhci_priv(host); + esdhc = sdhci_pltfm_priv(pltfm_host); + + host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); + esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT; + esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; + if (soc_device_match(soc_incorrect_hostver)) + esdhc->quirk_incorrect_hostver = true; + else + esdhc->quirk_incorrect_hostver = false; + + if (soc_device_match(soc_fixup_sdhc_clkdivs)) + esdhc->quirk_limited_clk_division = true; + else + esdhc->quirk_limited_clk_division = false; + + if (soc_device_match(soc_unreliable_pulse_detection)) + esdhc->quirk_unreliable_pulse_detection = true; + else + esdhc->quirk_unreliable_pulse_detection = false; + + match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node); + if 
(match) + esdhc->clk_fixup = match->data; + np = pdev->dev.of_node; + + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { + esdhc->quirk_delay_before_data_reset = true; + esdhc->quirk_trans_complete_erratum = true; + } + + clk = of_clk_get(np, 0); + if (!IS_ERR(clk)) { + /* + * esdhc->peripheral_clock is assigned the eSDHC base clock + * value when the peripheral clock is used. + * On some platforms, the clock value returned by the common + * clk API is the peripheral clock, while the eSDHC base + * clock is 1/2 of the peripheral clock. + */ + if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") || + of_device_is_compatible(np, "fsl,ls1028a-esdhc") || + of_device_is_compatible(np, "fsl,ls1088a-esdhc")) + esdhc->peripheral_clock = clk_get_rate(clk) / 2; + else + esdhc->peripheral_clock = clk_get_rate(clk); + + clk_put(clk); + } + + esdhc_clock_enable(host, false); + val = sdhci_readl(host, ESDHC_DMA_SYSCTL); + /* + * This bit cannot be reset by SDHCI_RESET_ALL. It needs to be + * initialized to 1 or 0 once, to override whatever value may + * have been configured in the bootloader. + */ + if (esdhc->peripheral_clock) + val |= ESDHC_PERIPHERAL_CLK_SEL; + else + val &= ~ESDHC_PERIPHERAL_CLK_SEL; + sdhci_writel(host, val, ESDHC_DMA_SYSCTL); + esdhc_clock_enable(host, true); +} + +static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc) +{ + esdhc_tuning_block_enable(mmc_priv(mmc), false); + return 0; +} + +static int sdhci_esdhc_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct device_node *np; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_esdhc *esdhc; + int ret; + + np = pdev->dev.of_node; + + if (of_property_read_bool(np, "little-endian")) + host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, + sizeof(struct sdhci_esdhc)); + else + host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, + sizeof(struct sdhci_esdhc)); + + if (IS_ERR(host)) + return PTR_ERR(host); + + host->mmc_host_ops.start_signal_voltage_switch = + esdhc_signal_voltage_switch; + host->mmc_host_ops.execute_tuning = esdhc_execute_tuning; + host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr; + host->tuning_delay = 1; + + esdhc_init(pdev, host); + + sdhci_get_of_property(pdev); + + pltfm_host = sdhci_priv(host); + esdhc = sdhci_pltfm_priv(pltfm_host); + if (soc_device_match(soc_tuning_erratum_type1)) + esdhc->quirk_tuning_erratum_type1 = true; + else + esdhc->quirk_tuning_erratum_type1 = false; + + if (soc_device_match(soc_tuning_erratum_type2)) + esdhc->quirk_tuning_erratum_type2 = true; + else + esdhc->quirk_tuning_erratum_type2 = false; + + if (esdhc->vendor_ver == VENDOR_V_22) + host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; + + if (esdhc->vendor_ver > VENDOR_V_22) + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) { + host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + } + + if (of_device_is_compatible(np, "fsl,p5040-esdhc") || + of_device_is_compatible(np, "fsl,p5020-esdhc") || + of_device_is_compatible(np, "fsl,p4080-esdhc") || + of_device_is_compatible(np, "fsl,p1020-esdhc") || + of_device_is_compatible(np, "fsl,t1040-esdhc")) + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + if (of_device_is_compatible(np, "fsl,ls1021a-esdhc")) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + + esdhc->quirk_ignore_data_inhibit = false; + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { + /* + * Freescale messed up with P2020 as it has a non-standard + * host control register + */
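+		/* + * With SDHCI_QUIRK2_BROKEN_HOST_CONTROL set, esdhc_writeb_fixup() + * leaves the host control register untouched instead of relocating + * the DMA select bits. + */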
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL; + esdhc->quirk_ignore_data_inhibit = true; + } + + /* call to generic mmc_of_parse to support additional capabilities */ + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + + mmc_of_parse_voltage(np, &host->ocr_mask); + + ret = sdhci_add_host(host); + if (ret) + goto err; + + return 0; + err: + sdhci_pltfm_free(pdev); + return ret; +} + +static struct platform_driver sdhci_esdhc_driver = { + .driver = { + .name = "sdhci-esdhc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_esdhc_of_match, + .pm = &esdhc_of_dev_pm_ops, + }, + .probe = sdhci_esdhc_probe, + .remove = sdhci_pltfm_unregister, +}; + +module_platform_driver(sdhci_esdhc_driver); + +MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC"); +MODULE_AUTHOR("Xiaobo Xie , " + "Anton Vorontsov "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c new file mode 100644 index 000000000..12675797b --- /dev/null +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * drivers/mmc/host/sdhci-of-hlwd.c + * + * Nintendo Wii Secure Digital Host Controller Interface. + * Copyright (C) 2009 The GameCube Linux Team + * Copyright (C) 2009 Albert Herranz + * + * Based on sdhci-of-esdhc.c + * + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. + * + * Authors: Xiaobo Xie + * Anton Vorontsov + */ + +#include +#include +#include +#include "sdhci-pltfm.h" + +/* + * Ops and quirks for the Nintendo Wii SDHCI controllers. + */ + +/* + * We need a small delay after each write, or things go horribly wrong. + */ +#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */ + +static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg) +{ + sdhci_be32bs_writel(host, val, reg); + udelay(SDHCI_HLWD_WRITE_DELAY); +} + +static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg) +{ + sdhci_be32bs_writew(host, val, reg); + udelay(SDHCI_HLWD_WRITE_DELAY); +} + +static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg) +{ + sdhci_be32bs_writeb(host, val, reg); + udelay(SDHCI_HLWD_WRITE_DELAY); +} + +static const struct sdhci_ops sdhci_hlwd_ops = { + .read_l = sdhci_be32bs_readl, + .read_w = sdhci_be32bs_readw, + .read_b = sdhci_be32bs_readb, + .write_l = sdhci_hlwd_writel, + .write_w = sdhci_hlwd_writew, + .write_b = sdhci_hlwd_writeb, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_hlwd_pdata = { + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE, + .ops = &sdhci_hlwd_ops, +}; + +static int sdhci_hlwd_probe(struct platform_device *pdev) +{ + return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0); +} + +static const struct of_device_id sdhci_hlwd_of_match[] = { + { .compatible = "nintendo,hollywood-sdhci" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match); + +static struct platform_driver sdhci_hlwd_driver = { + .driver = { + .name = "sdhci-hlwd", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_hlwd_of_match, + .pm = &sdhci_pltfm_pmops, + }, + .probe = sdhci_hlwd_probe, + .remove = sdhci_pltfm_unregister, +}; + +module_platform_driver(sdhci_hlwd_driver); + +MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver"); +MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/mmc/host/sdhci-of-sparx5.c b/drivers/mmc/host/sdhci-of-sparx5.c new file mode 100644 index 000000000..28e4ee69e --- /dev/null +++ b/drivers/mmc/host/sdhci-of-sparx5.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * drivers/mmc/host/sdhci-of-sparx5.c + * + * MCHP Sparx5 SoC Secure Digital Host Controller Interface. + * + * Copyright (c) 2019 Microchip Inc. + * + * Author: Lars Povlsen + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" + +#define CPU_REGS_GENERAL_CTRL (0x22 * 4) +#define MSHC_DLY_CC_MASK GENMASK(16, 13) +#define MSHC_DLY_CC_SHIFT 13 +#define MSHC_DLY_CC_MAX 15 + +#define CPU_REGS_PROC_CTRL (0x2C * 4) +#define ACP_CACHE_FORCE_ENA BIT(4) +#define ACP_AWCACHE BIT(3) +#define ACP_ARCACHE BIT(2) +#define ACP_CACHE_MASK (ACP_CACHE_FORCE_ENA|ACP_AWCACHE|ACP_ARCACHE) + +#define MSHC2_VERSION 0x500 /* Off 0x140, reg 0x0 */ +#define MSHC2_TYPE 0x504 /* Off 0x140, reg 0x1 */ +#define MSHC2_EMMC_CTRL 0x52c /* Off 0x140, reg 0xB */ +#define MSHC2_EMMC_CTRL_EMMC_RST_N BIT(2) +#define MSHC2_EMMC_CTRL_IS_EMMC BIT(0) + +struct sdhci_sparx5_data { + struct sdhci_host *host; + struct regmap *cpu_ctrl; + int delay_clock; +}; + +#define BOUNDARY_OK(addr, len) \ + ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) + +/* + * If DMA addr spans 128MB boundary, we split the DMA transfer into two + * so that each DMA transfer doesn't exceed the boundary. + */ +static void sdhci_sparx5_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, + unsigned int cmd) +{ + int tmplen, offset; + + if (likely(!len || BOUNDARY_OK(addr, len))) { + sdhci_adma_write_desc(host, desc, addr, len, cmd); + return; + } + + pr_debug("%s: write_desc: splitting dma len %d, offset %pad\n", + mmc_hostname(host->mmc), len, &addr); + + offset = addr & (SZ_128M - 1); + tmplen = SZ_128M - offset; + sdhci_adma_write_desc(host, desc, addr, tmplen, cmd); + + addr += tmplen; + len -= tmplen; + sdhci_adma_write_desc(host, desc, addr, len, cmd); +} + +static void sparx5_set_cacheable(struct sdhci_host *host, u32 value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_sparx5_data *sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host); + + pr_debug("%s: Set Cacheable = 0x%x\n", mmc_hostname(host->mmc), value); + + /* Update ACP caching attributes in HW */ + regmap_update_bits(sdhci_sparx5->cpu_ctrl, + CPU_REGS_PROC_CTRL, ACP_CACHE_MASK, value); +} + +static void sparx5_set_delay(struct sdhci_host *host, u8 value) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_sparx5_data *sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host); + + pr_debug("%s: Set DLY_CC = %u\n", mmc_hostname(host->mmc), value); + + /* Update DLY_CC in HW */ + regmap_update_bits(sdhci_sparx5->cpu_ctrl, + CPU_REGS_GENERAL_CTRL, + MSHC_DLY_CC_MASK, + (value << MSHC_DLY_CC_SHIFT)); +} + +static void sdhci_sparx5_set_emmc(struct sdhci_host *host) +{ + if (!mmc_card_is_removable(host->mmc)) { + u8 value; + + value = sdhci_readb(host, MSHC2_EMMC_CTRL); + if (!(value & MSHC2_EMMC_CTRL_IS_EMMC)) { + value |= MSHC2_EMMC_CTRL_IS_EMMC; + pr_debug("%s: Set EMMC_CTRL: 0x%08x\n", + mmc_hostname(host->mmc), value); + sdhci_writeb(host, value, MSHC2_EMMC_CTRL); + } + } +} + +static void sdhci_sparx5_reset_emmc(struct sdhci_host *host) +{ + u8 value; + + pr_debug("%s: Toggle EMMC_CTRL.EMMC_RST_N\n", mmc_hostname(host->mmc)); + value = sdhci_readb(host, MSHC2_EMMC_CTRL) & + ~MSHC2_EMMC_CTRL_EMMC_RST_N; + sdhci_writeb(host, value, 
MSHC2_EMMC_CTRL); + /* For eMMC, minimum is 1us but give it 10us for good measure */ + usleep_range(10, 20); + sdhci_writeb(host, value | MSHC2_EMMC_CTRL_EMMC_RST_N, + MSHC2_EMMC_CTRL); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 400); +} + +static void sdhci_sparx5_reset(struct sdhci_host *host, u8 mask) +{ + pr_debug("%s: *** RESET: mask %d\n", mmc_hostname(host->mmc), mask); + + sdhci_reset(host, mask); + + /* Be sure CARD_IS_EMMC stays set */ + sdhci_sparx5_set_emmc(host); +} + +static const struct sdhci_ops sdhci_sparx5_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .reset = sdhci_sparx5_reset, + .adma_write_desc = sdhci_sparx5_adma_write_desc, +}; + +static const struct sdhci_pltfm_data sdhci_sparx5_pdata = { + .quirks = 0, + .quirks2 = SDHCI_QUIRK2_HOST_NO_CMD23 | /* Controller issue */ + SDHCI_QUIRK2_NO_1_8_V, /* No sdr104, ddr50, etc */ + .ops = &sdhci_sparx5_ops, +}; + +static int sdhci_sparx5_probe(struct platform_device *pdev) +{ + int ret; + const char *syscon = "microchip,sparx5-cpu-syscon"; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_sparx5_data *sdhci_sparx5; + struct device_node *np = pdev->dev.of_node; + u32 value; + u32 extra; + + host = sdhci_pltfm_init(pdev, &sdhci_sparx5_pdata, + sizeof(*sdhci_sparx5)); + + if (IS_ERR(host)) + return PTR_ERR(host); + + /* + * extra adma table cnt for cross 128M boundary handling. + */ + extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M); + if (extra > SDHCI_MAX_SEGS) + extra = SDHCI_MAX_SEGS; + host->adma_table_cnt += extra; + + pltfm_host = sdhci_priv(host); + sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host); + sdhci_sparx5->host = host; + + pltfm_host->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + dev_err(&pdev->dev, "failed to get core clk: %d\n", ret); + goto free_pltfm; + } + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + goto free_pltfm; + + if (!of_property_read_u32(np, "microchip,clock-delay", &value) && + (value > 0 && value <= MSHC_DLY_CC_MAX)) + sdhci_sparx5->delay_clock = value; + + sdhci_get_of_property(pdev); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_clk; + + sdhci_sparx5->cpu_ctrl = syscon_regmap_lookup_by_compatible(syscon); + if (IS_ERR(sdhci_sparx5->cpu_ctrl)) { + dev_err(&pdev->dev, "No CPU syscon regmap !\n"); + ret = PTR_ERR(sdhci_sparx5->cpu_ctrl); + goto err_clk; + } + + if (sdhci_sparx5->delay_clock >= 0) + sparx5_set_delay(host, sdhci_sparx5->delay_clock); + + if (!mmc_card_is_removable(host->mmc)) { + /* Do a HW reset of eMMC card */ + sdhci_sparx5_reset_emmc(host); + /* Update EMMC_CTRL */ + sdhci_sparx5_set_emmc(host); + /* If eMMC, disable SD and SDIO */ + host->mmc->caps2 |= (MMC_CAP2_NO_SDIO|MMC_CAP2_NO_SD); + } + + ret = sdhci_add_host(host); + if (ret) + goto err_clk; + + /* Set AXI bus master to use un-cached access (for DMA) */ + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA) && + IS_ENABLED(CONFIG_DMA_DECLARE_COHERENT)) + sparx5_set_cacheable(host, ACP_CACHE_FORCE_ENA); + + pr_debug("%s: SDHC version: 0x%08x\n", + mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_VERSION)); + pr_debug("%s: SDHC type: 0x%08x\n", + mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_TYPE)); + + return ret; + +err_clk: + clk_disable_unprepare(pltfm_host->clk); +free_pltfm: + sdhci_pltfm_free(pdev); + return 
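/*
 * Illustrative check (userspace) of the "extra" ADMA descriptor count
 * computed in sdhci_sparx5_probe() above: a 32-bit DMA mask spans 32
 * windows of 128M, so at most 32 segments can each need one extra
 * descriptor for a boundary split, capped at SDHCI_MAX_SEGS.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_128M			(128ull * 1024 * 1024)
#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t mask32 = 0xffffffffull;	/* dma_get_required_mask() stand-in */

	printf("extra descriptors for a 32-bit mask: %llu\n",
	       (unsigned long long)DIV_ROUND_UP_ULL(mask32, SZ_128M)); /* 32 */
	return 0;
}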
ret; +} + +static const struct of_device_id sdhci_sparx5_of_match[] = { + { .compatible = "microchip,dw-sparx5-sdhci" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_sparx5_of_match); + +static struct platform_driver sdhci_sparx5_driver = { + .driver = { + .name = "sdhci-sparx5", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_sparx5_of_match, + .pm = &sdhci_pltfm_pmops, + }, + .probe = sdhci_sparx5_probe, + .remove = sdhci_pltfm_unregister, +}; + +module_platform_driver(sdhci_sparx5_driver); + +MODULE_DESCRIPTION("Sparx5 SDHCI OF driver"); +MODULE_AUTHOR("Lars Povlsen "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c new file mode 100644 index 000000000..53c362bb2 --- /dev/null +++ b/drivers/mmc/host/sdhci-omap.c @@ -0,0 +1,1325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/** + * SDHCI Controller driver for TI's OMAP SoCs + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" + +#define SDHCI_OMAP_CON 0x12c +#define CON_DW8 BIT(5) +#define CON_DMA_MASTER BIT(20) +#define CON_DDR BIT(19) +#define CON_CLKEXTFREE BIT(16) +#define CON_PADEN BIT(15) +#define CON_CTPL BIT(11) +#define CON_INIT BIT(1) +#define CON_OD BIT(0) + +#define SDHCI_OMAP_DLL 0x0134 +#define DLL_SWT BIT(20) +#define DLL_FORCE_SR_C_SHIFT 13 +#define DLL_FORCE_SR_C_MASK (0x7f << DLL_FORCE_SR_C_SHIFT) +#define DLL_FORCE_VALUE BIT(12) +#define DLL_CALIB BIT(1) + +#define SDHCI_OMAP_CMD 0x20c + +#define SDHCI_OMAP_PSTATE 0x0224 +#define PSTATE_DLEV_DAT0 BIT(20) +#define PSTATE_DATI BIT(1) + +#define SDHCI_OMAP_HCTL 0x228 +#define HCTL_SDBP BIT(8) +#define HCTL_SDVS_SHIFT 9 +#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT) + +#define SDHCI_OMAP_SYSCTL 0x22c +#define SYSCTL_CEN BIT(2) +#define SYSCTL_CLKD_SHIFT 6 +#define SYSCTL_CLKD_MASK 0x3ff + +#define SDHCI_OMAP_STAT 0x230 + +#define SDHCI_OMAP_IE 0x234 +#define INT_CC_EN BIT(0) + +#define SDHCI_OMAP_ISE 0x238 + +#define SDHCI_OMAP_AC12 0x23c +#define AC12_V1V8_SIGEN BIT(19) +#define AC12_SCLK_SEL BIT(23) + +#define SDHCI_OMAP_CAPA 0x240 +#define CAPA_VS33 BIT(24) +#define CAPA_VS30 BIT(25) +#define CAPA_VS18 BIT(26) + +#define SDHCI_OMAP_CAPA2 0x0244 +#define CAPA2_TSDR50 BIT(13) + +#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */ + +#define SYSCTL_CLKD_MAX 0x3FF + +#define IOV_1V8 1800000 /* 180000 uV */ +#define IOV_3V0 3000000 /* 300000 uV */ +#define IOV_3V3 3300000 /* 330000 uV */ + +#define MAX_PHASE_DELAY 0x7C + +/* sdhci-omap controller flags */ +#define SDHCI_OMAP_REQUIRE_IODELAY BIT(0) +#define SDHCI_OMAP_SPECIAL_RESET BIT(1) + +struct sdhci_omap_data { + u32 offset; + u8 flags; +}; + +struct sdhci_omap_host { + char *version; + void __iomem *base; + struct device *dev; + struct regulator *pbias; + bool pbias_enabled; + struct sdhci_host *host; + u8 bus_mode; + u8 power_mode; + u8 timing; + u8 flags; + + struct pinctrl *pinctrl; + struct pinctrl_state **pinctrl_state; + bool is_tuning; + /* Omap specific context save */ + u32 con; + u32 hctl; + u32 sysctl; + u32 capa; + u32 ie; + u32 ise; +}; + +static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host); +static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host); + +static inline u32 sdhci_omap_readl(struct 
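/*
 * The OMAP register definitions above all follow the same mask/shift
 * idiom. A small standalone illustration (userspace, no hardware
 * access) of the read-modify-write it implies, using the HCTL.SDVS
 * voltage-select field:
 */
#include <stdio.h>
#include <stdint.h>

#define HCTL_SDVS_SHIFT	9
#define HCTL_SDVS_MASK	(0x7u << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_33	(0x7u << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_18	(0x5u << HCTL_SDVS_SHIFT)

static uint32_t set_sdvs(uint32_t hctl, uint32_t sdvs)
{
	hctl &= ~HCTL_SDVS_MASK;	/* clear the old voltage select */
	hctl |= sdvs;			/* insert the new, pre-shifted value */
	return hctl;
}

int main(void)
{
	uint32_t reg = set_sdvs(HCTL_SDVS_33 | 0x1, HCTL_SDVS_18);

	printf("HCTL=%#x, SDVS field=%#x\n", reg,
	       (reg & HCTL_SDVS_MASK) >> HCTL_SDVS_SHIFT);	/* 0x5 */
	return 0;
}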
sdhci_omap_host *host, + unsigned int offset) +{ + return readl(host->base + offset); +} + +static inline void sdhci_omap_writel(struct sdhci_omap_host *host, + unsigned int offset, u32 data) +{ + writel(data, host->base + offset); +} + +static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host, + bool power_on, unsigned int iov) +{ + int ret; + struct device *dev = omap_host->dev; + + if (IS_ERR(omap_host->pbias)) + return 0; + + if (power_on) { + ret = regulator_set_voltage(omap_host->pbias, iov, iov); + if (ret) { + dev_err(dev, "pbias set voltage failed\n"); + return ret; + } + + if (omap_host->pbias_enabled) + return 0; + + ret = regulator_enable(omap_host->pbias); + if (ret) { + dev_err(dev, "pbias reg enable fail\n"); + return ret; + } + + omap_host->pbias_enabled = true; + } else { + if (!omap_host->pbias_enabled) + return 0; + + ret = regulator_disable(omap_host->pbias); + if (ret) { + dev_err(dev, "pbias reg disable fail\n"); + return ret; + } + omap_host->pbias_enabled = false; + } + + return 0; +} + +static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host, + unsigned int iov) +{ + int ret; + struct sdhci_host *host = omap_host->host; + struct mmc_host *mmc = host->mmc; + + ret = sdhci_omap_set_pbias(omap_host, false, 0); + if (ret) + return ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov); + if (ret) { + dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n"); + return ret; + } + } + + ret = sdhci_omap_set_pbias(omap_host, true, iov); + if (ret) + return ret; + + return 0; +} + +static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host, + unsigned char signal_voltage) +{ + u32 reg; + ktime_t timeout; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL); + reg &= ~HCTL_SDVS_MASK; + + if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) + reg |= HCTL_SDVS_33; + else + reg |= HCTL_SDVS_18; + + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg); + + reg |= HCTL_SDBP; + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg); + + /* wait 1ms */ + timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP) + break; + if (WARN_ON(timedout)) + return; + usleep_range(5, 10); + } +} + +static void sdhci_omap_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (enable) + reg |= (CON_CTPL | CON_CLKEXTFREE); + else + reg &= ~(CON_CTPL | CON_CLKEXTFREE); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + sdhci_enable_sdio_irq(mmc, enable); +} + +static inline void sdhci_omap_set_dll(struct sdhci_omap_host *omap_host, + int count) +{ + int i; + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL); + reg |= DLL_FORCE_VALUE; + reg &= ~DLL_FORCE_SR_C_MASK; + reg |= (count << DLL_FORCE_SR_C_SHIFT); + sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg); + + reg |= DLL_CALIB; + sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg); + for (i = 0; i < 1000; i++) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL); + if (reg & DLL_CALIB) + break; + } + reg &= ~DLL_CALIB; + sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg); +} + +static void sdhci_omap_disable_tuning(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, 
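/*
 * Userspace model of the bounded poll in sdhci_omap_conf_bus_power()
 * above. Design note: the deadline flag is sampled before the register,
 * so the condition always gets one final check after the deadline; a
 * bit that flips just as the timeout expires is not reported as a
 * failure. All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int calls;

static bool sdbp_set(void)
{
	return ++calls >= 3;	/* stand-in for "HCTL reads back with SDBP" */
}

static int poll_until(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long ms = (now.tv_sec - start.tv_sec) * 1000L +
			  (now.tv_nsec - start.tv_nsec) / 1000000L;
		bool timedout = ms > timeout_ms;

		if (sdbp_set())
			return 0;
		if (timedout)
			return -1;
	}
}

int main(void)
{
	printf("poll: %s\n", poll_until(1) == 0 ? "ok" : "timed out");
	return 0;
}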
SDHCI_OMAP_AC12); + reg &= ~AC12_SCLK_SEL; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL); + reg &= ~(DLL_FORCE_VALUE | DLL_SWT); + sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg); +} + +static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + struct thermal_zone_device *thermal_dev; + struct device *dev = omap_host->dev; + struct mmc_ios *ios = &mmc->ios; + u32 start_window = 0, max_window = 0; + bool single_point_failure = false; + bool dcrc_was_enabled = false; + u8 cur_match, prev_match = 0; + u32 length = 0, max_len = 0; + u32 phase_delay = 0; + int temperature; + int ret = 0; + u32 reg; + int i; + + /* clock tuning is not needed for upto 52MHz */ + if (ios->clock <= 52000000) + return 0; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA2); + if (ios->timing == MMC_TIMING_UHS_SDR50 && !(reg & CAPA2_TSDR50)) + return 0; + + thermal_dev = thermal_zone_get_zone_by_name("cpu_thermal"); + if (IS_ERR(thermal_dev)) { + dev_err(dev, "Unable to get thermal zone for tuning\n"); + return PTR_ERR(thermal_dev); + } + + ret = thermal_zone_get_temp(thermal_dev, &temperature); + if (ret) + return ret; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL); + reg |= DLL_SWT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg); + + /* + * OMAP5/DRA74X/DRA72x Errata i802: + * DCRC error interrupts (MMCHS_STAT[21] DCRC=0x1) can occur + * during the tuning procedure. So disable it during the + * tuning procedure. + */ + if (host->ier & SDHCI_INT_DATA_CRC) { + host->ier &= ~SDHCI_INT_DATA_CRC; + dcrc_was_enabled = true; + } + + omap_host->is_tuning = true; + + /* + * Stage 1: Search for a maximum pass window ignoring any + * any single point failures. If the tuning value ends up + * near it, move away from it in stage 2 below + */ + while (phase_delay <= MAX_PHASE_DELAY) { + sdhci_omap_set_dll(omap_host, phase_delay); + + cur_match = !mmc_send_tuning(mmc, opcode, NULL); + if (cur_match) { + if (prev_match) { + length++; + } else if (single_point_failure) { + /* ignore single point failure */ + length++; + } else { + start_window = phase_delay; + length = 1; + } + } else { + single_point_failure = prev_match; + } + + if (length > max_len) { + max_window = start_window; + max_len = length; + } + + prev_match = cur_match; + phase_delay += 4; + } + + if (!max_len) { + dev_err(dev, "Unable to find match\n"); + ret = -EIO; + goto tuning_error; + } + + /* + * Assign tuning value as a ratio of maximum pass window based + * on temperature + */ + if (temperature < -20000) + phase_delay = min(max_window + 4 * (max_len - 1) - 24, + max_window + + DIV_ROUND_UP(13 * max_len, 16) * 4); + else if (temperature < 20000) + phase_delay = max_window + DIV_ROUND_UP(9 * max_len, 16) * 4; + else if (temperature < 40000) + phase_delay = max_window + DIV_ROUND_UP(8 * max_len, 16) * 4; + else if (temperature < 70000) + phase_delay = max_window + DIV_ROUND_UP(7 * max_len, 16) * 4; + else if (temperature < 90000) + phase_delay = max_window + DIV_ROUND_UP(5 * max_len, 16) * 4; + else if (temperature < 120000) + phase_delay = max_window + DIV_ROUND_UP(4 * max_len, 16) * 4; + else + phase_delay = max_window + DIV_ROUND_UP(3 * max_len, 16) * 4; + + /* + * Stage 2: Search for a single point failure near the chosen tuning + * value in two steps. 
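/*
 * Self-contained replica (userspace) of the stage-1 window search
 * above: step the phase in units of 4, bridge isolated single-point
 * failures, and keep the longest passing window. The tuning responses
 * are simulated; expected output is "window start=24 len=12".
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PHASE_DELAY 0x7C

static bool tuning_ok(unsigned int phase)
{
	/* simulated: passes in [24, 72] except a lone failure at 48 */
	return phase >= 24 && phase <= 72 && phase != 48;
}

int main(void)
{
	unsigned int start_window = 0, max_window = 0;
	unsigned int length = 0, max_len = 0;
	bool single_point_failure = false;
	bool prev_match = false;

	for (unsigned int phase = 0; phase <= MAX_PHASE_DELAY; phase += 4) {
		bool cur_match = tuning_ok(phase);

		if (cur_match) {
			if (prev_match || single_point_failure)
				length++;	/* extend, or bridge a lone miss */
			else {
				start_window = phase;
				length = 1;
			}
		} else {
			single_point_failure = prev_match;
		}
		if (length > max_len) {
			max_window = start_window;
			max_len = length;
		}
		prev_match = cur_match;
	}
	printf("window start=%u len=%u\n", max_window, max_len);
	return 0;
}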
First in the +3 to +10 range and then in the + * +2 to -10 range. If found, move away from it in the appropriate + * direction by the appropriate amount depending on the temperature. + */ + for (i = 3; i <= 10; i++) { + sdhci_omap_set_dll(omap_host, phase_delay + i); + + if (mmc_send_tuning(mmc, opcode, NULL)) { + if (temperature < 10000) + phase_delay += i + 6; + else if (temperature < 20000) + phase_delay += i - 12; + else if (temperature < 70000) + phase_delay += i - 8; + else + phase_delay += i - 6; + + goto single_failure_found; + } + } + + for (i = 2; i >= -10; i--) { + sdhci_omap_set_dll(omap_host, phase_delay + i); + + if (mmc_send_tuning(mmc, opcode, NULL)) { + if (temperature < 10000) + phase_delay += i + 12; + else if (temperature < 20000) + phase_delay += i + 8; + else if (temperature < 70000) + phase_delay += i + 8; + else if (temperature < 90000) + phase_delay += i + 10; + else + phase_delay += i + 12; + + goto single_failure_found; + } + } + +single_failure_found: + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + if (!(reg & AC12_SCLK_SEL)) { + ret = -EIO; + goto tuning_error; + } + + sdhci_omap_set_dll(omap_host, phase_delay); + + omap_host->is_tuning = false; + + goto ret; + +tuning_error: + omap_host->is_tuning = false; + dev_err(dev, "Tuning failed\n"); + sdhci_omap_disable_tuning(omap_host); + +ret: + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + /* Reenable forbidden interrupt */ + if (dcrc_was_enabled) + host->ier |= SDHCI_INT_DATA_CRC; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + return ret; +} + +static int sdhci_omap_card_busy(struct mmc_host *mmc) +{ + u32 reg, ac12; + int ret = false; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + u32 ier = host->ier; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + ac12 = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg &= ~CON_CLKEXTFREE; + if (ac12 & AC12_V1V8_SIGEN) + reg |= CON_CLKEXTFREE; + reg |= CON_PADEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + disable_irq(host->irq); + ier |= SDHCI_INT_CARD_INT; + sdhci_writel(host, ier, SDHCI_INT_ENABLE); + sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); + + /* + * Delay is required for PSTATE to correctly reflect + * DLEV/CLEV values after PADEN is set. 
+ */ + usleep_range(50, 100); + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_PSTATE); + if ((reg & PSTATE_DATI) || !(reg & PSTATE_DLEV_DAT0)) + ret = true; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg &= ~(CON_CLKEXTFREE | CON_PADEN); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + enable_irq(host->irq); + + return ret; +} + +static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + u32 reg; + int ret; + unsigned int iov; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + struct device *dev; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + dev = omap_host->dev; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + if (!(reg & CAPA_VS33)) + return -EOPNOTSUPP; + + sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg &= ~AC12_V1V8_SIGEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + iov = IOV_3V3; + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + if (!(reg & CAPA_VS18)) + return -EOPNOTSUPP; + + sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg |= AC12_V1V8_SIGEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + iov = IOV_1V8; + } else { + return -EOPNOTSUPP; + } + + ret = sdhci_omap_enable_iov(omap_host, iov); + if (ret) { + dev_err(dev, "failed to switch IO voltage to %dmV\n", iov); + return ret; + } + + dev_dbg(dev, "IO voltage switched to %dmV\n", iov); + return 0; +} + +static void sdhci_omap_set_timing(struct sdhci_omap_host *omap_host, u8 timing) +{ + int ret; + struct pinctrl_state *pinctrl_state; + struct device *dev = omap_host->dev; + + if (!(omap_host->flags & SDHCI_OMAP_REQUIRE_IODELAY)) + return; + + if (omap_host->timing == timing) + return; + + sdhci_omap_stop_clock(omap_host); + + pinctrl_state = omap_host->pinctrl_state[timing]; + ret = pinctrl_select_state(omap_host->pinctrl, pinctrl_state); + if (ret) { + dev_err(dev, "failed to select pinctrl state\n"); + return; + } + + sdhci_omap_start_clock(omap_host); + omap_host->timing = timing; +} + +static void sdhci_omap_set_power_mode(struct sdhci_omap_host *omap_host, + u8 power_mode) +{ + if (omap_host->bus_mode == MMC_POWER_OFF) + sdhci_omap_disable_tuning(omap_host); + omap_host->power_mode = power_mode; +} + +static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host, + unsigned int mode) +{ + u32 reg; + + if (omap_host->bus_mode == mode) + return; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (mode == MMC_BUSMODE_OPENDRAIN) + reg |= CON_OD; + else + reg &= ~CON_OD; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + omap_host->bus_mode = mode; +} + +static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_omap_set_bus_mode(omap_host, ios->bus_mode); + sdhci_omap_set_timing(omap_host, ios->timing); + sdhci_set_ios(mmc, ios); + sdhci_omap_set_power_mode(omap_host, ios->power_mode); +} + +static u16 
sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host, + unsigned int clock) +{ + u16 dsor; + + dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock); + if (dsor > SYSCTL_CLKD_MAX) + dsor = SYSCTL_CLKD_MAX; + + return dsor; +} + +static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + reg |= SYSCTL_CEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg); +} + +static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + reg &= ~SYSCTL_CEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg); +} + +static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long clkdiv; + + sdhci_omap_stop_clock(omap_host); + + if (!clock) + return; + + clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock); + clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT; + sdhci_enable_clk(host, clkdiv); + + sdhci_omap_start_clock(omap_host); +} + +static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + +static int sdhci_omap_enable_dma(struct sdhci_host *host) +{ + u32 reg; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg &= ~CON_DMA_MASTER; + /* Switch to DMA slave mode when using external DMA */ + if (!host->use_external_dma) + reg |= CON_DMA_MASTER; + + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + return 0; +} + +static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX; +} + +static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (width == MMC_BUS_WIDTH_8) + reg |= CON_DW8; + else + reg &= ~CON_DW8; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + sdhci_set_bus_width(host, width); +} + +static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode) +{ + u32 reg; + ktime_t timeout; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + if (omap_host->power_mode == power_mode) + return; + + if (power_mode != MMC_POWER_ON) + return; + + disable_irq(host->irq); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg |= CON_INIT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0); + + /* wait 1ms */ + timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN) + break; + if (WARN_ON(timedout)) + return; + usleep_range(5, 10); + } + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg &= ~CON_INIT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN); + + enable_irq(host->irq); +} 
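/*
 * Quick arithmetic check (userspace) of sdhci_omap_calc_divisor()
 * above, assuming a 192 MHz functional clock; the rate is an
 * illustrative assumption, not taken from this patch.
 */
#include <stdio.h>

#define SYSCTL_CLKD_MAX		0x3FF
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int calc_divisor(unsigned long rate, unsigned int clock)
{
	unsigned long dsor = DIV_ROUND_UP(rate, clock);

	return dsor > SYSCTL_CLKD_MAX ? SYSCTL_CLKD_MAX : dsor;
}

int main(void)
{
	printf("400 kHz -> dsor %u\n", calc_divisor(192000000, 400000));   /* 480 */
	printf("52 MHz  -> dsor %u\n", calc_divisor(192000000, 52000000)); /*   4 */
	/* the same cap bounds the minimum bus clock, as in get_min_clock() */
	printf("min clk -> %lu Hz\n", 192000000ul / SYSCTL_CLKD_MAX);
	return 0;
}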
+ +static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u32 reg; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_omap_stop_clock(omap_host); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (timing == MMC_TIMING_UHS_DDR50 || timing == MMC_TIMING_MMC_DDR52) + reg |= CON_DDR; + else + reg &= ~CON_DDR; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + sdhci_set_uhs_signaling(host, timing); + sdhci_omap_start_clock(omap_host); +} + +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ +static void sdhci_omap_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long limit = MMC_TIMEOUT_US; + unsigned long i = 0; + + /* Don't reset data lines during tuning operation */ + if (omap_host->is_tuning) + mask &= ~SDHCI_RESET_DATA; + + if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) { + sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); + while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) && + (i++ < limit)) + udelay(1); + i = 0; + while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) && + (i++ < limit)) + udelay(1); + + if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) + dev_err(mmc_dev(host->mmc), + "Timeout waiting on controller reset in %s\n", + __func__); + return; + } + + sdhci_reset(host, mask); +} + +#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\ + SDHCI_INT_TIMEOUT) +#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE) + +static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + if (omap_host->is_tuning && host->cmd && !host->data_early && + (intmask & CMD_ERR_MASK)) { + + /* + * Since we are not resetting data lines during tuning + * operation, data error or data complete interrupts + * might still arrive. Mark this request as a failure + * but still wait for the data interrupt + */ + if (intmask & SDHCI_INT_TIMEOUT) + host->cmd->error = -ETIMEDOUT; + else + host->cmd->error = -EILSEQ; + + host->cmd = NULL; + + /* + * Sometimes command error interrupts and command complete + * interrupt will arrive together. Clear all command related + * interrupts here. 
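/*
 * Userspace model of the "special reset" handshake above (the
 * AM335x/AM437x flavour): software must first observe the reset bit
 * latch high and then observe it clear, each under its own bounded
 * poll. The register behaviour below is simulated.
 */
#include <stdio.h>
#include <stdint.h>

#define LIMIT 20000

static uint8_t sw_reset;
static int reads;

static uint8_t read_reset(void)
{
	/* simulated hardware: latches after 3 reads, completes after 10 */
	reads++;
	if (reads < 3)
		return 0;
	if (reads < 10)
		return sw_reset;
	return 0;
}

int main(void)
{
	unsigned long i = 0;
	uint8_t mask = 0x1;

	sw_reset = mask;				/* "write" the reset request */
	while (!(read_reset() & mask) && i++ < LIMIT)
		;					/* wait for the reset to latch */
	i = 0;
	while ((read_reset() & mask) && i++ < LIMIT)
		;					/* then wait for completion */

	uint8_t final = read_reset();
	printf("reset %s after %d register reads\n",
	       final & mask ? "timed out" : "done", reads);
	return 0;
}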
+ */ + sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS); + intmask &= ~CMD_MASK; + } + + return intmask; +} + +static void sdhci_omap_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_ERASE) + sdhci_set_data_timeout_irq(host, false); + + __sdhci_set_timeout(host, cmd); +} + +static struct sdhci_ops sdhci_omap_ops = { + .set_clock = sdhci_omap_set_clock, + .set_power = sdhci_omap_set_power, + .enable_dma = sdhci_omap_enable_dma, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_min_clock = sdhci_omap_get_min_clock, + .set_bus_width = sdhci_omap_set_bus_width, + .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, + .reset = sdhci_omap_reset, + .set_uhs_signaling = sdhci_omap_set_uhs_signaling, + .irq = sdhci_omap_irq, + .set_timeout = sdhci_omap_set_timeout, +}; + +static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) +{ + u32 reg; + int ret = 0; + struct device *dev = omap_host->dev; + struct regulator *vqmmc; + + vqmmc = regulator_get(dev, "vqmmc"); + if (IS_ERR(vqmmc)) { + ret = PTR_ERR(vqmmc); + goto reg_put; + } + + /* voltage capabilities might be set by boot loader, clear it */ + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33); + + if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3)) + reg |= CAPA_VS33; + if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8)) + reg |= CAPA_VS18; + + sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg); + +reg_put: + regulator_put(vqmmc); + + return ret; +} + +static const struct sdhci_pltfm_data sdhci_omap_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_RSP_136_HAS_CRC | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, + .ops = &sdhci_omap_ops, +}; + +static const struct sdhci_omap_data k2g_data = { + .offset = 0x200, +}; + +static const struct sdhci_omap_data am335_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + +static const struct sdhci_omap_data am437_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + +static const struct sdhci_omap_data dra7_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_REQUIRE_IODELAY, +}; + +static const struct of_device_id omap_sdhci_match[] = { + { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, + { .compatible = "ti,k2g-sdhci", .data = &k2g_data }, + { .compatible = "ti,am335-sdhci", .data = &am335_data }, + { .compatible = "ti,am437-sdhci", .data = &am437_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_sdhci_match); + +static struct pinctrl_state +*sdhci_omap_iodelay_pinctrl_state(struct sdhci_omap_host *omap_host, char *mode, + u32 *caps, u32 capmask) +{ + struct device *dev = omap_host->dev; + char *version = omap_host->version; + struct pinctrl_state *pinctrl_state = ERR_PTR(-ENODEV); + char str[20]; + + if (!(*caps & capmask)) + goto ret; + + if (version) { + snprintf(str, 20, "%s-%s", mode, version); + pinctrl_state = pinctrl_lookup_state(omap_host->pinctrl, str); + } + + if (IS_ERR(pinctrl_state)) + pinctrl_state = pinctrl_lookup_state(omap_host->pinctrl, mode); + + if (IS_ERR(pinctrl_state)) { + dev_err(dev, "no pinctrl state for %s mode", mode); + *caps &= ~capmask; + } + +ret: + return pinctrl_state; +} + +static int sdhci_omap_config_iodelay_pinctrl_state(struct sdhci_omap_host + 
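/*
 * Standalone illustration (userspace, simulated lookup table) of the
 * pinctrl state-name fallback in sdhci_omap_iodelay_pinctrl_state()
 * above: try the revision-qualified name first ("sdr104-rev11"), fall
 * back to the generic one, and drop the capability if neither exists.
 */
#include <stdio.h>
#include <string.h>

static const char *available[] = { "default", "sdr104-rev11", "hs", NULL };

static const char *lookup_state(const char *name)
{
	for (int i = 0; available[i]; i++)
		if (!strcmp(available[i], name))
			return available[i];
	return NULL;
}

static const char *iodelay_state(const char *mode, const char *version)
{
	char str[20];
	const char *state = NULL;

	if (version) {
		snprintf(str, sizeof(str), "%s-%s", mode, version);
		state = lookup_state(str);	/* revision-specific first */
	}
	if (!state)
		state = lookup_state(mode);	/* generic fallback */
	return state;
}

int main(void)
{
	const char *s;

	printf("sdr104 -> %s\n", iodelay_state("sdr104", "rev11"));
	printf("hs     -> %s\n", iodelay_state("hs", "rev11"));
	s = iodelay_state("ddr50", "rev11");
	printf("ddr50  -> %s\n", s ? s : "(none, capability dropped)");
	return 0;
}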
*omap_host) +{ + struct device *dev = omap_host->dev; + struct sdhci_host *host = omap_host->host; + struct mmc_host *mmc = host->mmc; + u32 *caps = &mmc->caps; + u32 *caps2 = &mmc->caps2; + struct pinctrl_state *state; + struct pinctrl_state **pinctrl_state; + + if (!(omap_host->flags & SDHCI_OMAP_REQUIRE_IODELAY)) + return 0; + + pinctrl_state = devm_kcalloc(dev, + MMC_TIMING_MMC_HS200 + 1, + sizeof(*pinctrl_state), + GFP_KERNEL); + if (!pinctrl_state) + return -ENOMEM; + + omap_host->pinctrl = devm_pinctrl_get(omap_host->dev); + if (IS_ERR(omap_host->pinctrl)) { + dev_err(dev, "Cannot get pinctrl\n"); + return PTR_ERR(omap_host->pinctrl); + } + + state = pinctrl_lookup_state(omap_host->pinctrl, "default"); + if (IS_ERR(state)) { + dev_err(dev, "no pinctrl state for default mode\n"); + return PTR_ERR(state); + } + pinctrl_state[MMC_TIMING_LEGACY] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr104", caps, + MMC_CAP_UHS_SDR104); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_UHS_SDR104] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "ddr50", caps, + MMC_CAP_UHS_DDR50); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_UHS_DDR50] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr50", caps, + MMC_CAP_UHS_SDR50); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_UHS_SDR50] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr25", caps, + MMC_CAP_UHS_SDR25); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_UHS_SDR25] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr12", caps, + MMC_CAP_UHS_SDR12); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_UHS_SDR12] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "ddr_1_8v", caps, + MMC_CAP_1_8V_DDR); + if (!IS_ERR(state)) { + pinctrl_state[MMC_TIMING_MMC_DDR52] = state; + } else { + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "ddr_3_3v", + caps, + MMC_CAP_3_3V_DDR); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_MMC_DDR52] = state; + } + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "hs", caps, + MMC_CAP_SD_HIGHSPEED); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_SD_HS] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "hs", caps, + MMC_CAP_MMC_HIGHSPEED); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_MMC_HS] = state; + + state = sdhci_omap_iodelay_pinctrl_state(omap_host, "hs200_1_8v", caps2, + MMC_CAP2_HS200_1_8V_SDR); + if (!IS_ERR(state)) + pinctrl_state[MMC_TIMING_MMC_HS200] = state; + + omap_host->pinctrl_state = pinctrl_state; + + return 0; +} + +static const struct soc_device_attribute sdhci_omap_soc_devices[] = { + { + .machine = "DRA7[45]*", + .revision = "ES1.[01]", + }, + { + /* sentinel */ + } +}; + +static int sdhci_omap_probe(struct platform_device *pdev) +{ + int ret; + u32 offset; + struct device *dev = &pdev->dev; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + struct mmc_host *mmc; + const struct of_device_id *match; + struct sdhci_omap_data *data; + const struct soc_device_attribute *soc; + struct resource *regs; + + match = of_match_device(omap_sdhci_match, dev); + if (!match) + return -EINVAL; + + data = (struct sdhci_omap_data *)match->data; + if (!data) { + dev_err(dev, "no sdhci omap data\n"); + return -EINVAL; + } + offset = data->offset; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, + sizeof(*omap_host)); + if (IS_ERR(host)) { 
+ dev_err(dev, "Failed sdhci_pltfm_init\n"); + return PTR_ERR(host); + } + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + omap_host->host = host; + omap_host->base = host->ioaddr; + omap_host->dev = dev; + omap_host->power_mode = MMC_POWER_UNDEFINED; + omap_host->timing = MMC_TIMING_LEGACY; + omap_host->flags = data->flags; + host->ioaddr += offset; + host->mapbase = regs->start + offset; + + mmc = host->mmc; + sdhci_get_of_property(pdev); + ret = mmc_of_parse(mmc); + if (ret) + goto err_pltfm_free; + + soc = soc_device_match(sdhci_omap_soc_devices); + if (soc) { + omap_host->version = "rev11"; + if (!strcmp(dev_name(dev), "4809c000.mmc")) + mmc->f_max = 96000000; + if (!strcmp(dev_name(dev), "480b4000.mmc")) + mmc->f_max = 48000000; + if (!strcmp(dev_name(dev), "480ad000.mmc")) + mmc->f_max = 48000000; + } + + if (!mmc_can_gpio_ro(mmc)) + mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + + pltfm_host->clk = devm_clk_get(dev, "fck"); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + goto err_pltfm_free; + } + + ret = clk_set_rate(pltfm_host->clk, mmc->f_max); + if (ret) { + dev_err(dev, "failed to set clock to %d\n", mmc->f_max); + goto err_pltfm_free; + } + + omap_host->pbias = devm_regulator_get_optional(dev, "pbias"); + if (IS_ERR(omap_host->pbias)) { + ret = PTR_ERR(omap_host->pbias); + if (ret != -ENODEV) + goto err_pltfm_free; + dev_dbg(dev, "unable to get pbias regulator %d\n", ret); + } + omap_host->pbias_enabled = false; + + /* + * omap_device_pm_domain has callbacks to enable the main + * functional clock, interface clock and also configure the + * SYSCONFIG register of omap devices. The callback will be invoked + * as part of pm_runtime_get_sync. + */ + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + pm_runtime_put_noidle(dev); + goto err_rpm_disable; + } + + ret = sdhci_omap_set_capabilities(omap_host); + if (ret) { + dev_err(dev, "failed to set system capabilities\n"); + goto err_put_sync; + } + + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_omap_start_signal_voltage_switch; + host->mmc_host_ops.set_ios = sdhci_omap_set_ios; + host->mmc_host_ops.card_busy = sdhci_omap_card_busy; + host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning; + host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq; + + /* Switch to external DMA only if there is the "dmas" property */ + if (of_find_property(dev->of_node, "dmas", NULL)) + sdhci_switch_external_dma(host, true); + + /* R1B responses is required to properly manage HW busy detection. 
*/ + mmc->caps |= MMC_CAP_NEED_RSP_BUSY; + + ret = sdhci_setup_host(host); + if (ret) + goto err_put_sync; + + ret = sdhci_omap_config_iodelay_pinctrl_state(omap_host); + if (ret) + goto err_cleanup_host; + + ret = __sdhci_add_host(host); + if (ret) + goto err_cleanup_host; + + return 0; + +err_cleanup_host: + sdhci_cleanup_host(host); + +err_put_sync: + pm_runtime_put_sync(dev); + +err_rpm_disable: + pm_runtime_disable(dev); + +err_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_omap_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_host *host = platform_get_drvdata(pdev); + + sdhci_remove_host(host, true); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + sdhci_pltfm_free(pdev); + + return 0; +} +#ifdef CONFIG_PM_SLEEP +static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host) +{ + omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL); + omap_host->sysctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + omap_host->ie = sdhci_omap_readl(omap_host, SDHCI_OMAP_IE); + omap_host->ise = sdhci_omap_readl(omap_host, SDHCI_OMAP_ISE); +} + +/* Order matters here, HCTL must be restored in two phases */ +static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host) +{ + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa); + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl); + + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, omap_host->sysctl); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con); + sdhci_omap_writel(omap_host, SDHCI_OMAP_IE, omap_host->ie); + sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise); +} + +static int __maybe_unused sdhci_omap_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_suspend_host(host); + + sdhci_omap_context_save(omap_host); + + pinctrl_pm_select_idle_state(dev); + + pm_runtime_force_suspend(dev); + + return 0; +} + +static int __maybe_unused sdhci_omap_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + pm_runtime_force_resume(dev); + + pinctrl_pm_select_default_state(dev); + + sdhci_omap_context_restore(omap_host); + + sdhci_resume_host(host); + + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend, + sdhci_omap_resume); + +static struct platform_driver sdhci_omap_driver = { + .probe = sdhci_omap_probe, + .remove = sdhci_omap_remove, + .driver = { + .name = "sdhci-omap", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_omap_dev_pm_ops, + .of_match_table = omap_sdhci_match, + }, +}; + +module_platform_driver(sdhci_omap_driver); + +MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs"); +MODULE_AUTHOR("Texas Instruments Inc."); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci_omap"); diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c new file mode 100644 index 000000000..499f3205e --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-arasan.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sdhci-pci-arasan.c - 
Driver for Arasan PCI Controller with + * integrated phy. + * + * Copyright (C) 2017 Arasan Chip Systems Inc. + * + * Author: Atul Garg + */ + +#include +#include + +#include "sdhci.h" +#include "sdhci-pci.h" + +/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */ +#define PHY_ADDR_REG 0x300 +#define PHY_DAT_REG 0x304 + +#define PHY_WRITE BIT(8) +#define PHY_BUSY BIT(9) +#define DATA_MASK 0xFF + +/* PHY Specific Registers */ +#define DLL_STATUS 0x00 +#define IPAD_CTRL1 0x01 +#define IPAD_CTRL2 0x02 +#define IPAD_STS 0x03 +#define IOREN_CTRL1 0x06 +#define IOREN_CTRL2 0x07 +#define IOPU_CTRL1 0x08 +#define IOPU_CTRL2 0x09 +#define ITAP_DELAY 0x0C +#define OTAP_DELAY 0x0D +#define STRB_SEL 0x0E +#define CLKBUF_SEL 0x0F +#define MODE_CTRL 0x11 +#define DLL_TRIM 0x12 +#define CMD_CTRL 0x20 +#define DATA_CTRL 0x21 +#define STRB_CTRL 0x22 +#define CLK_CTRL 0x23 +#define PHY_CTRL 0x24 + +#define DLL_ENBL BIT(3) +#define RTRIM_EN BIT(1) +#define PDB_ENBL BIT(1) +#define RETB_ENBL BIT(6) +#define ODEN_CMD BIT(1) +#define ODEN_DAT 0xFF +#define REN_STRB BIT(0) +#define REN_CMND BIT(1) +#define REN_DATA 0xFF +#define PU_CMD BIT(1) +#define PU_DAT 0xFF +#define ITAPDLY_EN BIT(0) +#define OTAPDLY_EN BIT(0) +#define OD_REL_CMD BIT(1) +#define OD_REL_DAT 0xFF +#define DLLTRM_ICP 0x8 +#define PDB_CMND BIT(0) +#define PDB_DATA 0xFF +#define PDB_STRB BIT(0) +#define PDB_CLOCK BIT(0) +#define CALDONE_MASK 0x10 +#define DLL_RDY_MASK 0x10 +#define MAX_CLK_BUF 0x7 + +/* Mode Controls */ +#define ENHSTRB_MODE BIT(0) +#define HS400_MODE BIT(1) +#define LEGACY_MODE BIT(2) +#define DDR50_MODE BIT(3) + +/* + * Controller has no specific bits for HS200/HS. + * Used BIT(4), BIT(5) for software programming. + */ +#define HS200_MODE BIT(4) +#define HISPD_MODE BIT(5) + +#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN) +#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN) +#define FREQSEL(x) (((x) << 5) | DLL_ENBL) +#define IOPAD(x, y) ((x) | ((y) << 2)) + +/* Arasan private data */ +struct arasan_host { + u32 chg_clk; +}; + +static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100); + bool failed; + u8 val = 0; + + while (1) { + failed = ktime_after(ktime_get(), timeout); + val = sdhci_readw(host, PHY_ADDR_REG); + if (!(val & mask)) + return 0; + if (failed) + return -EBUSY; + } +} + +static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset) +{ + sdhci_writew(host, data, PHY_DAT_REG); + sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG); + return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY); +} + +static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data) +{ + int ret; + + sdhci_writew(host, 0, PHY_DAT_REG); + sdhci_writew(host, offset, PHY_ADDR_REG); + ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY); + + /* Masking valid data bits */ + *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK; + return ret; +} + +static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask) +{ + int ret; + ktime_t timeout = ktime_add_us(ktime_get(), 100); + bool failed; + u8 val = 0; + + while (1) { + failed = ktime_after(ktime_get(), timeout); + ret = arasan_phy_read(host, offset, &val); + if (ret) + return -EBUSY; + else if (val & mask) + return 0; + if (failed) + return -EBUSY; + } +} + +/* Initialize the Arasan PHY */ +static int arasan_phy_init(struct sdhci_host *host) +{ + int ret; + u8 val; + + /* Program IOPADs and wait for calibration to be done */ + if (arasan_phy_read(host, IPAD_CTRL1, &val) 
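/*
 * Userspace model of the indirect PHY access protocol above: the PHY
 * registers sit behind PHY_DAT_REG/PHY_ADDR_REG, and every access
 * completes only once the controller clears PHY_BUSY. The hardware
 * side is simulated here; names mirror the defines above.
 */
#include <stdio.h>
#include <stdint.h>

#define PHY_WRITE	(1u << 8)
#define PHY_BUSY	(1u << 9)

static uint16_t phy_addr_reg;
static uint16_t phy_dat_reg;
static uint8_t phy_regs[64];
static int busy_reads;

static uint16_t read_addr_reg(void)
{
	if (busy_reads > 0) {		/* model: BUSY holds for two polls */
		busy_reads--;
		return phy_addr_reg | PHY_BUSY;
	}
	return phy_addr_reg;
}

static int phy_write(uint8_t data, uint8_t offset)
{
	phy_dat_reg = data;
	phy_addr_reg = PHY_WRITE | offset;
	phy_regs[offset] = data;	/* the simulated hardware side effect */
	busy_reads = 2;
	for (int i = 0; i < 100; i++)	/* bounded poll, like the 100us budget */
		if (!(read_addr_reg() & PHY_BUSY))
			return 0;
	return -1;			/* the driver returns -EBUSY here */
}

int main(void)
{
	int ret = phy_write(0x12, 0x0D /* OTAP_DELAY */);

	printf("write %s, OTAP_DELAY=%#x\n", ret ? "failed" : "ok",
	       phy_regs[0x0D]);
	return 0;
}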
|| + arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) || + arasan_phy_read(host, IPAD_CTRL2, &val) || + arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2)) + return -EBUSY; + ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK); + if (ret) + return -EBUSY; + + /* Program CMD/Data lines */ + if (arasan_phy_read(host, IOREN_CTRL1, &val) || + arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) || + arasan_phy_read(host, IOPU_CTRL1, &val) || + arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) || + arasan_phy_read(host, CMD_CTRL, &val) || + arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) || + arasan_phy_read(host, IOREN_CTRL2, &val) || + arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) || + arasan_phy_read(host, IOPU_CTRL2, &val) || + arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) || + arasan_phy_read(host, DATA_CTRL, &val) || + arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) || + arasan_phy_read(host, STRB_CTRL, &val) || + arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) || + arasan_phy_read(host, CLK_CTRL, &val) || + arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) || + arasan_phy_read(host, CLKBUF_SEL, &val) || + arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) || + arasan_phy_write(host, LEGACY_MODE, MODE_CTRL)) + return -EBUSY; + return 0; +} + +/* Set Arasan PHY for different modes */ +static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap, + u8 drv_type, u8 itap, u8 trim, u8 clk) +{ + u8 val; + int ret; + + if (mode == HISPD_MODE || mode == HS200_MODE) + ret = arasan_phy_write(host, 0x0, MODE_CTRL); + else + ret = arasan_phy_write(host, mode, MODE_CTRL); + if (ret) + return ret; + if (mode == HS400_MODE || mode == HS200_MODE) { + ret = arasan_phy_read(host, IPAD_CTRL1, &val); + if (ret) + return ret; + ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1); + if (ret) + return ret; + } + if (mode == LEGACY_MODE) { + ret = arasan_phy_write(host, 0x0, OTAP_DELAY); + if (ret) + return ret; + ret = arasan_phy_write(host, 0x0, ITAP_DELAY); + } else { + ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY); + if (ret) + return ret; + if (mode != HS200_MODE) + ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY); + else + ret = arasan_phy_write(host, 0x0, ITAP_DELAY); + } + if (ret) + return ret; + if (mode != LEGACY_MODE) { + ret = arasan_phy_write(host, trim, DLL_TRIM); + if (ret) + return ret; + } + ret = arasan_phy_write(host, 0, DLL_STATUS); + if (ret) + return ret; + if (mode != LEGACY_MODE) { + ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS); + if (ret) + return ret; + ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK); + if (ret) + return -EBUSY; + } + return 0; +} + +static int arasan_select_phy_clock(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct arasan_host *arasan_host = sdhci_pci_priv(slot); + u8 clk; + + if (arasan_host->chg_clk == host->mmc->ios.clock) + return 0; + + arasan_host->chg_clk = host->mmc->ios.clock; + if (host->mmc->ios.clock == 200000000) + clk = 0x0; + else if (host->mmc->ios.clock == 100000000) + clk = 0x2; + else if (host->mmc->ios.clock == 50000000) + clk = 0x1; + else + clk = 0x0; + + if (host->mmc_host_ops.hs400_enhanced_strobe) { + arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0, + DLLTRM_ICP, clk); + } else { + switch (host->mmc->ios.timing) { + case MMC_TIMING_LEGACY: + arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0, + 0x0, 0x0); + break; + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2, 
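/*
 * Table form (illustrative) of the clock-to-FREQSEL selection in
 * arasan_select_phy_clock() above; any rate other than 200/100/50 MHz
 * falls back to the 200 MHz code, exactly as the driver does.
 */
#include <stdio.h>

#define DLL_ENBL	(1u << 3)
#define FREQSEL(x)	(((x) << 5) | DLL_ENBL)

static unsigned int freqsel_code(unsigned int hz)
{
	switch (hz) {
	case 200000000: return 0x0;
	case 100000000: return 0x2;
	case 50000000:  return 0x1;
	default:        return 0x0;
	}
}

int main(void)
{
	for (unsigned int hz = 50000000; hz <= 200000000; hz += 50000000)
		printf("%u Hz -> DLL_STATUS %#x\n", hz,
		       FREQSEL(freqsel_code(hz)));
	return 0;
}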
+ DLLTRM_ICP, clk); + break; + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + arasan_phy_set(host, HS200_MODE, 0x2, + host->mmc->ios.drv_type, 0x0, + DLLTRM_ICP, clk); + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + arasan_phy_set(host, DDR50_MODE, 0x1, 0x0, + 0x0, DLLTRM_ICP, clk); + break; + case MMC_TIMING_MMC_HS400: + arasan_phy_set(host, HS400_MODE, 0x1, + host->mmc->ios.drv_type, 0xa, + DLLTRM_ICP, clk); + break; + default: + break; + } + } + return 0; +} + +static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot) +{ + int err; + + slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA; + err = arasan_phy_init(slot->host); + if (err) + return -ENODEV; + return 0; +} + +static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ + sdhci_set_clock(host, clock); + + /* Change phy settings for the new clock */ + arasan_select_phy_clock(host); +} + +static const struct sdhci_ops arasan_sdhci_pci_ops = { + .set_clock = arasan_sdhci_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +const struct sdhci_pci_fixes sdhci_arasan = { + .probe_slot = arasan_pci_probe_slot, + .ops = &arasan_sdhci_pci_ops, + .priv_size = sizeof(struct arasan_host), +}; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c new file mode 100644 index 000000000..8b02fe391 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -0,0 +1,2401 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface + * + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. + * + * Thanks to the following companies for their support: + * + * - JMicron (hardware and technical support) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86 +#include +#endif + +#include "cqhci.h" + +#include "sdhci.h" +#include "sdhci-pci.h" + +static void sdhci_pci_hw_reset(struct sdhci_host *host); + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) +{ + mmc_pm_flag_t pm_flags = 0; + bool cap_cd_wake = false; + int i; + + for (i = 0; i < chip->num_slots; i++) { + struct sdhci_pci_slot *slot = chip->slots[i]; + + if (slot) { + pm_flags |= slot->host->mmc->pm_flags; + if (slot->host->mmc->caps & MMC_CAP_CD_WAKE) + cap_cd_wake = true; + } + } + + if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ)) + return device_wakeup_enable(&chip->pdev->dev); + else if (!cap_cd_wake) + return device_wakeup_disable(&chip->pdev->dev); + + return 0; +} + +static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip) +{ + int i, ret; + + sdhci_pci_init_wakeup(chip); + + for (i = 0; i < chip->num_slots; i++) { + struct sdhci_pci_slot *slot = chip->slots[i]; + struct sdhci_host *host; + + if (!slot) + continue; + + host = slot->host; + + if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (ret) + goto err_pci_suspend; + + if (device_may_wakeup(&chip->pdev->dev)) + mmc_gpio_set_cd_wake(host->mmc, true); + } + + return 0; + +err_pci_suspend: + while (--i >= 0) + sdhci_resume_host(chip->slots[i]->host); + return ret; +} + +int sdhci_pci_resume_host(struct 
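/*
 * The suspend path above uses the classic partial-failure unwind: when
 * slot k fails to suspend, slots 0..k-1 (already suspended) are resumed
 * in reverse order before the error is returned. Minimal standalone
 * demonstration with a simulated failure on slot 2:
 */
#include <stdio.h>

#define NUM_SLOTS 4

static int suspend_slot(int i)  { return i == 2 ? -1 : 0; }
static void resume_slot(int i)  { printf("resume slot %d\n", i); }

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < NUM_SLOTS; i++) {
		ret = suspend_slot(i);
		if (ret)
			goto err;
		printf("suspended slot %d\n", i);
	}
	return 0;

err:
	while (--i >= 0)	/* unwind only the slots that made it */
		resume_slot(i);
	return ret;
}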
sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot; + int i, ret; + + for (i = 0; i < chip->num_slots; i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + ret = sdhci_resume_host(slot->host); + if (ret) + return ret; + + mmc_gpio_set_cd_wake(slot->host->mmc, false); + } + + return 0; +} + +static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = cqhci_suspend(chip->slots[0]->host->mmc); + if (ret) + return ret; + + return sdhci_pci_suspend_host(chip); +} + +static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = sdhci_pci_resume_host(chip); + if (ret) + return ret; + + return cqhci_resume(chip->slots[0]->host->mmc); +} +#endif + +#ifdef CONFIG_PM +static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot; + struct sdhci_host *host; + int i, ret; + + for (i = 0; i < chip->num_slots; i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + host = slot->host; + + ret = sdhci_runtime_suspend_host(host); + if (ret) + goto err_pci_runtime_suspend; + + if (chip->rpm_retune && + host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + } + + return 0; + +err_pci_runtime_suspend: + while (--i >= 0) + sdhci_runtime_resume_host(chip->slots[i]->host, 0); + return ret; +} + +static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot; + int i, ret; + + for (i = 0; i < chip->num_slots; i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + ret = sdhci_runtime_resume_host(slot->host, 0); + if (ret) + return ret; + } + + return 0; +} + +static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = cqhci_suspend(chip->slots[0]->host->mmc); + if (ret) + return ret; + + return sdhci_pci_runtime_suspend_host(chip); +} + +static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip) +{ + int ret; + + ret = sdhci_pci_runtime_resume_host(chip); + if (ret) + return ret; + + return cqhci_resume(chip->slots[0]->host->mmc); +} +#endif + +static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static void sdhci_pci_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + sdhci_reset(host, mask); +} + +/*****************************************************************************\ + * * + * Hardware specific quirk handling * + * * +\*****************************************************************************/ + +static int ricoh_probe(struct sdhci_pci_chip *chip) +{ + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG || + chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY) + chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; + return 0; +} + +static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->caps = + FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) | + FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) | + SDHCI_TIMEOUT_CLK_UNIT | + SDHCI_CAN_VDD_330 | + SDHCI_CAN_DO_HISPD | + SDHCI_CAN_DO_SDMA; + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int ricoh_mmc_resume(struct sdhci_pci_chip *chip) +{ + /* Apply a delay to allow controller to settle */ + /* Otherwise it 
becomes confused if card state changed + during suspend */ + msleep(500); + return sdhci_pci_resume_host(chip); +} +#endif + +static const struct sdhci_pci_fixes sdhci_ricoh = { + .probe = ricoh_probe, + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_FORCE_DMA | + SDHCI_QUIRK_CLOCK_BEFORE_RESET, +}; + +static const struct sdhci_pci_fixes sdhci_ricoh_mmc = { + .probe_slot = ricoh_mmc_probe_slot, +#ifdef CONFIG_PM_SLEEP + .resume = ricoh_mmc_resume, +#endif + .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_CLOCK_BEFORE_RESET | + SDHCI_QUIRK_NO_CARD_NO_RESET | + SDHCI_QUIRK_MISSING_CAPS +}; + +static const struct sdhci_pci_fixes sdhci_ene_712 = { + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_BROKEN_DMA, +}; + +static const struct sdhci_pci_fixes sdhci_ene_714 = { + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | + SDHCI_QUIRK_BROKEN_DMA, +}; + +static const struct sdhci_pci_fixes sdhci_cafe = { + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | + SDHCI_QUIRK_NO_BUSY_IRQ | + SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, +}; + +static const struct sdhci_pci_fixes sdhci_intel_qrk = { + .quirks = SDHCI_QUIRK_NO_HISPD_BIT, +}; + +static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; + return 0; +} + +/* + * ADMA operation is disabled for Moorestown platform due to + * hardware bugs. + */ +static int mrst_hc_probe(struct sdhci_pci_chip *chip) +{ + /* + * slots number is fixed here for MRST as SDIO3/5 are never used and + * have hardware bugs. + */ + chip->num_slots = 1; + return 0; +} + +static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; + return 0; +} + +#ifdef CONFIG_PM + +static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) +{ + struct sdhci_pci_slot *slot = dev_id; + struct sdhci_host *host = slot->host; + + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + return IRQ_HANDLED; +} + +static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) +{ + int err, irq, gpio = slot->cd_gpio; + + slot->cd_gpio = -EINVAL; + slot->cd_irq = -EINVAL; + + if (!gpio_is_valid(gpio)) + return; + + err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd"); + if (err < 0) + goto out; + + err = gpio_direction_input(gpio); + if (err < 0) + goto out_free; + + irq = gpio_to_irq(gpio); + if (irq < 0) + goto out_free; + + err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, "sd_cd", slot); + if (err) + goto out_free; + + slot->cd_gpio = gpio; + slot->cd_irq = irq; + + return; + +out_free: + devm_gpio_free(&slot->chip->pdev->dev, gpio); +out: + dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); +} + +static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) +{ + if (slot->cd_irq >= 0) + free_irq(slot->cd_irq, slot); +} + +#else + +static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) +{ +} + +static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) +{ +} + +#endif + +static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; + slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC; + return 0; +} + +static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; + return 0; +} + +static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { + .quirks = SDHCI_QUIRK_BROKEN_ADMA | 
SDHCI_QUIRK_NO_HISPD_BIT, + .probe_slot = mrst_hc_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { + .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, + .probe = mrst_hc_probe, +}; + +static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .allow_runtime_pm = true, + .own_cd_for_runtime_pm = true, +}; + +static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .allow_runtime_pm = true, + .probe_slot = mfd_sdio_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .allow_runtime_pm = true, + .probe_slot = mfd_emmc_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { + .quirks = SDHCI_QUIRK_BROKEN_ADMA, + .probe_slot = pch_hc_probe_slot, +}; + +#ifdef CONFIG_X86 + +#define BYT_IOSF_SCCEP 0x63 +#define BYT_IOSF_OCP_NETCTRL0 0x1078 +#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) + +static void byt_ocp_setting(struct pci_dev *pdev) +{ + u32 val = 0; + + if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC && + pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO && + pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD && + pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2) + return; + + if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, + &val)) { + dev_err(&pdev->dev, "%s read error\n", __func__); + return; + } + + if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) + return; + + val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; + + if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, + val)) { + dev_err(&pdev->dev, "%s write error\n", __func__); + return; + } + + dev_dbg(&pdev->dev, "%s completed\n", __func__); +} + +#else + +static inline void byt_ocp_setting(struct pci_dev *pdev) +{ +} + +#endif + +enum { + INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, + INTEL_DSM_V33_SWITCH = 4, + INTEL_DSM_DRV_STRENGTH = 9, + INTEL_DSM_D3_RETUNE = 10, +}; + +struct intel_host { + u32 dsm_fns; + int drv_strength; + bool d3_retune; + bool rpm_retune_ok; + bool needs_pwr_off; + u32 glk_rx_ctrl1; + u32 glk_tun_val; + u32 active_ltr; + u32 idle_ltr; +}; + +static const guid_t intel_dsm_guid = + GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, + 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); + +static int __intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + union acpi_object *obj; + int err = 0; + size_t len; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); + if (!obj) + return -EOPNOTSUPP; + + if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) { + err = -EINVAL; + goto out; + } + + len = min_t(size_t, obj->buffer.length, 4); + + *result = 0; + memcpy(result, obj->buffer.pointer, len); +out: + ACPI_FREE(obj); + + return err; +} + +static int intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) + return -EOPNOTSUPP; + + return __intel_dsm(intel_host, dev, fn, result); +} + +static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, + struct mmc_host *mmc) +{ + int err; + u32 val; + + intel_host->d3_retune = true; + + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); + if (err) { + pr_debug("%s: DSM not supported, error %d\n", + mmc_hostname(mmc), err); + return; + } + + pr_debug("%s: DSM function mask %#x\n", + 
mmc_hostname(mmc), intel_host->dsm_fns); + + err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val); + intel_host->drv_strength = err ? 0 : val; + + err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val); + intel_host->d3_retune = err ? true : !!val; +} + +static void sdhci_pci_int_hw_reset(struct sdhci_host *host) +{ + u8 reg; + + reg = sdhci_readb(host, SDHCI_POWER_CONTROL); + reg |= 0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 1us but give it 9us for good measure */ + udelay(9); + reg &= ~0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 1000); +} + +static int intel_select_drive_strength(struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type) +{ + struct sdhci_host *host = mmc_priv(card->host); + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + + if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) + return 0; + + return intel_host->drv_strength; +} + +static int bxt_get_cd(struct mmc_host *mmc) +{ + int gpio_cd = mmc_gpio_get_cd(mmc); + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + int ret = 0; + + if (!gpio_cd) + return 0; + + spin_lock_irqsave(&host->lock, flags); + + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + + ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +out: + spin_unlock_irqrestore(&host->lock, flags); + + return ret; +} + +#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20 +#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100 + +static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + int cntr; + u8 reg; + + /* + * Bus power may control card power, but a full reset still may not + * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can. + * That might be needed to initialize correctly, if the card was left + * powered on previously. + */ + if (intel_host->needs_pwr_off) { + intel_host->needs_pwr_off = false; + if (mode != MMC_POWER_OFF) { + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + usleep_range(10000, 12500); + } + } + + sdhci_set_power(host, mode, vdd); + + if (mode == MMC_POWER_OFF) + return; + + /* + * Bus power might not enable after D3 -> D0 transition due to the + * present state not yet having propagated. Retry for up to 2ms. 
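+	 * The loop below makes SDHCI_INTEL_PWR_TIMEOUT_CNT (20) passes of
+	 * SDHCI_INTEL_PWR_TIMEOUT_UDELAY (100 us) each, re-asserting
+	 * SDHCI_POWER_ON on every pass until the controller reports it set.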
+ */ + for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) { + reg = sdhci_readb(host, SDHCI_POWER_CONTROL); + if (reg & SDHCI_POWER_ON) + break; + udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY); + reg |= SDHCI_POWER_ON; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + } +} + +static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + /* Set UHS timing to SDR25 for High Speed mode */ + if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS) + timing = MMC_TIMING_UHS_SDR25; + sdhci_set_uhs_signaling(host, timing); +} + +#define INTEL_HS400_ES_REG 0x78 +#define INTEL_HS400_ES_BIT BIT(0) + +static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 val; + + val = sdhci_readl(host, INTEL_HS400_ES_REG); + if (ios->enhanced_strobe) + val |= INTEL_HS400_ES_BIT; + else + val &= ~INTEL_HS400_ES_BIT; + sdhci_writel(host, val, INTEL_HS400_ES_REG); +} + +static int intel_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + unsigned int fn; + u32 result = 0; + int err; + + err = sdhci_start_signal_voltage_switch(mmc, ios); + if (err) + return err; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + fn = INTEL_DSM_V33_SWITCH; + break; + case MMC_SIGNAL_VOLTAGE_180: + fn = INTEL_DSM_V18_SWITCH; + break; + default: + return 0; + } + + err = intel_dsm(intel_host, dev, fn, &result); + pr_debug("%s: %s DSM fn %u error %d result %u\n", + mmc_hostname(mmc), __func__, fn, err, result); + + return 0; +} + +static const struct sdhci_ops sdhci_intel_byt_ops = { + .set_clock = sdhci_set_clock, + .set_power = sdhci_intel_set_power, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_intel_set_uhs_signaling, + .hw_reset = sdhci_pci_hw_reset, +}; + +static const struct sdhci_ops sdhci_intel_glk_ops = { + .set_clock = sdhci_set_clock, + .set_power = sdhci_intel_set_power, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_cqhci_reset, + .set_uhs_signaling = sdhci_intel_set_uhs_signaling, + .hw_reset = sdhci_pci_hw_reset, + .irq = sdhci_cqhci_irq, +}; + +static void byt_read_dsm(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct device *dev = &slot->chip->pdev->dev; + struct mmc_host *mmc = slot->host->mmc; + + intel_dsm_init(intel_host, dev, mmc); + slot->chip->rpm_retune = intel_host->d3_retune; +} + +static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + int err = sdhci_execute_tuning(mmc, opcode); + struct sdhci_host *host = mmc_priv(mmc); + + if (err) + return err; + + /* + * Tuning can leave the IP in an active state (Buffer Read Enable bit + * set) which prevents the entry to low power states (i.e. S0i3). Data + * reset will clear it. 
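+	 * (A DAT-only reset clears the buffer state without touching the
+	 * command circuit or the tuned sample clock, so the tuning result
+	 * itself should be preserved.)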
+ */ + sdhci_reset(host, SDHCI_RESET_DATA); + + return 0; +} + +#define INTEL_ACTIVELTR 0x804 +#define INTEL_IDLELTR 0x808 + +#define INTEL_LTR_REQ BIT(15) +#define INTEL_LTR_SCALE_MASK GENMASK(11, 10) +#define INTEL_LTR_SCALE_1US (2 << 10) +#define INTEL_LTR_SCALE_32US (3 << 10) +#define INTEL_LTR_VALUE_MASK GENMASK(9, 0) + +static void intel_cache_ltr(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + + intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR); + intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR); +} + +static void intel_ltr_set(struct device *dev, s32 val) +{ + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); + struct sdhci_pci_slot *slot = chip->slots[0]; + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + u32 ltr; + + pm_runtime_get_sync(dev); + + /* + * Program latency tolerance (LTR) accordingly what has been asked + * by the PM QoS layer or disable it in case we were passed + * negative value or PM_QOS_LATENCY_ANY. + */ + ltr = readl(host->ioaddr + INTEL_ACTIVELTR); + + if (val == PM_QOS_LATENCY_ANY || val < 0) { + ltr &= ~INTEL_LTR_REQ; + } else { + ltr |= INTEL_LTR_REQ; + ltr &= ~INTEL_LTR_SCALE_MASK; + ltr &= ~INTEL_LTR_VALUE_MASK; + + if (val > INTEL_LTR_VALUE_MASK) { + val >>= 5; + if (val > INTEL_LTR_VALUE_MASK) + val = INTEL_LTR_VALUE_MASK; + ltr |= INTEL_LTR_SCALE_32US | val; + } else { + ltr |= INTEL_LTR_SCALE_1US | val; + } + } + + if (ltr == intel_host->active_ltr) + goto out; + + writel(ltr, host->ioaddr + INTEL_ACTIVELTR); + writel(ltr, host->ioaddr + INTEL_IDLELTR); + + /* Cache the values into lpss structure */ + intel_cache_ltr(slot); +out: + pm_runtime_put_autosuspend(dev); +} + +static bool intel_use_ltr(struct sdhci_pci_chip *chip) +{ + switch (chip->pdev->device) { + case PCI_DEVICE_ID_INTEL_BYT_EMMC: + case PCI_DEVICE_ID_INTEL_BYT_EMMC2: + case PCI_DEVICE_ID_INTEL_BYT_SDIO: + case PCI_DEVICE_ID_INTEL_BYT_SD: + case PCI_DEVICE_ID_INTEL_BSW_EMMC: + case PCI_DEVICE_ID_INTEL_BSW_SDIO: + case PCI_DEVICE_ID_INTEL_BSW_SD: + return false; + default: + return true; + } +} + +static void intel_ltr_expose(struct sdhci_pci_chip *chip) +{ + struct device *dev = &chip->pdev->dev; + + if (!intel_use_ltr(chip)) + return; + + dev->power.set_latency_tolerance = intel_ltr_set; + dev_pm_qos_expose_latency_tolerance(dev); +} + +static void intel_ltr_hide(struct sdhci_pci_chip *chip) +{ + struct device *dev = &chip->pdev->dev; + + if (!intel_use_ltr(chip)) + return; + + dev_pm_qos_hide_latency_tolerance(dev); + dev->power.set_latency_tolerance = NULL; +} + +static void byt_probe_slot(struct sdhci_pci_slot *slot) +{ + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + struct device *dev = &slot->chip->pdev->dev; + struct mmc_host *mmc = slot->host->mmc; + + byt_read_dsm(slot); + + byt_ocp_setting(slot->chip->pdev); + + ops->execute_tuning = intel_execute_tuning; + ops->start_signal_voltage_switch = intel_start_signal_voltage_switch; + + device_property_read_u32(dev, "max-frequency", &mmc->f_max); + + if (!mmc->slotno) { + slot->chip->slots[mmc->slotno] = slot; + intel_ltr_expose(slot->chip); + } +} + +static void byt_add_debugfs(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct mmc_host *mmc = slot->host->mmc; + struct dentry *dir = mmc->debugfs_root; + + if (!intel_use_ltr(slot->chip)) + return; + + debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr); + 
debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr); + + intel_cache_ltr(slot); +} + +static int byt_add_host(struct sdhci_pci_slot *slot) +{ + int ret = sdhci_add_host(slot->host); + + if (!ret) + byt_add_debugfs(slot); + return ret; +} + +static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead) +{ + struct mmc_host *mmc = slot->host->mmc; + + if (!mmc->slotno) + intel_ltr_hide(slot->chip); +} + +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + byt_probe_slot(slot); + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | + MMC_CAP_CMD_DURING_TFR | + MMC_CAP_WAIT_WHILE_BUSY; + slot->hw_reset = sdhci_pci_int_hw_reset; + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC) + slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ + slot->host->mmc_host_ops.select_drive_strength = + intel_select_drive_strength; + return 0; +} + +static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || + dmi_match(DMI_SYS_VENDOR, "IRBIS")); +} + +static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC && + dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC."); +} + +static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + int ret = byt_emmc_probe_slot(slot); + + if (!glk_broken_cqhci(slot)) + slot->host->mmc->caps2 |= MMC_CAP2_CQE; + + if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { + if (!jsl_broken_hs400es(slot)) { + slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES; + slot->host->mmc_host_ops.hs400_enhanced_strobe = + intel_hs400_enhanced_strobe; + } + slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; + } + + return ret; +} + +static const struct cqhci_host_ops glk_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_pci_dumpregs, +}; + +static int glk_emmc_add_host(struct sdhci_pci_slot *slot) +{ + struct device *dev = &slot->chip->pdev->dev; + struct sdhci_host *host = slot->host; + struct cqhci_host *cq_host; + bool dma64; + int ret; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + 0x200; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + cq_host->ops = &glk_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + byt_add_debugfs(slot); + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + +#ifdef CONFIG_PM +#define GLK_RX_CTRL1 0x834 +#define GLK_TUN_VAL 0x840 +#define GLK_PATH_PLL GENMASK(13, 8) +#define GLK_DLY GENMASK(6, 0) +/* Workaround firmware failing to restore the tuning value */ +static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + u32 glk_rx_ctrl1; + u32 glk_tun_val; + u32 dly; + + if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) + return; + + glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); + glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); + + if (susp) { + intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; + 
intel_host->glk_tun_val = glk_tun_val; + return; + } + + if (!intel_host->glk_tun_val) + return; + + if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { + intel_host->rpm_retune_ok = true; + return; + } + + dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + + (intel_host->glk_tun_val << 1)); + if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) + return; + + glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; + sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); + + intel_host->rpm_retune_ok = true; + chip->rpm_retune = true; + mmc_retune_needed(host->mmc); + pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); +} + +static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) +{ + if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + !chip->rpm_retune) + glk_rpm_retune_wa(chip, susp); +} + +static int glk_runtime_suspend(struct sdhci_pci_chip *chip) +{ + glk_rpm_retune_chk(chip, true); + + return sdhci_cqhci_runtime_suspend(chip); +} + +static int glk_runtime_resume(struct sdhci_pci_chip *chip) +{ + glk_rpm_retune_chk(chip, false); + + return sdhci_cqhci_runtime_resume(chip); +} +#endif + +#ifdef CONFIG_ACPI +static int ni_set_max_freq(struct sdhci_pci_slot *slot) +{ + acpi_status status; + unsigned long long max_freq; + + status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev), + "MXFQ", NULL, &max_freq); + if (ACPI_FAILURE(status)) { + dev_err(&slot->chip->pdev->dev, + "MXFQ not found in acpi table\n"); + return -EINVAL; + } + + slot->host->mmc->f_max = max_freq * 1000000; + + return 0; +} +#else +static inline int ni_set_max_freq(struct sdhci_pci_slot *slot) +{ + return 0; +} +#endif + +static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ + int err; + + byt_probe_slot(slot); + + err = ni_set_max_freq(slot); + if (err) + return err; + + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | + MMC_CAP_WAIT_WHILE_BUSY; + return 0; +} + +static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ + byt_probe_slot(slot); + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | + MMC_CAP_WAIT_WHILE_BUSY; + return 0; +} + +static void byt_needs_pwr_off(struct sdhci_pci_slot *slot) +{ + struct intel_host *intel_host = sdhci_pci_priv(slot); + u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL); + + intel_host->needs_pwr_off = reg & SDHCI_POWER_ON; +} + +static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) +{ + byt_probe_slot(slot); + slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | + MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; + slot->cd_idx = 0; + slot->cd_override_level = true; + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) + slot->host->mmc_host_ops.get_cd = bxt_get_cd; + + if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI && + slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3) + slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V; + + byt_needs_pwr_off(slot); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int byt_resume(struct sdhci_pci_chip *chip) +{ + byt_ocp_setting(chip->pdev); + + return sdhci_pci_resume_host(chip); +} + +#endif + +#ifdef CONFIG_PM + +static int byt_runtime_resume(struct sdhci_pci_chip *chip) +{ + byt_ocp_setting(chip->pdev); + + return sdhci_pci_runtime_resume_host(chip); +} + +#endif + +static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { 
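+	/*
+	 * The resume hooks re-apply the Baytrail OCP timeout setting
+	 * (byt_ocp_setting()), which may not survive a suspend cycle.
+	 */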
+#ifdef CONFIG_PM_SLEEP + .resume = byt_resume, +#endif +#ifdef CONFIG_PM + .runtime_resume = byt_runtime_resume, +#endif + .allow_runtime_pm = true, + .probe_slot = byt_emmc_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | + SDHCI_QUIRK2_STOP_WITH_TC, + .ops = &sdhci_intel_byt_ops, + .priv_size = sizeof(struct intel_host), +}; + +static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { + .allow_runtime_pm = true, + .probe_slot = glk_emmc_probe_slot, + .add_host = glk_emmc_add_host, + .remove_slot = byt_remove_slot, +#ifdef CONFIG_PM_SLEEP + .suspend = sdhci_cqhci_suspend, + .resume = sdhci_cqhci_resume, +#endif +#ifdef CONFIG_PM + .runtime_suspend = glk_runtime_suspend, + .runtime_resume = glk_runtime_resume, +#endif + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | + SDHCI_QUIRK2_STOP_WITH_TC, + .ops = &sdhci_intel_glk_ops, + .priv_size = sizeof(struct intel_host), +}; + +static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = { +#ifdef CONFIG_PM_SLEEP + .resume = byt_resume, +#endif +#ifdef CONFIG_PM + .runtime_resume = byt_runtime_resume, +#endif + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .allow_runtime_pm = true, + .probe_slot = ni_byt_sdio_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, + .ops = &sdhci_intel_byt_ops, + .priv_size = sizeof(struct intel_host), +}; + +static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { +#ifdef CONFIG_PM_SLEEP + .resume = byt_resume, +#endif +#ifdef CONFIG_PM + .runtime_resume = byt_runtime_resume, +#endif + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .allow_runtime_pm = true, + .probe_slot = byt_sdio_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, + .ops = &sdhci_intel_byt_ops, + .priv_size = sizeof(struct intel_host), +}; + +static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { +#ifdef CONFIG_PM_SLEEP + .resume = byt_resume, +#endif +#ifdef CONFIG_PM + .runtime_resume = byt_runtime_resume, +#endif + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_LED, + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_STOP_WITH_TC, + .allow_runtime_pm = true, + .own_cd_for_runtime_pm = true, + .probe_slot = byt_sd_probe_slot, + .add_host = byt_add_host, + .remove_slot = byt_remove_slot, + .ops = &sdhci_intel_byt_ops, + .priv_size = sizeof(struct intel_host), +}; + +/* Define Host controllers for Intel Merrifield platform */ +#define INTEL_MRFLD_EMMC_0 0 +#define INTEL_MRFLD_EMMC_1 1 +#define INTEL_MRFLD_SD 2 +#define INTEL_MRFLD_SDIO 3 + +#ifdef CONFIG_ACPI +static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) +{ + struct acpi_device *device, *child; + + device = ACPI_COMPANION(&slot->chip->pdev->dev); + if (!device) + return; + + acpi_device_fix_up_power(device); + list_for_each_entry(child, &device->children, node) + if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); +} +#else +static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {} +#endif + +static int 
intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) +{ + unsigned int func = PCI_FUNC(slot->chip->pdev->devfn); + + switch (func) { + case INTEL_MRFLD_EMMC_0: + case INTEL_MRFLD_EMMC_1: + slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | + MMC_CAP_8_BIT_DATA | + MMC_CAP_1_8V_DDR; + break; + case INTEL_MRFLD_SD: + slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + break; + case INTEL_MRFLD_SDIO: + /* Advertise 2.0v for compatibility with the SDIO card's OCR */ + slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; + slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | + MMC_CAP_POWER_OFF_CARD; + break; + default: + return -ENODEV; + } + + intel_mrfld_mmc_fix_up_power_slot(slot); + return 0; +} + +static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .allow_runtime_pm = true, + .probe_slot = intel_mrfld_mmc_probe_slot, +}; + +static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) +{ + u8 scratch; + int ret; + + ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); + if (ret) + return ret; + + /* + * Turn PMOS on [bit 0], set over current detection to 2.4 V + * [bit 1:2] and enable over current debouncing [bit 6]. + */ + if (on) + scratch |= 0x47; + else + scratch &= ~0x47; + + return pci_write_config_byte(chip->pdev, 0xAE, scratch); +} + +static int jmicron_probe(struct sdhci_pci_chip *chip) +{ + int ret; + u16 mmcdev = 0; + + if (chip->pdev->revision == 0) { + chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE | + SDHCI_QUIRK_RESET_AFTER_REQUEST | + SDHCI_QUIRK_BROKEN_SMALL_PIO; + } + + /* + * JMicron chips can have two interfaces to the same hardware + * in order to work around limitations in Microsoft's driver. + * We need to make sure we only bind to one of them. + * + * This code assumes two things: + * + * 1. The PCI code adds subfunctions in order. + * + * 2. The MMC interface has a lower subfunction number + * than the SD interface. + */ + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) + mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC; + else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD) + mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD; + + if (mmcdev) { + struct pci_dev *sd_dev; + + sd_dev = NULL; + while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, + mmcdev, sd_dev)) != NULL) { + if ((PCI_SLOT(chip->pdev->devfn) == + PCI_SLOT(sd_dev->devfn)) && + (chip->pdev->bus == sd_dev->bus)) + break; + } + + if (sd_dev) { + pci_dev_put(sd_dev); + dev_info(&chip->pdev->dev, "Refusing to bind to " + "secondary interface.\n"); + return -ENODEV; + } + } + + /* + * JMicron chips need a bit of a nudge to enable the power + * output pins. 
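+ * (The nudge is jmicron_pmos() below; note that jmicron_resume()
+ * repeats it after a suspend.)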
+ */ + ret = jmicron_pmos(chip, 1); + if (ret) { + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); + return ret; + } + + /* quirk for unsable RO-detection on JM388 chips */ + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || + chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) + chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT; + + return 0; +} + +static void jmicron_enable_mmc(struct sdhci_host *host, int on) +{ + u8 scratch; + + scratch = readb(host->ioaddr + 0xC0); + + if (on) + scratch |= 0x01; + else + scratch &= ~0x01; + + writeb(scratch, host->ioaddr + 0xC0); +} + +static int jmicron_probe_slot(struct sdhci_pci_slot *slot) +{ + if (slot->chip->pdev->revision == 0) { + u16 version; + + version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION); + version = (version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT; + + /* + * Older versions of the chip have lots of nasty glitches + * in the ADMA engine. It's best just to avoid it + * completely. + */ + if (version < 0xAC) + slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + } + + /* JM388 MMC doesn't support 1.8V while SD supports it */ + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { + slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 | + MMC_VDD_29_30 | MMC_VDD_30_31 | + MMC_VDD_165_195; /* allow 1.8V */ + slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 | + MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */ + } + + /* + * The secondary interface requires a bit set to get the + * interrupts. + */ + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || + slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) + jmicron_enable_mmc(slot->host, 1); + + slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; + + return 0; +} + +static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) +{ + if (dead) + return; + + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || + slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) + jmicron_enable_mmc(slot->host, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int jmicron_suspend(struct sdhci_pci_chip *chip) +{ + int i, ret; + + ret = sdhci_pci_suspend_host(chip); + if (ret) + return ret; + + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || + chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { + for (i = 0; i < chip->num_slots; i++) + jmicron_enable_mmc(chip->slots[i]->host, 0); + } + + return 0; +} + +static int jmicron_resume(struct sdhci_pci_chip *chip) +{ + int ret, i; + + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || + chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { + for (i = 0; i < chip->num_slots; i++) + jmicron_enable_mmc(chip->slots[i]->host, 1); + } + + ret = jmicron_pmos(chip, 1); + if (ret) { + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); + return ret; + } + + return sdhci_pci_resume_host(chip); +} +#endif + +static const struct sdhci_pci_fixes sdhci_jmicron = { + .probe = jmicron_probe, + + .probe_slot = jmicron_probe_slot, + .remove_slot = jmicron_remove_slot, + +#ifdef CONFIG_PM_SLEEP + .suspend = jmicron_suspend, + .resume = jmicron_resume, +#endif +}; + +/* SysKonnect CardBus2SDIO extra registers */ +#define SYSKT_CTRL 0x200 +#define SYSKT_RDFIFO_STAT 0x204 +#define SYSKT_WRFIFO_STAT 0x208 +#define SYSKT_POWER_DATA 0x20c +#define SYSKT_POWER_330 0xef +#define SYSKT_POWER_300 0xf8 +#define SYSKT_POWER_184 0xcc +#define SYSKT_POWER_CMD 0x20d +#define SYSKT_POWER_START (1 << 7) +#define SYSKT_POWER_STATUS 0x20e +#define 
SYSKT_POWER_STATUS_OK (1 << 0) +#define SYSKT_BOARD_REV 0x210 +#define SYSKT_CHIP_REV 0x211 +#define SYSKT_CONF_DATA 0x212 +#define SYSKT_CONF_DATA_1V8 (1 << 2) +#define SYSKT_CONF_DATA_2V5 (1 << 1) +#define SYSKT_CONF_DATA_3V3 (1 << 0) + +static int syskt_probe(struct sdhci_pci_chip *chip) +{ + if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { + chip->pdev->class &= ~0x0000FF; + chip->pdev->class |= PCI_SDHCI_IFDMA; + } + return 0; +} + +static int syskt_probe_slot(struct sdhci_pci_slot *slot) +{ + int tm, ps; + + u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); + u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); + dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, " + "board rev %d.%d, chip rev %d.%d\n", + board_rev >> 4, board_rev & 0xf, + chip_rev >> 4, chip_rev & 0xf); + if (chip_rev >= 0x20) + slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; + + writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); + writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); + udelay(50); + tm = 10; /* Wait max 1 ms */ + do { + ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); + if (ps & SYSKT_POWER_STATUS_OK) + break; + udelay(100); + } while (--tm); + if (!tm) { + dev_err(&slot->chip->pdev->dev, + "power regulator never stabilized"); + writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); + return -ENODEV; + } + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_syskt = { + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER, + .probe = syskt_probe, + .probe_slot = syskt_probe_slot, +}; + +static int via_probe(struct sdhci_pci_chip *chip) +{ + if (chip->pdev->revision == 0x10) + chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_via = { + .probe = via_probe, +}; + +static int rtsx_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps2 |= MMC_CAP2_HS200; + return 0; +} + +static const struct sdhci_pci_fixes sdhci_rtsx = { + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_BROKEN_64_BIT_DMA | + SDHCI_QUIRK2_BROKEN_DDR50, + .probe_slot = rtsx_probe_slot, +}; + +/*AMD chipset generation*/ +enum amd_chipset_gen { + AMD_CHIPSET_BEFORE_ML, + AMD_CHIPSET_CZ, + AMD_CHIPSET_NL, + AMD_CHIPSET_UNKNOWN, +}; + +/* AMD registers */ +#define AMD_SD_AUTO_PATTERN 0xB8 +#define AMD_MSLEEP_DURATION 4 +#define AMD_SD_MISC_CONTROL 0xD0 +#define AMD_MAX_TUNE_VALUE 0x0B +#define AMD_AUTO_TUNE_SEL 0x10800 +#define AMD_FIFO_PTR 0x30 +#define AMD_BIT_MASK 0x1F + +static void amd_tuning_reset(struct sdhci_host *host) +{ + unsigned int val; + + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val &= ~SDHCI_CTRL_EXEC_TUNING; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); +} + +static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase) +{ + unsigned int val; + + pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val); + val &= ~AMD_BIT_MASK; + val |= (AMD_AUTO_TUNE_SEL | (phase << 1)); + pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val); +} + +static void amd_enable_manual_tuning(struct pci_dev *pdev) +{ + unsigned int val; + + pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val); + val |= AMD_FIFO_PTR; + pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); +} + +static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct pci_dev *pdev = 
slot->chip->pdev; + u8 valid_win = 0; + u8 valid_win_max = 0; + u8 valid_win_end = 0; + u8 ctrl, tune_around; + + amd_tuning_reset(host); + + for (tune_around = 0; tune_around < 12; tune_around++) { + amd_config_tuning_phase(pdev, tune_around); + + if (mmc_send_tuning(host->mmc, opcode, NULL)) { + valid_win = 0; + msleep(AMD_MSLEEP_DURATION); + ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA; + sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET); + } else if (++valid_win > valid_win_max) { + valid_win_max = valid_win; + valid_win_end = tune_around; + } + } + + if (!valid_win_max) { + dev_err(&pdev->dev, "no tuning point found\n"); + return -EIO; + } + + amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2); + + amd_enable_manual_tuning(pdev); + + host->mmc->retune_period = 0; + + return 0; +} + +static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* AMD requires custom HS200 tuning */ + if (host->timing == MMC_TIMING_MMC_HS200) + return amd_execute_tuning_hs200(host, opcode); + + /* Otherwise perform standard SDHCI tuning */ + return sdhci_execute_tuning(mmc, opcode); +} + +static int amd_probe_slot(struct sdhci_pci_slot *slot) +{ + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + + ops->execute_tuning = amd_execute_tuning; + + return 0; +} + +static int amd_probe(struct sdhci_pci_chip *chip) +{ + struct pci_dev *smbus_dev; + enum amd_chipset_gen gen; + + smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); + if (smbus_dev) { + gen = AMD_CHIPSET_BEFORE_ML; + } else { + smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); + if (smbus_dev) { + if (smbus_dev->revision < 0x51) + gen = AMD_CHIPSET_CZ; + else + gen = AMD_CHIPSET_NL; + } else { + gen = AMD_CHIPSET_UNKNOWN; + } + } + + pci_dev_put(smbus_dev); + + if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ) + chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; + + return 0; +} + +static u32 sdhci_read_present_state(struct sdhci_host *host) +{ + return sdhci_readl(host, SDHCI_PRESENT_STATE); +} + +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct pci_dev *pdev = slot->chip->pdev; + u32 present_state; + + /* + * SDHC 0x7906 requires a hard reset to clear all internal state. + * Otherwise it can get into a bad state where the DATA lines are always + * read as zeros. + */ + if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) { + pci_clear_master(pdev); + + pci_save_state(pdev); + + pci_set_power_state(pdev, PCI_D3cold); + pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc), + pdev->current_state); + pci_set_power_state(pdev, PCI_D0); + + pci_restore_state(pdev); + + /* + * SDHCI_RESET_ALL says the card detect logic should not be + * reset, but since we need to reset the entire controller + * we should wait until the card detect logic has stabilized. + * + * This normally takes about 40ms. 
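+		 * readx_poll_timeout() below re-reads SDHCI_PRESENT_STATE
+		 * every 10 ms and gives up after 100 ms, which comfortably
+		 * covers the typical 40 ms.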
+ */ + readx_poll_timeout( + sdhci_read_present_state, + host, + present_state, + present_state & SDHCI_CD_STABLE, + 10000, + 100000 + ); + } + + return sdhci_reset(host, mask); +} + +static const struct sdhci_ops amd_sdhci_pci_ops = { + .set_clock = sdhci_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = amd_sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pci_fixes sdhci_amd = { + .probe = amd_probe, + .ops = &amd_sdhci_pci_ops, + .probe_slot = amd_probe_slot, +}; + +static const struct pci_device_id pci_ids[] = { + SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh), + SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc), + SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc), + SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc), + SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712), + SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712), + SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714), + SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714), + SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe), + SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron), + SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron), + SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron), + SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron), + SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt), + SDHCI_PCI_DEVICE(VIA, 95D0, via), + SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx), + SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk), + SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0), + SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2), + SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2), + SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd), + SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio), + SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio), + SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc), + SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc), + SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio), + SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio), + SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc), + SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd), + SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio), + SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio), + SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc), + SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc), + SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc), + SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio), + SDHCI_PCI_DEVICE(INTEL, GLK_SD, 
intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(O2, 8120, o2), + SDHCI_PCI_DEVICE(O2, 8220, o2), + SDHCI_PCI_DEVICE(O2, 8221, o2), + SDHCI_PCI_DEVICE(O2, 8320, o2), + SDHCI_PCI_DEVICE(O2, 8321, o2), + SDHCI_PCI_DEVICE(O2, FUJIN2, o2), + SDHCI_PCI_DEVICE(O2, SDS0, o2), + SDHCI_PCI_DEVICE(O2, SDS1, o2), + SDHCI_PCI_DEVICE(O2, SEABIRD0, o2), + SDHCI_PCI_DEVICE(O2, SEABIRD1, o2), + SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan), + SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps), + SDHCI_PCI_DEVICE(GLI, 9750, gl9750), + SDHCI_PCI_DEVICE(GLI, 9755, gl9755), + SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e), + SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd), + /* Generic SD host controller */ + {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)}, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +/*****************************************************************************\ + * * + * SDHCI core callbacks * + * * +\*****************************************************************************/ + +int sdhci_pci_enable_dma(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot; + struct pci_dev *pdev; + + slot = sdhci_priv(host); + pdev = slot->chip->pdev; + + if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && + ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && + (host->flags & SDHCI_USE_SDMA)) { + dev_warn(&pdev->dev, "Will use DMA mode even though HW " + "doesn't fully claim to support it.\n"); + } + + pci_set_master(pdev); + + return 0; +} + +static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + int rst_n_gpio = slot->rst_n_gpio; + + if (!gpio_is_valid(rst_n_gpio)) + return; + gpio_set_value_cansleep(rst_n_gpio, 0); + /* For eMMC, minimum is 1us but give it 10us for good measure */ + udelay(10); + gpio_set_value_cansleep(rst_n_gpio, 1); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 1000); +} + +static void sdhci_pci_hw_reset(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + + if (slot->hw_reset) + slot->hw_reset(host); +} + +static const struct sdhci_ops sdhci_pci_ops = { + .set_clock = sdhci_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .hw_reset = sdhci_pci_hw_reset, +}; + +/*****************************************************************************\ + * * + * Suspend/resume * + * * +\*****************************************************************************/ + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pci_suspend(struct device *dev) +{ + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); + + if (!chip) + return 0; + + if (chip->fixes && chip->fixes->suspend) + return 
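+		/* (the hook must call sdhci_pci_suspend_host() itself, as
+		 *  sdhci_cqhci_suspend() and jmicron_suspend() do) */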
chip->fixes->suspend(chip); + + return sdhci_pci_suspend_host(chip); +} + +static int sdhci_pci_resume(struct device *dev) +{ + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); + + if (!chip) + return 0; + + if (chip->fixes && chip->fixes->resume) + return chip->fixes->resume(chip); + + return sdhci_pci_resume_host(chip); +} +#endif + +#ifdef CONFIG_PM +static int sdhci_pci_runtime_suspend(struct device *dev) +{ + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); + + if (!chip) + return 0; + + if (chip->fixes && chip->fixes->runtime_suspend) + return chip->fixes->runtime_suspend(chip); + + return sdhci_pci_runtime_suspend_host(chip); +} + +static int sdhci_pci_runtime_resume(struct device *dev) +{ + struct sdhci_pci_chip *chip = dev_get_drvdata(dev); + + if (!chip) + return 0; + + if (chip->fixes && chip->fixes->runtime_resume) + return chip->fixes->runtime_resume(chip); + + return sdhci_pci_runtime_resume_host(chip); +} +#endif + +static const struct dev_pm_ops sdhci_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume) + SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend, + sdhci_pci_runtime_resume, NULL) +}; + +/*****************************************************************************\ + * * + * Device probing/removal * + * * +\*****************************************************************************/ + +static struct sdhci_pci_slot *sdhci_pci_probe_slot( + struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, + int slotno) +{ + struct sdhci_pci_slot *slot; + struct sdhci_host *host; + int ret, bar = first_bar + slotno; + size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0; + + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); + return ERR_PTR(-ENODEV); + } + + if (pci_resource_len(pdev, bar) < 0x100) { + dev_err(&pdev->dev, "Invalid iomem size. You may " + "experience problems.\n"); + } + + if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { + dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); + return ERR_PTR(-ENODEV); + } + + if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { + dev_err(&pdev->dev, "Unknown interface. Aborting.\n"); + return ERR_PTR(-ENODEV); + } + + host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size); + if (IS_ERR(host)) { + dev_err(&pdev->dev, "cannot allocate host\n"); + return ERR_CAST(host); + } + + slot = sdhci_priv(host); + + slot->chip = chip; + slot->host = host; + slot->rst_n_gpio = -EINVAL; + slot->cd_gpio = -EINVAL; + slot->cd_idx = -1; + + /* Retrieve platform data if there is any */ + if (*sdhci_pci_get_data) + slot->data = sdhci_pci_get_data(pdev, slotno); + + if (slot->data) { + if (slot->data->setup) { + ret = slot->data->setup(slot->data); + if (ret) { + dev_err(&pdev->dev, "platform setup failed\n"); + goto free; + } + } + slot->rst_n_gpio = slot->data->rst_n_gpio; + slot->cd_gpio = slot->data->cd_gpio; + } + + host->hw_name = "PCI"; + host->ops = chip->fixes && chip->fixes->ops ? 
+ chip->fixes->ops : + &sdhci_pci_ops; + host->quirks = chip->quirks; + host->quirks2 = chip->quirks2; + + host->irq = pdev->irq; + + ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc)); + if (ret) { + dev_err(&pdev->dev, "cannot request region\n"); + goto cleanup; + } + + host->ioaddr = pcim_iomap_table(pdev)[bar]; + + if (chip->fixes && chip->fixes->probe_slot) { + ret = chip->fixes->probe_slot(slot); + if (ret) + goto cleanup; + } + + if (gpio_is_valid(slot->rst_n_gpio)) { + if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) { + gpio_direction_output(slot->rst_n_gpio, 1); + slot->host->mmc->caps |= MMC_CAP_HW_RESET; + slot->hw_reset = sdhci_pci_gpio_hw_reset; + } else { + dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); + slot->rst_n_gpio = -EINVAL; + } + } + + host->mmc->pm_caps = MMC_PM_KEEP_POWER; + host->mmc->slotno = slotno; + host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; + + if (device_can_wakeup(&pdev->dev)) + host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + + if (host->mmc->caps & MMC_CAP_CD_WAKE) + device_init_wakeup(&pdev->dev, true); + + if (slot->cd_idx >= 0) { + ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, + slot->cd_override_level, 0); + if (ret && ret != -EPROBE_DEFER) + ret = mmc_gpiod_request_cd(host->mmc, NULL, + slot->cd_idx, + slot->cd_override_level, + 0); + if (ret == -EPROBE_DEFER) + goto remove; + + if (ret) { + dev_warn(&pdev->dev, "failed to setup card detect gpio\n"); + slot->cd_idx = -1; + } + } + + if (chip->fixes && chip->fixes->add_host) + ret = chip->fixes->add_host(slot); + else + ret = sdhci_add_host(host); + if (ret) + goto remove; + + sdhci_pci_add_own_cd(slot); + + /* + * Check if the chip needs a separate GPIO for card detect to wake up + * from runtime suspend. If it is not there, don't allow runtime PM. + * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure. 
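+	 * Without an always-powered card-detect source the controller cannot
+	 * notice card insertion or removal while it is runtime suspended.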
+ */ + if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && + !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0) + chip->allow_runtime_pm = false; + + return slot; + +remove: + if (chip->fixes && chip->fixes->remove_slot) + chip->fixes->remove_slot(slot, 0); + +cleanup: + if (slot->data && slot->data->cleanup) + slot->data->cleanup(slot->data); + +free: + sdhci_free_host(host); + + return ERR_PTR(ret); +} + +static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) +{ + int dead; + u32 scratch; + + sdhci_pci_remove_own_cd(slot); + + dead = 0; + scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); + if (scratch == (u32)-1) + dead = 1; + + sdhci_remove_host(slot->host, dead); + + if (slot->chip->fixes && slot->chip->fixes->remove_slot) + slot->chip->fixes->remove_slot(slot, dead); + + if (slot->data && slot->data->cleanup) + slot->data->cleanup(slot->data); + + sdhci_free_host(slot->host); +} + +static void sdhci_pci_runtime_pm_allow(struct device *dev) +{ + pm_suspend_ignore_children(dev, 1); + pm_runtime_set_autosuspend_delay(dev, 50); + pm_runtime_use_autosuspend(dev); + pm_runtime_allow(dev); + /* Stay active until mmc core scans for a card */ + pm_runtime_put_noidle(dev); +} + +static void sdhci_pci_runtime_pm_forbid(struct device *dev) +{ + pm_runtime_forbid(dev); + pm_runtime_get_noresume(dev); +} + +static int sdhci_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + + u8 slots, first_bar; + int ret, i; + + BUG_ON(pdev == NULL); + BUG_ON(ent == NULL); + + dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); + + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); + if (ret) + return ret; + + slots = PCI_SLOT_INFO_SLOTS(slots) + 1; + dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); + + BUG_ON(slots > MAX_SLOTS); + + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); + if (ret) + return ret; + + first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; + + if (first_bar > 5) { + dev_err(&pdev->dev, "Invalid first BAR. 
Aborting.\n"); + return -ENODEV; + } + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->pdev = pdev; + chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; + if (chip->fixes) { + chip->quirks = chip->fixes->quirks; + chip->quirks2 = chip->fixes->quirks2; + chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; + } + chip->num_slots = slots; + chip->pm_retune = true; + chip->rpm_retune = true; + + pci_set_drvdata(pdev, chip); + + if (chip->fixes && chip->fixes->probe) { + ret = chip->fixes->probe(chip); + if (ret) + return ret; + } + + slots = chip->num_slots; /* Quirk may have changed this */ + + for (i = 0; i < slots; i++) { + slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i); + if (IS_ERR(slot)) { + for (i--; i >= 0; i--) + sdhci_pci_remove_slot(chip->slots[i]); + return PTR_ERR(slot); + } + + chip->slots[i] = slot; + } + + if (chip->allow_runtime_pm) + sdhci_pci_runtime_pm_allow(&pdev->dev); + + return 0; +} + +static void sdhci_pci_remove(struct pci_dev *pdev) +{ + int i; + struct sdhci_pci_chip *chip = pci_get_drvdata(pdev); + + if (chip->allow_runtime_pm) + sdhci_pci_runtime_pm_forbid(&pdev->dev); + + for (i = 0; i < chip->num_slots; i++) + sdhci_pci_remove_slot(chip->slots[i]); +} + +static struct pci_driver sdhci_driver = { + .name = "sdhci-pci", + .id_table = pci_ids, + .probe = sdhci_pci_probe, + .remove = sdhci_pci_remove, + .driver = { + .pm = &sdhci_pci_pm_ops + }, +}; + +module_pci_driver(sdhci_driver); + +MODULE_AUTHOR("Pierre Ossman "); +MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c new file mode 100644 index 000000000..18638fb36 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-data.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include + +struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); +EXPORT_SYMBOL_GPL(sdhci_pci_get_data); diff --git a/drivers/mmc/host/sdhci-pci-dwc-mshc.c b/drivers/mmc/host/sdhci-pci-dwc-mshc.c new file mode 100644 index 000000000..f78d65448 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-dwc-mshc.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SDHCI driver for Synopsys DWC_MSHC controller + * + * Copyright (C) 2018 Synopsys, Inc. 
(www.synopsys.com) + * + * Authors: + * Prabu Thangamuthu + * Manjunath M B + */ + +#include "sdhci.h" +#include "sdhci-pci.h" + +#define SDHCI_VENDOR_PTR_R 0xE8 + +/* Synopsys vendor specific registers */ +#define SDHC_GPIO_OUT 0x34 +#define SDHC_AT_CTRL_R 0x40 +#define SDHC_SW_TUNE_EN 0x00000010 + +/* MMCM DRP */ +#define SDHC_MMCM_DIV_REG 0x1020 +#define DIV_REG_100_MHZ 0x1145 +#define DIV_REG_200_MHZ 0x1083 +#define SDHC_MMCM_CLKFBOUT 0x1024 +#define CLKFBOUT_100_MHZ 0x0000 +#define CLKFBOUT_200_MHZ 0x0080 +#define SDHC_CCLK_MMCM_RST 0x00000001 + +static void sdhci_snps_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + u32 reg, vendor_ptr; + + vendor_ptr = sdhci_readw(host, SDHCI_VENDOR_PTR_R); + + /* Disable software managed rx tuning */ + reg = sdhci_readl(host, (SDHC_AT_CTRL_R + vendor_ptr)); + reg &= ~SDHC_SW_TUNE_EN; + sdhci_writel(host, reg, (SDHC_AT_CTRL_R + vendor_ptr)); + + if (clock <= 52000000) { + sdhci_set_clock(host, clock); + } else { + /* Assert reset to MMCM */ + reg = sdhci_readl(host, (SDHC_GPIO_OUT + vendor_ptr)); + reg |= SDHC_CCLK_MMCM_RST; + sdhci_writel(host, reg, (SDHC_GPIO_OUT + vendor_ptr)); + + /* Configure MMCM */ + if (clock == 100000000) { + sdhci_writel(host, DIV_REG_100_MHZ, SDHC_MMCM_DIV_REG); + sdhci_writel(host, CLKFBOUT_100_MHZ, + SDHC_MMCM_CLKFBOUT); + } else { + sdhci_writel(host, DIV_REG_200_MHZ, SDHC_MMCM_DIV_REG); + sdhci_writel(host, CLKFBOUT_200_MHZ, + SDHC_MMCM_CLKFBOUT); + } + + /* De-assert reset to MMCM */ + reg = sdhci_readl(host, (SDHC_GPIO_OUT + vendor_ptr)); + reg &= ~SDHC_CCLK_MMCM_RST; + sdhci_writel(host, reg, (SDHC_GPIO_OUT + vendor_ptr)); + + /* Enable clock */ + clk = SDHCI_PROG_CLOCK_MODE | SDHCI_CLOCK_INT_EN | + SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + } +} + +static const struct sdhci_ops sdhci_snps_ops = { + .set_clock = sdhci_snps_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +const struct sdhci_pci_fixes sdhci_snps = { + .ops = &sdhci_snps_ops, +}; diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c new file mode 100644 index 000000000..23b89b4ca --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -0,0 +1,865 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Genesys Logic, Inc. 
+ * + * Authors: Ben Chuang + * + * Version: v0.9.0 (2019-08-08) + */ + +#include +#include +#include +#include +#include +#include "sdhci.h" +#include "sdhci-pci.h" +#include "cqhci.h" + +/* Genesys Logic extra registers */ +#define SDHCI_GLI_9750_WT 0x800 +#define SDHCI_GLI_9750_WT_EN BIT(0) +#define GLI_9750_WT_EN_ON 0x1 +#define GLI_9750_WT_EN_OFF 0x0 + +#define SDHCI_GLI_9750_DRIVING 0x860 +#define SDHCI_GLI_9750_DRIVING_1 GENMASK(11, 0) +#define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26) +#define GLI_9750_DRIVING_1_VALUE 0xFFF +#define GLI_9750_DRIVING_2_VALUE 0x3 +#define SDHCI_GLI_9750_SEL_1 BIT(29) +#define SDHCI_GLI_9750_SEL_2 BIT(31) +#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30)) + +#define SDHCI_GLI_9750_PLL 0x864 +#define SDHCI_GLI_9750_PLL_LDIV GENMASK(9, 0) +#define SDHCI_GLI_9750_PLL_PDIV GENMASK(14, 12) +#define SDHCI_GLI_9750_PLL_DIR BIT(15) +#define SDHCI_GLI_9750_PLL_TX2_INV BIT(23) +#define SDHCI_GLI_9750_PLL_TX2_DLY GENMASK(22, 20) +#define GLI_9750_PLL_TX2_INV_VALUE 0x1 +#define GLI_9750_PLL_TX2_DLY_VALUE 0x0 +#define SDHCI_GLI_9750_PLLSSC_STEP GENMASK(28, 24) +#define SDHCI_GLI_9750_PLLSSC_EN BIT(31) + +#define SDHCI_GLI_9750_PLLSSC 0x86C +#define SDHCI_GLI_9750_PLLSSC_PPM GENMASK(31, 16) + +#define SDHCI_GLI_9750_SW_CTRL 0x874 +#define SDHCI_GLI_9750_SW_CTRL_4 GENMASK(7, 6) +#define GLI_9750_SW_CTRL_4_VALUE 0x3 + +#define SDHCI_GLI_9750_MISC 0x878 +#define SDHCI_GLI_9750_MISC_TX1_INV BIT(2) +#define SDHCI_GLI_9750_MISC_RX_INV BIT(3) +#define SDHCI_GLI_9750_MISC_TX1_DLY GENMASK(6, 4) +#define GLI_9750_MISC_TX1_INV_VALUE 0x0 +#define GLI_9750_MISC_RX_INV_ON 0x1 +#define GLI_9750_MISC_RX_INV_OFF 0x0 +#define GLI_9750_MISC_RX_INV_VALUE GLI_9750_MISC_RX_INV_OFF +#define GLI_9750_MISC_TX1_DLY_VALUE 0x5 + +#define SDHCI_GLI_9750_TUNING_CONTROL 0x540 +#define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4) +#define GLI_9750_TUNING_CONTROL_EN_ON 0x1 +#define GLI_9750_TUNING_CONTROL_EN_OFF 0x0 +#define SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1 BIT(16) +#define SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2 GENMASK(20, 19) +#define GLI_9750_TUNING_CONTROL_GLITCH_1_VALUE 0x1 +#define GLI_9750_TUNING_CONTROL_GLITCH_2_VALUE 0x2 + +#define SDHCI_GLI_9750_TUNING_PARAMETERS 0x544 +#define SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY GENMASK(2, 0) +#define GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE 0x1 + +#define SDHCI_GLI_9763E_CTRL_HS400 0x7 + +#define SDHCI_GLI_9763E_HS400_ES_REG 0x52C +#define SDHCI_GLI_9763E_HS400_ES_BIT BIT(8) + +#define PCIE_GLI_9763E_VHS 0x884 +#define GLI_9763E_VHS_REV GENMASK(19, 16) +#define GLI_9763E_VHS_REV_R 0x0 +#define GLI_9763E_VHS_REV_M 0x1 +#define GLI_9763E_VHS_REV_W 0x2 +#define PCIE_GLI_9763E_MB 0x888 +#define GLI_9763E_MB_CMDQ_OFF BIT(19) +#define PCIE_GLI_9763E_SCR 0x8E0 +#define GLI_9763E_SCR_AXI_REQ BIT(9) + +#define SDHCI_GLI_9763E_CQE_BASE_ADDR 0x200 +#define GLI_9763E_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \ + SDHCI_TRNS_BLK_CNT_EN | \ + SDHCI_TRNS_DMA) + +#define PCI_GLI_9755_WT 0x800 +#define PCI_GLI_9755_WT_EN BIT(0) +#define GLI_9755_WT_EN_ON 0x1 +#define GLI_9755_WT_EN_OFF 0x0 + +#define PCI_GLI_9755_PLL 0x64 +#define PCI_GLI_9755_PLL_LDIV GENMASK(9, 0) +#define PCI_GLI_9755_PLL_PDIV GENMASK(14, 12) +#define PCI_GLI_9755_PLL_DIR BIT(15) +#define PCI_GLI_9755_PLLSSC_STEP GENMASK(28, 24) +#define PCI_GLI_9755_PLLSSC_EN BIT(31) + +#define PCI_GLI_9755_PLLSSC 0x68 +#define PCI_GLI_9755_PLLSSC_PPM GENMASK(15, 0) + +#define GLI_MAX_TUNING_LOOP 40 + +/* Genesys Logic chipset */ +static inline void gl9750_wt_on(struct sdhci_host *host) +{ + u32 wt_value; 
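+	/* gli_set_9750() brackets every vendor register update with
+	 * gl9750_wt_on()/gl9750_wt_off(), toggling bit 0 of
+	 * SDHCI_GLI_9750_WT before and after the writes. */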
+ u32 wt_enable; + + wt_value = sdhci_readl(host, SDHCI_GLI_9750_WT); + wt_enable = FIELD_GET(SDHCI_GLI_9750_WT_EN, wt_value); + + if (wt_enable == GLI_9750_WT_EN_ON) + return; + + wt_value &= ~SDHCI_GLI_9750_WT_EN; + wt_value |= FIELD_PREP(SDHCI_GLI_9750_WT_EN, GLI_9750_WT_EN_ON); + + sdhci_writel(host, wt_value, SDHCI_GLI_9750_WT); +} + +static inline void gl9750_wt_off(struct sdhci_host *host) +{ + u32 wt_value; + u32 wt_enable; + + wt_value = sdhci_readl(host, SDHCI_GLI_9750_WT); + wt_enable = FIELD_GET(SDHCI_GLI_9750_WT_EN, wt_value); + + if (wt_enable == GLI_9750_WT_EN_OFF) + return; + + wt_value &= ~SDHCI_GLI_9750_WT_EN; + wt_value |= FIELD_PREP(SDHCI_GLI_9750_WT_EN, GLI_9750_WT_EN_OFF); + + sdhci_writel(host, wt_value, SDHCI_GLI_9750_WT); +} + +static void gli_set_9750(struct sdhci_host *host) +{ + u32 driving_value; + u32 pll_value; + u32 sw_ctrl_value; + u32 misc_value; + u32 parameter_value; + u32 control_value; + u16 ctrl2; + + gl9750_wt_on(host); + + driving_value = sdhci_readl(host, SDHCI_GLI_9750_DRIVING); + pll_value = sdhci_readl(host, SDHCI_GLI_9750_PLL); + sw_ctrl_value = sdhci_readl(host, SDHCI_GLI_9750_SW_CTRL); + misc_value = sdhci_readl(host, SDHCI_GLI_9750_MISC); + parameter_value = sdhci_readl(host, SDHCI_GLI_9750_TUNING_PARAMETERS); + control_value = sdhci_readl(host, SDHCI_GLI_9750_TUNING_CONTROL); + + driving_value &= ~(SDHCI_GLI_9750_DRIVING_1); + driving_value &= ~(SDHCI_GLI_9750_DRIVING_2); + driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_1, + GLI_9750_DRIVING_1_VALUE); + driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2, + GLI_9750_DRIVING_2_VALUE); + driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST); + driving_value |= SDHCI_GLI_9750_SEL_2; + sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING); + + sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4; + sw_ctrl_value |= FIELD_PREP(SDHCI_GLI_9750_SW_CTRL_4, + GLI_9750_SW_CTRL_4_VALUE); + sdhci_writel(host, sw_ctrl_value, SDHCI_GLI_9750_SW_CTRL); + + /* reset the tuning flow after reinit and before starting tuning */ + pll_value &= ~SDHCI_GLI_9750_PLL_TX2_INV; + pll_value &= ~SDHCI_GLI_9750_PLL_TX2_DLY; + pll_value |= FIELD_PREP(SDHCI_GLI_9750_PLL_TX2_INV, + GLI_9750_PLL_TX2_INV_VALUE); + pll_value |= FIELD_PREP(SDHCI_GLI_9750_PLL_TX2_DLY, + GLI_9750_PLL_TX2_DLY_VALUE); + + misc_value &= ~SDHCI_GLI_9750_MISC_TX1_INV; + misc_value &= ~SDHCI_GLI_9750_MISC_RX_INV; + misc_value &= ~SDHCI_GLI_9750_MISC_TX1_DLY; + misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_TX1_INV, + GLI_9750_MISC_TX1_INV_VALUE); + misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV, + GLI_9750_MISC_RX_INV_VALUE); + misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_TX1_DLY, + GLI_9750_MISC_TX1_DLY_VALUE); + + parameter_value &= ~SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY; + parameter_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY, + GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE); + + control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1; + control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2; + control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1, + GLI_9750_TUNING_CONTROL_GLITCH_1_VALUE); + control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2, + GLI_9750_TUNING_CONTROL_GLITCH_2_VALUE); + + sdhci_writel(host, pll_value, SDHCI_GLI_9750_PLL); + sdhci_writel(host, misc_value, SDHCI_GLI_9750_MISC); + + /* disable tuned clk */ + ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl2 &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); + + /* enable tuning 
parameters control */ + control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_EN; + control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_EN, + GLI_9750_TUNING_CONTROL_EN_ON); + sdhci_writel(host, control_value, SDHCI_GLI_9750_TUNING_CONTROL); + + /* write tuning parameters */ + sdhci_writel(host, parameter_value, SDHCI_GLI_9750_TUNING_PARAMETERS); + + /* disable tuning parameters control */ + control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_EN; + control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_EN, + GLI_9750_TUNING_CONTROL_EN_OFF); + sdhci_writel(host, control_value, SDHCI_GLI_9750_TUNING_CONTROL); + + /* clear tuned clk */ + ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl2 &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); + + gl9750_wt_off(host); +} + +static void gli_set_9750_rx_inv(struct sdhci_host *host, bool b) +{ + u32 misc_value; + + gl9750_wt_on(host); + + misc_value = sdhci_readl(host, SDHCI_GLI_9750_MISC); + misc_value &= ~SDHCI_GLI_9750_MISC_RX_INV; + if (b) { + misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV, + GLI_9750_MISC_RX_INV_ON); + } else { + misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV, + GLI_9750_MISC_RX_INV_OFF); + } + sdhci_writel(host, misc_value, SDHCI_GLI_9750_MISC); + + gl9750_wt_off(host); +} + +static int __sdhci_execute_tuning_9750(struct sdhci_host *host, u32 opcode) +{ + int i; + int rx_inv; + + for (rx_inv = 0; rx_inv < 2; rx_inv++) { + gli_set_9750_rx_inv(host, !!rx_inv); + sdhci_start_tuning(host); + + for (i = 0; i < GLI_MAX_TUNING_LOOP; i++) { + u16 ctrl; + + sdhci_send_tuning(host, opcode); + + if (!host->tuning_done) { + sdhci_abort_tuning(host, opcode); + break; + } + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { + if (ctrl & SDHCI_CTRL_TUNED_CLK) + return 0; /* Success! 
*/ + break; + } + } + } + if (!host->tuning_done) { + pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); + return -ETIMEDOUT; + } + + pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); + sdhci_reset_tuning(host); + + return -EAGAIN; +} + +static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + host->mmc->retune_period = 0; + if (host->tuning_mode == SDHCI_TUNING_MODE_1) + host->mmc->retune_period = host->tuning_count; + + gli_set_9750(host); + host->tuning_err = __sdhci_execute_tuning_9750(host, opcode); + sdhci_end_tuning(host); + + return 0; +} + +static void gl9750_disable_ssc_pll(struct sdhci_host *host) +{ + u32 pll; + + gl9750_wt_on(host); + pll = sdhci_readl(host, SDHCI_GLI_9750_PLL); + pll &= ~(SDHCI_GLI_9750_PLL_DIR | SDHCI_GLI_9750_PLLSSC_EN); + sdhci_writel(host, pll, SDHCI_GLI_9750_PLL); + gl9750_wt_off(host); +} + +static void gl9750_set_pll(struct sdhci_host *host, u8 dir, u16 ldiv, u8 pdiv) +{ + u32 pll; + + gl9750_wt_on(host); + pll = sdhci_readl(host, SDHCI_GLI_9750_PLL); + pll &= ~(SDHCI_GLI_9750_PLL_LDIV | + SDHCI_GLI_9750_PLL_PDIV | + SDHCI_GLI_9750_PLL_DIR); + pll |= FIELD_PREP(SDHCI_GLI_9750_PLL_LDIV, ldiv) | + FIELD_PREP(SDHCI_GLI_9750_PLL_PDIV, pdiv) | + FIELD_PREP(SDHCI_GLI_9750_PLL_DIR, dir); + sdhci_writel(host, pll, SDHCI_GLI_9750_PLL); + gl9750_wt_off(host); + + /* wait for pll stable */ + mdelay(1); +} + +static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm) +{ + u32 pll; + u32 ssc; + + gl9750_wt_on(host); + pll = sdhci_readl(host, SDHCI_GLI_9750_PLL); + ssc = sdhci_readl(host, SDHCI_GLI_9750_PLLSSC); + pll &= ~(SDHCI_GLI_9750_PLLSSC_STEP | + SDHCI_GLI_9750_PLLSSC_EN); + ssc &= ~SDHCI_GLI_9750_PLLSSC_PPM; + pll |= FIELD_PREP(SDHCI_GLI_9750_PLLSSC_STEP, step) | + FIELD_PREP(SDHCI_GLI_9750_PLLSSC_EN, enable); + ssc |= FIELD_PREP(SDHCI_GLI_9750_PLLSSC_PPM, ppm); + sdhci_writel(host, ssc, SDHCI_GLI_9750_PLLSSC); + sdhci_writel(host, pll, SDHCI_GLI_9750_PLL); + gl9750_wt_off(host); +} + +static void gl9750_set_ssc_pll_205mhz(struct sdhci_host *host) +{ + /* set pll to 205MHz and enable ssc */ + gl9750_set_ssc(host, 0x1, 0x1F, 0xFFE7); + gl9750_set_pll(host, 0x1, 0x246, 0x0); +} + +static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct mmc_ios *ios = &host->mmc->ios; + u16 clk; + + host->mmc->actual_clock = 0; + + gl9750_disable_ssc_pll(host); + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) { + host->mmc->actual_clock = 205000000; + gl9750_set_ssc_pll_205mhz(host); + } + + sdhci_enable_clk(host, clk); +} + +static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot) +{ + int ret; + + ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + pr_warn("%s: enable PCI MSI failed, error=%d\n", + mmc_hostname(slot->host->mmc), ret); + return; + } + + slot->host->irq = pci_irq_vector(slot->chip->pdev, 0); +} + +static inline void gl9755_wt_on(struct pci_dev *pdev) +{ + u32 wt_value; + u32 wt_enable; + + pci_read_config_dword(pdev, PCI_GLI_9755_WT, &wt_value); + wt_enable = FIELD_GET(PCI_GLI_9755_WT_EN, wt_value); + + if (wt_enable == GLI_9755_WT_EN_ON) + return; + + wt_value &= ~PCI_GLI_9755_WT_EN; + wt_value |= FIELD_PREP(PCI_GLI_9755_WT_EN, GLI_9755_WT_EN_ON); + + pci_write_config_dword(pdev, 
PCI_GLI_9755_WT, wt_value); +} + +static inline void gl9755_wt_off(struct pci_dev *pdev) +{ + u32 wt_value; + u32 wt_enable; + + pci_read_config_dword(pdev, PCI_GLI_9755_WT, &wt_value); + wt_enable = FIELD_GET(PCI_GLI_9755_WT_EN, wt_value); + + if (wt_enable == GLI_9755_WT_EN_OFF) + return; + + wt_value &= ~PCI_GLI_9755_WT_EN; + wt_value |= FIELD_PREP(PCI_GLI_9755_WT_EN, GLI_9755_WT_EN_OFF); + + pci_write_config_dword(pdev, PCI_GLI_9755_WT, wt_value); +} + +static void gl9755_disable_ssc_pll(struct pci_dev *pdev) +{ + u32 pll; + + gl9755_wt_on(pdev); + pci_read_config_dword(pdev, PCI_GLI_9755_PLL, &pll); + pll &= ~(PCI_GLI_9755_PLL_DIR | PCI_GLI_9755_PLLSSC_EN); + pci_write_config_dword(pdev, PCI_GLI_9755_PLL, pll); + gl9755_wt_off(pdev); +} + +static void gl9755_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv) +{ + u32 pll; + + gl9755_wt_on(pdev); + pci_read_config_dword(pdev, PCI_GLI_9755_PLL, &pll); + pll &= ~(PCI_GLI_9755_PLL_LDIV | + PCI_GLI_9755_PLL_PDIV | + PCI_GLI_9755_PLL_DIR); + pll |= FIELD_PREP(PCI_GLI_9755_PLL_LDIV, ldiv) | + FIELD_PREP(PCI_GLI_9755_PLL_PDIV, pdiv) | + FIELD_PREP(PCI_GLI_9755_PLL_DIR, dir); + pci_write_config_dword(pdev, PCI_GLI_9755_PLL, pll); + gl9755_wt_off(pdev); + + /* wait for pll stable */ + mdelay(1); +} + +static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm) +{ + u32 pll; + u32 ssc; + + gl9755_wt_on(pdev); + pci_read_config_dword(pdev, PCI_GLI_9755_PLL, &pll); + pci_read_config_dword(pdev, PCI_GLI_9755_PLLSSC, &ssc); + pll &= ~(PCI_GLI_9755_PLLSSC_STEP | + PCI_GLI_9755_PLLSSC_EN); + ssc &= ~PCI_GLI_9755_PLLSSC_PPM; + pll |= FIELD_PREP(PCI_GLI_9755_PLLSSC_STEP, step) | + FIELD_PREP(PCI_GLI_9755_PLLSSC_EN, enable); + ssc |= FIELD_PREP(PCI_GLI_9755_PLLSSC_PPM, ppm); + pci_write_config_dword(pdev, PCI_GLI_9755_PLLSSC, ssc); + pci_write_config_dword(pdev, PCI_GLI_9755_PLL, pll); + gl9755_wt_off(pdev); +} + +static void gl9755_set_ssc_pll_205mhz(struct pci_dev *pdev) +{ + /* set pll to 205MHz and enable ssc */ + gl9755_set_ssc(pdev, 0x1, 0x1F, 0xFFE7); + gl9755_set_pll(pdev, 0x1, 0x246, 0x0); +} + +static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct mmc_ios *ios = &host->mmc->ios; + struct pci_dev *pdev; + u16 clk; + + pdev = slot->chip->pdev; + host->mmc->actual_clock = 0; + + gl9755_disable_ssc_pll(pdev); + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) { + host->mmc->actual_clock = 205000000; + gl9755_set_ssc_pll_205mhz(pdev); + } + + sdhci_enable_clk(host, clk); +} + +static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot) +{ + struct sdhci_host *host = slot->host; + + gli_pcie_enable_msi(slot); + slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + sdhci_enable_v4_mode(host); + + return 0; +} + +static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot) +{ + struct sdhci_host *host = slot->host; + + gli_pcie_enable_msi(slot); + slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + sdhci_enable_v4_mode(host); + + return 0; +} + +static void sdhci_gli_voltage_switch(struct sdhci_host *host) +{ + /* + * According to Section 3.6.1 signal voltage switch procedure in + * SD Host Controller Simplified Spec. 4.20, steps 6~8 are as + * follows: + * (6) Set 1.8V Signal Enable in the Host Control 2 register. + * (7) Wait 5ms. 1.8V voltage regulator shall be stable within this + * period. 
+ * (8) If 1.8V Signal Enable is cleared by Host Controller, go to + * step (12). + * + * Wait 5ms after set 1.8V signal enable in Host Control 2 register + * to ensure 1.8V signal enable bit is set by GL9750/GL9755. + * + * ...however, the controller in the NUC10i3FNK4 (a 9755) requires + * slightly longer than 5ms before the control register reports that + * 1.8V is ready, and far longer still before the card will actually + * work reliably. + */ + usleep_range(100000, 110000); +} + +static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + gli_set_9750(host); +} + +static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg) +{ + u32 value; + + value = readl(host->ioaddr + reg); + if (unlikely(reg == SDHCI_MAX_CURRENT && !(value & 0xff))) + value |= 0xc8; + + return value; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + + pci_free_irq_vectors(slot->chip->pdev); + gli_pcie_enable_msi(slot); + + return sdhci_pci_resume_host(chip); +} + +static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + int ret; + + ret = sdhci_pci_gli_resume(chip); + if (ret) + return ret; + + return cqhci_resume(slot->host->mmc); +} + +static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + int ret; + + ret = cqhci_suspend(slot->host->mmc); + if (ret) + return ret; + + return sdhci_suspend_host(slot->host); +} +#endif + +static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 val; + + val = sdhci_readl(host, SDHCI_GLI_9763E_HS400_ES_REG); + if (ios->enhanced_strobe) + val |= SDHCI_GLI_9763E_HS400_ES_BIT; + else + val &= ~SDHCI_GLI_9763E_HS400_ES_BIT; + + sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG); +} + +static void sdhci_set_gl9763e_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if (timing == MMC_TIMING_MMC_HS200) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_MMC_HS) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_MMC_DDR52) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_GLI_9763E_CTRL_HS400; + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_gl9763e_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_gl9763e_cqe_pre_enable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 value; + + value = cqhci_readl(cq_host, CQHCI_CFG); + value |= CQHCI_ENABLE; + cqhci_writel(cq_host, value, CQHCI_CFG); +} + +static void sdhci_gl9763e_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + sdhci_writew(host, GLI_9763E_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE); + sdhci_cqe_enable(mmc); +} + +static u32 sdhci_gl9763e_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static void sdhci_gl9763e_cqe_post_disable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct cqhci_host *cq_host = mmc->cqe_private; + u32 value; + + value = cqhci_readl(cq_host, 
CQHCI_CFG); + value &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, value, CQHCI_CFG); + sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); +} + +static const struct cqhci_host_ops sdhci_gl9763e_cqhci_ops = { + .enable = sdhci_gl9763e_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_gl9763e_dumpregs, + .pre_enable = sdhci_gl9763e_cqe_pre_enable, + .post_disable = sdhci_gl9763e_cqe_post_disable, +}; + +static int gl9763e_add_host(struct sdhci_pci_slot *slot) +{ + struct device *dev = &slot->chip->pdev->dev; + struct sdhci_host *host = slot->host; + struct cqhci_host *cq_host; + bool dma64; + int ret; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_GLI_9763E_CQE_BASE_ADDR; + cq_host->ops = &sdhci_gl9763e_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + +static void sdhci_gl9763e_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + sdhci_reset(host, mask); +} + +static void gli_set_gl9763e(struct sdhci_pci_slot *slot) +{ + struct pci_dev *pdev = slot->chip->pdev; + u32 value; + + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); + value &= ~GLI_9763E_VHS_REV; + value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W); + pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value); + + pci_read_config_dword(pdev, PCIE_GLI_9763E_SCR, &value); + value |= GLI_9763E_SCR_AXI_REQ; + pci_write_config_dword(pdev, PCIE_GLI_9763E_SCR, value); + + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); + value &= ~GLI_9763E_VHS_REV; + value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); + pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value); +} + +static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot) +{ + struct pci_dev *pdev = slot->chip->pdev; + struct sdhci_host *host = slot->host; + u32 value; + + host->mmc->caps |= MMC_CAP_8_BIT_DATA | + MMC_CAP_1_8V_DDR | + MMC_CAP_NONREMOVABLE; + host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR | + MMC_CAP2_HS400_1_8V | + MMC_CAP2_HS400_ES | + MMC_CAP2_NO_SDIO | + MMC_CAP2_NO_SD; + + pci_read_config_dword(pdev, PCIE_GLI_9763E_MB, &value); + if (!(value & GLI_9763E_MB_CMDQ_OFF)) + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + + gli_pcie_enable_msi(slot); + host->mmc_host_ops.hs400_enhanced_strobe = + gl9763e_hs400_enhanced_strobe; + gli_set_gl9763e(slot); + sdhci_enable_v4_mode(host); + + return 0; +} + +static const struct sdhci_ops sdhci_gl9755_ops = { + .set_clock = sdhci_gl9755_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .voltage_switch = sdhci_gli_voltage_switch, +}; + +const struct sdhci_pci_fixes sdhci_gl9755 = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50, + .probe_slot = gli_probe_slot_gl9755, + .ops = &sdhci_gl9755_ops, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_gli_resume, +#endif +}; + +static const struct sdhci_ops sdhci_gl9750_ops = { + .read_l = sdhci_gl9750_readl, + .set_clock = 
sdhci_gl9750_set_clock,
+	.enable_dma = sdhci_pci_enable_dma,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_gl9750_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+	.voltage_switch = sdhci_gli_voltage_switch,
+	.platform_execute_tuning = gl9750_execute_tuning,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9750 = {
+	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
+	.probe_slot = gli_probe_slot_gl9750,
+	.ops = &sdhci_gl9750_ops,
+#ifdef CONFIG_PM_SLEEP
+	.resume = sdhci_pci_gli_resume,
+#endif
+};
+
+static const struct sdhci_ops sdhci_gl9763e_ops = {
+	.set_clock = sdhci_set_clock,
+	.enable_dma = sdhci_pci_enable_dma,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_gl9763e_reset,
+	.set_uhs_signaling = sdhci_set_gl9763e_signaling,
+	.voltage_switch = sdhci_gli_voltage_switch,
+	.irq = sdhci_gl9763e_cqhci_irq,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9763e = {
+	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.probe_slot = gli_probe_slot_gl9763e,
+	.ops = &sdhci_gl9763e_ops,
+#ifdef CONFIG_PM_SLEEP
+	.resume = sdhci_cqhci_gli_resume,
+	.suspend = sdhci_cqhci_gli_suspend,
+#endif
+	.add_host = gl9763e_add_host,
+};
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
new file mode 100644
index 000000000..72234790a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo
+ *	    Adam Lee
+ *	    Ernest Zhang
+ */
+
+#include <linux/pci.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_MISC_CTRL2 0xF0
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_MISC_CTRL 0x1C0
+#define O2_SD_PWR_FORCE_L0 0x0002
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_MISC_SETTING 0x308
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
+#define O2_SD_HW_TUNING_DISABLE BIT(4)
+
+#define O2_PLL_DLL_WDT_CONTROL1 0x1CC
+#define O2_PLL_FORCE_ACTIVE BIT(18)
+#define O2_PLL_LOCK_STATUS BIT(14)
+#define O2_PLL_SOFT_RESET BIT(12)
+#define O2_DLL_LOCK_STATUS BIT(11)
+
+#define O2_SD_DETECT_SETTING 0x324
+
+static const u32 dmdn_table[] = {0x2B1C0000,
+	0x2C1A0000, 0x371B0000, 0x35100000};
+#define DMDN_SZ ARRAY_SIZE(dmdn_table)
+
+struct o2_host {
+	u8 dll_adjust_count;
+};
+
+static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host)
+{
+	ktime_t timeout;
+	u32 scratch32;
+
+	/* Wait max 50 ms */
+	timeout = ktime_add_ms(ktime_get(), 50);
+	while (1) {
+		bool timedout = ktime_after(ktime_get(), timeout);
+
+		scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE);
+		if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT
+		    == (scratch32 & SDHCI_CD_LVL)
>> SDHCI_CD_LVL_SHIFT) + break; + + if (timedout) { + pr_err("%s: Card Detect debounce never finished.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + return; + } + udelay(10); + } +} + +static void sdhci_o2_enable_internal_clock(struct sdhci_host *host) +{ + ktime_t timeout; + u16 scratch; + u32 scratch32; + + /* PLL software reset */ + scratch32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1); + scratch32 |= O2_PLL_SOFT_RESET; + sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1); + udelay(1); + scratch32 &= ~(O2_PLL_SOFT_RESET); + sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1); + + /* PLL force active */ + scratch32 |= O2_PLL_FORCE_ACTIVE; + sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + scratch = sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1); + if (scratch & O2_PLL_LOCK_STATUS) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + goto out; + } + udelay(10); + } + + /* Wait for card detect finish */ + udelay(1); + sdhci_o2_wait_card_detect_stable(host); + +out: + /* Cancel PLL force active */ + scratch32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1); + scratch32 &= ~O2_PLL_FORCE_ACTIVE; + sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1); +} + +static int sdhci_o2_get_cd(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS)) + sdhci_o2_enable_internal_clock(host); + else + sdhci_o2_wait_card_detect_stable(host); + + return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +} + +static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value) +{ + u32 scratch_32; + + pci_read_config_dword(chip->pdev, + O2_SD_PLL_SETTING, &scratch_32); + + scratch_32 &= 0x0000FFFF; + scratch_32 |= value; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); +} + +static u32 sdhci_o2_pll_dll_wdt_control(struct sdhci_host *host) +{ + return sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1); +} + +/* + * This function is used to detect dll lock status. + * Since the dll lock status bit will toggle randomly + * with very short interval which needs to be polled + * as fast as possible. Set sleep_us as 1 microsecond. + */ +static int sdhci_o2_wait_dll_detect_lock(struct sdhci_host *host) +{ + u32 scratch32 = 0; + + return readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host, + scratch32, !(scratch32 & O2_DLL_LOCK_STATUS), 1, 1000000); +} + +static void sdhci_o2_set_tuning_mode(struct sdhci_host *host) +{ + u16 reg; + + /* enable hardware tuning */ + reg = sdhci_readw(host, O2_SD_VENDOR_SETTING); + reg &= ~O2_SD_HW_TUNING_DISABLE; + sdhci_writew(host, reg, O2_SD_VENDOR_SETTING); +} + +static void __sdhci_o2_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + int i; + + sdhci_send_tuning(host, opcode); + + for (i = 0; i < 150; i++) { + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { + if (ctrl & SDHCI_CTRL_TUNED_CLK) { + host->tuning_done = true; + return; + } + pr_warn("%s: HW tuning failed !\n", + mmc_hostname(host->mmc)); + break; + } + + mdelay(1); + } + + pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); + sdhci_reset_tuning(host); +} + +/* + * This function is used to fix o2 dll shift issue. 
+ * It isn't necessary to detect card present before recovery. + * Firstly, it is used by bht emmc card, which is embedded. + * Second, before call recovery card present will be detected + * outside of the execute tuning function. + */ +static int sdhci_o2_dll_recovery(struct sdhci_host *host) +{ + int ret = 0; + u8 scratch_8 = 0; + u32 scratch_32 = 0; + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct sdhci_pci_chip *chip = slot->chip; + struct o2_host *o2_host = sdhci_pci_priv(slot); + + /* UnLock WP */ + pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch_8); + scratch_8 &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); + while (o2_host->dll_adjust_count < DMDN_SZ && !ret) { + /* Disable clock */ + sdhci_writeb(host, 0, SDHCI_CLOCK_CONTROL); + + /* PLL software reset */ + scratch_32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1); + scratch_32 |= O2_PLL_SOFT_RESET; + sdhci_writel(host, scratch_32, O2_PLL_DLL_WDT_CONTROL1); + + pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG4, + &scratch_32); + /* Enable Base Clk setting change */ + scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET; + pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG4, scratch_32); + o2_pci_set_baseclk(chip, dmdn_table[o2_host->dll_adjust_count]); + + /* Enable internal clock */ + scratch_8 = SDHCI_CLOCK_INT_EN; + sdhci_writeb(host, scratch_8, SDHCI_CLOCK_CONTROL); + + if (sdhci_o2_get_cd(host->mmc)) { + /* + * need wait at least 5ms for dll status stable, + * after enable internal clock + */ + usleep_range(5000, 6000); + if (sdhci_o2_wait_dll_detect_lock(host)) { + scratch_8 |= SDHCI_CLOCK_CARD_EN; + sdhci_writeb(host, scratch_8, + SDHCI_CLOCK_CONTROL); + ret = 1; + } else { + pr_warn("%s: DLL unlocked when dll_adjust_count is %d.\n", + mmc_hostname(host->mmc), + o2_host->dll_adjust_count); + } + } else { + pr_err("%s: card present detect failed.\n", + mmc_hostname(host->mmc)); + break; + } + + o2_host->dll_adjust_count++; + } + if (!ret && o2_host->dll_adjust_count == DMDN_SZ) + pr_err("%s: DLL adjust over max times\n", + mmc_hostname(host->mmc)); + /* Lock WP */ + pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch_8); + scratch_8 |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); + return ret; +} + +static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int current_bus_width = 0; + u32 scratch32 = 0; + u16 scratch = 0; + + /* + * This handler only implements the eMMC tuning that is specific to + * this controller. Fall back to the standard method for other TIMING. 
+ */ + if ((host->timing != MMC_TIMING_MMC_HS200) && + (host->timing != MMC_TIMING_UHS_SDR104)) + return sdhci_execute_tuning(mmc, opcode); + + if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) && + (opcode != MMC_SEND_TUNING_BLOCK))) + return -EINVAL; + + /* Force power mode enter L0 */ + scratch = sdhci_readw(host, O2_SD_MISC_CTRL); + scratch |= O2_SD_PWR_FORCE_L0; + sdhci_writew(host, scratch, O2_SD_MISC_CTRL); + + /* wait DLL lock, timeout value 5ms */ + if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host, + scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000)) + pr_warn("%s: DLL can't lock in 5ms after force L0 during tuning.\n", + mmc_hostname(host->mmc)); + /* + * Judge the tuning reason, whether caused by dll shift + * If cause by dll shift, should call sdhci_o2_dll_recovery + */ + if (!sdhci_o2_wait_dll_detect_lock(host)) + if (!sdhci_o2_dll_recovery(host)) { + pr_err("%s: o2 dll recovery failed\n", + mmc_hostname(host->mmc)); + return -EINVAL; + } + /* + * o2 sdhci host didn't support 8bit emmc tuning + */ + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { + current_bus_width = mmc->ios.bus_width; + mmc->ios.bus_width = MMC_BUS_WIDTH_4; + sdhci_set_bus_width(host, MMC_BUS_WIDTH_4); + } + + sdhci_o2_set_tuning_mode(host); + + sdhci_start_tuning(host); + + __sdhci_o2_execute_tuning(host, opcode); + + sdhci_end_tuning(host); + + if (current_bus_width == MMC_BUS_WIDTH_8) { + mmc->ios.bus_width = MMC_BUS_WIDTH_8; + sdhci_set_bus_width(host, current_bus_width); + } + + /* Cancel force power mode enter L0 */ + scratch = sdhci_readw(host, O2_SD_MISC_CTRL); + scratch &= ~(O2_SD_PWR_FORCE_L0); + sdhci_writew(host, scratch, O2_SD_MISC_CTRL); + + sdhci_reset(host, SDHCI_RESET_CMD); + sdhci_reset(host, SDHCI_RESET_DATA); + + host->flags &= ~SDHCI_HS400_TUNING; + return 0; +} + +static void o2_pci_led_enable(struct sdhci_pci_chip *chip) +{ + int ret; + u32 scratch_32; + + /* Set led of SD host function enable */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG0, &scratch_32); + if (ret) + return; + + scratch_32 &= ~O2_SD_FREG0_LEDOFF; + pci_write_config_dword(chip->pdev, + O2_SD_FUNC_REG0, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_TEST_REG, &scratch_32); + if (ret) + return; + + scratch_32 |= O2_SD_LED_ENABLE; + pci_write_config_dword(chip->pdev, + O2_SD_TEST_REG, scratch_32); +} + +static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) +{ + u32 scratch_32; + int ret; + /* Improve write performance for SD3.0 */ + ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14)); + pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32); + + /* Enable Link abnormal reset generating Reset */ + ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 19) | (1 << 11)); + scratch_32 |= (1 << 10); + pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32); + + /* set card power over current protection */ + ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32); + if (ret) + return; + scratch_32 |= (1 << 4); + pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32); + + /* adjust the output delay for SD mode */ + pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492); + + /* Set the output voltage setting of Aux 1.2v LDO */ + ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(3 << 12); + 
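+	/*
+	 * Illustrative sketch, not part of the original driver: each
+	 * quirk in this helper (including the LD0 update completed just
+	 * below) follows the same PCI config-space read-modify-write
+	 * shape:
+	 *
+	 *	u32 v;
+	 *	pci_read_config_dword(chip->pdev, REG, &v);  // fetch
+	 *	v &= ~CLEAR_MASK;                            // drop stale field bits
+	 *	v |= NEW_BITS;                               // apply the tuned value
+	 *	pci_write_config_dword(chip->pdev, REG, v);  // commit
+	 *
+	 * REG, CLEAR_MASK and NEW_BITS stand in for the register offsets
+	 * and magic constants used above; they are placeholders, not
+	 * driver symbols.
+	 */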
pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32); + + /* Set Max power supply capability of SD host */ + ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x01FE); + scratch_32 |= 0x00CC; + pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32); + /* Set DLL Tuning Window */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_TUNING_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000000FF); + scratch_32 |= 0x00000066; + pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32); + + /* Set UHS2 T_EIDLE */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_UHS2_L1_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000000FC); + scratch_32 |= 0x00000084; + pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32); + + /* Set UHS2 Termination */ + ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 21) | (1 << 30)); + + pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32); + + /* Set L1 Entrance Timer */ + ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0xf0000000); + scratch_32 |= 0x30000000; + pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_MISC_CTRL4, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000f0000); + scratch_32 |= 0x00080000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32); +} + +static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip, + struct sdhci_host *host) +{ + int ret; + + ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI); + if (!ret) { + pr_info("%s: unsupport msi, use INTx irq\n", + mmc_hostname(host->mmc)); + return; + } + + ret = pci_alloc_irq_vectors(chip->pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + pr_err("%s: enable PCI MSI failed, err=%d\n", + mmc_hostname(host->mmc), ret); + return; + } + + host->irq = pci_irq_vector(chip->pdev, 0); +} + +static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk) +{ + /* Enable internal clock */ + clk |= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + sdhci_o2_enable_internal_clock(host); + if (sdhci_o2_get_cd(host->mmc)) { + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + } +} + +static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + u8 scratch; + u32 scratch_32; + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct sdhci_pci_chip *chip = slot->chip; + + host->mmc->actual_clock = 0; + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) { + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); + + scratch &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + + pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32); + + if ((scratch_32 & 0xFFFF0000) != 0x2c280000) + o2_pci_set_baseclk(chip, 0x2c280000); + + pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); + + scratch |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + } + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_o2_enable_clk(host, clk); +} + +static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) +{ + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + struct o2_host *o2_host = sdhci_pci_priv(slot); + u32 reg, 
caps; + int ret; + + chip = slot->chip; + host = slot->host; + + o2_host->dll_adjust_count = 0; + caps = sdhci_readl(host, SDHCI_CAPABILITIES); + + /* + * mmc_select_bus_width() will test the bus to determine the actual bus + * width. + */ + if (caps & SDHCI_CAN_DO_8BIT) + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + + switch (chip->pdev->device) { + case PCI_DEVICE_ID_O2_SDS0: + case PCI_DEVICE_ID_O2_SEABIRD0: + case PCI_DEVICE_ID_O2_SEABIRD1: + case PCI_DEVICE_ID_O2_SDS1: + case PCI_DEVICE_ID_O2_FUJIN2: + reg = sdhci_readl(host, O2_SD_VENDOR_SETTING); + if (reg & 0x1) + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + + sdhci_pci_o2_enable_msi(chip, host); + + if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) { + ret = pci_read_config_dword(chip->pdev, + O2_SD_MISC_SETTING, ®); + if (ret) + return -EIO; + if (reg & (1 << 4)) { + pr_info("%s: emmc 1.8v flag is set, force 1.8v signaling voltage\n", + mmc_hostname(host->mmc)); + host->flags &= ~SDHCI_SIGNALING_330; + host->flags |= SDHCI_SIGNALING_180; + host->mmc->caps2 |= MMC_CAP2_NO_SD; + host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + pci_write_config_dword(chip->pdev, + O2_SD_DETECT_SETTING, 3); + } + + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; + } + + if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) { + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; + host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + } + + host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning; + + if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2) + break; + /* set dll watch dog timer */ + reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2); + reg |= (1 << 12); + sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2); + + break; + default: + break; + } + + return 0; +} + +static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) +{ + int ret; + u8 scratch; + u32 scratch_32; + + switch (chip->pdev->device) { + case PCI_DEVICE_ID_O2_8220: + case PCI_DEVICE_ID_O2_8221: + case PCI_DEVICE_ID_O2_8320: + case PCI_DEVICE_ID_O2_8321: + /* This extra setup is required due to broken ADMA. */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + + /* Set Multi 3 to VCC3V# */ + pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); + + /* Disable CLK_REQ# support after media DET */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_CLKREQ, &scratch); + if (ret) + return ret; + scratch |= 0x20; + pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); + + /* Choose capabilities, enable SDMA. We have to write 0x01 + * to the capabilities register first to unlock it. 
+	 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev,
+					   O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev,
+					   O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+		break;
+	case PCI_DEVICE_ID_O2_SDS0:
+	case PCI_DEVICE_ID_O2_SDS1:
+	case PCI_DEVICE_ID_O2_FUJIN2:
+		/* UnLock WP */
+		ret = pci_read_config_byte(chip->pdev,
+					   O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* DevId 8520 chips with subId 0x11 or 0x12 are supported */
+		if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) {
+			ret = pci_read_config_dword(chip->pdev,
+						    O2_SD_FUNC_REG0,
+						    &scratch_32);
+			scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
+
+			/* Check whether subId is 0x11 or 0x12 */
+			if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) {
+				scratch_32 = 0x25100000;
+
+				o2_pci_set_baseclk(chip, scratch_32);
+				ret = pci_read_config_dword(chip->pdev,
+							    O2_SD_FUNC_REG4,
+							    &scratch_32);
+
+				/* Enable Base Clk setting change */
+				scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
+				pci_write_config_dword(chip->pdev,
+						       O2_SD_FUNC_REG4,
+						       scratch_32);
+
+				/* Set Tuning Window to 4 */
+				pci_write_config_byte(chip->pdev,
+						      O2_SD_TUNING_CTRL, 0x44);
+
+				break;
+			}
+		}
+
+		/* Enable 8520 led function */
+		o2_pci_led_enable(chip);
+
+		/* Set timeout CLK */
+		ret = pci_read_config_dword(chip->pdev,
+					    O2_SD_CLK_SETTING, &scratch_32);
+		if (ret)
+			return ret;
+
+		scratch_32 &= ~(0xFF00);
+		scratch_32 |= 0x07E0C800;
+		pci_write_config_dword(chip->pdev,
+				       O2_SD_CLK_SETTING, scratch_32);
+
+		ret = pci_read_config_dword(chip->pdev,
+					    O2_SD_CLKREQ, &scratch_32);
+		if (ret)
+			return ret;
+		scratch_32 |= 0x3;
+		pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+
+		ret = pci_read_config_dword(chip->pdev,
+					    O2_SD_PLL_SETTING, &scratch_32);
+		if (ret)
+			return ret;
+
+		scratch_32 &= ~(0x1F3F070E);
+		scratch_32 |= 0x18270106;
+		pci_write_config_dword(chip->pdev,
+				       O2_SD_PLL_SETTING, scratch_32);
+
+		/* Disable UHS1 function */
+		ret = pci_read_config_dword(chip->pdev,
+					    O2_SD_CAP_REG2, &scratch_32);
+		if (ret)
+			return ret;
+		scratch_32 &= ~(0xE0);
+		pci_write_config_dword(chip->pdev,
+				       O2_SD_CAP_REG2, scratch_32);
+
+		if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2)
+			sdhci_pci_o2_fujin2_pci_init(chip);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev,
+					   O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+		break;
+	case PCI_DEVICE_ID_O2_SEABIRD0:
+	case PCI_DEVICE_ID_O2_SEABIRD1:
+		/* UnLock WP */
+		ret = pci_read_config_byte(chip->pdev,
+					   O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		ret = pci_read_config_dword(chip->pdev,
+					    O2_SD_PLL_SETTING, &scratch_32);
+
+		if ((scratch_32 & 0xff000000) == 0x01000000) {
+			scratch_32 &= 0x0000FFFF;
+			scratch_32 |= 0x1F340000;
+
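+			/*
+			 * Illustrative note, not part of the original
+			 * driver: this branch and the else branch below
+			 * open-code the same update that
+			 * o2_pci_set_baseclk() performs, i.e.
+			 *
+			 *	scratch_32 &= 0x0000FFFF; // keep bits 15:0
+			 *	scratch_32 |= value;      // base clock, bits 31:16
+			 *
+			 * with value = 0x1F340000 here and 0x25100000 in
+			 * the else branch, chosen from the PLL setting
+			 * read back above.
+			 */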
pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + } else { + scratch_32 &= 0x0000FFFF; + scratch_32 |= 0x25100000; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG4, + &scratch_32); + scratch_32 |= (1 << 22); + pci_write_config_dword(chip->pdev, + O2_SD_FUNC_REG4, scratch_32); + } + + /* Set Tuning Windows to 5 */ + pci_write_config_byte(chip->pdev, + O2_SD_TUNING_CTRL, 0x55); + //Adjust 1st and 2nd CD debounce time + pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32); + scratch_32 &= 0xFFE7FFFF; + scratch_32 |= 0x00180000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32); + pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1); + /* Lock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + break; + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) +{ + sdhci_pci_o2_probe(chip); + return sdhci_pci_resume_host(chip); +} +#endif + +static const struct sdhci_ops sdhci_pci_o2_ops = { + .set_clock = sdhci_pci_o2_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +const struct sdhci_pci_fixes sdhci_o2 = { + .probe = sdhci_pci_o2_probe, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, + .probe_slot = sdhci_pci_o2_probe_slot, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_o2_resume, +#endif + .ops = &sdhci_pci_o2_ops, + .priv_size = sizeof(struct o2_host), +}; diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h new file mode 100644 index 000000000..dcd99d505 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SDHCI_PCI_H +#define __SDHCI_PCI_H + +/* + * PCI device IDs, sub IDs + */ + +#define PCI_DEVICE_ID_O2_SDS0 0x8420 +#define PCI_DEVICE_ID_O2_SDS1 0x8421 +#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 +#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 +#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 + +#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 +#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a +#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 +#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 +#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 +#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 +#define PCI_DEVICE_ID_INTEL_BSW_EMMC 0x2294 +#define PCI_DEVICE_ID_INTEL_BSW_SDIO 0x2295 +#define PCI_DEVICE_ID_INTEL_BSW_SD 0x2296 +#define PCI_DEVICE_ID_INTEL_MRFLD_MMC 0x1190 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa +#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb +#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 +#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 +#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 +#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b +#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c +#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d +#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db +#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db +#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca +#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc +#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 +#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca +#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc +#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0 +#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca 
+#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc +#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0 +#define PCI_DEVICE_ID_INTEL_GLK_SD 0x31ca +#define PCI_DEVICE_ID_INTEL_GLK_EMMC 0x31cc +#define PCI_DEVICE_ID_INTEL_GLK_SDIO 0x31d0 +#define PCI_DEVICE_ID_INTEL_CNP_EMMC 0x9dc4 +#define PCI_DEVICE_ID_INTEL_CNP_SD 0x9df5 +#define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375 +#define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4 +#define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8 +#define PCI_DEVICE_ID_INTEL_EHL_EMMC 0x4b47 +#define PCI_DEVICE_ID_INTEL_EHL_SD 0x4b48 +#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4 +#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5 +#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5 +#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4 +#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8 +#define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4 +#define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8 +#define PCI_DEVICE_ID_INTEL_ADL_EMMC 0x54c4 + +#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000 +#define PCI_DEVICE_ID_VIA_95D0 0x95d0 +#define PCI_DEVICE_ID_REALTEK_5250 0x5250 + +#define PCI_SUBDEVICE_ID_NI_7884 0x7884 +#define PCI_SUBDEVICE_ID_NI_78E3 0x78e3 + +#define PCI_VENDOR_ID_ARASAN 0x16e6 +#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670 + +#define PCI_DEVICE_ID_SYNOPSYS_DWC_MSHC 0xc202 + +#define PCI_DEVICE_ID_GLI_9755 0x9755 +#define PCI_DEVICE_ID_GLI_9750 0x9750 +#define PCI_DEVICE_ID_GLI_9763E 0xe763 + +/* + * PCI device class and mask + */ + +#define SYSTEM_SDHCI (PCI_CLASS_SYSTEM_SDHCI << 8) +#define PCI_CLASS_MASK 0xFFFF00 + +/* + * Macros for PCI device-description + */ + +#define _PCI_VEND(vend) PCI_VENDOR_ID_##vend +#define _PCI_DEV(vend, dev) PCI_DEVICE_ID_##vend##_##dev +#define _PCI_SUBDEV(subvend, subdev) PCI_SUBDEVICE_ID_##subvend##_##subdev + +#define SDHCI_PCI_DEVICE(vend, dev, cfg) { \ + .vendor = _PCI_VEND(vend), .device = _PCI_DEV(vend, dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ + .driver_data = (kernel_ulong_t)&(sdhci_##cfg) \ +} + +#define SDHCI_PCI_SUBDEVICE(vend, dev, subvend, subdev, cfg) { \ + .vendor = _PCI_VEND(vend), .device = _PCI_DEV(vend, dev), \ + .subvendor = _PCI_VEND(subvend), \ + .subdevice = _PCI_SUBDEV(subvend, subdev), \ + .driver_data = (kernel_ulong_t)&(sdhci_##cfg) \ +} + +#define SDHCI_PCI_DEVICE_CLASS(vend, cl, cl_msk, cfg) { \ + .vendor = _PCI_VEND(vend), .device = PCI_ANY_ID, \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ + .class = (cl), .class_mask = (cl_msk), \ + .driver_data = (kernel_ulong_t)&(sdhci_##cfg) \ +} + +/* + * PCI registers + */ + +#define PCI_SDHCI_IFPIO 0x00 +#define PCI_SDHCI_IFDMA 0x01 +#define PCI_SDHCI_IFVENDOR 0x02 + +#define PCI_SLOT_INFO 0x40 /* 8 bits */ +#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) +#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 + +#define MAX_SLOTS 8 + +struct sdhci_pci_chip; +struct sdhci_pci_slot; + +struct sdhci_pci_fixes { + unsigned int quirks; + unsigned int quirks2; + bool allow_runtime_pm; + bool own_cd_for_runtime_pm; + + int (*probe) (struct sdhci_pci_chip *); + + int (*probe_slot) (struct sdhci_pci_slot *); + int (*add_host) (struct sdhci_pci_slot *); + void (*remove_slot) (struct sdhci_pci_slot *, int); + +#ifdef CONFIG_PM_SLEEP + int (*suspend) (struct sdhci_pci_chip *); + int (*resume) (struct sdhci_pci_chip *); +#endif +#ifdef CONFIG_PM + int (*runtime_suspend) (struct sdhci_pci_chip *); + int (*runtime_resume) (struct sdhci_pci_chip *); +#endif + + const struct sdhci_ops *ops; + size_t priv_size; +}; + +struct sdhci_pci_slot { + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + struct sdhci_pci_data *data; + + int rst_n_gpio; 
+	int cd_gpio;
+	int cd_irq;
+
+	int cd_idx;
+	bool cd_override_level;
+
+	void (*hw_reset)(struct sdhci_host *host);
+	unsigned long private[] ____cacheline_aligned;
+};
+
+struct sdhci_pci_chip {
+	struct pci_dev *pdev;
+
+	unsigned int quirks;
+	unsigned int quirks2;
+	bool allow_runtime_pm;
+	bool pm_retune;
+	bool rpm_retune;
+	const struct sdhci_pci_fixes *fixes;
+
+	int num_slots;	/* Slots on controller */
+	struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
+};
+
+static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
+{
+	return (void *)slot->private;
+}
+
+#ifdef CONFIG_PM_SLEEP
+int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
+#endif
+int sdhci_pci_enable_dma(struct sdhci_host *host);
+
+extern const struct sdhci_pci_fixes sdhci_arasan;
+extern const struct sdhci_pci_fixes sdhci_snps;
+extern const struct sdhci_pci_fixes sdhci_o2;
+extern const struct sdhci_pci_fixes sdhci_gl9750;
+extern const struct sdhci_pci_fixes sdhci_gl9755;
+extern const struct sdhci_pci_fixes sdhci_gl9763e;
+
+#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
new file mode 100644
index 000000000..6ce1519ae
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -0,0 +1,255 @@
+/*
+ * Support of SDHCI platform devices for Microchip PIC32.
+ *
+ * Copyright (C) 2015 Microchip
+ * Andrei Pistirica, Paul Thacker
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sdhci-pic32.h>
+#include <linux/clk.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+#include <asm/mach-pic32/pic32.h>
+
+#define SDH_SHARED_BUS_CTRL 0x000000E0
+#define SDH_SHARED_BUS_NR_CLK_PINS_MASK 0x7
+#define SDH_SHARED_BUS_NR_IRQ_PINS_MASK 0x30
+#define SDH_SHARED_BUS_CLK_PINS 0x10
+#define SDH_SHARED_BUS_IRQ_PINS 0x14
+#define SDH_CAPS_SDH_SLOT_TYPE_MASK 0xC0000000
+#define SDH_SLOT_TYPE_REMOVABLE 0x0
+#define SDH_SLOT_TYPE_EMBEDDED 0x1
+#define SDH_SLOT_TYPE_SHARED_BUS 0x2
+#define SDHCI_CTRL_CDSSEL 0x80
+#define SDHCI_CTRL_CDTLVL 0x40
+
+#define ADMA_FIFO_RD_THSHLD 512
+#define ADMA_FIFO_WR_THSHLD 512
+
+struct pic32_sdhci_priv {
+	struct platform_device *pdev;
+	struct clk *sys_clk;
+	struct clk *base_clk;
+};
+
+static unsigned int pic32_sdhci_get_max_clock(struct sdhci_host *host)
+{
+	struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+
+	return clk_get_rate(sdhci_pdata->base_clk);
+}
+
+static void pic32_sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+	u8 ctrl;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	if (width == MMC_BUS_WIDTH_8) {
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+		if (host->version >= SDHCI_SPEC_300)
+			ctrl |= SDHCI_CTRL_8BITBUS;
+	} else {
+		if (host->version >= SDHCI_SPEC_300)
+			ctrl &= ~SDHCI_CTRL_8BITBUS;
+		if (width == MMC_BUS_WIDTH_4)
+			ctrl |= SDHCI_CTRL_4BITBUS;
+		else
+			ctrl &= ~SDHCI_CTRL_4BITBUS;
+	}
+
+	/* CD select and test bits must be set for errata workaround. */
+	ctrl &= ~SDHCI_CTRL_CDTLVL;
+	ctrl |= SDHCI_CTRL_CDSSEL;
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static unsigned int pic32_sdhci_get_ro(struct sdhci_host *host)
+{
+	/*
+	 * The SDHCI_WRITE_PROTECT bit is unstable on current hardware so we
+	 * can't depend on its value in any way.
+ */ + return 0; +} + +static const struct sdhci_ops pic32_sdhci_ops = { + .get_max_clock = pic32_sdhci_get_max_clock, + .set_clock = sdhci_set_clock, + .set_bus_width = pic32_sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .get_ro = pic32_sdhci_get_ro, +}; + +static const struct sdhci_pltfm_data sdhci_pic32_pdata = { + .ops = &pic32_sdhci_ops, + .quirks = SDHCI_QUIRK_NO_HISPD_BIT, + .quirks2 = SDHCI_QUIRK2_NO_1_8_V, +}; + +static void pic32_sdhci_shared_bus(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + u32 bus = readl(host->ioaddr + SDH_SHARED_BUS_CTRL); + u32 clk_pins = (bus & SDH_SHARED_BUS_NR_CLK_PINS_MASK) >> 0; + u32 irq_pins = (bus & SDH_SHARED_BUS_NR_IRQ_PINS_MASK) >> 4; + + /* select first clock */ + if (clk_pins & 1) + bus |= (1 << SDH_SHARED_BUS_CLK_PINS); + + /* select first interrupt */ + if (irq_pins & 1) + bus |= (1 << SDH_SHARED_BUS_IRQ_PINS); + + writel(bus, host->ioaddr + SDH_SHARED_BUS_CTRL); +} + +static int pic32_sdhci_probe_platform(struct platform_device *pdev, + struct pic32_sdhci_priv *pdata) +{ + int ret = 0; + u32 caps_slot_type; + struct sdhci_host *host = platform_get_drvdata(pdev); + + /* Check card slot connected on shared bus. */ + host->caps = readl(host->ioaddr + SDHCI_CAPABILITIES); + caps_slot_type = (host->caps & SDH_CAPS_SDH_SLOT_TYPE_MASK) >> 30; + if (caps_slot_type == SDH_SLOT_TYPE_SHARED_BUS) + pic32_sdhci_shared_bus(pdev); + + return ret; +} + +static int pic32_sdhci_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct pic32_sdhci_priv *sdhci_pdata; + struct pic32_sdhci_platform_data *plat_data; + int ret; + + host = sdhci_pltfm_init(pdev, &sdhci_pic32_pdata, + sizeof(struct pic32_sdhci_priv)); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + goto err; + } + + pltfm_host = sdhci_priv(host); + sdhci_pdata = sdhci_pltfm_priv(pltfm_host); + + plat_data = pdev->dev.platform_data; + if (plat_data && plat_data->setup_dma) { + ret = plat_data->setup_dma(ADMA_FIFO_RD_THSHLD, + ADMA_FIFO_WR_THSHLD); + if (ret) + goto err_host; + } + + sdhci_pdata->sys_clk = devm_clk_get(&pdev->dev, "sys_clk"); + if (IS_ERR(sdhci_pdata->sys_clk)) { + ret = PTR_ERR(sdhci_pdata->sys_clk); + dev_err(&pdev->dev, "Error getting clock\n"); + goto err_host; + } + + ret = clk_prepare_enable(sdhci_pdata->sys_clk); + if (ret) { + dev_err(&pdev->dev, "Error enabling clock\n"); + goto err_host; + } + + sdhci_pdata->base_clk = devm_clk_get(&pdev->dev, "base_clk"); + if (IS_ERR(sdhci_pdata->base_clk)) { + ret = PTR_ERR(sdhci_pdata->base_clk); + dev_err(&pdev->dev, "Error getting clock\n"); + goto err_sys_clk; + } + + ret = clk_prepare_enable(sdhci_pdata->base_clk); + if (ret) { + dev_err(&pdev->dev, "Error enabling clock\n"); + goto err_base_clk; + } + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_base_clk; + + ret = pic32_sdhci_probe_platform(pdev, sdhci_pdata); + if (ret) { + dev_err(&pdev->dev, "failed to probe platform!\n"); + goto err_base_clk; + } + + ret = sdhci_add_host(host); + if (ret) + goto err_base_clk; + + dev_info(&pdev->dev, "Successfully added sdhci host\n"); + return 0; + +err_base_clk: + clk_disable_unprepare(sdhci_pdata->base_clk); +err_sys_clk: + clk_disable_unprepare(sdhci_pdata->sys_clk); +err_host: + sdhci_pltfm_free(pdev); +err: + dev_err(&pdev->dev, "pic32-sdhci probe failed: %d\n", ret); + return ret; +} + +static int pic32_sdhci_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = 
platform_get_drvdata(pdev);
+	struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+	u32 scratch;
+
+	scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+	sdhci_remove_host(host, scratch == (u32)~0);
+	clk_disable_unprepare(sdhci_pdata->base_clk);
+	clk_disable_unprepare(sdhci_pdata->sys_clk);
+	sdhci_pltfm_free(pdev);
+
+	return 0;
+}
+
+static const struct of_device_id pic32_sdhci_id_table[] = {
+	{ .compatible = "microchip,pic32mzda-sdhci" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
+
+static struct platform_driver pic32_sdhci_driver = {
+	.driver = {
+		.name = "pic32-sdhci",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.of_match_table = of_match_ptr(pic32_sdhci_id_table),
+	},
+	.probe = pic32_sdhci_probe,
+	.remove = pic32_sdhci_remove,
+};
+
+module_platform_driver(pic32_sdhci_driver);
+
+MODULE_DESCRIPTION("Microchip PIC32 SDHCI driver");
+MODULE_AUTHOR("Pistirica Sorin Andrei & Sandeep Sheriker");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
new file mode 100644
index 000000000..328b132bb
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sdhci-pltfm.c Support for SDHCI platform devices
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie
+ *	    Anton Vorontsov
+ */
+
+/* Supports:
+ * SDHCI platform devices
+ *
+ * Inspired by sdhci-pci.c, by Pierre Ossman
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/of.h>
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+#include "sdhci-pltfm.h"
+
+unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+	return clk_get_rate(pltfm_host->clk);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
+
+static const struct sdhci_ops sdhci_pltfm_ops = {
+	.set_clock = sdhci_set_clock,
+	.set_bus_width = sdhci_set_bus_width,
+	.reset = sdhci_reset,
+	.set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static bool sdhci_wp_inverted(struct device *dev)
+{
+	if (device_property_present(dev, "sdhci,wp-inverted") ||
+	    device_property_present(dev, "wp-inverted"))
+		return true;
+
+	/* Old device trees don't have the wp-inverted property.
*/ +#ifdef CONFIG_PPC + return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); +#else + return false; +#endif /* CONFIG_PPC */ +} + +#ifdef CONFIG_OF +static void sdhci_get_compatibility(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + + if (!np) + return; + + if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) + host->quirks |= SDHCI_QUIRK_BROKEN_DMA; + + if (of_device_is_compatible(np, "fsl,p2020-esdhc") || + of_device_is_compatible(np, "fsl,p1010-esdhc") || + of_device_is_compatible(np, "fsl,t4240-esdhc") || + of_device_is_compatible(np, "fsl,mpc8536-esdhc")) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; +} +#else +void sdhci_get_compatibility(struct platform_device *pdev) {} +#endif /* CONFIG_OF */ + +void sdhci_get_property(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + u32 bus_width; + + if (device_property_present(dev, "sdhci,auto-cmd12")) + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + + if (device_property_present(dev, "sdhci,1-bit-only") || + (device_property_read_u32(dev, "bus-width", &bus_width) == 0 && + bus_width == 1)) + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + + if (sdhci_wp_inverted(dev)) + host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; + + if (device_property_present(dev, "broken-cd")) + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + if (device_property_present(dev, "no-1-8-v")) + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + + sdhci_get_compatibility(pdev); + + device_property_read_u32(dev, "clock-frequency", &pltfm_host->clock); + + if (device_property_present(dev, "keep-power-in-suspend")) + host->mmc->pm_caps |= MMC_PM_KEEP_POWER; + + if (device_property_read_bool(dev, "wakeup-source") || + device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */ + host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; +} +EXPORT_SYMBOL_GPL(sdhci_get_property); + +struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, + const struct sdhci_pltfm_data *pdata, + size_t priv_size) +{ + struct sdhci_host *host; + void __iomem *ioaddr; + int irq, ret; + + ioaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ioaddr)) { + ret = PTR_ERR(ioaddr); + goto err; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err; + } + + host = sdhci_alloc_host(&pdev->dev, + sizeof(struct sdhci_pltfm_host) + priv_size); + + if (IS_ERR(host)) { + ret = PTR_ERR(host); + goto err; + } + + host->ioaddr = ioaddr; + host->irq = irq; + host->hw_name = dev_name(&pdev->dev); + if (pdata && pdata->ops) + host->ops = pdata->ops; + else + host->ops = &sdhci_pltfm_ops; + if (pdata) { + host->quirks = pdata->quirks; + host->quirks2 = pdata->quirks2; + } + + platform_set_drvdata(pdev, host); + + return host; +err: + dev_err(&pdev->dev, "%s failed %d\n", __func__, ret); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_init); + +void sdhci_pltfm_free(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + + sdhci_free_host(host); +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_free); + +int sdhci_pltfm_register(struct platform_device *pdev, + const struct sdhci_pltfm_data *pdata, + size_t priv_size) +{ + struct sdhci_host *host; + int ret = 0; + + host = sdhci_pltfm_init(pdev, pdata, priv_size); + if (IS_ERR(host)) + return PTR_ERR(host); + + sdhci_get_property(pdev); + + ret = 
sdhci_add_host(host); + if (ret) + sdhci_pltfm_free(pdev); + + return ret; +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_register); + +int sdhci_pltfm_unregister(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + + sdhci_remove_host(host, dead); + clk_disable_unprepare(pltfm_host->clk); + sdhci_pltfm_free(pdev); + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); + +#ifdef CONFIG_PM_SLEEP +int sdhci_pltfm_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + clk_disable_unprepare(pltfm_host->clk); + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); + +int sdhci_pltfm_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = sdhci_resume_host(host); + if (ret) + clk_disable_unprepare(pltfm_host->clk); + + return ret; +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); +#endif + +const struct dev_pm_ops sdhci_pltfm_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume) +}; +EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops); + +static int __init sdhci_pltfm_drv_init(void) +{ + pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n"); + + return 0; +} +module_init(sdhci_pltfm_drv_init); + +static void __exit sdhci_pltfm_drv_exit(void) +{ +} +module_exit(sdhci_pltfm_drv_exit); + +MODULE_DESCRIPTION("SDHCI platform and OF driver helper"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h new file mode 100644 index 000000000..9bd717ff7 --- /dev/null +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2010 MontaVista Software, LLC. + * + * Author: Anton Vorontsov + */ + +#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H +#define _DRIVERS_MMC_SDHCI_PLTFM_H + +#include +#include +#include "sdhci.h" + +struct sdhci_pltfm_data { + const struct sdhci_ops *ops; + unsigned int quirks; + unsigned int quirks2; +}; + +struct sdhci_pltfm_host { + struct clk *clk; + + /* migrate from sdhci_of_host */ + unsigned int clock; + u16 xfer_mode_shadow; + + unsigned long private[] ____cacheline_aligned; +}; + +#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER +/* + * These accessors are designed for big endian hosts doing I/O to + * little endian controllers incorporating a 32-bit hardware byte swapper. 
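+ * 32-bit accesses pass straight through, while the 16- and 8-bit helpers
+ * below XOR the register offset (reg ^ 0x2, reg ^ 0x3) to undo the swap.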
+ */ +static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) +{ + return in_be32(host->ioaddr + reg); +} + +static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) +{ + return in_be16(host->ioaddr + (reg ^ 0x2)); +} + +static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) +{ + return in_8(host->ioaddr + (reg ^ 0x3)); +} + +static inline void sdhci_be32bs_writel(struct sdhci_host *host, + u32 val, int reg) +{ + out_be32(host->ioaddr + reg, val); +} + +static inline void sdhci_be32bs_writew(struct sdhci_host *host, + u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int base = reg & ~0x3; + int shift = (reg & 0x2) * 8; + + switch (reg) { + case SDHCI_TRANSFER_MODE: + /* + * Postpone this write, we must do it together with a + * command write that is down below. + */ + pltfm_host->xfer_mode_shadow = val; + return; + case SDHCI_COMMAND: + sdhci_be32bs_writel(host, + val << 16 | pltfm_host->xfer_mode_shadow, + SDHCI_TRANSFER_MODE); + return; + } + clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); +} + +static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) +{ + int base = reg & ~0x3; + int shift = (reg & 0x3) * 8; + + clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); +} +#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ + +void sdhci_get_property(struct platform_device *pdev); + +static inline void sdhci_get_of_property(struct platform_device *pdev) +{ + return sdhci_get_property(pdev); +} + +extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, + const struct sdhci_pltfm_data *pdata, + size_t priv_size); +extern void sdhci_pltfm_free(struct platform_device *pdev); + +extern int sdhci_pltfm_register(struct platform_device *pdev, + const struct sdhci_pltfm_data *pdata, + size_t priv_size); +extern int sdhci_pltfm_unregister(struct platform_device *pdev); + +extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host); + +static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) +{ + return host->private; +} + +extern const struct dev_pm_ops sdhci_pltfm_pmops; +#ifdef CONFIG_PM_SLEEP +int sdhci_pltfm_suspend(struct device *dev); +int sdhci_pltfm_resume(struct device *dev); +#else +static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; } +static inline int sdhci_pltfm_resume(struct device *dev) { return 0; } +#endif + +#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c new file mode 100644 index 000000000..f18906b55 --- /dev/null +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2010 Marvell International Ltd. 
+ * Zhangfei Gao + * Kevin Wang + * Jun Nie + * Qiming Wu + * Philip Rakity + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci.h" +#include "sdhci-pltfm.h" + +#define SD_FIFO_PARAM 0xe0 +#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */ +#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */ +#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */ +#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \ + CLK_GATE_ON | CLK_GATE_CTL) + +#define SD_CLOCK_BURST_SIZE_SETUP 0xe6 +#define SDCLK_SEL_SHIFT 8 +#define SDCLK_SEL_MASK 0x3 +#define SDCLK_DELAY_SHIFT 10 +#define SDCLK_DELAY_MASK 0x3c + +#define SD_CE_ATA_2 0xea +#define MMC_CARD 0x1000 +#define MMC_WIDTH 0x0100 + +static void pxav2_reset(struct sdhci_host *host, u8 mask) +{ + struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); + struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + + sdhci_reset(host, mask); + + if (mask == SDHCI_RESET_ALL) { + u16 tmp = 0; + + /* + * tune timing of read data/command when crc error happen + * no performance impact + */ + if (pdata && pdata->clk_delay_sel == 1) { + tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); + + tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); + tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK) + << SDCLK_DELAY_SHIFT; + tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT); + tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT; + + writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); + } + + if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) { + tmp = readw(host->ioaddr + SD_FIFO_PARAM); + tmp &= ~CLK_GATE_SETTING_BITS; + writew(tmp, host->ioaddr + SD_FIFO_PARAM); + } else { + tmp = readw(host->ioaddr + SD_FIFO_PARAM); + tmp &= ~CLK_GATE_SETTING_BITS; + tmp |= CLK_GATE_SETTING_BITS; + writew(tmp, host->ioaddr + SD_FIFO_PARAM); + } + } +} + +static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width) +{ + u8 ctrl; + u16 tmp; + + ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); + tmp = readw(host->ioaddr + SD_CE_ATA_2); + if (width == MMC_BUS_WIDTH_8) { + ctrl &= ~SDHCI_CTRL_4BITBUS; + tmp |= MMC_CARD | MMC_WIDTH; + } else { + tmp &= ~(MMC_CARD | MMC_WIDTH); + if (width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + } + writew(tmp, host->ioaddr + SD_CE_ATA_2); + writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); +} + +static const struct sdhci_ops pxav2_sdhci_ops = { + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = pxav2_mmc_set_bus_width, + .reset = pxav2_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_pxav2_of_match[] = { + { + .compatible = "mrvl,pxav2-mmc", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match); + +static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) +{ + struct sdhci_pxa_platdata *pdata; + struct device_node *np = dev->of_node; + u32 bus_width; + u32 clk_delay_cycles; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + if (of_find_property(np, "non-removable", NULL)) + pdata->flags |= PXA_FLAG_CARD_PERMANENT; + + of_property_read_u32(np, "bus-width", &bus_width); + if (bus_width == 8) + pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; + + of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); + if (clk_delay_cycles > 0) { + pdata->clk_delay_sel = 1; + 
pdata->clk_delay_cycles = clk_delay_cycles; + } + + return pdata; +} +#else +static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) +{ + return NULL; +} +#endif + +static int sdhci_pxav2_probe(struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + struct device *dev = &pdev->dev; + struct sdhci_host *host = NULL; + const struct of_device_id *match; + + int ret; + struct clk *clk; + + host = sdhci_pltfm_init(pdev, NULL, 0); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + + clk = devm_clk_get(dev, "PXA-SDHCLK"); + if (IS_ERR(clk)) { + dev_err(dev, "failed to get io clock\n"); + ret = PTR_ERR(clk); + goto free; + } + pltfm_host->clk = clk; + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable io clock\n"); + goto free; + } + + host->quirks = SDHCI_QUIRK_BROKEN_ADMA + | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL + | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; + + match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev); + if (match) { + pdata = pxav2_get_mmc_pdata(dev); + } + if (pdata) { + if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { + /* on-chip device */ + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + } + + /* If slot design supports 8 bit data, indicate this to MMC. */ + if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + + if (pdata->quirks) + host->quirks |= pdata->quirks; + if (pdata->host_caps) + host->mmc->caps |= pdata->host_caps; + if (pdata->pm_caps) + host->mmc->pm_caps |= pdata->pm_caps; + } + + host->ops = &pxav2_sdhci_ops; + + ret = sdhci_add_host(host); + if (ret) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(clk); +free: + sdhci_pltfm_free(pdev); + return ret; +} + +static struct platform_driver sdhci_pxav2_driver = { + .driver = { + .name = "sdhci-pxav2", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(sdhci_pxav2_of_match), + .pm = &sdhci_pltfm_pmops, + }, + .probe = sdhci_pxav2_probe, + .remove = sdhci_pltfm_unregister, +}; + +module_platform_driver(sdhci_pxav2_driver); + +MODULE_DESCRIPTION("SDHCI driver for pxav2"); +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c new file mode 100644 index 000000000..a6d89a3f1 --- /dev/null +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2010 Marvell International Ltd. 
+ * Zhangfei Gao + * Kevin Wang + * Mingwei Wang + * Philip Rakity + * Mark Brown + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci.h" +#include "sdhci-pltfm.h" + +#define PXAV3_RPM_DELAY_MS 50 + +#define SD_CLOCK_BURST_SIZE_SETUP 0x10A +#define SDCLK_SEL 0x100 +#define SDCLK_DELAY_SHIFT 9 +#define SDCLK_DELAY_MASK 0x1f + +#define SD_CFG_FIFO_PARAM 0x100 +#define SDCFG_GEN_PAD_CLK_ON (1<<6) +#define SDCFG_GEN_PAD_CLK_CNT_MASK 0xFF +#define SDCFG_GEN_PAD_CLK_CNT_SHIFT 24 + +#define SD_SPI_MODE 0x108 +#define SD_CE_ATA_1 0x10C + +#define SD_CE_ATA_2 0x10E +#define SDCE_MISC_INT (1<<2) +#define SDCE_MISC_INT_EN (1<<1) + +struct sdhci_pxa { + struct clk *clk_core; + struct clk *clk_io; + u8 power_mode; + void __iomem *sdio3_conf_reg; +}; + +/* + * These registers are relative to the second register region, for the + * MBus bridge. + */ +#define SDHCI_WINDOW_CTRL(i) (0x80 + ((i) << 3)) +#define SDHCI_WINDOW_BASE(i) (0x84 + ((i) << 3)) +#define SDHCI_MAX_WIN_NUM 8 + +/* + * Fields below belong to SDIO3 Configuration Register (third register + * region for the Armada 38x flavor) + */ + +#define SDIO3_CONF_CLK_INV BIT(0) +#define SDIO3_CONF_SD_FB_CLK BIT(2) + +static int mv_conf_mbus_windows(struct platform_device *pdev, + const struct mbus_dram_target_info *dram) +{ + int i; + void __iomem *regs; + struct resource *res; + + if (!dram) { + dev_err(&pdev->dev, "no mbus dram info\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "cannot get mbus registers\n"); + return -EINVAL; + } + + regs = ioremap(res->start, resource_size(res)); + if (!regs) { + dev_err(&pdev->dev, "cannot map mbus registers\n"); + return -ENOMEM; + } + + for (i = 0; i < SDHCI_MAX_WIN_NUM; i++) { + writel(0, regs + SDHCI_WINDOW_CTRL(i)); + writel(0, regs + SDHCI_WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + /* Write size, attributes and target id to control register */ + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + regs + SDHCI_WINDOW_CTRL(i)); + /* Write base address to base register */ + writel(cs->base, regs + SDHCI_WINDOW_BASE(i)); + } + + iounmap(regs); + + return 0; +} + +static int armada_38x_quirks(struct platform_device *pdev, + struct sdhci_host *host) +{ + struct device_node *np = pdev->dev.of_node; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host); + struct resource *res; + + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; + host->quirks |= SDHCI_QUIRK_MISSING_CAPS; + + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "conf-sdio3"); + if (res) { + pxa->sdio3_conf_reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pxa->sdio3_conf_reg)) + return PTR_ERR(pxa->sdio3_conf_reg); + } else { + /* + * According to erratum 'FE-2946959' both SDR50 and DDR50 + * modes require specific clock adjustments in SDIO3 + * Configuration register, if the adjustment is not done, + * remove them from the capabilities. 
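+ * (When the register is present, pxav3_set_uhs_signaling() below applies
+ * the adjustment whenever the timing mode changes.)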
+ */
+		host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+
+		dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
+	}
+
+	/*
+	 * According to erratum 'ERR-7878951', the Armada 38x SDHCI
+	 * controller has different capabilities than the ones shown
+	 * in its registers.
+	 */
+	if (of_property_read_bool(np, "no-1-8-v")) {
+		host->caps &= ~SDHCI_CAN_VDD_180;
+		host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+	} else {
+		host->caps &= ~SDHCI_CAN_VDD_330;
+	}
+	host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_USE_SDR50_TUNING);
+
+	return 0;
+}
+
+static void pxav3_reset(struct sdhci_host *host, u8 mask)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+	sdhci_reset(host, mask);
+
+	if (mask == SDHCI_RESET_ALL) {
+		/*
+		 * Tune the timing of read data/command when a CRC error
+		 * happens; there is no performance impact.
+		 */
+		if (pdata && pdata->clk_delay_cycles != 0) {
+			u16 tmp;
+
+			tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+			tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+				<< SDCLK_DELAY_SHIFT;
+			tmp |= SDCLK_SEL;
+			writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+		}
+	}
+}
+
+#define MAX_WAIT_COUNT 5
+static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
+	u16 tmp;
+	int count;
+
+	if (pxa->power_mode == MMC_POWER_UP
+			&& power_mode == MMC_POWER_ON) {
+
+		dev_dbg(mmc_dev(host->mmc),
+			"%s: slot->power_mode = %d, ios->power_mode = %d\n",
+			__func__,
+			pxa->power_mode,
+			power_mode);
+
+		/* ask to be notified once the 74 clocks have been sent */
+		tmp = readw(host->ioaddr + SD_CE_ATA_2);
+		tmp |= SDCE_MISC_INT_EN;
+		writew(tmp, host->ioaddr + SD_CE_ATA_2);
+
+		/* start sending the 74 clocks */
+		tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM);
+		tmp |= SDCFG_GEN_PAD_CLK_ON;
+		writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM);
+
+		/* the slowest speed is about 100 kHz, i.e. 10 usec per clock */
+		udelay(740);
+		count = 0;
+
+		while (count++ < MAX_WAIT_COUNT) {
+			if ((readw(host->ioaddr + SD_CE_ATA_2)
+						& SDCE_MISC_INT) == 0)
+				break;
+			udelay(10);
+		}
+
+		if (count == MAX_WAIT_COUNT)
+			dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n");
+
+		/* clear the interrupt bit if posted */
+		tmp = readw(host->ioaddr + SD_CE_ATA_2);
+		tmp |= SDCE_MISC_INT;
+		writew(tmp, host->ioaddr + SD_CE_ATA_2);
+	}
+	pxa->power_mode = power_mode;
+}
+
+static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
+	u16 ctrl_2;
+
+	/*
+	 * Set V18_EN -- UHS modes do not work without this. It
+ * does not change signaling voltage + */ + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; + break; + } + + /* + * Update SDIO3 Configuration register according to erratum + * FE-2946959 + */ + if (pxa->sdio3_conf_reg) { + u8 reg_val = readb(pxa->sdio3_conf_reg); + + if (uhs == MMC_TIMING_UHS_SDR50 || + uhs == MMC_TIMING_UHS_DDR50) { + reg_val &= ~SDIO3_CONF_CLK_INV; + reg_val |= SDIO3_CONF_SD_FB_CLK; + } else if (uhs == MMC_TIMING_MMC_HS) { + reg_val &= ~SDIO3_CONF_CLK_INV; + reg_val &= ~SDIO3_CONF_SD_FB_CLK; + } else { + reg_val |= SDIO3_CONF_CLK_INV; + reg_val &= ~SDIO3_CONF_SD_FB_CLK; + } + writeb(reg_val, pxa->sdio3_conf_reg); + } + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + dev_dbg(mmc_dev(host->mmc), + "%s uhs = %d, ctrl_2 = %04X\n", + __func__, uhs, ctrl_2); +} + +static void pxav3_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + u8 pwr = host->pwr; + + sdhci_set_power_noreg(host, mode, vdd); + + if (host->pwr == pwr) + return; + + if (host->pwr == 0) + vdd = 0; + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + +static const struct sdhci_ops pxav3_sdhci_ops = { + .set_clock = sdhci_set_clock, + .set_power = pxav3_set_power, + .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = pxav3_reset, + .set_uhs_signaling = pxav3_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_pxav3_pdata = { + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_32BIT_ADMA_SIZE + | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .ops = &pxav3_sdhci_ops, +}; + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_pxav3_of_match[] = { + { + .compatible = "mrvl,pxav3-mmc", + }, + { + .compatible = "marvell,armada-380-sdhci", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match); + +static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) +{ + struct sdhci_pxa_platdata *pdata; + struct device_node *np = dev->of_node; + u32 clk_delay_cycles; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + if (!of_property_read_u32(np, "mrvl,clk-delay-cycles", + &clk_delay_cycles)) + pdata->clk_delay_cycles = clk_delay_cycles; + + return pdata; +} +#else +static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) +{ + return NULL; +} +#endif + +static int sdhci_pxav3_probe(struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct sdhci_host *host = NULL; + struct sdhci_pxa *pxa = NULL; + const struct of_device_id *match; + int ret; + + host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, sizeof(*pxa)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = 
sdhci_priv(host); + pxa = sdhci_pltfm_priv(pltfm_host); + + pxa->clk_io = devm_clk_get(dev, "io"); + if (IS_ERR(pxa->clk_io)) + pxa->clk_io = devm_clk_get(dev, NULL); + if (IS_ERR(pxa->clk_io)) { + dev_err(dev, "failed to get io clock\n"); + ret = PTR_ERR(pxa->clk_io); + goto err_clk_get; + } + pltfm_host->clk = pxa->clk_io; + clk_prepare_enable(pxa->clk_io); + + pxa->clk_core = devm_clk_get(dev, "core"); + if (!IS_ERR(pxa->clk_core)) + clk_prepare_enable(pxa->clk_core); + + /* enable 1/8V DDR capable */ + host->mmc->caps |= MMC_CAP_1_8V_DDR; + + if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { + ret = armada_38x_quirks(pdev, host); + if (ret < 0) + goto err_mbus_win; + ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); + if (ret < 0) + goto err_mbus_win; + } + + match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev); + if (match) { + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_of_parse; + sdhci_get_of_property(pdev); + pdata = pxav3_get_mmc_pdata(dev); + pdev->dev.platform_data = pdata; + } else if (pdata) { + /* on-chip device */ + if (pdata->flags & PXA_FLAG_CARD_PERMANENT) + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + + /* If slot design supports 8 bit data, indicate this to MMC. */ + if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + + if (pdata->quirks) + host->quirks |= pdata->quirks; + if (pdata->quirks2) + host->quirks2 |= pdata->quirks2; + if (pdata->host_caps) + host->mmc->caps |= pdata->host_caps; + if (pdata->host_caps2) + host->mmc->caps2 |= pdata->host_caps2; + if (pdata->pm_caps) + host->mmc->pm_caps |= pdata->pm_caps; + } + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + + ret = sdhci_add_host(host); + if (ret) + goto err_add_host; + + if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) + device_init_wakeup(&pdev->dev, 1); + + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +err_add_host: + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); +err_of_parse: +err_mbus_win: + clk_disable_unprepare(pxa->clk_io); + clk_disable_unprepare(pxa->clk_core); +err_clk_get: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_pxav3_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host); + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + sdhci_remove_host(host, 1); + + clk_disable_unprepare(pxa->clk_io); + clk_disable_unprepare(pxa->clk_core); + + sdhci_pltfm_free(pdev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pxav3_suspend(struct device *dev) +{ + int ret; + struct sdhci_host *host = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + ret = sdhci_suspend_host(host); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int sdhci_pxav3_resume(struct device *dev) +{ + int ret; + struct sdhci_host *host = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + ret = sdhci_resume_host(host); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} +#endif + +#ifdef CONFIG_PM 
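+/*
+ * Unlike the system sleep hooks above, the runtime PM callbacks below also
+ * gate the io clock (and the core clock when one was found), so an idle
+ * controller stops its clocks between accesses.
+ */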
+static int sdhci_pxav3_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + clk_disable_unprepare(pxa->clk_io); + if (!IS_ERR(pxa->clk_core)) + clk_disable_unprepare(pxa->clk_core); + + return 0; +} + +static int sdhci_pxav3_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host); + + clk_prepare_enable(pxa->clk_io); + if (!IS_ERR(pxa->clk_core)) + clk_prepare_enable(pxa->clk_core); + + return sdhci_runtime_resume_host(host, 0); +} +#endif + +static const struct dev_pm_ops sdhci_pxav3_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume) + SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, + sdhci_pxav3_runtime_resume, NULL) +}; + +static struct platform_driver sdhci_pxav3_driver = { + .driver = { + .name = "sdhci-pxav3", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(sdhci_pxav3_of_match), + .pm = &sdhci_pxav3_pmops, + }, + .probe = sdhci_pxav3_probe, + .remove = sdhci_pxav3_remove, +}; + +module_platform_driver(sdhci_pxav3_driver); + +MODULE_DESCRIPTION("SDHCI driver for pxav3"); +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c new file mode 100644 index 000000000..f48a788a9 --- /dev/null +++ b/drivers/mmc/host/sdhci-s3c.c @@ -0,0 +1,800 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* linux/drivers/mmc/host/sdhci-s3c.c + * + * Copyright 2008 Openmoko Inc. 
+ * Copyright 2008 Simtec Electronics + * Ben Dooks + * http://armlinux.simtec.co.uk/ + * + * SDHCI (HSMMC) support for Samsung SoC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "sdhci.h" + +#define MAX_BUS_CLK (4) + +#define S3C_SDHCI_CONTROL2 (0x80) +#define S3C_SDHCI_CONTROL3 (0x84) +#define S3C64XX_SDHCI_CONTROL4 (0x8C) + +#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR BIT(31) +#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK BIT(30) +#define S3C_SDHCI_CTRL2_CDINVRXD3 BIT(29) +#define S3C_SDHCI_CTRL2_SLCARDOUT BIT(28) + +#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24) +#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24) +#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24) + +#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16) +#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16) +#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16) + +#define S3C_SDHCI_CTRL2_ENFBCLKTX BIT(15) +#define S3C_SDHCI_CTRL2_ENFBCLKRX BIT(14) +#define S3C_SDHCI_CTRL2_SDCDSEL BIT(13) +#define S3C_SDHCI_CTRL2_SDSIGPC BIT(12) +#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART BIT(11) + +#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9) +#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9) + +#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD BIT(8) +#define S3C_SDHCI_CTRL2_RWAITMODE BIT(7) +#define S3C_SDHCI_CTRL2_DISBUFRD BIT(6) + +#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4) +#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4) +#define S3C_SDHCI_CTRL2_PWRSYNC BIT(3) +#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON BIT(1) +#define S3C_SDHCI_CTRL2_HWINITFIN BIT(0) + +#define S3C_SDHCI_CTRL3_FCSEL3 BIT(31) +#define S3C_SDHCI_CTRL3_FCSEL2 BIT(23) +#define S3C_SDHCI_CTRL3_FCSEL1 BIT(15) +#define S3C_SDHCI_CTRL3_FCSEL0 BIT(7) + +#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24) +#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24) +#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24) + +#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16) +#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16) +#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16) + +#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8) +#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8) +#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8) + +#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0) +#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0) +#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0) + +#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16) + +#define S3C64XX_SDHCI_CONTROL4_BUSY (1) + +/** + * struct sdhci_s3c - S3C SDHCI instance + * @host: The SDHCI host created + * @pdev: The platform device we where created from. + * @ioarea: The resource created when we claimed the IO area. + * @pdata: The platform data for this controller. + * @cur_clk: The index of the current bus clock. + * @ext_cd_irq: External card detect interrupt. + * @clk_io: The clock for the internal bus interface. + * @clk_rates: Clock frequencies. + * @clk_bus: The clocks that are available for the SD/MMC bus clock. + * @no_divider: No or non-standard internal clock divider. 
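+ *
+ * @cur_clk starts out as -1 (no bus clock selected yet);
+ * sdhci_s3c_set_clock() updates it whenever it switches to a better source.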
 */
+struct sdhci_s3c {
+	struct sdhci_host *host;
+	struct platform_device *pdev;
+	struct resource *ioarea;
+	struct s3c_sdhci_platdata *pdata;
+	int cur_clk;
+	int ext_cd_irq;
+
+	struct clk *clk_io;
+	struct clk *clk_bus[MAX_BUS_CLK];
+	unsigned long clk_rates[MAX_BUS_CLK];
+
+	bool no_divider;
+};
+
+/**
+ * struct sdhci_s3c_drv_data - S3C SDHCI platform specific driver data
+ * @sdhci_quirks: sdhci host specific quirks.
+ * @no_divider: no or non-standard internal clock divider.
+ *
+ * Specifies platform specific configuration of sdhci controller.
+ * Note: A structure for driver specific platform data is used for future
+ * expansion of its usage.
+ */
+struct sdhci_s3c_drv_data {
+	unsigned int sdhci_quirks;
+	bool no_divider;
+};
+
+static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+{
+	return sdhci_priv(host);
+}
+
+/**
+ * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
+ * @host: The SDHCI host instance.
+ *
+ * Callback to return the maximum clock rate achievable by the controller.
+ */
+static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+	unsigned long rate, max = 0;
+	int src;
+
+	for (src = 0; src < MAX_BUS_CLK; src++) {
+		rate = ourhost->clk_rates[src];
+		if (rate > max)
+			max = rate;
+	}
+
+	return max;
+}
+
+/**
+ * sdhci_s3c_consider_clock - consider one of the bus clocks for the current setting
+ * @ourhost: Our SDHCI instance.
+ * @src: The source clock index.
+ * @wanted: The clock frequency wanted.
+ */
+static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
+					     unsigned int src,
+					     unsigned int wanted)
+{
+	unsigned long rate;
+	struct clk *clksrc = ourhost->clk_bus[src];
+	int shift;
+
+	if (IS_ERR(clksrc))
+		return UINT_MAX;
+
+	/*
+	 * If the controller uses non-standard clock division, find the best
+	 * clock speed possible with the selected clock source and skip the
+	 * division.
+	 */
+	if (ourhost->no_divider) {
+		rate = clk_round_rate(clksrc, wanted);
+		return wanted - rate;
+	}
+
+	rate = ourhost->clk_rates[src];
+
+	for (shift = 0; shift <= 8; ++shift) {
+		if ((rate >> shift) <= wanted)
+			break;
+	}
+
+	if (shift > 8) {
+		dev_dbg(&ourhost->pdev->dev,
+			"clk %d: rate %ld, min rate %lu > wanted %u\n",
+			src, rate, rate / 256, wanted);
+		return UINT_MAX;
+	}
+
+	dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
+		src, rate, wanted, rate >> shift);
+
+	return wanted - (rate >> shift);
+}
+
+/**
+ * sdhci_s3c_set_clock - callback on clock change
+ * @host: The SDHCI host being changed
+ * @clock: The clock rate being requested.
+ *
+ * When the card's clock is going to be changed, look at the new frequency
+ * and find the best clock source to go with it.
+ */
+static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+	unsigned int best = UINT_MAX;
+	unsigned int delta;
+	int best_src = 0;
+	int src;
+	u32 ctrl;
+
+	host->mmc->actual_clock = 0;
+
+	/* don't bother if the clock is going off.
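+	 * sdhci_set_clock(host, 0) below only gates the card clock; no
+	 * source selection is needed.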
*/ + if (clock == 0) { + sdhci_set_clock(host, clock); + return; + } + + for (src = 0; src < MAX_BUS_CLK; src++) { + delta = sdhci_s3c_consider_clock(ourhost, src, clock); + if (delta < best) { + best = delta; + best_src = src; + } + } + + dev_dbg(&ourhost->pdev->dev, + "selected source %d, clock %d, delta %d\n", + best_src, clock, best); + + /* select the new clock source */ + if (ourhost->cur_clk != best_src) { + struct clk *clk = ourhost->clk_bus[best_src]; + + clk_prepare_enable(clk); + if (ourhost->cur_clk >= 0) + clk_disable_unprepare( + ourhost->clk_bus[ourhost->cur_clk]); + + ourhost->cur_clk = best_src; + host->max_clk = ourhost->clk_rates[best_src]; + } + + /* turn clock off to card before changing clock source */ + writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); + + ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); + ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; + ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; + writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); + + /* reprogram default hardware configuration */ + writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, + host->ioaddr + S3C64XX_SDHCI_CONTROL4); + + ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); + ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR | + S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK | + S3C_SDHCI_CTRL2_ENFBCLKRX | + S3C_SDHCI_CTRL2_DFCNT_NONE | + S3C_SDHCI_CTRL2_ENCLKOUTHOLD); + writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); + + /* reconfigure the controller for new clock rate */ + ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); + if (clock < 25 * 1000000) + ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); + writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); + + sdhci_set_clock(host, clock); +} + +/** + * sdhci_s3c_get_min_clock - callback to get minimal supported clock value + * @host: The SDHCI host being queried + * + * To init mmc host properly a minimal clock value is needed. For high system + * bus clock's values the standard formula gives values out of allowed range. + * The clock still can be set to lower values, if clock source other then + * system bus is selected. +*/ +static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_s3c *ourhost = to_s3c(host); + unsigned long rate, min = ULONG_MAX; + int src; + + for (src = 0; src < MAX_BUS_CLK; src++) { + rate = ourhost->clk_rates[src] / 256; + if (!rate) + continue; + if (rate < min) + min = rate; + } + + return min; +} + +/* sdhci_cmu_get_max_clk - callback to get maximum clock frequency.*/ +static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_s3c *ourhost = to_s3c(host); + unsigned long rate, max = 0; + int src; + + for (src = 0; src < MAX_BUS_CLK; src++) { + struct clk *clk; + + clk = ourhost->clk_bus[src]; + if (IS_ERR(clk)) + continue; + + rate = clk_round_rate(clk, ULONG_MAX); + if (rate > max) + max = rate; + } + + return max; +} + +/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. 
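+ * Without an internal divider, the minimum is whatever the slowest source
+ * clock rounds down to, hence the clk_round_rate(clk, 0) loop below.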
*/ +static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_s3c *ourhost = to_s3c(host); + unsigned long rate, min = ULONG_MAX; + int src; + + for (src = 0; src < MAX_BUS_CLK; src++) { + struct clk *clk; + + clk = ourhost->clk_bus[src]; + if (IS_ERR(clk)) + continue; + + rate = clk_round_rate(clk, 0); + if (rate < min) + min = rate; + } + + return min; +} + +/* sdhci_cmu_set_clock - callback on clock change.*/ +static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_s3c *ourhost = to_s3c(host); + struct device *dev = &ourhost->pdev->dev; + unsigned long timeout; + u16 clk = 0; + int ret; + + host->mmc->actual_clock = 0; + + /* If the clock is going off, set to 0 at clock control register */ + if (clock == 0) { + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + return; + } + + sdhci_s3c_set_clock(host, clock); + + /* Reset SD Clock Enable */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); + if (ret != 0) { + dev_err(dev, "%s: failed to set clock rate %uHz\n", + mmc_hostname(host->mmc), clock); + return; + } + + clk = SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Wait max 20 ms */ + timeout = 20; + while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) + & SDHCI_CLOCK_INT_STABLE)) { + if (timeout == 0) { + dev_err(dev, "%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + return; + } + timeout--; + mdelay(1); + } + + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); +} + +static struct sdhci_ops sdhci_s3c_ops = { + .get_max_clock = sdhci_s3c_get_max_clk, + .set_clock = sdhci_s3c_set_clock, + .get_min_clock = sdhci_s3c_get_min_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_OF +static int sdhci_s3c_parse_dt(struct device *dev, + struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) +{ + struct device_node *node = dev->of_node; + u32 max_width; + + /* if the bus-width property is not specified, assume width as 1 */ + if (of_property_read_u32(node, "bus-width", &max_width)) + max_width = 1; + pdata->max_width = max_width; + + /* get the card detection method */ + if (of_get_property(node, "broken-cd", NULL)) { + pdata->cd_type = S3C_SDHCI_CD_NONE; + return 0; + } + + if (of_get_property(node, "non-removable", NULL)) { + pdata->cd_type = S3C_SDHCI_CD_PERMANENT; + return 0; + } + + if (of_get_named_gpio(node, "cd-gpios", 0)) + return 0; + + /* assuming internal card detect that will be configured by pinctrl */ + pdata->cd_type = S3C_SDHCI_CD_INTERNAL; + return 0; +} +#else +static int sdhci_s3c_parse_dt(struct device *dev, + struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_s3c_dt_match[]; +#endif + +static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( + struct platform_device *pdev) +{ +#ifdef CONFIG_OF + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node); + return (struct sdhci_s3c_drv_data *)match->data; + } +#endif + return (struct sdhci_s3c_drv_data *) + platform_get_device_id(pdev)->driver_data; +} + +static int sdhci_s3c_probe(struct platform_device *pdev) +{ + struct s3c_sdhci_platdata *pdata; + struct sdhci_s3c_drv_data 
*drv_data; + struct device *dev = &pdev->dev; + struct sdhci_host *host; + struct sdhci_s3c *sc; + int ret, irq, ptr, clks; + + if (!pdev->dev.platform_data && !pdev->dev.of_node) { + dev_err(dev, "no device data specified\n"); + return -ENOENT; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c)); + if (IS_ERR(host)) { + dev_err(dev, "sdhci_alloc_host() failed\n"); + return PTR_ERR(host); + } + sc = sdhci_priv(host); + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + ret = -ENOMEM; + goto err_pdata_io_clk; + } + + if (pdev->dev.of_node) { + ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata); + if (ret) + goto err_pdata_io_clk; + } else { + memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); + } + + drv_data = sdhci_s3c_get_driver_data(pdev); + + sc->host = host; + sc->pdev = pdev; + sc->pdata = pdata; + sc->cur_clk = -1; + + platform_set_drvdata(pdev, host); + + sc->clk_io = devm_clk_get(dev, "hsmmc"); + if (IS_ERR(sc->clk_io)) { + dev_err(dev, "failed to get io clock\n"); + ret = PTR_ERR(sc->clk_io); + goto err_pdata_io_clk; + } + + /* enable the local io clock and keep it running for the moment. */ + clk_prepare_enable(sc->clk_io); + + for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { + char name[14]; + + snprintf(name, 14, "mmc_busclk.%d", ptr); + sc->clk_bus[ptr] = devm_clk_get(dev, name); + if (IS_ERR(sc->clk_bus[ptr])) + continue; + + clks++; + sc->clk_rates[ptr] = clk_get_rate(sc->clk_bus[ptr]); + + dev_info(dev, "clock source %d: %s (%ld Hz)\n", + ptr, name, sc->clk_rates[ptr]); + } + + if (clks == 0) { + dev_err(dev, "failed to find any bus clocks\n"); + ret = -ENOENT; + goto err_no_busclks; + } + + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->ioaddr)) { + ret = PTR_ERR(host->ioaddr); + goto err_req_regs; + } + + /* Ensure we have minimal gpio selected CMD/CLK/Detect */ + if (pdata->cfg_gpio) + pdata->cfg_gpio(pdev, pdata->max_width); + + host->hw_name = "samsung-hsmmc"; + host->ops = &sdhci_s3c_ops; + host->quirks = 0; + host->quirks2 = 0; + host->irq = irq; + + /* Setup quirks for the controller */ + host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; + host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; + if (drv_data) { + host->quirks |= drv_data->sdhci_quirks; + sc->no_divider = drv_data->no_divider; + } + +#ifndef CONFIG_MMC_SDHCI_S3C_DMA + + /* we currently see overruns on errors, so disable the SDMA + * support as well. */ + host->quirks |= SDHCI_QUIRK_BROKEN_DMA; + +#endif /* CONFIG_MMC_SDHCI_S3C_DMA */ + + /* It seems we do not get an DATA transfer complete on non-busy + * transfers, not sure if this is a problem with this specific + * SDHCI block, or a missing configuration that needs to be set. 
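+	 * Work around it with SDHCI_QUIRK_NO_BUSY_IRQ, applied just below.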
*/ + host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; + + /* This host supports the Auto CMD12 */ + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + + /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */ + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC; + + if (pdata->cd_type == S3C_SDHCI_CD_NONE || + pdata->cd_type == S3C_SDHCI_CD_PERMANENT) + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) + host->mmc->caps = MMC_CAP_NONREMOVABLE; + + switch (pdata->max_width) { + case 8: + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + fallthrough; + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + } + + if (pdata->pm_caps) + host->mmc->pm_caps |= pdata->pm_caps; + + host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE); + + /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */ + host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; + + /* + * If controller does not have internal clock divider, + * we can use overriding functions instead of default. + */ + if (sc->no_divider) { + sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; + sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; + sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; + } + + /* It supports additional host capabilities if needed */ + if (pdata->host_caps) + host->mmc->caps |= pdata->host_caps; + + if (pdata->host_caps2) + host->mmc->caps2 |= pdata->host_caps2; + + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_req_regs; + + ret = sdhci_add_host(host); + if (ret) + goto err_req_regs; + +#ifdef CONFIG_PM + if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) + clk_disable_unprepare(sc->clk_io); +#endif + return 0; + + err_req_regs: + pm_runtime_disable(&pdev->dev); + + err_no_busclks: + clk_disable_unprepare(sc->clk_io); + + err_pdata_io_clk: + sdhci_free_host(host); + + return ret; +} + +static int sdhci_s3c_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_s3c *sc = sdhci_priv(host); + + if (sc->ext_cd_irq) + free_irq(sc->ext_cd_irq, sc); + +#ifdef CONFIG_PM + if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL) + clk_prepare_enable(sc->clk_io); +#endif + sdhci_remove_host(host, 1); + + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + clk_disable_unprepare(sc->clk_io); + + sdhci_free_host(host); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_s3c_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + return sdhci_suspend_host(host); +} + +static int sdhci_s3c_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + return sdhci_resume_host(host); +} +#endif + +#ifdef CONFIG_PM +static int sdhci_s3c_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_s3c *ourhost = to_s3c(host); + struct clk *busclk = ourhost->clk_io; + int ret; + + ret = sdhci_runtime_suspend_host(host); + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + if (ourhost->cur_clk >= 0) + clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); + clk_disable_unprepare(busclk); + return ret; +} + +static int sdhci_s3c_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = 
dev_get_drvdata(dev); + struct sdhci_s3c *ourhost = to_s3c(host); + struct clk *busclk = ourhost->clk_io; + int ret; + + clk_prepare_enable(busclk); + if (ourhost->cur_clk >= 0) + clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); + ret = sdhci_runtime_resume_host(host, 0); + return ret; +} +#endif + +static const struct dev_pm_ops sdhci_s3c_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume) + SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, + NULL) +}; + +static const struct platform_device_id sdhci_s3c_driver_ids[] = { + { + .name = "s3c-sdhci", + .driver_data = (kernel_ulong_t)NULL, + }, + { } +}; +MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); + +#ifdef CONFIG_OF +static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { + .no_divider = true, +}; + +static const struct of_device_id sdhci_s3c_dt_match[] = { + { .compatible = "samsung,s3c6410-sdhci", }, + { .compatible = "samsung,exynos4210-sdhci", + .data = &exynos4_sdhci_drv_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match); +#endif + +static struct platform_driver sdhci_s3c_driver = { + .probe = sdhci_s3c_probe, + .remove = sdhci_s3c_remove, + .id_table = sdhci_s3c_driver_ids, + .driver = { + .name = "s3c-sdhci", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(sdhci_s3c_dt_match), + .pm = &sdhci_s3c_pmops, + }, +}; + +module_platform_driver(sdhci_s3c_driver); + +MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue"); +MODULE_AUTHOR("Ben Dooks, "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:s3c-sdhci"); diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c new file mode 100644 index 000000000..e9b347b3a --- /dev/null +++ b/drivers/mmc/host/sdhci-sirf.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SDHCI support for SiRF primaII and marco SoCs + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "sdhci-pltfm.h"
+
+#define SDHCI_CLK_DELAY_SETTING 0x4C
+#define SDHCI_SIRF_8BITBUS BIT(3)
+#define SIRF_TUNING_COUNT 16384
+
+static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
+{
+	u8 ctrl;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_SIRF_8BITBUS);
+
+	/*
+	 * The CSR atlas7 and prima2 SD host version is not 3.0: the
+	 * 8-bit-width enable bit of CSR SD hosts is bit 3, while
+	 * standard hosts use bit 5.
+	 */
+	if (width == MMC_BUS_WIDTH_8)
+		ctrl |= SDHCI_SIRF_8BITBUS;
+	else if (width == MMC_BUS_WIDTH_4)
+		ctrl |= SDHCI_CTRL_4BITBUS;
+
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg)
+{
+	u32 val = readl(host->ioaddr + reg);
+
+	if (unlikely((reg == SDHCI_CAPABILITIES_1) &&
+			(host->mmc->caps & MMC_CAP_UHS_SDR50))) {
+		/* fake CAP_1 register */
+		val = SDHCI_SUPPORT_DDR50 |
+			SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
+	}
+
+	if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) {
+		u32 prss = val;
+		/* fake the chip as a v3.0 host controller */
+		prss &= ~(0xFF << 16);
+		val = prss | (SDHCI_SPEC_300 << 16);
+	}
+	return val;
+}
+
+static u16 sdhci_sirf_readw_le(struct sdhci_host *host, int reg)
+{
+	u16 ret = 0;
+
+	ret = readw(host->ioaddr + reg);
+
+	if (unlikely(reg == SDHCI_HOST_VERSION)) {
+		ret = readw(host->ioaddr + SDHCI_HOST_VERSION);
+		ret |= SDHCI_SPEC_300;
+	}
+
+	return ret;
+}
+
+static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+	int tuning_seq_cnt = 3;
+	int phase;
+	u8 tuned_phase_cnt = 0;
+	int rc = 0, longest_range = 0;
+	int start = -1, end = 0, tuning_value = -1, range = 0;
+	u16 clock_setting;
+	struct mmc_host *mmc = host->mmc;
+
+	clock_setting = sdhci_readw(host, SDHCI_CLK_DELAY_SETTING);
+	clock_setting &= ~0x3fff;
+
+retry:
+	phase = 0;
+	tuned_phase_cnt = 0;
+	do {
+		sdhci_writel(host,
+			clock_setting | phase,
+			SDHCI_CLK_DELAY_SETTING);
+
+		if (!mmc_send_tuning(mmc, opcode, NULL)) {
+			/* Tuning is successful at this tuning point */
+			tuned_phase_cnt++;
+			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
+				 mmc_hostname(mmc), phase);
+			if (start == -1)
+				start = phase;
+			end = phase;
+			range++;
+			if (phase == (SIRF_TUNING_COUNT - 1)
+				&& range > longest_range)
+				tuning_value = (start + end) / 2;
+		} else {
+			dev_dbg(mmc_dev(mmc), "%s: Found bad phase = %d\n",
+				 mmc_hostname(mmc), phase);
+			if (range > longest_range) {
+				tuning_value = (start + end) / 2;
+				longest_range = range;
+			}
+			start = -1;
+			end = range = 0;
+		}
+	} while (++phase < SIRF_TUNING_COUNT);
+
+	if (tuned_phase_cnt && tuning_value > 0) {
+		/*
+		 * Finally set the selected phase in the delay
+		 * line hw block.
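+		 * The value programmed is the midpoint of the longest
+		 * passing phase window found above.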
+ */ + phase = tuning_value; + sdhci_writel(host, + clock_setting | phase, + SDHCI_CLK_DELAY_SETTING); + + dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", + mmc_hostname(mmc), phase); + } else { + if (--tuning_seq_cnt) + goto retry; + /* Tuning failed */ + dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n", + mmc_hostname(mmc)); + rc = -EIO; + } + + return rc; +} + +static const struct sdhci_ops sdhci_sirf_ops = { + .read_l = sdhci_sirf_readl_le, + .read_w = sdhci_sirf_readw_le, + .platform_execute_tuning = sdhci_sirf_execute_tuning, + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = sdhci_sirf_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_sirf_pdata = { + .ops = &sdhci_sirf_ops, + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + +static int sdhci_sirf_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct clk *clk; + int ret; + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "unable to get clock"); + return PTR_ERR(clk); + } + + host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, 0); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + pltfm_host->clk = clk; + + sdhci_get_of_property(pdev); + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + goto err_clk_prepare; + + ret = sdhci_add_host(host); + if (ret) + goto err_sdhci_add; + + /* + * We must request the IRQ after sdhci_add_host(), as the tasklet only + * gets setup in sdhci_add_host() and we oops. + */ + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); + if (ret == -EPROBE_DEFER) + goto err_request_cd; + if (!ret) + mmc_gpiod_request_cd_irq(host->mmc); + + return 0; + +err_request_cd: + sdhci_remove_host(host, 0); +err_sdhci_add: + clk_disable_unprepare(pltfm_host->clk); +err_clk_prepare: + sdhci_pltfm_free(pdev); + return ret; +} + +static const struct of_device_id sdhci_sirf_of_match[] = { + { .compatible = "sirf,prima2-sdhc" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match); + +static struct platform_driver sdhci_sirf_driver = { + .driver = { + .name = "sdhci-sirf", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_sirf_of_match, + .pm = &sdhci_pltfm_pmops, + }, + .probe = sdhci_sirf_probe, + .remove = sdhci_pltfm_unregister, +}; + +module_platform_driver(sdhci_sirf_driver); + +MODULE_DESCRIPTION("SDHCI driver for SiRFprimaII/SiRFmarco"); +MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c new file mode 100644 index 000000000..d463e2fd5 --- /dev/null +++ b/drivers/mmc/host/sdhci-spear.c @@ -0,0 +1,196 @@ +/* + * drivers/mmc/host/sdhci-spear.c + * + * Support of SDHCI platform devices for spear soc family + * + * Copyright (C) 2010 ST Microelectronics + * Viresh Kumar + * + * Inspired by sdhci-pltfm.c + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/highmem.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/io.h> +#include "sdhci.h" + +struct spear_sdhci { + struct clk *clk; +}; + +/* sdhci ops */ +static const struct sdhci_ops sdhci_pltfm_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static int sdhci_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct spear_sdhci *sdhci; + struct device *dev; + int ret; + + dev = pdev->dev.parent ? pdev->dev.parent : &pdev->dev; + host = sdhci_alloc_host(dev, sizeof(*sdhci)); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); + goto err; + } + + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->ioaddr)) { + ret = PTR_ERR(host->ioaddr); + dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); + goto err_host; + } + + host->hw_name = "sdhci"; + host->ops = &sdhci_pltfm_ops; + host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { + ret = -EINVAL; + goto err_host; + } + host->quirks = SDHCI_QUIRK_BROKEN_ADMA; + + sdhci = sdhci_priv(host); + + /* clk enable */ + sdhci->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sdhci->clk)) { + ret = PTR_ERR(sdhci->clk); + dev_dbg(&pdev->dev, "Error getting clock\n"); + goto err_host; + } + + ret = clk_prepare_enable(sdhci->clk); + if (ret) { + dev_dbg(&pdev->dev, "Error enabling clock\n"); + goto err_host; + } + + ret = clk_set_rate(sdhci->clk, 50000000); + if (ret) + dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n", + clk_get_rate(sdhci->clk)); + + /* + * It is optional to use GPIOs for sdhci card detection. If we + * find a descriptor using slot GPIO, we use it.
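+ * mmc_gpiod_request_cd() returns -EPROBE_DEFER when the GPIO + * controller is not ready yet; that case must abort the probe so it + * can be retried later, while any other error simply means no CD GPIO + * was described and the controller's native card detection is used.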
+ */ + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); + if (ret == -EPROBE_DEFER) + goto disable_clk; + + ret = sdhci_add_host(host); + if (ret) + goto disable_clk; + + platform_set_drvdata(pdev, host); + + return 0; + +disable_clk: + clk_disable_unprepare(sdhci->clk); +err_host: + sdhci_free_host(host); +err: + dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); + return ret; +} + +static int sdhci_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct spear_sdhci *sdhci = sdhci_priv(host); + int dead = 0; + u32 scratch; + + scratch = readl(host->ioaddr + SDHCI_INT_STATUS); + if (scratch == (u32)-1) + dead = 1; + + sdhci_remove_host(host, dead); + clk_disable_unprepare(sdhci->clk); + sdhci_free_host(host); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct spear_sdhci *sdhci = sdhci_priv(host); + int ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (!ret) + clk_disable(sdhci->clk); + + return ret; +} + +static int sdhci_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct spear_sdhci *sdhci = sdhci_priv(host); + int ret; + + ret = clk_enable(sdhci->clk); + if (ret) { + dev_dbg(dev, "Resume: Error enabling clock\n"); + return ret; + } + + return sdhci_resume_host(host); +} +#endif + +static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_spear_id_table[] = { + { .compatible = "st,spear300-sdhci" }, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_spear_id_table); +#endif + +static struct platform_driver sdhci_driver = { + .driver = { + .name = "sdhci", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_pm_ops, + .of_match_table = of_match_ptr(sdhci_spear_id_table), + }, + .probe = sdhci_probe, + .remove = sdhci_remove, +}; + +module_platform_driver(sdhci_driver); + +MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); +MODULE_AUTHOR("Viresh Kumar "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c new file mode 100644 index 000000000..52bfe356f --- /dev/null +++ b/drivers/mmc/host/sdhci-sprd.c @@ -0,0 +1,833 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Secure Digital Host Controller +// +// Copyright (C) 2018 Spreadtrum, Inc. 
+// Author: Chunyan Zhang + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/highmem.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +#include "sdhci-pltfm.h" +#include "mmc_hsq.h" + +/* SDHCI_ARGUMENT2 register high 16 bits */ +#define SDHCI_SPRD_ARG2_STUFF GENMASK(31, 16) + +#define SDHCI_SPRD_REG_32_DLL_CFG 0x200 +#define SDHCI_SPRD_DLL_ALL_CPST_EN (BIT(18) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) +#define SDHCI_SPRD_DLL_EN BIT(21) +#define SDHCI_SPRD_DLL_SEARCH_MODE BIT(16) +#define SDHCI_SPRD_DLL_INIT_COUNT 0xc00 +#define SDHCI_SPRD_DLL_PHASE_INTERNAL 0x3 + +#define SDHCI_SPRD_REG_32_DLL_DLY 0x204 + +#define SDHCI_SPRD_REG_32_DLL_DLY_OFFSET 0x208 +#define SDHCIBSPRD_IT_WR_DLY_INV BIT(5) +#define SDHCI_SPRD_BIT_CMD_DLY_INV BIT(13) +#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21) +#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29) + +#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250 +#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25) +#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24) + +#define SDHCI_SPRD_REG_DEBOUNCE 0x28C +#define SDHCI_SPRD_BIT_DLL_BAK BIT(0) +#define SDHCI_SPRD_BIT_DLL_VAL BIT(1) + +#define SDHCI_SPRD_INT_SIGNAL_MASK 0x1B7F410B + +/* SDHCI_HOST_CONTROL2 */ +#define SDHCI_SPRD_CTRL_HS200 0x0005 +#define SDHCI_SPRD_CTRL_HS400 0x0006 +#define SDHCI_SPRD_CTRL_HS400ES 0x0007 + +/* + * According to the standard specification, BIT(3) of SDHCI_SOFTWARE_RESET + * is reserved; it is used only in Spreadtrum's design, where the hardware + * cannot work if this bit is cleared. + * 1 : normal work + * 0 : hardware reset + */ +#define SDHCI_HW_RESET_CARD BIT(3) + +#define SDHCI_SPRD_MAX_CUR 0xFFFFFF +#define SDHCI_SPRD_CLK_MAX_DIV 1023 + +#define SDHCI_SPRD_CLK_DEF_RATE 26000000 +#define SDHCI_SPRD_PHY_DLL_CLK 52000000 + +struct sdhci_sprd_host { + u32 version; + struct clk *clk_sdio; + struct clk *clk_enable; + struct clk *clk_2x_enable; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_uhs; + struct pinctrl_state *pins_default; + u32 base_rate; + int flags; /* backup of host attribute */ + u32 phy_delay[MMC_TIMING_MMC_HS400 + 2]; +}; + +struct sdhci_sprd_phy_cfg { + const char *property; + u8 timing; +}; + +static const struct sdhci_sprd_phy_cfg sdhci_sprd_phy_cfgs[] = { + { "sprd,phy-delay-legacy", MMC_TIMING_LEGACY, }, + { "sprd,phy-delay-sd-highspeed", MMC_TIMING_SD_HS, }, + { "sprd,phy-delay-sd-uhs-sdr50", MMC_TIMING_UHS_SDR50, }, + { "sprd,phy-delay-sd-uhs-sdr104", MMC_TIMING_UHS_SDR104, }, + { "sprd,phy-delay-mmc-highspeed", MMC_TIMING_MMC_HS, }, + { "sprd,phy-delay-mmc-ddr52", MMC_TIMING_MMC_DDR52, }, + { "sprd,phy-delay-mmc-hs200", MMC_TIMING_MMC_HS200, }, + { "sprd,phy-delay-mmc-hs400", MMC_TIMING_MMC_HS400, }, + { "sprd,phy-delay-mmc-hs400es", MMC_TIMING_MMC_HS400 + 1, }, +}; + +#define TO_SPRD_HOST(host) sdhci_pltfm_priv(sdhci_priv(host)) + +static void sdhci_sprd_init_config(struct sdhci_host *host) +{ + u16 val; + + /* set dll backup mode */ + val = sdhci_readl(host, SDHCI_SPRD_REG_DEBOUNCE); + val |= SDHCI_SPRD_BIT_DLL_BAK | SDHCI_SPRD_BIT_DLL_VAL; + sdhci_writel(host, val, SDHCI_SPRD_REG_DEBOUNCE); +} + +static inline u32 sdhci_sprd_readl(struct sdhci_host *host, int reg) +{ + if (unlikely(reg == SDHCI_MAX_CURRENT)) + return SDHCI_SPRD_MAX_CUR; + + return readl_relaxed(host->ioaddr + reg); +} + +static inline void sdhci_sprd_writel(struct sdhci_host *host, u32 val, int reg) +{ + /* SDHCI_MAX_CURRENT is reserved on Spreadtrum's platform */ + if (unlikely(reg == SDHCI_MAX_CURRENT)) + return; + + if (unlikely(reg == SDHCI_SIGNAL_ENABLE || reg == SDHCI_INT_ENABLE)) + val = val
& SDHCI_SPRD_INT_SIGNAL_MASK; + + writel_relaxed(val, host->ioaddr + reg); +} + +static inline void sdhci_sprd_writew(struct sdhci_host *host, u16 val, int reg) +{ + /* SDHCI_BLOCK_COUNT is Read Only on Spreadtrum's platform */ + if (unlikely(reg == SDHCI_BLOCK_COUNT)) + return; + + writew_relaxed(val, host->ioaddr + reg); +} + +static inline void sdhci_sprd_writeb(struct sdhci_host *host, u8 val, int reg) +{ + /* + * Since BIT(3) of SDHCI_SOFTWARE_RESET is reserved according to the + * standard specification, sdhci_reset() write this register directly + * without checking other reserved bits, that will clear BIT(3) which + * is defined as hardware reset on Spreadtrum's platform and clearing + * it by mistake will lead the card not work. So here we need to work + * around it. + */ + if (unlikely(reg == SDHCI_SOFTWARE_RESET)) { + if (readb_relaxed(host->ioaddr + reg) & SDHCI_HW_RESET_CARD) + val |= SDHCI_HW_RESET_CARD; + } + + writeb_relaxed(val, host->ioaddr + reg); +} + +static inline void sdhci_sprd_sd_clk_off(struct sdhci_host *host) +{ + u16 ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + + ctrl &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL); +} + +static inline void sdhci_sprd_sd_clk_on(struct sdhci_host *host) +{ + u16 ctrl; + + ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + ctrl |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL); +} + +static inline void +sdhci_sprd_set_dll_invert(struct sdhci_host *host, u32 mask, bool en) +{ + u32 dll_dly_offset; + + dll_dly_offset = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_DLY_OFFSET); + if (en) + dll_dly_offset |= mask; + else + dll_dly_offset &= ~mask; + sdhci_writel(host, dll_dly_offset, SDHCI_SPRD_REG_32_DLL_DLY_OFFSET); +} + +static inline u32 sdhci_sprd_calc_div(u32 base_clk, u32 clk) +{ + u32 div; + + /* select 2x clock source */ + if (base_clk <= clk * 2) + return 0; + + div = (u32) (base_clk / (clk * 2)); + + if ((base_clk / div) > (clk * 2)) + div++; + + if (div > SDHCI_SPRD_CLK_MAX_DIV) + div = SDHCI_SPRD_CLK_MAX_DIV; + + if (div % 2) + div = (div + 1) / 2; + else + div = div / 2; + + return div; +} + +static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, + unsigned int clk) +{ + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + u32 div, val, mask; + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); + div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); + sdhci_enable_clk(host, div); + + val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); + mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; + /* Enable CLK_AUTO when the clock is greater than 400K. 
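+ * (400 kHz is the card identification rate). As a worked example of + * the divider logic above: with the 26 MHz default base clock and a + * 400 kHz target, sdhci_sprd_calc_div() computes 26000000 / 800000 = 32, + * bumps it to 33 because 26000000 / 32 still exceeds 800000, and rounds + * the odd result up to (33 + 1) / 2 = 17 for the 2x clock source.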
*/ + if (clk > 400000) { + if (mask != (val & mask)) { + val |= mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } + } else { + if (val & mask) { + val &= ~mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } + } +} + +static void sdhci_sprd_enable_phy_dll(struct sdhci_host *host) +{ + u32 tmp; + + tmp = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG); + tmp &= ~(SDHCI_SPRD_DLL_EN | SDHCI_SPRD_DLL_ALL_CPST_EN); + sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG); + /* wait 1ms */ + usleep_range(1000, 1250); + + tmp = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG); + tmp |= SDHCI_SPRD_DLL_ALL_CPST_EN | SDHCI_SPRD_DLL_SEARCH_MODE | + SDHCI_SPRD_DLL_INIT_COUNT | SDHCI_SPRD_DLL_PHASE_INTERNAL; + sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG); + /* wait 1ms */ + usleep_range(1000, 1250); + + tmp = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG); + tmp |= SDHCI_SPRD_DLL_EN; + sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG); + /* wait 1ms */ + usleep_range(1000, 1250); +} + +static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock) +{ + bool en = false, clk_changed = false; + + if (clock == 0) { + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + } else if (clock != host->clock) { + sdhci_sprd_sd_clk_off(host); + _sdhci_sprd_set_clock(host, clock); + + if (clock <= 400000) + en = true; + sdhci_sprd_set_dll_invert(host, SDHCI_SPRD_BIT_CMD_DLY_INV | + SDHCI_SPRD_BIT_POSRD_DLY_INV, en); + clk_changed = true; + } else { + _sdhci_sprd_set_clock(host, clock); + } + + /* + * According to the Spreadtrum SD host specification, when we changed + * the clock to be more than 52M, we should enable the PHY DLL which + * is used to track the clock frequency to make the clock work more + * stable. Otherwise deviation may occur of the higher clock. 
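+ * SDHCI_SPRD_PHY_DLL_CLK is 52 MHz, so the DLL is engaged for SDR50, + * SDR104, HS200 and HS400 rates, while legacy, high-speed and DDR52 + * clocks leave it disabled.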
+ */ + if (clk_changed && clock > SDHCI_SPRD_PHY_DLL_CLK) + sdhci_sprd_enable_phy_dll(host); +} + +static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + return clk_round_rate(sprd_host->clk_sdio, ULONG_MAX); +} + +static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host) +{ + return 100000; +} + +static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + struct mmc_host *mmc = host->mmc; + u32 *p = sprd_host->phy_delay; + u16 ctrl_2; + + if (timing == host->timing) + return; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (timing) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + break; + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + break; + case MMC_TIMING_MMC_HS200: + ctrl_2 |= SDHCI_SPRD_CTRL_HS200; + break; + case MMC_TIMING_MMC_HS400: + ctrl_2 |= SDHCI_SPRD_CTRL_HS400; + break; + default: + break; + } + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + + if (!mmc->ios.enhanced_strobe) + sdhci_writel(host, p[timing], SDHCI_SPRD_REG_32_DLL_DLY); +} + +static void sdhci_sprd_hw_reset(struct sdhci_host *host) +{ + int val; + + /* + * Note: don't use the sdhci_writeb() API here since it is redirected to + * sdhci_sprd_writeb(), in which we have a workaround for + * SDHCI_SOFTWARE_RESET that would prevent the SDHCI_HW_RESET_CARD bit + * from being cleared. + */ + val = readb_relaxed(host->ioaddr + SDHCI_SOFTWARE_RESET); + val &= ~SDHCI_HW_RESET_CARD; + writeb_relaxed(val, host->ioaddr + SDHCI_SOFTWARE_RESET); + /* wait for 10 us */ + usleep_range(10, 20); + + val |= SDHCI_HW_RESET_CARD; + writeb_relaxed(val, host->ioaddr + SDHCI_SOFTWARE_RESET); + usleep_range(300, 500); +} + +static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host) +{ + /* The Spreadtrum controller's actual maximum timeout count is 1 << 31 */ + return 1 << 31; +} + +static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host) +{ + return 0; +} + +static void sdhci_sprd_request_done(struct sdhci_host *host, + struct mmc_request *mrq) +{ + /* First check whether the request was finalized by the software queue.
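+ * mmc_hsq_finalize_request() returns true when the software queue + * owned this request and has completed it, in which case the normal + * mmc_request_done() path below must be skipped.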
*/ + if (mmc_hsq_finalize_request(host->mmc, mrq)) + return; + + mmc_request_done(host->mmc, mrq); +} + +static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + switch (mode) { + case MMC_POWER_OFF: + mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0); + + mmc_regulator_disable_vqmmc(mmc); + break; + case MMC_POWER_ON: + mmc_regulator_enable_vqmmc(mmc); + break; + case MMC_POWER_UP: + mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd); + break; + } +} + +static struct sdhci_ops sdhci_sprd_ops = { + .read_l = sdhci_sprd_readl, + .write_l = sdhci_sprd_writel, + .write_w = sdhci_sprd_writew, + .write_b = sdhci_sprd_writeb, + .set_clock = sdhci_sprd_set_clock, + .set_power = sdhci_sprd_set_power, + .get_max_clock = sdhci_sprd_get_max_clock, + .get_min_clock = sdhci_sprd_get_min_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, + .hw_reset = sdhci_sprd_hw_reset, + .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, + .get_ro = sdhci_sprd_get_ro, + .request_done = sdhci_sprd_request_done, +}; + +static void sdhci_sprd_check_auto_cmd23(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + host->flags |= sprd_host->flags & SDHCI_AUTO_CMD23; + + /* + * From version 4.10 onward, ARGUMENT2 register is also as 32-bit + * block count register which doesn't support stuff bits of + * CMD23 argument on Spreadtrum's sd host controller. + */ + if (host->version >= SDHCI_SPEC_410 && + mrq->sbc && (mrq->sbc->arg & SDHCI_SPRD_ARG2_STUFF) && + (host->flags & SDHCI_AUTO_CMD23)) + host->flags &= ~SDHCI_AUTO_CMD23; +} + +static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + sdhci_sprd_check_auto_cmd23(mmc, mrq); + + sdhci_request(mmc, mrq); +} + +static int sdhci_sprd_request_atomic(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + sdhci_sprd_check_auto_cmd23(mmc, mrq); + + return sdhci_request_atomic(mmc, mrq); +} + +static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + int ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) { + pr_err("%s: Switching signalling voltage failed\n", + mmc_hostname(mmc)); + return ret; + } + } + + if (IS_ERR(sprd_host->pinctrl)) + goto reset; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_180: + ret = pinctrl_select_state(sprd_host->pinctrl, + sprd_host->pins_uhs); + if (ret) { + pr_err("%s: failed to select uhs pin state\n", + mmc_hostname(mmc)); + return ret; + } + break; + + default: + fallthrough; + case MMC_SIGNAL_VOLTAGE_330: + ret = pinctrl_select_state(sprd_host->pinctrl, + sprd_host->pins_default); + if (ret) { + pr_err("%s: failed to select default pin state\n", + mmc_hostname(mmc)); + return ret; + } + break; + } + + /* Wait for 300 ~ 500 us for pin state stable */ + usleep_range(300, 500); + +reset: + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + + return 0; +} + +static void sdhci_sprd_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + u32 *p = sprd_host->phy_delay; + u16 ctrl_2; + + if (!ios->enhanced_strobe) + return; + + 
sdhci_sprd_sd_clk_off(host); + + /* Set HS400 enhanced strobe mode */ + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + ctrl_2 |= SDHCI_SPRD_CTRL_HS400ES; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + + sdhci_sprd_sd_clk_on(host); + + /* Set the PHY DLL delay value for HS400 enhanced strobe mode */ + sdhci_writel(host, p[MMC_TIMING_MMC_HS400 + 1], + SDHCI_SPRD_REG_32_DLL_DLY); +} + +static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host, + struct device_node *np) +{ + u32 *p = sprd_host->phy_delay; + int ret, i, index; + u32 val[4]; + + for (i = 0; i < ARRAY_SIZE(sdhci_sprd_phy_cfgs); i++) { + ret = of_property_read_u32_array(np, + sdhci_sprd_phy_cfgs[i].property, val, 4); + if (ret) + continue; + + index = sdhci_sprd_phy_cfgs[i].timing; + p[index] = val[0] | (val[1] << 8) | (val[2] << 16) | (val[3] << 24); + } +} + +static const struct sdhci_pltfm_data sdhci_sprd_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_MISSING_CAPS, + .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | + SDHCI_QUIRK2_USE_32BIT_BLK_CNT | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &sdhci_sprd_ops, +}; + +static int sdhci_sprd_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_sprd_host *sprd_host; + struct mmc_hsq *hsq; + struct clk *clk; + int ret = 0; + + host = sdhci_pltfm_init(pdev, &sdhci_sprd_pdata, sizeof(*sprd_host)); + if (IS_ERR(host)) + return PTR_ERR(host); + + host->dma_mask = DMA_BIT_MASK(64); + pdev->dev.dma_mask = &host->dma_mask; + host->mmc_host_ops.request = sdhci_sprd_request; + host->mmc_host_ops.hs400_enhanced_strobe = + sdhci_sprd_hs400_enhanced_strobe; + /* + * We can not use the standard ops to change and detect the voltage + * signal for Spreadtrum SD host controller, since our voltage regulator + * for I/O is fixed in hardware, that means we do not need control + * the standard SD host controller to change the I/O voltage. 
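+ * Instead, sdhci_sprd_voltage_switch() above drives the external + * regulator via mmc_regulator_set_vqmmc() and selects the matching + * "state_uhs" or "default" pinctrl state for the pads.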
+ */ + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_sprd_voltage_switch; + + host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_WAIT_WHILE_BUSY; + + ret = mmc_of_parse(host->mmc); + if (ret) + goto pltfm_free; + + if (!mmc_card_is_removable(host->mmc)) + host->mmc_host_ops.request_atomic = sdhci_sprd_request_atomic; + else + host->always_defer_done = true; + + sprd_host = TO_SPRD_HOST(host); + sdhci_sprd_phy_param_parse(sprd_host, pdev->dev.of_node); + + sprd_host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (!IS_ERR(sprd_host->pinctrl)) { + sprd_host->pins_uhs = + pinctrl_lookup_state(sprd_host->pinctrl, "state_uhs"); + if (IS_ERR(sprd_host->pins_uhs)) { + ret = PTR_ERR(sprd_host->pins_uhs); + goto pltfm_free; + } + + sprd_host->pins_default = + pinctrl_lookup_state(sprd_host->pinctrl, "default"); + if (IS_ERR(sprd_host->pins_default)) { + ret = PTR_ERR(sprd_host->pins_default); + goto pltfm_free; + } + } + + clk = devm_clk_get(&pdev->dev, "sdio"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto pltfm_free; + } + sprd_host->clk_sdio = clk; + sprd_host->base_rate = clk_get_rate(sprd_host->clk_sdio); + if (!sprd_host->base_rate) + sprd_host->base_rate = SDHCI_SPRD_CLK_DEF_RATE; + + clk = devm_clk_get(&pdev->dev, "enable"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto pltfm_free; + } + sprd_host->clk_enable = clk; + + clk = devm_clk_get(&pdev->dev, "2x_enable"); + if (!IS_ERR(clk)) + sprd_host->clk_2x_enable = clk; + + ret = clk_prepare_enable(sprd_host->clk_sdio); + if (ret) + goto pltfm_free; + + ret = clk_prepare_enable(sprd_host->clk_enable); + if (ret) + goto clk_disable; + + ret = clk_prepare_enable(sprd_host->clk_2x_enable); + if (ret) + goto clk_disable2; + + sdhci_sprd_init_config(host); + host->version = sdhci_readw(host, SDHCI_HOST_VERSION); + sprd_host->version = ((host->version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + + sdhci_enable_v4_mode(host); + + /* + * Supply the existing CAPS, but clear the UHS-I modes. This + * will allow these modes to be specified only by device + * tree properties through mmc_of_parse(). 
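+ * For example, a board validated for SDR104 advertises it with an + * "sd-uhs-sdr104" device tree property; without such a property the + * mode stays masked even if the controller claims support for it.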
+ */ + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + + ret = mmc_regulator_get_supply(host->mmc); + if (ret) + goto pm_runtime_disable; + + ret = sdhci_setup_host(host); + if (ret) + goto pm_runtime_disable; + + sprd_host->flags = host->flags; + + hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL); + if (!hsq) { + ret = -ENOMEM; + goto err_cleanup_host; + } + + ret = mmc_hsq_init(hsq, host->mmc); + if (ret) + goto err_cleanup_host; + + ret = __sdhci_add_host(host); + if (ret) + goto err_cleanup_host; + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +err_cleanup_host: + sdhci_cleanup_host(host); + +pm_runtime_disable: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + + clk_disable_unprepare(sprd_host->clk_2x_enable); + +clk_disable2: + clk_disable_unprepare(sprd_host->clk_enable); + +clk_disable: + clk_disable_unprepare(sprd_host->clk_sdio); + +pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_sprd_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + sdhci_remove_host(host, 0); + + clk_disable_unprepare(sprd_host->clk_sdio); + clk_disable_unprepare(sprd_host->clk_enable); + clk_disable_unprepare(sprd_host->clk_2x_enable); + + sdhci_pltfm_free(pdev); + + return 0; +} + +static const struct of_device_id sdhci_sprd_of_match[] = { + { .compatible = "sprd,sdhci-r11", }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_sprd_of_match); + +#ifdef CONFIG_PM +static int sdhci_sprd_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + + mmc_hsq_suspend(host->mmc); + sdhci_runtime_suspend_host(host); + + clk_disable_unprepare(sprd_host->clk_sdio); + clk_disable_unprepare(sprd_host->clk_enable); + clk_disable_unprepare(sprd_host->clk_2x_enable); + + return 0; +} + +static int sdhci_sprd_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); + int ret; + + ret = clk_prepare_enable(sprd_host->clk_2x_enable); + if (ret) + return ret; + + ret = clk_prepare_enable(sprd_host->clk_enable); + if (ret) + goto clk_2x_disable; + + ret = clk_prepare_enable(sprd_host->clk_sdio); + if (ret) + goto clk_disable; + + sdhci_runtime_resume_host(host, 1); + mmc_hsq_resume(host->mmc); + + return 0; + +clk_disable: + clk_disable_unprepare(sprd_host->clk_enable); + +clk_2x_disable: + clk_disable_unprepare(sprd_host->clk_2x_enable); + + return ret; +} +#endif + +static const struct dev_pm_ops sdhci_sprd_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend, + sdhci_sprd_runtime_resume, NULL) +}; + +static struct platform_driver sdhci_sprd_driver = { + .probe = sdhci_sprd_probe, + .remove = sdhci_sprd_remove, + .driver = { + .name = "sdhci_sprd_r11", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(sdhci_sprd_of_match), + .pm = &sdhci_sprd_pm_ops, + }, +}; +module_platform_driver(sdhci_sprd_driver); + +MODULE_DESCRIPTION("Spreadtrum sdio host controller r11 driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci-sprd-r11"); diff --git 
a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c new file mode 100644 index 000000000..4e9ff3e82 --- /dev/null +++ b/drivers/mmc/host/sdhci-st.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for SDHCI on STMicroelectronics SoCs + * + * Copyright (C) 2014 STMicroelectronics Ltd + * Author: Giuseppe Cavallaro + * Contributors: Peter Griffin + * + * Based on sdhci-cns3xxx.c + */ + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/mmc/host.h> +#include <linux/reset.h> +#include "sdhci-pltfm.h" + +struct st_mmc_platform_data { + struct reset_control *rstc; + struct clk *icnclk; + void __iomem *top_ioaddr; +}; + +/* MMCSS glue logic to setup the HC on some ST SoCs (e.g. STiH407 family) */ + +#define ST_MMC_CCONFIG_REG_1 0x400 +#define ST_MMC_CCONFIG_TIMEOUT_CLK_UNIT BIT(24) +#define ST_MMC_CCONFIG_TIMEOUT_CLK_FREQ BIT(12) +#define ST_MMC_CCONFIG_TUNING_COUNT_DEFAULT BIT(8) +#define ST_MMC_CCONFIG_ASYNC_WAKEUP BIT(0) +#define ST_MMC_CCONFIG_1_DEFAULT \ + ((ST_MMC_CCONFIG_TIMEOUT_CLK_UNIT) | \ + (ST_MMC_CCONFIG_TIMEOUT_CLK_FREQ) | \ + (ST_MMC_CCONFIG_TUNING_COUNT_DEFAULT)) + +#define ST_MMC_CCONFIG_REG_2 0x404 +#define ST_MMC_CCONFIG_HIGH_SPEED BIT(28) +#define ST_MMC_CCONFIG_ADMA2 BIT(24) +#define ST_MMC_CCONFIG_8BIT BIT(20) +#define ST_MMC_CCONFIG_MAX_BLK_LEN 16 +#define MAX_BLK_LEN_1024 1 +#define MAX_BLK_LEN_2048 2 +#define BASE_CLK_FREQ_200 0xc8 +#define BASE_CLK_FREQ_100 0x64 +#define BASE_CLK_FREQ_50 0x32 +#define ST_MMC_CCONFIG_2_DEFAULT \ + (ST_MMC_CCONFIG_HIGH_SPEED | ST_MMC_CCONFIG_ADMA2 | \ + ST_MMC_CCONFIG_8BIT | \ + (MAX_BLK_LEN_1024 << ST_MMC_CCONFIG_MAX_BLK_LEN)) + +#define ST_MMC_CCONFIG_REG_3 0x408 +#define ST_MMC_CCONFIG_EMMC_SLOT_TYPE BIT(28) +#define ST_MMC_CCONFIG_64BIT BIT(24) +#define ST_MMC_CCONFIG_ASYNCH_INTR_SUPPORT BIT(20) +#define ST_MMC_CCONFIG_1P8_VOLT BIT(16) +#define ST_MMC_CCONFIG_3P0_VOLT BIT(12) +#define ST_MMC_CCONFIG_3P3_VOLT BIT(8) +#define ST_MMC_CCONFIG_SUSP_RES_SUPPORT BIT(4) +#define ST_MMC_CCONFIG_SDMA BIT(0) +#define ST_MMC_CCONFIG_3_DEFAULT \ + (ST_MMC_CCONFIG_ASYNCH_INTR_SUPPORT | \ + ST_MMC_CCONFIG_3P3_VOLT | \ + ST_MMC_CCONFIG_SUSP_RES_SUPPORT | \ + ST_MMC_CCONFIG_SDMA) + +#define ST_MMC_CCONFIG_REG_4 0x40c +#define ST_MMC_CCONFIG_D_DRIVER BIT(20) +#define ST_MMC_CCONFIG_C_DRIVER BIT(16) +#define ST_MMC_CCONFIG_A_DRIVER BIT(12) +#define ST_MMC_CCONFIG_DDR50 BIT(8) +#define ST_MMC_CCONFIG_SDR104 BIT(4) +#define ST_MMC_CCONFIG_SDR50 BIT(0) +#define ST_MMC_CCONFIG_4_DEFAULT 0 + +#define ST_MMC_CCONFIG_REG_5 0x410 +#define ST_MMC_CCONFIG_TUNING_FOR_SDR50 BIT(8) +#define RETUNING_TIMER_CNT_MAX 0xf +#define ST_MMC_CCONFIG_5_DEFAULT 0 + +/* I/O configuration for Arasan IP */ +#define ST_MMC_GP_OUTPUT 0x450 +#define ST_MMC_GP_OUTPUT_CD BIT(12) + +#define ST_MMC_STATUS_R 0x460 + +#define ST_TOP_MMC_DLY_FIX_OFF(x) (x - 0x8) + +/* TOP config registers to manage static and dynamic delay */ +#define ST_TOP_MMC_TX_CLK_DLY ST_TOP_MMC_DLY_FIX_OFF(0x8) +#define ST_TOP_MMC_RX_CLK_DLY ST_TOP_MMC_DLY_FIX_OFF(0xc) +/* MMC delay control register */ +#define ST_TOP_MMC_DLY_CTRL ST_TOP_MMC_DLY_FIX_OFF(0x18) +#define ST_TOP_MMC_DLY_CTRL_DLL_BYPASS_CMD BIT(0) +#define ST_TOP_MMC_DLY_CTRL_DLL_BYPASS_PH_SEL BIT(1) +#define ST_TOP_MMC_DLY_CTRL_TX_DLL_ENABLE BIT(8) +#define ST_TOP_MMC_DLY_CTRL_RX_DLL_ENABLE BIT(9) +#define ST_TOP_MMC_DLY_CTRL_ATUNE_NOT_CFG_DLY BIT(10) +#define ST_TOP_MMC_START_DLL_LOCK BIT(11) + +/* register to provide the phase-shift value for DLL */ +#define ST_TOP_MMC_TX_DLL_STEP_DLY ST_TOP_MMC_DLY_FIX_OFF(0x1c) +#define
ST_TOP_MMC_RX_DLL_STEP_DLY ST_TOP_MMC_DLY_FIX_OFF(0x20) +#define ST_TOP_MMC_RX_CMD_STEP_DLY ST_TOP_MMC_DLY_FIX_OFF(0x24) + +/* phase shift delay on the tx clk 2.188ns */ +#define ST_TOP_MMC_TX_DLL_STEP_DLY_VALID 0x6 + +#define ST_TOP_MMC_DLY_MAX 0xf + +#define ST_TOP_MMC_DYN_DLY_CONF \ + (ST_TOP_MMC_DLY_CTRL_TX_DLL_ENABLE | \ + ST_TOP_MMC_DLY_CTRL_ATUNE_NOT_CFG_DLY | \ + ST_TOP_MMC_START_DLL_LOCK) + +/* + * For clock speeds greater than 90MHz, we need to check that the + * DLL procedure has finished before switching to ultra-speed modes. + */ +#define CLK_TO_CHECK_DLL_LOCK 90000000 + +static inline void st_mmcss_set_static_delay(void __iomem *ioaddr) +{ + if (!ioaddr) + return; + + writel_relaxed(0x0, ioaddr + ST_TOP_MMC_DLY_CTRL); + writel_relaxed(ST_TOP_MMC_DLY_MAX, + ioaddr + ST_TOP_MMC_TX_CLK_DLY); +} + +/** + * st_mmcss_cconfig: configure the Arasan HC inside the flashSS. + * @np: dt device node. + * @host: sdhci host + * Description: this function is to configure the Arasan host controller. + * On some ST SoCs, i.e. STiH407 family, the MMC devices inside a dedicated + * flashSS sub-system which needs to be configured to be compliant to eMMC 4.5 + * or eMMC4.3. This has to be done before registering the sdhci host. + */ +static void st_mmcss_cconfig(struct device_node *np, struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct mmc_host *mhost = host->mmc; + u32 cconf2, cconf3, cconf4, cconf5; + + if (!of_device_is_compatible(np, "st,sdhci-stih407")) + return; + + cconf2 = ST_MMC_CCONFIG_2_DEFAULT; + cconf3 = ST_MMC_CCONFIG_3_DEFAULT; + cconf4 = ST_MMC_CCONFIG_4_DEFAULT; + cconf5 = ST_MMC_CCONFIG_5_DEFAULT; + + writel_relaxed(ST_MMC_CCONFIG_1_DEFAULT, + host->ioaddr + ST_MMC_CCONFIG_REG_1); + + /* Set clock frequency, default to 50MHz if max-frequency is not + * provided */ + + switch (mhost->f_max) { + case 200000000: + clk_set_rate(pltfm_host->clk, mhost->f_max); + cconf2 |= BASE_CLK_FREQ_200; + break; + case 100000000: + clk_set_rate(pltfm_host->clk, mhost->f_max); + cconf2 |= BASE_CLK_FREQ_100; + break; + default: + clk_set_rate(pltfm_host->clk, 50000000); + cconf2 |= BASE_CLK_FREQ_50; + } + + writel_relaxed(cconf2, host->ioaddr + ST_MMC_CCONFIG_REG_2); + + if (!mmc_card_is_removable(mhost)) + cconf3 |= ST_MMC_CCONFIG_EMMC_SLOT_TYPE; + else + /* CARD _D ET_CTRL */ + writel_relaxed(ST_MMC_GP_OUTPUT_CD, + host->ioaddr + ST_MMC_GP_OUTPUT); + + if (mhost->caps & MMC_CAP_UHS_SDR50) { + /* use 1.8V */ + cconf3 |= ST_MMC_CCONFIG_1P8_VOLT; + cconf4 |= ST_MMC_CCONFIG_SDR50; + /* Use tuning */ + cconf5 |= ST_MMC_CCONFIG_TUNING_FOR_SDR50; + /* Max timeout for retuning */ + cconf5 |= RETUNING_TIMER_CNT_MAX; + } + + if (mhost->caps & MMC_CAP_UHS_SDR104) { + /* + * SDR104 implies the HC can support HS200 mode, so + * it's mandatory to use 1.8V + */ + cconf3 |= ST_MMC_CCONFIG_1P8_VOLT; + cconf4 |= ST_MMC_CCONFIG_SDR104; + /* Max timeout for retuning */ + cconf5 |= RETUNING_TIMER_CNT_MAX; + } + + if (mhost->caps & MMC_CAP_UHS_DDR50) + cconf4 |= ST_MMC_CCONFIG_DDR50; + + writel_relaxed(cconf3, host->ioaddr + ST_MMC_CCONFIG_REG_3); + writel_relaxed(cconf4, host->ioaddr + ST_MMC_CCONFIG_REG_4); + writel_relaxed(cconf5, host->ioaddr + ST_MMC_CCONFIG_REG_5); +} + +static inline void st_mmcss_set_dll(void __iomem *ioaddr) +{ + if (!ioaddr) + return; + + writel_relaxed(ST_TOP_MMC_DYN_DLY_CONF, ioaddr + ST_TOP_MMC_DLY_CTRL); + writel_relaxed(ST_TOP_MMC_TX_DLL_STEP_DLY_VALID, + ioaddr + ST_TOP_MMC_TX_DLL_STEP_DLY); +} + +static int st_mmcss_lock_dll(void __iomem 
*ioaddr) +{ + unsigned long curr, value; + unsigned long finish = jiffies + HZ; + + /* Checks if the DLL procedure is finished */ + do { + curr = jiffies; + value = readl(ioaddr + ST_MMC_STATUS_R); + if (value & 0x1) + return 0; + + cpu_relax(); + } while (!time_after_eq(curr, finish)); + + return -EBUSY; +} + +static int sdhci_st_set_dll_for_clock(struct sdhci_host *host) +{ + int ret = 0; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); + + if (host->clock > CLK_TO_CHECK_DLL_LOCK) { + st_mmcss_set_dll(pdata->top_ioaddr); + ret = st_mmcss_lock_dll(host->ioaddr); + } + + return ret; +} + +static void sdhci_st_set_uhs_signaling(struct sdhci_host *host, + unsigned int uhs) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); + u16 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + int ret = 0; + + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (uhs) { + /* + * Set V18_EN -- UHS modes do not work without this. + * does not change signaling voltage + */ + + case MMC_TIMING_UHS_SDR12: + st_mmcss_set_static_delay(pdata->top_ioaddr); + ctrl_2 |= SDHCI_CTRL_UHS_SDR12 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_SDR25: + st_mmcss_set_static_delay(pdata->top_ioaddr); + ctrl_2 |= SDHCI_CTRL_UHS_SDR25 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_SDR50: + st_mmcss_set_static_delay(pdata->top_ioaddr); + ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; + ret = sdhci_st_set_dll_for_clock(host); + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + st_mmcss_set_static_delay(pdata->top_ioaddr); + ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; + ret = sdhci_st_set_dll_for_clock(host); + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + st_mmcss_set_static_delay(pdata->top_ioaddr); + ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; + break; + } + + if (ret) + dev_warn(mmc_dev(host->mmc), "Error setting dll for clock " + "(uhs %d)\n", uhs); + + dev_dbg(mmc_dev(host->mmc), "uhs %d, ctrl_2 %04X\n", uhs, ctrl_2); + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static u32 sdhci_st_readl(struct sdhci_host *host, int reg) +{ + u32 ret; + + switch (reg) { + case SDHCI_CAPABILITIES: + ret = readl_relaxed(host->ioaddr + reg); + /* Support 3.3V and 1.8V */ + ret &= ~SDHCI_CAN_VDD_300; + break; + default: + ret = readl_relaxed(host->ioaddr + reg); + } + return ret; +} + +static const struct sdhci_ops sdhci_st_ops = { + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .read_l = sdhci_st_readl, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_st_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_st_pdata = { + .ops = &sdhci_st_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_HISPD_BIT, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_STOP_WITH_TC, +}; + + +static int sdhci_st_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct sdhci_host *host; + struct st_mmc_platform_data *pdata; + struct sdhci_pltfm_host *pltfm_host; + struct clk *clk, *icnclk; + int ret = 0; + u16 host_version; + struct resource *res; + struct reset_control *rstc; + + clk = devm_clk_get(&pdev->dev, "mmc"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Peripheral clk not found\n"); + return 
PTR_ERR(clk); + } + + /* ICN clock isn't compulsory, but use it if it's provided. */ + icnclk = devm_clk_get(&pdev->dev, "icn"); + if (IS_ERR(icnclk)) + icnclk = NULL; + + rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(rstc)) + rstc = NULL; + else + reset_control_deassert(rstc); + + host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, sizeof(*pdata)); + if (IS_ERR(host)) { + dev_err(&pdev->dev, "Failed sdhci_pltfm_init\n"); + ret = PTR_ERR(host); + goto err_pltfm_init; + } + + pltfm_host = sdhci_priv(host); + pdata = sdhci_pltfm_priv(pltfm_host); + pdata->rstc = rstc; + + ret = mmc_of_parse(host->mmc); + if (ret) { + dev_err(&pdev->dev, "Failed mmc_of_parse\n"); + goto err_of; + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare clock\n"); + goto err_of; + } + + ret = clk_prepare_enable(icnclk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare icn clock\n"); + goto err_icnclk; + } + + /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "top-mmc-delay"); + pdata->top_ioaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->top_ioaddr)) { + dev_warn(&pdev->dev, "FlashSS Top Dly registers not available"); + pdata->top_ioaddr = NULL; + } + + pltfm_host->clk = clk; + pdata->icnclk = icnclk; + + /* Configure the Arasan HC inside the flashSS */ + st_mmcss_cconfig(np, host); + + ret = sdhci_add_host(host); + if (ret) + goto err_out; + + host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); + + dev_info(&pdev->dev, "SDHCI ST Initialised: Host Version: 0x%x Vendor Version 0x%x\n", + ((host_version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT), + ((host_version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT)); + + return 0; + +err_out: + clk_disable_unprepare(icnclk); +err_icnclk: + clk_disable_unprepare(clk); +err_of: + sdhci_pltfm_free(pdev); +err_pltfm_init: + if (rstc) + reset_control_assert(rstc); + + return ret; +} + +static int sdhci_st_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); + struct reset_control *rstc = pdata->rstc; + int ret; + + ret = sdhci_pltfm_unregister(pdev); + + clk_disable_unprepare(pdata->icnclk); + + if (rstc) + reset_control_assert(rstc); + + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_st_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); + int ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + ret = sdhci_suspend_host(host); + if (ret) + goto out; + + if (pdata->rstc) + reset_control_assert(pdata->rstc); + + clk_disable_unprepare(pdata->icnclk); + clk_disable_unprepare(pltfm_host->clk); +out: + return ret; +} + +static int sdhci_st_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); + struct device_node *np = dev->of_node; + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = clk_prepare_enable(pdata->icnclk); + if (ret) { + clk_disable_unprepare(pltfm_host->clk); + return ret; + } + + if (pdata->rstc) + 
reset_control_deassert(pdata->rstc); + + st_mmcss_cconfig(np, host); + + return sdhci_resume_host(host); +} +#endif + +static SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume); + +static const struct of_device_id st_sdhci_match[] = { + { .compatible = "st,sdhci" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, st_sdhci_match); + +static struct platform_driver sdhci_st_driver = { + .probe = sdhci_st_probe, + .remove = sdhci_st_remove, + .driver = { + .name = "sdhci-st", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sdhci_st_pmops, + .of_match_table = of_match_ptr(st_sdhci_match), + }, +}; + +module_platform_driver(sdhci_st_driver); + +MODULE_DESCRIPTION("SDHCI driver for STMicroelectronics SoCs"); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci-st"); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c new file mode 100644 index 000000000..d8fd2b5ef --- /dev/null +++ b/drivers/mmc/host/sdhci-tegra.c @@ -0,0 +1,1839 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2010 Google, Inc. + */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/mmc/card.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/ktime.h> + +#include "sdhci-cqhci.h" +#include "sdhci-pltfm.h" +#include "cqhci.h" + +/* Tegra SDHOST controller vendor register definitions */ +#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100 +#define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000 +#define SDHCI_CLOCK_CTRL_TAP_SHIFT 16 +#define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000 +#define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24 +#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5) +#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3) +#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2) + +#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104 +#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31) + +#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c +#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00 +#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8 + +#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 +#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0) +#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 +#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 +#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 +#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200 + +#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0 +#define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31) + +#define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc +#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31) + +#define SDHCI_VNDR_TUN_CTRL0_0 0x1c0 +#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000 +#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000 +#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18 +#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0 +#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6 +#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000 +#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13 +#define TRIES_128 2 +#define TRIES_256 4 +#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7 + +#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4 +#define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8 +#define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC +#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF +#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8 +#define TUNING_WORD_BIT_SIZE 32 + +#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4 +#define SDHCI_AUTO_CAL_START BIT(31) +#define SDHCI_AUTO_CAL_ENABLE BIT(29) +#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff + +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0
+#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7 +#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31) +#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000 + +#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec +#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31) + +#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) +#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) +#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) +#define NVQUIRK_ENABLE_SDR50 BIT(3) +#define NVQUIRK_ENABLE_SDR104 BIT(4) +#define NVQUIRK_ENABLE_DDR50 BIT(5) +/* + * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads + * drive strength. + */ +#define NVQUIRK_HAS_PADCALIB BIT(6) +/* + * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads. + * 3V3/1V8 pad selection happens through pinctrl state selection depending + * on the signaling mode. + */ +#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7) +#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) +#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9) + +/* + * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra + * SDMMC hardware data timeout. + */ +#define NVQUIRK_HAS_TMCLK BIT(10) + +/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ +#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 + +#define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \ + SDHCI_TRNS_BLK_CNT_EN | \ + SDHCI_TRNS_DMA) + +struct sdhci_tegra_soc_data { + const struct sdhci_pltfm_data *pdata; + u64 dma_mask; + u32 nvquirks; + u8 min_tap_delay; + u8 max_tap_delay; +}; + +/* Magic pull up and pull down pad calibration offsets */ +struct sdhci_tegra_autocal_offsets { + u32 pull_up_3v3; + u32 pull_down_3v3; + u32 pull_up_3v3_timeout; + u32 pull_down_3v3_timeout; + u32 pull_up_1v8; + u32 pull_down_1v8; + u32 pull_up_1v8_timeout; + u32 pull_down_1v8_timeout; + u32 pull_up_sdr104; + u32 pull_down_sdr104; + u32 pull_up_hs400; + u32 pull_down_hs400; +}; + +struct sdhci_tegra { + const struct sdhci_tegra_soc_data *soc_data; + struct gpio_desc *power_gpio; + struct clk *tmclk; + bool ddr_signaling; + bool pad_calib_required; + bool pad_control_available; + + struct reset_control *rst; + struct pinctrl *pinctrl_sdmmc; + struct pinctrl_state *pinctrl_state_3v3; + struct pinctrl_state *pinctrl_state_1v8; + struct pinctrl_state *pinctrl_state_3v3_drv; + struct pinctrl_state *pinctrl_state_1v8_drv; + + struct sdhci_tegra_autocal_offsets autocal_offsets; + ktime_t last_calib; + + u32 default_tap; + u32 default_trim; + u32 dqs_trim; + bool enable_hwcq; + unsigned long curr_clk_rate; + u8 tuned_tap_delay; +}; + +static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + + if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) && + (reg == SDHCI_HOST_VERSION))) { + /* Erratum: Version register is invalid in HW. */ + return SDHCI_SPEC_200; + } + + return readw(host->ioaddr + reg); +} + +static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + switch (reg) { + case SDHCI_TRANSFER_MODE: + /* + * Postpone this write, we must do it together with a + * command write that is down below. 
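+ * The SDHCI_COMMAND case below then issues one 32-bit write that + * carries both halves, (val << 16) | xfer_mode_shadow, so the transfer + * mode and the command reach the hardware together.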
+ */ + pltfm_host->xfer_mode_shadow = val; + return; + case SDHCI_COMMAND: + writel((val << 16) | pltfm_host->xfer_mode_shadow, + host->ioaddr + SDHCI_TRANSFER_MODE); + return; + } + + writew(val, host->ioaddr + reg); +} + +static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + + /* Seems like we're getting spurious timeout and crc errors, so + * disable signalling of them. In case of real errors software + * timers should take care of eventually detecting them. + */ + if (unlikely(reg == SDHCI_SIGNAL_ENABLE)) + val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC); + + writel(val, host->ioaddr + reg); + + if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) && + (reg == SDHCI_INT_ENABLE))) { + /* Erratum: Must enable block gap interrupt detection */ + u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); + if (val & SDHCI_INT_CARD_INT) + gap_ctrl |= 0x8; + else + gap_ctrl &= ~0x8; + writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); + } +} + +static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable) +{ + bool status; + u32 reg; + + reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + status = !!(reg & SDHCI_CLOCK_CARD_EN); + + if (status == enable) + return status; + + if (enable) + reg |= SDHCI_CLOCK_CARD_EN; + else + reg &= ~SDHCI_CLOCK_CARD_EN; + + sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL); + + return status; +} + +static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ + bool is_tuning_cmd = 0; + bool clk_enabled; + u8 cmd; + + if (reg == SDHCI_COMMAND) { + cmd = SDHCI_GET_CMD(val); + is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK || + cmd == MMC_SEND_TUNING_BLOCK_HS200; + } + + if (is_tuning_cmd) + clk_enabled = tegra_sdhci_configure_card_clk(host, 0); + + writew(val, host->ioaddr + reg); + + if (is_tuning_cmd) { + udelay(1); + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + tegra_sdhci_configure_card_clk(host, clk_enabled); + } +} + +static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) +{ + /* + * Write-enable shall be assumed if GPIO is missing in a board's + * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on + * Tegra. + */ + return mmc_gpio_get_ro(host->mmc); +} + +static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int has_1v8, has_3v3; + + /* + * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad + * voltage configuration in order to perform voltage switching. This + * means that valid pinctrl info is required on SDHCI instances capable + * of performing voltage switching. Whether or not an SDHCI instance is + * capable of voltage switching is determined based on the regulator. + */ + + if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL)) + return true; + + if (IS_ERR(host->mmc->supply.vqmmc)) + return false; + + has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc, + 1700000, 1950000); + + has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc, + 2700000, 3600000); + + if (has_1v8 == 1 && has_3v3 == 1) + return tegra_host->pad_control_available; + + /* Fixed voltage, no pad control required. 
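+ * (the vqmmc regulator cannot supply both the 1.8 V and 3.3 V ranges, + * so no pad reconfiguration will ever be requested at runtime).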
*/ + return true; +} + +static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + bool card_clk_enabled = false; + u32 reg; + + /* + * Touching the tap values is a bit tricky on some SoC generations. + * The quirk enables a workaround for a glitch that sometimes occurs if + * the tap values are changed. + */ + + if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP) + card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); + + reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK; + reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT; + sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + + if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP && + card_clk_enabled) { + udelay(1); + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + tegra_sdhci_configure_card_clk(host, card_clk_enabled); + } +} + +static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + u32 misc_ctrl, clk_ctrl, pad_ctrl; + + sdhci_and_cqhci_reset(host, mask); + + if (!(mask & SDHCI_RESET_ALL)) + return; + + tegra_sdhci_set_tap(host, tegra_host->default_tap); + + misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); + clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + + misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 | + SDHCI_MISC_CTRL_ENABLE_SDR50 | + SDHCI_MISC_CTRL_ENABLE_DDR50 | + SDHCI_MISC_CTRL_ENABLE_SDR104); + + clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK | + SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE); + + if (tegra_sdhci_is_pad_and_regulator_valid(host)) { + /* Erratum: Enable SDHCI spec v3.00 support */ + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; + /* Advertise UHS modes as supported by host */ + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50; + if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) + clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; + } + + clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT; + + sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); + sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + + if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) { + pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK; + pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL; + sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + + tegra_host->pad_calib_required = true; + } + + tegra_host->ddr_signaling = false; +} + +static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable) +{ + u32 val; + + /* + * Enable or disable the additional I/O pad used by the drive strength + * calibration process. 
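+ * The pad is powered via the E_INPUT_E_PWRD bit; after enabling it the + * code waits 1-2 us below so the pad can settle before calibration.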
+ */ + val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + + if (enable) + val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD; + else + val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD; + + sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + + if (enable) + usleep_range(1, 2); +} + +static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host, + u16 pdpu) +{ + u32 reg; + + reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); + reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK; + reg |= pdpu; + sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); +} + +static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage, + bool state_drvupdn) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets *offsets = + &tegra_host->autocal_offsets; + struct pinctrl_state *pinctrl_drvupdn = NULL; + int ret = 0; + u8 drvup = 0, drvdn = 0; + u32 reg; + + if (!state_drvupdn) { + /* PADS Drive Strength */ + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + if (tegra_host->pinctrl_state_1v8_drv) { + pinctrl_drvupdn = + tegra_host->pinctrl_state_1v8_drv; + } else { + drvup = offsets->pull_up_1v8_timeout; + drvdn = offsets->pull_down_1v8_timeout; + } + } else { + if (tegra_host->pinctrl_state_3v3_drv) { + pinctrl_drvupdn = + tegra_host->pinctrl_state_3v3_drv; + } else { + drvup = offsets->pull_up_3v3_timeout; + drvdn = offsets->pull_down_3v3_timeout; + } + } + + if (pinctrl_drvupdn != NULL) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + pinctrl_drvupdn); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "failed pads drvupdn, ret: %d\n", ret); + } else if ((drvup) || (drvdn)) { + reg = sdhci_readl(host, + SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK; + reg |= (drvup << 20) | (drvdn << 12); + sdhci_writel(host, reg, + SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + } + + } else { + /* Dual Voltage PADS Voltage selection */ + if (!tegra_host->pad_control_available) + return 0; + + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_1v8); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 1.8V failed, ret: %d\n", ret); + } else { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_3v3); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 3.3V failed, ret: %d\n", ret); + } + } + + return ret; +} + +static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets offsets = + tegra_host->autocal_offsets; + struct mmc_ios *ios = &host->mmc->ios; + bool card_clk_enabled; + u16 pdpu; + u32 reg; + int ret; + + switch (ios->timing) { + case MMC_TIMING_UHS_SDR104: + pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104; + break; + case MMC_TIMING_MMC_HS400: + pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400; + break; + default: + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8; + else + pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3; + } + + /* Set initial offset before auto-calibration */ + tegra_sdhci_set_pad_autocal_offset(host, pdpu); + + card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); + + tegra_sdhci_configure_cal_pad(host, true); + + reg = sdhci_readl(host, 
SDHCI_TEGRA_AUTO_CAL_CONFIG); + reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START; + sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); + + usleep_range(1, 2); + /* 10 ms timeout */ + ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS, + reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE), + 1000, 10000); + + tegra_sdhci_configure_cal_pad(host, false); + + tegra_sdhci_configure_card_clk(host, card_clk_enabled); + + if (ret) { + dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n"); + + /* Disable automatic cal and use fixed Drive Strengths */ + reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); + reg &= ~SDHCI_AUTO_CAL_ENABLE; + sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); + + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "Setting drive strengths failed: %d\n", ret); + } +} + +static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets *autocal = + &tegra_host->autocal_offsets; + int err; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-3v3", + &autocal->pull_up_3v3); + if (err) + autocal->pull_up_3v3 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-3v3", + &autocal->pull_down_3v3); + if (err) + autocal->pull_down_3v3 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-1v8", + &autocal->pull_up_1v8); + if (err) + autocal->pull_up_1v8 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-1v8", + &autocal->pull_down_1v8); + if (err) + autocal->pull_down_1v8 = 0; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-sdr104", + &autocal->pull_up_sdr104); + if (err) + autocal->pull_up_sdr104 = autocal->pull_up_1v8; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-sdr104", + &autocal->pull_down_sdr104); + if (err) + autocal->pull_down_sdr104 = autocal->pull_down_1v8; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-hs400", + &autocal->pull_up_hs400); + if (err) + autocal->pull_up_hs400 = autocal->pull_up_1v8; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-hs400", + &autocal->pull_down_hs400); + if (err) + autocal->pull_down_hs400 = autocal->pull_down_1v8; + + /* + * Different fail-safe drive strength values based on the signaling + * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls. + * So, avoid reading below device tree properties for SoCs that don't + * have NVQUIRK_NEEDS_PAD_CONTROL. 
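+ * The *-timeout offsets read below are only used as fixed fall-back + * drive strengths when pad auto-calibration times out (see + * tegra_sdhci_pad_autocalib() and tegra_sdhci_set_padctrl()).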
+ */ + if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL)) + return; + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-3v3-timeout", + &autocal->pull_up_3v3_timeout); + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_3v3) && + (tegra_host->pinctrl_state_3v3_drv == NULL)) + pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", + mmc_hostname(host->mmc)); + autocal->pull_up_3v3_timeout = 0; + } + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-3v3-timeout", + &autocal->pull_down_3v3_timeout); + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_3v3) && + (tegra_host->pinctrl_state_3v3_drv == NULL)) + pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", + mmc_hostname(host->mmc)); + autocal->pull_down_3v3_timeout = 0; + } + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-up-offset-1v8-timeout", + &autocal->pull_up_1v8_timeout); + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_1v8) && + (tegra_host->pinctrl_state_1v8_drv == NULL)) + pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", + mmc_hostname(host->mmc)); + autocal->pull_up_1v8_timeout = 0; + } + + err = device_property_read_u32(host->mmc->parent, + "nvidia,pad-autocal-pull-down-offset-1v8-timeout", + &autocal->pull_down_1v8_timeout); + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_1v8) && + (tegra_host->pinctrl_state_1v8_drv == NULL)) + pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", + mmc_hostname(host->mmc)); + autocal->pull_down_1v8_timeout = 0; + } +} + +static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib); + + /* 100 ms calibration interval is specified in the TRM */ + if (ktime_to_ms(since_calib) > 100) { + tegra_sdhci_pad_autocalib(host); + tegra_host->last_calib = ktime_get(); + } + + sdhci_request(mmc, mrq); +} + +static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int err; + + err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap", + &tegra_host->default_tap); + if (err) + tegra_host->default_tap = 0; + + err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim", + &tegra_host->default_trim); + if (err) + tegra_host->default_trim = 0; + + err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim", + &tegra_host->dqs_trim); + if (err) + tegra_host->dqs_trim = 0x11; +} + +static void tegra_sdhci_parse_dt(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + + if (device_property_read_bool(host->mmc->parent, "supports-cqe")) + tegra_host->enable_hwcq = true; + else + tegra_host->enable_hwcq = false; + + tegra_sdhci_parse_pad_autocal_dt(host); + tegra_sdhci_parse_tap_and_trim(host); +} + +static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + unsigned long host_clk; + + if (!clock) + return sdhci_set_clock(host, clock); + + /* + * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI + 
* divider to be configured to divide the host clock by two. The SDHCI + * clock divider is calculated as part of sdhci_set_clock() by + * sdhci_calc_clk(). The divider is calculated from host->max_clk and + * the requested clock rate. + * + * By setting the host->max_clk to clock * 2 the divider calculation + * will always result in the correct value for DDR50/52 modes, + * regardless of clock rate rounding, which may happen if the value + * from clk_get_rate() is used. + */ + host_clk = tegra_host->ddr_signaling ? clock * 2 : clock; + clk_set_rate(pltfm_host->clk, host_clk); + tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk); + if (tegra_host->ddr_signaling) + host->max_clk = host_clk; + else + host->max_clk = clk_get_rate(pltfm_host->clk); + + sdhci_set_clock(host, clock); + + if (tegra_host->pad_calib_required) { + tegra_sdhci_pad_autocalib(host); + tegra_host->pad_calib_required = false; + } +} + +static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 val; + + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); + + if (ios->enhanced_strobe) { + val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; + /* + * When CMD13 is sent from mmc_select_hs400es() after + * switching to HS400ES mode, the bus is operating at + * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR. + * To meet the Tegra SDHCI requirement in HS400ES mode, force the + * SDHCI interface clock to MMC_HS200_MAX_DTR (200 MHz) so that the + * host controller CAR clock and the interface clock are rate matched. + */ + tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR); + } else { + val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE; + } + + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL); +} + +static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return clk_round_rate(pltfm_host->clk, UINT_MAX); +} + +static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim) +{ + u32 val; + + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES); + val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK; + val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT; + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES); +} + +static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host) +{ + u32 reg; + int err; + + reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG); + reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE; + sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG); + + /* 1 ms sleep, 5 ms timeout */ + err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA, + reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE), + 1000, 5000); + if (err) + dev_err(mmc_dev(host->mmc), + "HS400 delay line calibration timed out\n"); +} + +static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up, + u8 thd_low, u8 fixed_tap) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + u32 val, tun_status; + u8 word, bit, edge1, tap, window; + bool tap_result; + bool start_fail = false; + bool start_pass = false; + bool end_pass = false; + bool first_fail = false; + bool first_pass = false; + u8 start_pass_tap = 0; + u8 end_pass_tap = 0; + u8 first_fail_tap = 0; + u8 first_pass_tap = 0; + u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE; + + /* + * Read the auto-tuned results and extract the valid passing window by + * filtering out unwanted bubble/partial/merged windows.
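+ * A window bounded by failing taps on both sides is a candidate: + * windows narrower than the lower threshold (bubbles) or at least as + * wide as the upper threshold (merged) are discarded, and the tap is + * placed in the middle of the first window that qualifies.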
+ */ + for (word = 0; word < total_tuning_words; word++) { + val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0); + val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK; + val |= word; + sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0); + tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0); + bit = 0; + while (bit < TUNING_WORD_BIT_SIZE) { + tap = word * TUNING_WORD_BIT_SIZE + bit; + tap_result = tun_status & (1 << bit); + if (!tap_result && !start_fail) { + start_fail = true; + if (!first_fail) { + first_fail_tap = tap; + first_fail = true; + } + + } else if (tap_result && start_fail && !start_pass) { + start_pass_tap = tap; + start_pass = true; + if (!first_pass) { + first_pass_tap = tap; + first_pass = true; + } + + } else if (!tap_result && start_fail && start_pass && + !end_pass) { + end_pass_tap = tap - 1; + end_pass = true; + } else if (tap_result && start_pass && start_fail && + end_pass) { + window = end_pass_tap - start_pass_tap; + /* discard merged window and bubble window */ + if (window >= thd_up || window < thd_low) { + start_pass_tap = tap; + end_pass = false; + } else { + /* set tap at middle of valid window */ + tap = start_pass_tap + window / 2; + tegra_host->tuned_tap_delay = tap; + return; + } + } + + bit++; + } + } + + if (!first_fail) { + WARN(1, "no edge detected, continue with hw tuned delay.\n"); + } else if (first_pass) { + /* set tap location at fixed tap relative to the first edge */ + edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2; + if (edge1 - 1 > fixed_tap) + tegra_host->tuned_tap_delay = edge1 - fixed_tap; + else + tegra_host->tuned_tap_delay = edge1 + fixed_tap; + } +} + +static void tegra_sdhci_post_tuning(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + u32 avg_tap_dly, val, min_tap_dly, max_tap_dly; + u8 fixed_tap, start_tap, end_tap, window_width; + u8 thdupper, thdlower; + u8 num_iter; + u32 clk_rate_mhz, period_ps, bestcase, worstcase; + + /* retain the HW-tuned tap to use in case no correction is needed */ + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL); + tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >> + SDHCI_CLOCK_CTRL_TAP_SHIFT; + if (soc_data->min_tap_delay && soc_data->max_tap_delay) { + min_tap_dly = soc_data->min_tap_delay; + max_tap_dly = soc_data->max_tap_delay; + clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC; + period_ps = USEC_PER_SEC / clk_rate_mhz; + bestcase = period_ps / min_tap_dly; + worstcase = period_ps / max_tap_dly; + /* + * Upper and lower bound thresholds used to detect merged and + * bubble windows + */ + thdupper = (2 * worstcase + bestcase) / 2; + thdlower = worstcase / 4; + /* + * A fixed tap is used when the HW tuning result contains a + * single edge; the tap is then set at a fixed delay relative + * to the first edge. + */ + avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly); + fixed_tap = avg_tap_dly / 2; + + val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1); + start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK; + end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) & + SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK; + window_width = end_tap - start_tap; + num_iter = host->tuning_loop_count; + /* + * A partial window includes the edges of the tuning range. + * A merged window includes more taps, so its width is higher + * than the upper threshold.
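+ * For example, at a 200 MHz tuning clock the period is 5000 ps; with + * Tegra194's min/max tap delays of 96/139 ps this gives a bestcase of + * 52 and a worstcase of 35 taps, i.e. thdupper = 61 and thdlower = 8.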
+ */ + if (start_tap == 0 || (end_tap == (num_iter - 1)) || + (end_tap == num_iter - 2) || window_width >= thdupper) { + pr_debug("%s: Apply tuning correction\n", + mmc_hostname(host->mmc)); + tegra_sdhci_tap_correction(host, thdupper, thdlower, + fixed_tap); + } + } + + tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay); +} + +static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err; + + err = sdhci_execute_tuning(mmc, opcode); + if (!err && !host->tuning_err) + tegra_sdhci_post_tuning(host); + + return err; +} + +static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host, + unsigned timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + bool set_default_tap = false; + bool set_dqs_trim = false; + bool do_hs400_dll_cal = false; + u8 iter = TRIES_256; + u32 val; + + tegra_host->ddr_signaling = false; + switch (timing) { + case MMC_TIMING_UHS_SDR50: + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + /* Don't set default tap on tunable modes. */ + iter = TRIES_128; + break; + case MMC_TIMING_MMC_HS400: + set_dqs_trim = true; + do_hs400_dll_cal = true; + iter = TRIES_128; + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + tegra_host->ddr_signaling = true; + set_default_tap = true; + break; + default: + set_default_tap = true; + break; + } + + val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0); + val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK | + SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK | + SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK); + val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT | + 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT | + 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT); + sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0); + sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0); + + host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256; + + sdhci_set_uhs_signaling(host, timing); + + tegra_sdhci_pad_autocalib(host); + + if (tegra_host->tuned_tap_delay && !set_default_tap) + tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay); + else + tegra_sdhci_set_tap(host, tegra_host->default_tap); + + if (set_dqs_trim) + tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim); + + if (do_hs400_dll_cal) + tegra_sdhci_hs400_dll_cal(host); +} + +static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + unsigned int min, max; + + /* + * Start the search for the minimum tap value at 10, as smaller values + * may wrongly be reported as working but fail at higher speeds, + * according to the TRM. + */ + min = 10; + while (min < 255) { + tegra_sdhci_set_tap(host, min); + if (!mmc_send_tuning(host->mmc, opcode, NULL)) + break; + min++; + } + + /* Find the maximum tap value that still passes. */ + max = min + 1; + while (max < 255) { + tegra_sdhci_set_tap(host, max); + if (mmc_send_tuning(host->mmc, opcode, NULL)) { + max--; + break; + } + max++; + } + + /* The TRM states the ideal tap value is at 75% in the passing range.
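+ * For example, with a passing range of taps 20..100 the tap is set to + * 20 + (80 * 3) / 4 = 80.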
*/ + tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4)); + + return mmc_send_tuning(host->mmc, opcode, NULL); +} + +static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + int ret = 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); + if (ret < 0) + return ret; + ret = sdhci_start_signal_voltage_switch(mmc, ios); + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = sdhci_start_signal_voltage_switch(mmc, ios); + if (ret < 0) + return ret; + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); + } + + if (tegra_host->pad_calib_required) + tegra_sdhci_pad_autocalib(host); + + return ret; +} + +static int tegra_sdhci_init_pinctrl_info(struct device *dev, + struct sdhci_tegra *tegra_host) +{ + tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev); + if (IS_ERR(tegra_host->pinctrl_sdmmc)) { + dev_dbg(dev, "No pinctrl info, err: %ld\n", + PTR_ERR(tegra_host->pinctrl_sdmmc)); + return -1; + } + + tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state( + tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv"); + if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) { + if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV) + tegra_host->pinctrl_state_1v8_drv = NULL; + } + + tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state( + tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv"); + if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) { + if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV) + tegra_host->pinctrl_state_3v3_drv = NULL; + } + + tegra_host->pinctrl_state_3v3 = + pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3"); + if (IS_ERR(tegra_host->pinctrl_state_3v3)) { + dev_warn(dev, "Missing 3.3V pad state, err: %ld\n", + PTR_ERR(tegra_host->pinctrl_state_3v3)); + return -1; + } + + tegra_host->pinctrl_state_1v8 = + pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8"); + if (IS_ERR(tegra_host->pinctrl_state_1v8)) { + dev_warn(dev, "Missing 1.8V pad state, err: %ld\n", + PTR_ERR(tegra_host->pinctrl_state_1v8)); + return -1; + } + + tegra_host->pad_control_available = true; + + return 0; +} + +static void tegra_sdhci_voltage_switch(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + + if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) + tegra_host->pad_calib_required = true; +} + +static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg) +{ + struct mmc_host *mmc = cq_host->mmc; + struct sdhci_host *host = mmc_priv(mmc); + u8 ctrl; + ktime_t timeout; + bool timed_out; + + /* + * During CQE resume/unhalt, the CQHCI driver unhalts CQE before the + * cqhci_host_ops enable callback, where the SDHCI DMA and BLOCK_SIZE + * registers need to be re-configured. + * Tegra CQHCI/SDHCI prevents write access to the block size register + * while CQE is unhalted, so handle the CQE resume sequence here to + * configure the SDHCI block registers before exiting the CQE halt + * state.
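+ * The sequence below re-programs the transfer mode and re-enables the + * SDHCI side of CQE while the controller is still halted, then clears + * CQHCI_HALT and polls for up to 50 us for the halt bit to deassert, + * retrying the write once on timeout.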
+ */ + if (reg == CQHCI_CTL && !(val & CQHCI_HALT) && + cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) { + sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE); + sdhci_cqe_enable(mmc); + writel(val, cq_host->mmio + reg); + timeout = ktime_add_us(ktime_get(), 50); + while (1) { + timed_out = ktime_compare(ktime_get(), timeout) > 0; + ctrl = cqhci_readl(cq_host, CQHCI_CTL); + if (!(ctrl & CQHCI_HALT) || timed_out) + break; + } + /* + * CQE usually resumes very quickly, but in case the Tegra CQE + * does not resume, retry the unhalt. + */ + if (timed_out) + writel(val, cq_host->mmio + reg); + } else { + writel(val, cq_host->mmio + reg); + } +} + +static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc, + struct mmc_request *mrq, u64 *data) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc)); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + + if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING && + mrq->cmd->flags & MMC_RSP_R1B) + *data |= CQHCI_CMD_TIMING(1); +} + +static void sdhci_tegra_cqe_enable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + struct sdhci_host *host = mmc_priv(mmc); + u32 val; + + /* + * The Tegra CQHCI/SDMMC design prevents write access to the sdhci + * block size register when CQE is enabled and unhalted. + * The CQHCI driver enables CQE prior to activation, so disable CQE + * before programming the block size in the sdhci controller and then + * re-enable it. + */ + if (!cq_host->activated) { + val = cqhci_readl(cq_host, CQHCI_CFG); + if (val & CQHCI_ENABLE) + cqhci_writel(cq_host, (val & ~CQHCI_ENABLE), + CQHCI_CFG); + sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE); + sdhci_cqe_enable(mmc); + if (val & CQHCI_ENABLE) + cqhci_writel(cq_host, val, CQHCI_CFG); + } + + /* + * CMD CRC errors are sometimes seen with some eMMC devices when the + * status command is sent during transfer of the last data block, which + * is the default case as the send status command block counter (CBC) + * is 1. + * The recommended fix is to set CBC to 0, allowing the send status + * command only when the data lines are idle. + */ + val = cqhci_readl(cq_host, CQHCI_SSC1); + val &= ~CQHCI_SSC1_CBC_MASK; + cqhci_writel(cq_host, val, CQHCI_SSC1); +} + +static void sdhci_tegra_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static void tegra_sdhci_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + u32 val; + + /* + * The HW busy detection timeout is based on the programmed data + * timeout counter, and the maximum supported timeout is 11 s, which + * may not be enough for long operations like cache flush, sleep/awake + * or erase. + * + * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows + * the host controller to wait for the busy state as long as the card + * is busy, without any HW timeout. + * + * So, use the infinite busy wait mode for operations that may take + * more than the maximum HW busy timeout of 11 s, otherwise use the + * finite busy wait mode.
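+ * + * Note that cmd->busy_timeout is in milliseconds, hence the comparison + * against 11 * MSEC_PER_SEC below.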
+ */ + val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); + if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC) + val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; + else + val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT; + sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL); + + __sdhci_set_timeout(host, cmd); +} + +static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 reg; + + reg = cqhci_readl(cq_host, CQHCI_CFG); + reg |= CQHCI_ENABLE; + cqhci_writel(cq_host, reg, CQHCI_CFG); +} + +static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = cqhci_readl(cq_host, CQHCI_CFG); + reg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, reg, CQHCI_CFG); + sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); +} + +static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { + .write_l = tegra_cqhci_writel, + .enable = sdhci_tegra_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_tegra_dumpregs, + .update_dcmd_desc = sdhci_tegra_update_dcmd_desc, + .pre_enable = sdhci_tegra_cqe_pre_enable, + .post_disable = sdhci_tegra_cqe_post_disable, +}; + +static int tegra_sdhci_set_dma_mask(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *platform = sdhci_priv(host); + struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform); + const struct sdhci_tegra_soc_data *soc = tegra->soc_data; + struct device *dev = mmc_dev(host->mmc); + + if (soc->dma_mask) + return dma_set_mask_and_coherent(dev, soc->dma_mask); + + return 0; +} + +static const struct sdhci_ops tegra_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, + .read_w = tegra_sdhci_readw, + .write_l = tegra_sdhci_writel, + .set_clock = tegra_sdhci_set_clock, + .set_dma_mask = tegra_sdhci_set_dma_mask, + .set_bus_width = sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .platform_execute_tuning = tegra_sdhci_execute_tuning, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .voltage_switch = tegra_sdhci_voltage_switch, + .get_max_clock = tegra_sdhci_get_max_clock, +}; + +static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .ops = &tegra_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra20 = { + .pdata = &sdhci_tegra20_pdata, + .dma_mask = DMA_BIT_MASK(32), + .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | + NVQUIRK_ENABLE_BLOCK_GAP_DET, +}; + +static const struct sdhci_pltfm_data sdhci_tegra30_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_BROKEN_HS200 | + /* + * Auto-CMD23 leads to "Got command interrupt 0x00010000 even + * though no command operation was in progress." + * + * The exact reason is unknown, as the same hardware seems + * to support Auto CMD23 on a downstream 3.1 kernel. 
+ */ + SDHCI_QUIRK2_ACMD23_BROKEN, + .ops = &tegra_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra30 = { + .pdata = &sdhci_tegra30_pdata, + .dma_mask = DMA_BIT_MASK(32), + .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 | + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_PADCALIB, +}; + +static const struct sdhci_ops tegra114_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, + .read_w = tegra_sdhci_readw, + .write_w = tegra_sdhci_writew, + .write_l = tegra_sdhci_writel, + .set_clock = tegra_sdhci_set_clock, + .set_dma_mask = tegra_sdhci_set_dma_mask, + .set_bus_width = sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .platform_execute_tuning = tegra_sdhci_execute_tuning, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .voltage_switch = tegra_sdhci_voltage_switch, + .get_max_clock = tegra_sdhci_get_max_clock, +}; + +static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &tegra114_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra114 = { + .pdata = &sdhci_tegra114_pdata, + .dma_mask = DMA_BIT_MASK(32), +}; + +static const struct sdhci_pltfm_data sdhci_tegra124_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &tegra114_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra124 = { + .pdata = &sdhci_tegra124_pdata, + .dma_mask = DMA_BIT_MASK(34), +}; + +static const struct sdhci_ops tegra210_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, + .read_w = tegra_sdhci_readw, + .write_w = tegra210_sdhci_writew, + .write_l = tegra_sdhci_writel, + .set_clock = tegra_sdhci_set_clock, + .set_dma_mask = tegra_sdhci_set_dma_mask, + .set_bus_width = sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .voltage_switch = tegra_sdhci_voltage_switch, + .get_max_clock = tegra_sdhci_get_max_clock, + .set_timeout = tegra_sdhci_set_timeout, +}; + +static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &tegra210_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra210 = { + .pdata = &sdhci_tegra210_pdata, + .dma_mask = DMA_BIT_MASK(34), + .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | + NVQUIRK_HAS_PADCALIB | + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, + .min_tap_delay = 106, + .max_tap_delay = 185, +}; + +static const struct sdhci_ops tegra186_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, + .read_w = tegra_sdhci_readw, + .write_l = tegra_sdhci_writel, + .set_clock = tegra_sdhci_set_clock, + .set_dma_mask = tegra_sdhci_set_dma_mask, + .set_bus_width = sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, + .voltage_switch = tegra_sdhci_voltage_switch, + .get_max_clock = 
tegra_sdhci_get_max_clock, + .irq = sdhci_tegra_cqhci_irq, + .set_timeout = tegra_sdhci_set_timeout, +}; + +static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .ops = &tegra186_sdhci_ops, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra186 = { + .pdata = &sdhci_tegra186_pdata, + .dma_mask = DMA_BIT_MASK(40), + .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | + NVQUIRK_HAS_PADCALIB | + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK | + NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING, + .min_tap_delay = 84, + .max_tap_delay = 136, +}; + +static const struct sdhci_tegra_soc_data soc_data_tegra194 = { + .pdata = &sdhci_tegra186_pdata, + .dma_mask = DMA_BIT_MASK(39), + .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL | + NVQUIRK_HAS_PADCALIB | + NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | + NVQUIRK_ENABLE_SDR50 | + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, + .min_tap_delay = 96, + .max_tap_delay = 139, +}; + +static const struct of_device_id sdhci_tegra_dt_match[] = { + { .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 }, + { .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 }, + { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, + { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 }, + { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, + { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, + { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match); + +static int sdhci_tegra_add_host(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!tegra_host->enable_hwcq) + return sdhci_add_host(host); + + sdhci_enable_v4_mode(host); + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + + cq_host = devm_kzalloc(host->mmc->parent, + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR; + cq_host->ops = &sdhci_tegra_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + +static int sdhci_tegra_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + const struct sdhci_tegra_soc_data *soc_data; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_tegra *tegra_host; + struct clk *clk; + int rc; + + match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); + if (!match) + return -EINVAL; + soc_data = match->data; + + host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host)); + if (IS_ERR(host)) + return PTR_ERR(host); + pltfm_host = sdhci_priv(host); + + tegra_host = sdhci_pltfm_priv(pltfm_host); + tegra_host->ddr_signaling = false; + tegra_host->pad_calib_required = false; + tegra_host->pad_control_available = false; + 
tegra_host->soc_data = soc_data; + + if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) { + rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host); + if (rc == 0) + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_tegra_start_signal_voltage_switch; + } + + /* Hook to periodically rerun pad calibration */ + if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) + host->mmc_host_ops.request = tegra_sdhci_request; + + host->mmc_host_ops.hs400_enhanced_strobe = + tegra_sdhci_hs400_enhanced_strobe; + + if (!host->ops->platform_execute_tuning) + host->mmc_host_ops.execute_tuning = + tegra_sdhci_execute_hw_tuning; + + rc = mmc_of_parse(host->mmc); + if (rc) + goto err_parse_dt; + + if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) + host->mmc->caps |= MMC_CAP_1_8V_DDR; + + /* HW busy detection is supported, but R1B responses are required. */ + host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; + + tegra_sdhci_parse_dt(host); + + tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", + GPIOD_OUT_HIGH); + if (IS_ERR(tegra_host->power_gpio)) { + rc = PTR_ERR(tegra_host->power_gpio); + goto err_power_req; + } + + /* + * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host + * timeout clock; SW can choose between TMCLK and SDCLK for the + * hardware data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit of + * the SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register. + * + * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1 and SDMMC uses the + * 12 MHz TMCLK, which is advertised in the host capability register. + * A TMCLK of 12 MHz provides a maximum data timeout period of 11 s, + * better than what using SDCLK for the data timeout achieves. + * + * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs + * supporting a separate TMCLK.
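+ * + * (The 11 s limit follows from the maximum SDHCI data timeout of 2^27 + * timeout clock cycles: 2^27 / 12 MHz is roughly 11.2 s.)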
+ */ + + if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) { + clk = devm_clk_get(&pdev->dev, "tmclk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -EPROBE_DEFER) + goto err_power_req; + + dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc); + clk = NULL; + } + + clk_set_rate(clk, 12000000); + rc = clk_prepare_enable(clk); + if (rc) { + dev_err(&pdev->dev, + "failed to enable tmclk: %d\n", rc); + goto err_power_req; + } + + tegra_host->tmclk = clk; + } + + clk = devm_clk_get(mmc_dev(host->mmc), NULL); + if (IS_ERR(clk)) { + rc = dev_err_probe(&pdev->dev, PTR_ERR(clk), + "failed to get clock\n"); + goto err_clk_get; + } + clk_prepare_enable(clk); + pltfm_host->clk = clk; + + tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, + "sdhci"); + if (IS_ERR(tegra_host->rst)) { + rc = PTR_ERR(tegra_host->rst); + dev_err(&pdev->dev, "failed to get reset control: %d\n", rc); + goto err_rst_get; + } + + rc = reset_control_assert(tegra_host->rst); + if (rc) + goto err_rst_get; + + usleep_range(2000, 4000); + + rc = reset_control_deassert(tegra_host->rst); + if (rc) + goto err_rst_get; + + usleep_range(2000, 4000); + + rc = sdhci_tegra_add_host(host); + if (rc) + goto err_add_host; + + return 0; + +err_add_host: + reset_control_assert(tegra_host->rst); +err_rst_get: + clk_disable_unprepare(pltfm_host->clk); +err_clk_get: + clk_disable_unprepare(tegra_host->tmclk); +err_power_req: +err_parse_dt: + sdhci_pltfm_free(pdev); + return rc; +} + +static int sdhci_tegra_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_remove_host(host, 0); + + reset_control_assert(tegra_host->rst); + usleep_range(2000, 4000); + clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(tegra_host->tmclk); + + sdhci_pltfm_free(pdev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int __maybe_unused sdhci_tegra_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + + ret = sdhci_suspend_host(host); + if (ret) { + cqhci_resume(host->mmc); + return ret; + } + + clk_disable_unprepare(pltfm_host->clk); + return 0; +} + +static int __maybe_unused sdhci_tegra_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = sdhci_resume_host(host); + if (ret) + goto disable_clk; + + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_resume(host->mmc); + if (ret) + goto suspend_host; + } + + return 0; + +suspend_host: + sdhci_suspend_host(host); +disable_clk: + clk_disable_unprepare(pltfm_host->clk); + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend, + sdhci_tegra_resume); + +static struct platform_driver sdhci_tegra_driver = { + .driver = { + .name = "sdhci-tegra", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_tegra_dt_match, + .pm = &sdhci_tegra_dev_pm_ops, + }, + .probe = sdhci_tegra_probe, + .remove = sdhci_tegra_remove, +}; + +module_platform_driver(sdhci_tegra_driver); + +MODULE_DESCRIPTION("SDHCI driver for Tegra"); +MODULE_AUTHOR("Google, Inc."); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c new file mode 100644 index 000000000..03ce57ef4 --- /dev/null +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -0,0 +1,844 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PHY support for Xenon SDHC + * + * Copyright (C) 2016 Marvell, All Rights Reserved. + * + * Author: Hu Ziji + * Date: 2016-8-24 + */ + +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/ktime.h> +#include <linux/of_address.h> + +#include "sdhci-pltfm.h" +#include "sdhci-xenon.h" + +/* Register base for eMMC PHY 5.0 Version */ +#define XENON_EMMC_5_0_PHY_REG_BASE 0x0160 +/* Register base for eMMC PHY 5.1 Version */ +#define XENON_EMMC_PHY_REG_BASE 0x0170 + +#define XENON_EMMC_PHY_TIMING_ADJUST XENON_EMMC_PHY_REG_BASE +#define XENON_EMMC_5_0_PHY_TIMING_ADJUST XENON_EMMC_5_0_PHY_REG_BASE +#define XENON_TIMING_ADJUST_SLOW_MODE BIT(29) +#define XENON_TIMING_ADJUST_SDIO_MODE BIT(28) +#define XENON_SAMPL_INV_QSP_PHASE_SELECT BIT(18) +#define XENON_SAMPL_INV_QSP_PHASE_SELECT_SHIFT 18 +#define XENON_PHY_INITIALIZAION BIT(31) +#define XENON_WAIT_CYCLE_BEFORE_USING_MASK 0xF +#define XENON_WAIT_CYCLE_BEFORE_USING_SHIFT 12 +#define XENON_FC_SYNC_EN_DURATION_MASK 0xF +#define XENON_FC_SYNC_EN_DURATION_SHIFT 8 +#define XENON_FC_SYNC_RST_EN_DURATION_MASK 0xF +#define XENON_FC_SYNC_RST_EN_DURATION_SHIFT 4 +#define XENON_FC_SYNC_RST_DURATION_MASK 0xF +#define XENON_FC_SYNC_RST_DURATION_SHIFT 0 + +#define XENON_EMMC_PHY_FUNC_CONTROL (XENON_EMMC_PHY_REG_BASE + 0x4) +#define XENON_EMMC_5_0_PHY_FUNC_CONTROL \ + (XENON_EMMC_5_0_PHY_REG_BASE + 0x4) +#define XENON_ASYNC_DDRMODE_MASK BIT(23) +#define XENON_ASYNC_DDRMODE_SHIFT 23 +#define XENON_CMD_DDR_MODE BIT(16) +#define XENON_DQ_DDR_MODE_SHIFT 8 +#define XENON_DQ_DDR_MODE_MASK 0xFF +#define XENON_DQ_ASYNC_MODE BIT(4) + +#define XENON_EMMC_PHY_PAD_CONTROL (XENON_EMMC_PHY_REG_BASE + 0x8) +#define XENON_EMMC_5_0_PHY_PAD_CONTROL \ + (XENON_EMMC_5_0_PHY_REG_BASE + 0x8) +#define XENON_REC_EN_SHIFT 24 +#define XENON_REC_EN_MASK 0xF +#define XENON_FC_DQ_RECEN BIT(24) +#define XENON_FC_CMD_RECEN BIT(25) +#define XENON_FC_QSP_RECEN BIT(26) +#define XENON_FC_QSN_RECEN BIT(27) +#define XENON_OEN_QSN BIT(28) +#define XENON_AUTO_RECEN_CTRL BIT(30) +#define XENON_FC_ALL_CMOS_RECEIVER 0xF000 + +#define XENON_EMMC5_FC_QSP_PD BIT(18) +#define XENON_EMMC5_FC_QSP_PU BIT(22) +#define XENON_EMMC5_FC_CMD_PD BIT(17) +#define XENON_EMMC5_FC_CMD_PU BIT(21) +#define XENON_EMMC5_FC_DQ_PD BIT(16) +#define XENON_EMMC5_FC_DQ_PU BIT(20) + +#define XENON_EMMC_PHY_PAD_CONTROL1 (XENON_EMMC_PHY_REG_BASE + 0xC) +#define XENON_EMMC5_1_FC_QSP_PD BIT(9) +#define XENON_EMMC5_1_FC_QSP_PU BIT(25) +#define XENON_EMMC5_1_FC_CMD_PD BIT(8) +#define XENON_EMMC5_1_FC_CMD_PU BIT(24) +#define XENON_EMMC5_1_FC_DQ_PD 0xFF +#define XENON_EMMC5_1_FC_DQ_PU (0xFF << 16) + +#define XENON_EMMC_PHY_PAD_CONTROL2 (XENON_EMMC_PHY_REG_BASE + 0x10) +#define XENON_EMMC_5_0_PHY_PAD_CONTROL2 \ + (XENON_EMMC_5_0_PHY_REG_BASE + 0xC) +#define XENON_ZNR_MASK 0x1F +#define XENON_ZNR_SHIFT 8 +#define XENON_ZPR_MASK 0x1F +/* Preferred ZNR and ZPR values vary between different boards. + * The specific ZNR and ZPR values should be defined here + * according to the board's actual timing.
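+ * 0xF is used below as the default for both; it can be overridden via + * the marvell,xenon-phy-znr and marvell,xenon-phy-zpr DT properties.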
+ */ +#define XENON_ZNR_DEF_VALUE 0xF +#define XENON_ZPR_DEF_VALUE 0xF + +#define XENON_EMMC_PHY_DLL_CONTROL (XENON_EMMC_PHY_REG_BASE + 0x14) +#define XENON_EMMC_5_0_PHY_DLL_CONTROL \ + (XENON_EMMC_5_0_PHY_REG_BASE + 0x10) +#define XENON_DLL_ENABLE BIT(31) +#define XENON_DLL_UPDATE_STROBE_5_0 BIT(30) +#define XENON_DLL_REFCLK_SEL BIT(30) +#define XENON_DLL_UPDATE BIT(23) +#define XENON_DLL_PHSEL1_SHIFT 24 +#define XENON_DLL_PHSEL0_SHIFT 16 +#define XENON_DLL_PHASE_MASK 0x3F +#define XENON_DLL_PHASE_90_DEGREE 0x1F +#define XENON_DLL_FAST_LOCK BIT(5) +#define XENON_DLL_GAIN2X BIT(3) +#define XENON_DLL_BYPASS_EN BIT(0) + +#define XENON_EMMC_5_0_PHY_LOGIC_TIMING_ADJUST \ + (XENON_EMMC_5_0_PHY_REG_BASE + 0x14) +#define XENON_EMMC_5_0_PHY_LOGIC_TIMING_VALUE 0x5A54 +#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18) +#define XENON_LOGIC_TIMING_VALUE 0x00AA8977 + +/* + * List offset of PHY registers and some special register values + * in eMMC PHY 5.0 or eMMC PHY 5.1 + */ +struct xenon_emmc_phy_regs { + /* Offset of Timing Adjust register */ + u16 timing_adj; + /* Offset of Func Control register */ + u16 func_ctrl; + /* Offset of Pad Control register */ + u16 pad_ctrl; + /* Offset of Pad Control register 2 */ + u16 pad_ctrl2; + /* Offset of DLL Control register */ + u16 dll_ctrl; + /* Offset of Logic Timing Adjust register */ + u16 logic_timing_adj; + /* DLL Update Enable bit */ + u32 dll_update; + /* value in Logic Timing Adjustment register */ + u32 logic_timing_val; +}; + +static const char * const phy_types[] = { + "emmc 5.0 phy", + "emmc 5.1 phy" +}; + +enum xenon_phy_type_enum { + EMMC_5_0_PHY, + EMMC_5_1_PHY, + NR_PHY_TYPES +}; + +enum soc_pad_ctrl_type { + SOC_PAD_SD, + SOC_PAD_FIXED_1_8V, +}; + +struct soc_pad_ctrl { + /* Register address of SoC PHY PAD ctrl */ + void __iomem *reg; + /* SoC PHY PAD ctrl type */ + enum soc_pad_ctrl_type pad_type; + /* SoC specific operation to set SoC PHY PAD */ + void (*set_soc_pad)(struct sdhci_host *host, + unsigned char signal_voltage); +}; + +static struct xenon_emmc_phy_regs xenon_emmc_5_0_phy_regs = { + .timing_adj = XENON_EMMC_5_0_PHY_TIMING_ADJUST, + .func_ctrl = XENON_EMMC_5_0_PHY_FUNC_CONTROL, + .pad_ctrl = XENON_EMMC_5_0_PHY_PAD_CONTROL, + .pad_ctrl2 = XENON_EMMC_5_0_PHY_PAD_CONTROL2, + .dll_ctrl = XENON_EMMC_5_0_PHY_DLL_CONTROL, + .logic_timing_adj = XENON_EMMC_5_0_PHY_LOGIC_TIMING_ADJUST, + .dll_update = XENON_DLL_UPDATE_STROBE_5_0, + .logic_timing_val = XENON_EMMC_5_0_PHY_LOGIC_TIMING_VALUE, +}; + +static struct xenon_emmc_phy_regs xenon_emmc_5_1_phy_regs = { + .timing_adj = XENON_EMMC_PHY_TIMING_ADJUST, + .func_ctrl = XENON_EMMC_PHY_FUNC_CONTROL, + .pad_ctrl = XENON_EMMC_PHY_PAD_CONTROL, + .pad_ctrl2 = XENON_EMMC_PHY_PAD_CONTROL2, + .dll_ctrl = XENON_EMMC_PHY_DLL_CONTROL, + .logic_timing_adj = XENON_EMMC_PHY_LOGIC_TIMING_ADJUST, + .dll_update = XENON_DLL_UPDATE, + .logic_timing_val = XENON_LOGIC_TIMING_VALUE, +}; + +/* + * eMMC PHY configuration and operations + */ +struct xenon_emmc_phy_params { + bool slow_mode; + + u8 znr; + u8 zpr; + + /* Nr of consecutive Sampling Points of a Valid Sampling Window */ + u8 nr_tun_times; + /* Divider for calculating Tuning Step */ + u8 tun_step_divider; + + struct soc_pad_ctrl pad_ctrl; +}; + +static int xenon_alloc_emmc_phy(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_params *params; + + params = devm_kzalloc(mmc_dev(host->mmc), sizeof(*params), GFP_KERNEL); 
+ if (!params) + return -ENOMEM; + + priv->phy_params = params; + if (priv->phy_type == EMMC_5_0_PHY) + priv->emmc_phy_regs = &xenon_emmc_5_0_phy_regs; + else + priv->emmc_phy_regs = &xenon_emmc_5_1_phy_regs; + + return 0; +} + +/* + * eMMC 5.0/5.1 PHY init/re-init. + * eMMC PHY init should be executed after: + * 1. SDCLK frequency changes. + * 2. SDCLK is stopped and re-enabled. + * 3. The config in emmc_phy_regs->timing_adj or emmc_phy_regs->func_ctrl + * is changed. + */ +static int xenon_emmc_phy_init(struct sdhci_host *host) +{ + u32 reg; + u32 wait, clock; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs; + + reg = sdhci_readl(host, phy_regs->timing_adj); + reg |= XENON_PHY_INITIALIZAION; + sdhci_writel(host, reg, phy_regs->timing_adj); + + /* Add duration of FC_SYNC_RST */ + wait = ((reg >> XENON_FC_SYNC_RST_DURATION_SHIFT) & + XENON_FC_SYNC_RST_DURATION_MASK); + /* Add interval between FC_SYNC_EN and FC_SYNC_RST */ + wait += ((reg >> XENON_FC_SYNC_RST_EN_DURATION_SHIFT) & + XENON_FC_SYNC_RST_EN_DURATION_MASK); + /* Add duration of asserting FC_SYNC_EN */ + wait += ((reg >> XENON_FC_SYNC_EN_DURATION_SHIFT) & + XENON_FC_SYNC_EN_DURATION_MASK); + /* Add duration of waiting for PHY */ + wait += ((reg >> XENON_WAIT_CYCLE_BEFORE_USING_SHIFT) & + XENON_WAIT_CYCLE_BEFORE_USING_MASK); + /* 4 additional bus clocks and 4 AXI bus clocks are required */ + wait += 8; + wait <<= 20; + + clock = host->clock; + if (!clock) + /* Use the slowest possible bus frequency value */ + clock = XENON_LOWEST_SDCLK_FREQ; + /* get the wait time */ + wait /= clock; + wait++; + /* wait for the host eMMC PHY init to complete */ + udelay(wait); + + reg = sdhci_readl(host, phy_regs->timing_adj); + reg &= XENON_PHY_INITIALIZAION; + if (reg) { + dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n", + wait); + return -ETIMEDOUT; + } + + return 0; +} + +#define ARMADA_3700_SOC_PAD_1_8V 0x1 +#define ARMADA_3700_SOC_PAD_3_3V 0x0 + +static void armada_3700_soc_pad_voltage_set(struct sdhci_host *host, + unsigned char signal_voltage) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_params *params = priv->phy_params; + + if (params->pad_ctrl.pad_type == SOC_PAD_FIXED_1_8V) { + writel(ARMADA_3700_SOC_PAD_1_8V, params->pad_ctrl.reg); + } else if (params->pad_ctrl.pad_type == SOC_PAD_SD) { + if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) + writel(ARMADA_3700_SOC_PAD_1_8V, params->pad_ctrl.reg); + else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) + writel(ARMADA_3700_SOC_PAD_3_3V, params->pad_ctrl.reg); + } +} + +/* + * Set the SoC PHY voltage PAD control register, + * according to the operating voltage on the PAD. + * The detailed operation depends on the SoC implementation. + */ +static void xenon_emmc_phy_set_soc_pad(struct sdhci_host *host, + unsigned char signal_voltage) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_params *params = priv->phy_params; + + if (!params->pad_ctrl.reg) + return; + + if (params->pad_ctrl.set_soc_pad) + params->pad_ctrl.set_soc_pad(host, signal_voltage); +} + +/* + * Enable the eMMC PHY HW DLL. + * The DLL should be enabled and stable before HS200/SDR104 tuning, + * and before HS400 data strobe setting.
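+ * The lock status is polled below via XENON_SLOT_EXT_PRESENT_STATE for + * up to 32 ms.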
+ */ +static int xenon_emmc_phy_enable_dll(struct sdhci_host *host) +{ + u32 reg; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs; + ktime_t timeout; + + if (WARN_ON(host->clock <= MMC_HIGH_52_MAX_DTR)) + return -EINVAL; + + reg = sdhci_readl(host, phy_regs->dll_ctrl); + if (reg & XENON_DLL_ENABLE) + return 0; + + /* Enable DLL */ + reg = sdhci_readl(host, phy_regs->dll_ctrl); + reg |= (XENON_DLL_ENABLE | XENON_DLL_FAST_LOCK); + + /* + * Set Phase as 90 degree, which is most common value. + * Might set another value if necessary. + * The granularity is 1 degree. + */ + reg &= ~((XENON_DLL_PHASE_MASK << XENON_DLL_PHSEL0_SHIFT) | + (XENON_DLL_PHASE_MASK << XENON_DLL_PHSEL1_SHIFT)); + reg |= ((XENON_DLL_PHASE_90_DEGREE << XENON_DLL_PHSEL0_SHIFT) | + (XENON_DLL_PHASE_90_DEGREE << XENON_DLL_PHSEL1_SHIFT)); + + reg &= ~XENON_DLL_BYPASS_EN; + reg |= phy_regs->dll_update; + if (priv->phy_type == EMMC_5_1_PHY) + reg &= ~XENON_DLL_REFCLK_SEL; + sdhci_writel(host, reg, phy_regs->dll_ctrl); + + /* Wait max 32 ms */ + timeout = ktime_add_ms(ktime_get(), 32); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) & + XENON_DLL_LOCK_STATE) + break; + if (timedout) { + dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n"); + return -ETIMEDOUT; + } + udelay(100); + } + return 0; +} + +/* + * Config to eMMC PHY to prepare for tuning. + * Enable HW DLL and set the TUNING_STEP + */ +static int xenon_emmc_phy_config_tuning(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_params *params = priv->phy_params; + u32 reg, tuning_step; + int ret; + + if (host->clock <= MMC_HIGH_52_MAX_DTR) + return -EINVAL; + + ret = xenon_emmc_phy_enable_dll(host); + if (ret) + return ret; + + /* Achieve TUNING_STEP with HW DLL help */ + reg = sdhci_readl(host, XENON_SLOT_DLL_CUR_DLY_VAL); + tuning_step = reg / params->tun_step_divider; + if (unlikely(tuning_step > XENON_TUNING_STEP_MASK)) { + dev_warn(mmc_dev(host->mmc), + "HS200 TUNING_STEP %d is larger than MAX value\n", + tuning_step); + tuning_step = XENON_TUNING_STEP_MASK; + } + + /* Set TUNING_STEP for later tuning */ + reg = sdhci_readl(host, XENON_SLOT_OP_STATUS_CTRL); + reg &= ~(XENON_TUN_CONSECUTIVE_TIMES_MASK << + XENON_TUN_CONSECUTIVE_TIMES_SHIFT); + reg |= (params->nr_tun_times << XENON_TUN_CONSECUTIVE_TIMES_SHIFT); + reg &= ~(XENON_TUNING_STEP_MASK << XENON_TUNING_STEP_SHIFT); + reg |= (tuning_step << XENON_TUNING_STEP_SHIFT); + sdhci_writel(host, reg, XENON_SLOT_OP_STATUS_CTRL); + + return 0; +} + +static void xenon_emmc_phy_disable_strobe(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + /* Disable both SDHC Data Strobe and Enhanced Strobe */ + reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL); + reg &= ~(XENON_ENABLE_DATA_STROBE | XENON_ENABLE_RESP_STROBE); + sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL); + + /* Clear Strobe line Pull down or Pull up */ + if (priv->phy_type == EMMC_5_0_PHY) { + reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL); + reg &= ~(XENON_EMMC5_FC_QSP_PD | XENON_EMMC5_FC_QSP_PU); + sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL); + } else { + reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1); + reg &= 
~(XENON_EMMC5_1_FC_QSP_PD | XENON_EMMC5_1_FC_QSP_PU); + sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1); + } +} + +/* Set HS400 Data Strobe and Enhanced Strobe */ +static void xenon_emmc_phy_strobe_delay_adj(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + if (WARN_ON(host->timing != MMC_TIMING_MMC_HS400)) + return; + + if (host->clock <= MMC_HIGH_52_MAX_DTR) + return; + + dev_dbg(mmc_dev(host->mmc), "starts HS400 strobe delay adjustment\n"); + + xenon_emmc_phy_enable_dll(host); + + /* Enable SDHC Data Strobe */ + reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL); + reg |= XENON_ENABLE_DATA_STROBE; + /* + * Enable SDHC Enhanced Strobe if supported + * Xenon Enhanced Strobe should be enabled only when + * 1. card is in HS400 mode and + * 2. SDCLK is higher than 52MHz + * 3. DLL is enabled + */ + if (host->mmc->ios.enhanced_strobe) + reg |= XENON_ENABLE_RESP_STROBE; + sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL); + + /* Set Data Strobe Pull down */ + if (priv->phy_type == EMMC_5_0_PHY) { + reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL); + reg |= XENON_EMMC5_FC_QSP_PD; + reg &= ~XENON_EMMC5_FC_QSP_PU; + sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL); + } else { + reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1); + reg |= XENON_EMMC5_1_FC_QSP_PD; + reg &= ~XENON_EMMC5_1_FC_QSP_PU; + sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1); + } +} + +/* + * If eMMC PHY Slow Mode is required in lower speed mode (SDCLK < 55MHz) + * in SDR mode, enable Slow Mode to bypass eMMC PHY. + * SDIO slower SDR mode also requires Slow Mode. + * + * If Slow Mode is enabled, return true. + * Otherwise, return false. + */ +static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host, + unsigned char timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_params *params = priv->phy_params; + struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs; + u32 reg; + int ret; + + if (host->clock > MMC_HIGH_52_MAX_DTR) + return false; + + reg = sdhci_readl(host, phy_regs->timing_adj); + /* When in slower SDR mode, enable Slow Mode for SDIO + * or when Slow Mode flag is set + */ + switch (timing) { + case MMC_TIMING_LEGACY: + /* + * If Slow Mode is required, enable Slow Mode by default + * in early init phase to avoid any potential issue. + */ + if (params->slow_mode) { + reg |= XENON_TIMING_ADJUST_SLOW_MODE; + ret = true; + } else { + reg &= ~XENON_TIMING_ADJUST_SLOW_MODE; + ret = false; + } + break; + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_SDR12: + case MMC_TIMING_SD_HS: + case MMC_TIMING_MMC_HS: + if ((priv->init_card_type == MMC_TYPE_SDIO) || + params->slow_mode) { + reg |= XENON_TIMING_ADJUST_SLOW_MODE; + ret = true; + break; + } + fallthrough; + default: + reg &= ~XENON_TIMING_ADJUST_SLOW_MODE; + ret = false; + } + + sdhci_writel(host, reg, phy_regs->timing_adj); + return ret; +} + +/* + * Set-up eMMC 5.0/5.1 PHY. + * Specific configuration depends on the current speed mode in use. 
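+ * The order below is: pad receivers and pull-ups first, then (for + * non-legacy modes) SDIO mode and slow mode selection, ZNR/ZPR, the + * DDR vs. async DQ mode switch with the bus clock gated, and finally + * the PHY (re-)init.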
+ */ +static void xenon_emmc_phy_set(struct sdhci_host *host, + unsigned char timing) +{ + u32 reg; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + struct xenon_emmc_phy_params *params = priv->phy_params; + struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs; + + dev_dbg(mmc_dev(host->mmc), "eMMC PHY setting starts\n"); + + /* Setup pad, set bit[28] and bits[26:24] */ + reg = sdhci_readl(host, phy_regs->pad_ctrl); + reg |= (XENON_FC_DQ_RECEN | XENON_FC_CMD_RECEN | + XENON_FC_QSP_RECEN | XENON_OEN_QSN); + /* All FC_XX_RECEIVCE should be set as CMOS Type */ + reg |= XENON_FC_ALL_CMOS_RECEIVER; + sdhci_writel(host, reg, phy_regs->pad_ctrl); + + /* Set CMD and DQ Pull Up */ + if (priv->phy_type == EMMC_5_0_PHY) { + reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL); + reg |= (XENON_EMMC5_FC_CMD_PU | XENON_EMMC5_FC_DQ_PU); + reg &= ~(XENON_EMMC5_FC_CMD_PD | XENON_EMMC5_FC_DQ_PD); + sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL); + } else { + reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1); + reg |= (XENON_EMMC5_1_FC_CMD_PU | XENON_EMMC5_1_FC_DQ_PU); + reg &= ~(XENON_EMMC5_1_FC_CMD_PD | XENON_EMMC5_1_FC_DQ_PD); + sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1); + } + + if (timing == MMC_TIMING_LEGACY) { + xenon_emmc_phy_slow_mode(host, timing); + goto phy_init; + } + + /* + * If SDIO card, set SDIO Mode + * Otherwise, clear SDIO Mode + */ + reg = sdhci_readl(host, phy_regs->timing_adj); + if (priv->init_card_type == MMC_TYPE_SDIO) + reg |= XENON_TIMING_ADJUST_SDIO_MODE; + else + reg &= ~XENON_TIMING_ADJUST_SDIO_MODE; + sdhci_writel(host, reg, phy_regs->timing_adj); + + if (xenon_emmc_phy_slow_mode(host, timing)) + goto phy_init; + + /* + * Set preferred ZNR and ZPR value + * The ZNR and ZPR value vary between different boards. + * Define them both in sdhci-xenon-emmc-phy.h. 
+ */ + reg = sdhci_readl(host, phy_regs->pad_ctrl2); + reg &= ~((XENON_ZNR_MASK << XENON_ZNR_SHIFT) | XENON_ZPR_MASK); + reg |= ((params->znr << XENON_ZNR_SHIFT) | params->zpr); + sdhci_writel(host, reg, phy_regs->pad_ctrl2); + + /* + * When setting EMMC_PHY_FUNC_CONTROL register, + * SD clock should be disabled + */ + reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); + reg &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL); + + reg = sdhci_readl(host, phy_regs->func_ctrl); + switch (timing) { + case MMC_TIMING_MMC_HS400: + reg |= (XENON_DQ_DDR_MODE_MASK << XENON_DQ_DDR_MODE_SHIFT) | + XENON_CMD_DDR_MODE; + reg &= ~XENON_DQ_ASYNC_MODE; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + reg |= (XENON_DQ_DDR_MODE_MASK << XENON_DQ_DDR_MODE_SHIFT) | + XENON_CMD_DDR_MODE | XENON_DQ_ASYNC_MODE; + break; + default: + reg &= ~((XENON_DQ_DDR_MODE_MASK << XENON_DQ_DDR_MODE_SHIFT) | + XENON_CMD_DDR_MODE); + reg |= XENON_DQ_ASYNC_MODE; + } + sdhci_writel(host, reg, phy_regs->func_ctrl); + + /* Enable bus clock */ + reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); + reg |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL); + + if (timing == MMC_TIMING_MMC_HS400) + /* Hardware team recommend a value for HS400 */ + sdhci_writel(host, phy_regs->logic_timing_val, + phy_regs->logic_timing_adj); + else + xenon_emmc_phy_disable_strobe(host); + +phy_init: + xenon_emmc_phy_init(host); + + dev_dbg(mmc_dev(host->mmc), "eMMC PHY setting completes\n"); +} + +static int get_dt_pad_ctrl_data(struct sdhci_host *host, + struct device_node *np, + struct xenon_emmc_phy_params *params) +{ + int ret = 0; + const char *name; + struct resource iomem; + + if (of_device_is_compatible(np, "marvell,armada-3700-sdhci")) + params->pad_ctrl.set_soc_pad = armada_3700_soc_pad_voltage_set; + else + return 0; + + if (of_address_to_resource(np, 1, &iomem)) { + dev_err(mmc_dev(host->mmc), "Unable to find SoC PAD ctrl register address for %pOFn\n", + np); + return -EINVAL; + } + + params->pad_ctrl.reg = devm_ioremap_resource(mmc_dev(host->mmc), + &iomem); + if (IS_ERR(params->pad_ctrl.reg)) + return PTR_ERR(params->pad_ctrl.reg); + + ret = of_property_read_string(np, "marvell,pad-type", &name); + if (ret) { + dev_err(mmc_dev(host->mmc), "Unable to determine SoC PHY PAD ctrl type\n"); + return ret; + } + if (!strcmp(name, "sd")) { + params->pad_ctrl.pad_type = SOC_PAD_SD; + } else if (!strcmp(name, "fixed-1-8v")) { + params->pad_ctrl.pad_type = SOC_PAD_FIXED_1_8V; + } else { + dev_err(mmc_dev(host->mmc), "Unsupported SoC PHY PAD ctrl type %s\n", + name); + return -EINVAL; + } + + return ret; +} + +static int xenon_emmc_phy_parse_param_dt(struct sdhci_host *host, + struct device_node *np, + struct xenon_emmc_phy_params *params) +{ + u32 value; + + params->slow_mode = false; + if (of_property_read_bool(np, "marvell,xenon-phy-slow-mode")) + params->slow_mode = true; + + params->znr = XENON_ZNR_DEF_VALUE; + if (!of_property_read_u32(np, "marvell,xenon-phy-znr", &value)) + params->znr = value & XENON_ZNR_MASK; + + params->zpr = XENON_ZPR_DEF_VALUE; + if (!of_property_read_u32(np, "marvell,xenon-phy-zpr", &value)) + params->zpr = value & XENON_ZPR_MASK; + + params->nr_tun_times = XENON_TUN_CONSECUTIVE_TIMES; + if (!of_property_read_u32(np, "marvell,xenon-phy-nr-success-tun", + &value)) + params->nr_tun_times = value & XENON_TUN_CONSECUTIVE_TIMES_MASK; + + params->tun_step_divider = XENON_TUNING_STEP_DIVIDER; + if (!of_property_read_u32(np, "marvell,xenon-phy-tun-step-divider", + &value)) + 
params->tun_step_divider = value & 0xFF; + + return get_dt_pad_ctrl_data(host, np, params); +} + +/* Set SoC PHY Voltage PAD */ +void xenon_soc_pad_ctrl(struct sdhci_host *host, + unsigned char signal_voltage) +{ + xenon_emmc_phy_set_soc_pad(host, signal_voltage); +} + +/* + * Set up the PHY when the card is working in a High Speed mode. + * HS400 sets Data Strobe and Enhanced Strobe if supported. + * HS200/SDR104 set the tuning config to prepare for tuning. + */ +static int xenon_hs_delay_adj(struct sdhci_host *host) +{ + int ret = 0; + + if (WARN_ON(host->clock <= XENON_DEFAULT_SDCLK_FREQ)) + return -EINVAL; + + switch (host->timing) { + case MMC_TIMING_MMC_HS400: + xenon_emmc_phy_strobe_delay_adj(host); + return 0; + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + return xenon_emmc_phy_config_tuning(host); + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + /* + * DDR mode requires the driver to scan the Sampling Fixed + * Delay Line to find a suitable operation sampling point. + * It is hard to implement such a scan in the host driver, + * since initiating commands from the host driver is not safe. + * Thus, for now, keep the PHY Sampling Fixed Delay at its + * default value in DDR mode. + * + * If any timing issue occurs in DDR mode on Marvell products, + * please contact the maintainer for internal support from Marvell. + */ + dev_warn_once(mmc_dev(host->mmc), "Timing issue might occur in DDR mode\n"); + return 0; + } + + return ret; +} + +/* + * Adjust the PHY setting. + * The PHY setting should be adjusted when the SDCLK frequency, Bus Width + * or Speed Mode is changed. + * Additional configuration is required when the card is working in a + * High Speed mode, after leaving Legacy mode. + */ +int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret = 0; + + if (!host->clock) { + priv->clock = 0; + return 0; + } + + /* + * If the timing, frequency and bus width are unchanged, there is + * nothing to adjust; otherwise set the eMMC PHY based on the current + * setting and adjust the Xenon SDHC delay. + */ + if ((host->clock == priv->clock) && + (ios->bus_width == priv->bus_width) && + (ios->timing == priv->timing)) + return 0; + + xenon_emmc_phy_set(host, ios->timing); + + /* Update the record */ + priv->bus_width = ios->bus_width; + + priv->timing = ios->timing; + priv->clock = host->clock; + + /* Legacy mode is a special case */ + if (ios->timing == MMC_TIMING_LEGACY) + return 0; + + if (host->clock > XENON_DEFAULT_SDCLK_FREQ) + ret = xenon_hs_delay_adj(host); + return ret; +} + +static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, + const char *phy_name) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + priv->phy_type = match_string(phy_types, NR_PHY_TYPES, phy_name); + if (priv->phy_type < 0) { + dev_err(mmc_dev(host->mmc), + "Unable to determine PHY name %s. Use default eMMC 5.1 PHY\n", + phy_name); + priv->phy_type = EMMC_5_1_PHY; + } + + ret = xenon_alloc_emmc_phy(host); + if (ret) + return ret; + + return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); +} + +int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) +{ + const char *phy_type = NULL; + + if (!of_property_read_string(np, "marvell,xenon-phy-type", &phy_type)) + return xenon_add_phy(np, host, phy_type); + + return xenon_add_phy(np, host, "emmc 5.1 phy"); +} diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c new file mode 100644 index 000000000..d509198c0 --- /dev/null +++ b/drivers/mmc/host/sdhci-xenon.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Marvell Xenon SDHC as a platform device + * + * Copyright (C) 2016 Marvell, All Rights Reserved. + * + * Author: Hu Ziji + * Date: 2016-8-24 + * + * Inspired by Jisheng Zhang + * Special thanks to Video BG4 project team. + */ + +#include <linux/delay.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> + +#include "sdhci-pltfm.h" +#include "sdhci-xenon.h" + +static int xenon_enable_internal_clk(struct sdhci_host *host) +{ + u32 reg; + ktime_t timeout; + + reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); + reg |= SDHCI_CLOCK_INT_EN; + sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL); + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (reg & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { + dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n"); + return -ETIMEDOUT; + } + usleep_range(900, 1100); + } + + return 0; +} + +/* Set SDCLK-off-while-idle */ +static void xenon_set_sdclk_off_idle(struct sdhci_host *host, + unsigned char sdhc_id, bool enable) +{ + u32 reg; + u32 mask; + + reg = sdhci_readl(host, XENON_SYS_OP_CTRL); + /* Get the bit shift based on the SDHC index */ + mask = (0x1 << (XENON_SDCLK_IDLEOFF_ENABLE_SHIFT + sdhc_id)); + if (enable) + reg |= mask; + else + reg &= ~mask; + + sdhci_writel(host, reg, XENON_SYS_OP_CTRL); +} + +/* Enable/Disable the Auto Clock Gating function */ +static void xenon_set_acg(struct sdhci_host *host, bool enable) +{ + u32 reg; + + reg = sdhci_readl(host, XENON_SYS_OP_CTRL); + if (enable) + reg &= ~XENON_AUTO_CLKGATE_DISABLE_MASK; + else + reg |= XENON_AUTO_CLKGATE_DISABLE_MASK; + sdhci_writel(host, reg, XENON_SYS_OP_CTRL); +} + +/* Enable this SDHC */ +static void xenon_enable_sdhc(struct sdhci_host *host, + unsigned char sdhc_id) +{ + u32 reg; + + reg = sdhci_readl(host, XENON_SYS_OP_CTRL); + reg |= (BIT(sdhc_id) << XENON_SLOT_ENABLE_SHIFT); + sdhci_writel(host, reg, XENON_SYS_OP_CTRL); + + host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; + /* + * Force to clear BUS_TEST to + * skip bus_test_pre and bus_test_post + */ + host->mmc->caps &= ~MMC_CAP_BUS_WIDTH_TEST; +} + +/* Disable this SDHC */ +static void xenon_disable_sdhc(struct sdhci_host *host, + unsigned char sdhc_id) +{ + u32 reg; + + reg = sdhci_readl(host, XENON_SYS_OP_CTRL); + reg &= ~(BIT(sdhc_id) << XENON_SLOT_ENABLE_SHIFT); + sdhci_writel(host, reg, XENON_SYS_OP_CTRL); +} + +/* Enable Parallel Transfer Mode */ +static void xenon_enable_sdhc_parallel_tran(struct sdhci_host *host, + unsigned char sdhc_id) +{ + u32 reg; + + reg = sdhci_readl(host, XENON_SYS_EXT_OP_CTRL); + reg |= BIT(sdhc_id); + sdhci_writel(host, reg, XENON_SYS_EXT_OP_CTRL); +} + +/* Mask command conflict error */ +static void xenon_mask_cmd_conflict_err(struct
sdhci_host *host) +{ + u32 reg; + + reg = sdhci_readl(host, XENON_SYS_EXT_OP_CTRL); + reg |= XENON_MASK_CMD_CONFLICT_ERR; + sdhci_writel(host, reg, XENON_SYS_EXT_OP_CTRL); +} + +static void xenon_retune_setup(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + /* Disable the Re-Tuning Request functionality */ + reg = sdhci_readl(host, XENON_SLOT_RETUNING_REQ_CTRL); + reg &= ~XENON_RETUNING_COMPATIBLE; + sdhci_writel(host, reg, XENON_SLOT_RETUNING_REQ_CTRL); + + /* Disable the Re-tuning Interrupt */ + reg = sdhci_readl(host, SDHCI_SIGNAL_ENABLE); + reg &= ~SDHCI_INT_RETUNE; + sdhci_writel(host, reg, SDHCI_SIGNAL_ENABLE); + reg = sdhci_readl(host, SDHCI_INT_ENABLE); + reg &= ~SDHCI_INT_RETUNE; + sdhci_writel(host, reg, SDHCI_INT_ENABLE); + + /* Force to use Tuning Mode 1 */ + host->tuning_mode = SDHCI_TUNING_MODE_1; + /* Set re-tuning period */ + host->tuning_count = 1 << (priv->tuning_count - 1); +} + +/* + * Operations inside struct sdhci_ops + */ +/* Recover the Register Setting cleared during SOFTWARE_RESET_ALL */ +static void xenon_reset_exit(struct sdhci_host *host, + unsigned char sdhc_id, u8 mask) +{ + /* Only SOFTWARE RESET ALL will clear the register setting */ + if (!(mask & SDHCI_RESET_ALL)) + return; + + /* Disable tuning request and auto-retuning again */ + xenon_retune_setup(host); + + /* + * The ACG should be turned off at early init time, in order + * to avoid possible issues with 1.8V regulator stabilization. + * The feature is enabled at a later stage. + */ + xenon_set_acg(host, false); + + xenon_set_sdclk_off_idle(host, sdhc_id, false); + + xenon_mask_cmd_conflict_err(host); +} + +static void xenon_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + + sdhci_reset(host, mask); + xenon_reset_exit(host, priv->sdhc_id, mask); +} + +/* + * Xenon defines different values for HS200 and HS400 + * in Host_Control_2 + */ +static void xenon_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if (timing == MMC_TIMING_MMC_HS200) + ctrl_2 |= XENON_CTRL_HS200; + else if (timing == MMC_TIMING_UHS_SDR104) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= XENON_CTRL_HS400; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void xenon_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + u8 pwr = host->pwr; + + sdhci_set_power_noreg(host, mode, vdd); + + if (host->pwr == pwr) + return; + + if (host->pwr == 0) + vdd = 0; + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + +static void xenon_voltage_switch(struct sdhci_host *host) +{ + /* Wait for 5 ms after setting the 1.8V signal enable bit */ + usleep_range(5000, 5500); +} + +static const struct sdhci_ops sdhci_xenon_ops = { + .voltage_switch = xenon_voltage_switch, +
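/* .set_clock, .set_bus_width and .get_max_clock reuse the stock
 * SDHCI/pltfm helpers; the remaining callbacks are Xenon-specific
 * overrides. */ +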
.set_clock = sdhci_set_clock, + .set_power = xenon_set_power, + .set_bus_width = sdhci_set_bus_width, + .reset = xenon_reset, + .set_uhs_signaling = xenon_set_uhs_signaling, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, +}; + +static const struct sdhci_pltfm_data sdhci_xenon_pdata = { + .ops = &sdhci_xenon_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +}; + +/* + * Xenon Specific Operations in mmc_host_ops + */ +static void xenon_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + /* + * HS400/HS200/eMMC HS doesn't have Preset Value register. + * However, sdhci_set_ios will read HS400/HS200 Preset register. + * Disable Preset Value register for HS400/HS200. + * eMMC HS with preset_enabled set will trigger a bug in + * get_preset_value(). + */ + if ((ios->timing == MMC_TIMING_MMC_HS400) || + (ios->timing == MMC_TIMING_MMC_HS200) || + (ios->timing == MMC_TIMING_MMC_HS)) { + host->preset_enabled = false; + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + host->flags &= ~SDHCI_PV_ENABLED; + + reg = sdhci_readw(host, SDHCI_HOST_CONTROL2); + reg &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + sdhci_writew(host, reg, SDHCI_HOST_CONTROL2); + } else { + host->quirks2 &= ~SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + } + + sdhci_set_ios(mmc, ios); + xenon_phy_adj(host, ios); + + if (host->clock > XENON_DEFAULT_SDCLK_FREQ) + xenon_set_sdclk_off_idle(host, priv->sdhc_id, true); +} + +static int xenon_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* + * Before SD/SDIO set signal voltage, SD bus clock should be + * disabled. However, sdhci_set_clock will also disable the Internal + * clock in mmc_set_signal_voltage(). + * If Internal clock is disabled, the 3.3V/1.8V bit can not be updated. + * Thus here manually enable internal clock. + * + * After switch completes, it is unnecessary to disable internal clock, + * since keeping internal clock active obeys SD spec. + */ + xenon_enable_internal_clk(host); + + xenon_soc_pad_ctrl(host, ios->signal_voltage); + + /* + * If Vqmmc is fixed on platform, vqmmc regulator should be unavailable. + * Thus SDHCI_CTRL_VDD_180 bit might not work then. + * Skip the standard voltage switch to avoid any issue. + */ + if (PTR_ERR(mmc->supply.vqmmc) == -ENODEV) + return 0; + + return sdhci_start_signal_voltage_switch(mmc, ios); +} + +/* + * Update card type. + * priv->init_card_type will be used in PHY timing adjustment. + */ +static void xenon_init_card(struct mmc_host *mmc, struct mmc_card *card) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + + /* Update card type*/ + priv->init_card_type = card->type; +} + +static int xenon_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (host->timing == MMC_TIMING_UHS_DDR50 || + host->timing == MMC_TIMING_MMC_DDR52) + return 0; + + /* + * Currently force Xenon driver back to support mode 1 only, + * even though Xenon might claim to support mode 2 or mode 3. + * It requires more time to test mode 2/mode 3 on more platforms. 
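 * Concretely: if a reset left host->tuning_mode at mode 2/3,
 * xenon_retune_setup() rewrites it to SDHCI_TUNING_MODE_1 and
 * re-derives host->tuning_count as 1 << (priv->tuning_count - 1)
 * before the standard sdhci_execute_tuning() runs.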
+ */ + if (host->tuning_mode != SDHCI_TUNING_MODE_1) + xenon_retune_setup(host); + + return sdhci_execute_tuning(mmc, opcode); +} + +static void xenon_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 reg; + u8 sdhc_id = priv->sdhc_id; + + sdhci_enable_sdio_irq(mmc, enable); + + if (enable) { + /* + * Set SDIO Card Inserted indication + * to enable detecting SDIO async irq. + */ + reg = sdhci_readl(host, XENON_SYS_CFG_INFO); + reg |= (1 << (sdhc_id + XENON_SLOT_TYPE_SDIO_SHIFT)); + sdhci_writel(host, reg, XENON_SYS_CFG_INFO); + } else { + /* Clear SDIO Card Inserted indication */ + reg = sdhci_readl(host, XENON_SYS_CFG_INFO); + reg &= ~(1 << (sdhc_id + XENON_SLOT_TYPE_SDIO_SHIFT)); + sdhci_writel(host, reg, XENON_SYS_CFG_INFO); + } +} + +static void xenon_replace_mmc_host_ops(struct sdhci_host *host) +{ + host->mmc_host_ops.set_ios = xenon_set_ios; + host->mmc_host_ops.start_signal_voltage_switch = + xenon_start_signal_voltage_switch; + host->mmc_host_ops.init_card = xenon_init_card; + host->mmc_host_ops.execute_tuning = xenon_execute_tuning; + host->mmc_host_ops.enable_sdio_irq = xenon_enable_sdio_irq; +} + +/* + * Parse Xenon specific DT properties: + * sdhc-id: the index of current SDHC. + * Refer to XENON_SYS_CFG_INFO register + * tun-count: the interval between re-tuning + */ +static int xenon_probe_dt(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct sdhci_host *host = platform_get_drvdata(pdev); + struct mmc_host *mmc = host->mmc; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 sdhc_id, nr_sdhc; + u32 tuning_count; + + /* Disable HS200 on Armada AP806 */ + if (of_device_is_compatible(np, "marvell,armada-ap806-sdhci")) + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; + + sdhc_id = 0x0; + if (!of_property_read_u32(np, "marvell,xenon-sdhc-id", &sdhc_id)) { + nr_sdhc = sdhci_readl(host, XENON_SYS_CFG_INFO); + nr_sdhc &= XENON_NR_SUPPORTED_SLOT_MASK; + if (unlikely(sdhc_id > nr_sdhc)) { + dev_err(mmc_dev(mmc), "SDHC Index %d exceeds Number of SDHCs %d\n", + sdhc_id, nr_sdhc); + return -EINVAL; + } + } + priv->sdhc_id = sdhc_id; + + tuning_count = XENON_DEF_TUNING_COUNT; + if (!of_property_read_u32(np, "marvell,xenon-tun-count", + &tuning_count)) { + if (unlikely(tuning_count >= XENON_TMR_RETUN_NO_PRESENT)) { + dev_err(mmc_dev(mmc), "Wrong Re-tuning Count. 
Set default value %d\n", + XENON_DEF_TUNING_COUNT); + tuning_count = XENON_DEF_TUNING_COUNT; + } + } + priv->tuning_count = tuning_count; + + return xenon_phy_parse_dt(np, host); +} + +static int xenon_sdhc_prepare(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u8 sdhc_id = priv->sdhc_id; + + /* Enable SDHC */ + xenon_enable_sdhc(host, sdhc_id); + + /* Enable ACG */ + xenon_set_acg(host, true); + + /* Enable Parallel Transfer Mode */ + xenon_enable_sdhc_parallel_tran(host, sdhc_id); + + /* Disable SDCLK-Off-While-Idle before card init */ + xenon_set_sdclk_off_idle(host, sdhc_id, false); + + xenon_mask_cmd_conflict_err(host); + + return 0; +} + +static void xenon_sdhc_unprepare(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + u8 sdhc_id = priv->sdhc_id; + + /* disable SDHC */ + xenon_disable_sdhc(host, sdhc_id); +} + +static int xenon_probe(struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_host *host; + struct xenon_priv *priv; + int err; + + host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, + sizeof(struct xenon_priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + + /* + * Link Xenon specific mmc_host_ops function, + * to replace standard ones in sdhci_ops. + */ + xenon_replace_mmc_host_ops(host); + + pltfm_host->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pltfm_host->clk)) { + err = PTR_ERR(pltfm_host->clk); + dev_err(&pdev->dev, "Failed to setup input clk: %d\n", err); + goto free_pltfm; + } + err = clk_prepare_enable(pltfm_host->clk); + if (err) + goto free_pltfm; + + priv->axi_clk = devm_clk_get(&pdev->dev, "axi"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_clk; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err) + goto err_clk; + } + + err = mmc_of_parse(host->mmc); + if (err) + goto err_clk_axi; + + sdhci_get_of_property(pdev); + + xenon_set_acg(host, false); + + /* Xenon specific dt parse */ + err = xenon_probe_dt(pdev); + if (err) + goto err_clk_axi; + + err = xenon_sdhc_prepare(host); + if (err) + goto err_clk_axi; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + + err = sdhci_add_host(host); + if (err) + goto remove_sdhc; + + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +remove_sdhc: + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + xenon_sdhc_unprepare(host); +err_clk_axi: + clk_disable_unprepare(priv->axi_clk); +err_clk: + clk_disable_unprepare(pltfm_host->clk); +free_pltfm: + sdhci_pltfm_free(pdev); + return err; +} + +static int xenon_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + sdhci_remove_host(host, 0); + + xenon_sdhc_unprepare(host); + clk_disable_unprepare(priv->axi_clk); + clk_disable_unprepare(pltfm_host->clk); + + sdhci_pltfm_free(pdev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP 
+static int xenon_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = pm_runtime_force_suspend(dev); + + priv->restore_needed = true; + return ret; +} +#endif + +#ifdef CONFIG_PM +static int xenon_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + clk_disable_unprepare(pltfm_host->clk); + /* + * priv->clock needs to be updated here; otherwise, on runtime + * resume, the PHY is not aware of the clock change and will not + * be readjusted, which causes command errors. + */ + priv->clock = 0; + return 0; +} + +static int xenon_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(dev, "can't enable mainck\n"); + return ret; + } + + if (priv->restore_needed) { + ret = xenon_sdhc_prepare(host); + if (ret) + goto out; + priv->restore_needed = false; + } + + ret = sdhci_runtime_resume_host(host, 0); + if (ret) + goto out; + return 0; +out: + clk_disable_unprepare(pltfm_host->clk); + return ret; +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xenon_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(xenon_runtime_suspend, + xenon_runtime_resume, + NULL) +}; + +static const struct of_device_id sdhci_xenon_dt_ids[] = { + { .compatible = "marvell,armada-ap806-sdhci",}, + { .compatible = "marvell,armada-cp110-sdhci",}, + { .compatible = "marvell,armada-3700-sdhci",}, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids); + +static struct platform_driver sdhci_xenon_driver = { + .driver = { + .name = "xenon-sdhci", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_xenon_dt_ids, + .pm = &sdhci_xenon_dev_pm_ops, + }, + .probe = xenon_probe, + .remove = xenon_remove, +}; + +module_platform_driver(sdhci_xenon_driver); + +MODULE_DESCRIPTION("SDHCI platform driver for Marvell Xenon SDHC"); +MODULE_AUTHOR("Hu Ziji "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h new file mode 100644 index 000000000..593b82d7b --- /dev/null +++ b/drivers/mmc/host/sdhci-xenon.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Marvell, All Rights Reserved.
+ * + * Author: Hu Ziji + * Date: 2016-8-24 + */ +#ifndef SDHCI_XENON_H_ +#define SDHCI_XENON_H_ + +/* Register Offset of Xenon SDHC self-defined register */ +#define XENON_SYS_CFG_INFO 0x0104 +#define XENON_SLOT_TYPE_SDIO_SHIFT 24 +#define XENON_NR_SUPPORTED_SLOT_MASK 0x7 + +#define XENON_SYS_OP_CTRL 0x0108 +#define XENON_AUTO_CLKGATE_DISABLE_MASK BIT(20) +#define XENON_SDCLK_IDLEOFF_ENABLE_SHIFT 8 +#define XENON_SLOT_ENABLE_SHIFT 0 + +#define XENON_SYS_EXT_OP_CTRL 0x010C +#define XENON_MASK_CMD_CONFLICT_ERR BIT(8) + +#define XENON_SLOT_OP_STATUS_CTRL 0x0128 +#define XENON_TUN_CONSECUTIVE_TIMES_SHIFT 16 +#define XENON_TUN_CONSECUTIVE_TIMES_MASK 0x7 +#define XENON_TUN_CONSECUTIVE_TIMES 0x4 +#define XENON_TUNING_STEP_SHIFT 12 +#define XENON_TUNING_STEP_MASK 0xF +#define XENON_TUNING_STEP_DIVIDER BIT(6) + +#define XENON_SLOT_EMMC_CTRL 0x0130 +#define XENON_ENABLE_RESP_STROBE BIT(25) +#define XENON_ENABLE_DATA_STROBE BIT(24) + +#define XENON_SLOT_RETUNING_REQ_CTRL 0x0144 +/* retuning compatible */ +#define XENON_RETUNING_COMPATIBLE 0x1 + +#define XENON_SLOT_EXT_PRESENT_STATE 0x014C +#define XENON_DLL_LOCK_STATE 0x1 + +#define XENON_SLOT_DLL_CUR_DLY_VAL 0x0150 + +/* Tuning Parameter */ +#define XENON_TMR_RETUN_NO_PRESENT 0xF +#define XENON_DEF_TUNING_COUNT 0x9 + +#define XENON_DEFAULT_SDCLK_FREQ 400000 +#define XENON_LOWEST_SDCLK_FREQ 100000 + +/* Xenon specific Mode Select value */ +#define XENON_CTRL_HS200 0x5 +#define XENON_CTRL_HS400 0x6 + +struct xenon_priv { + unsigned char tuning_count; + /* idx of SDHC */ + u8 sdhc_id; + + /* + * eMMC/SD/SDIO require different register settings. + * Xenon driver has to recognize card type + * before mmc_host->card is not available. + * This field records the card type during init. + * It is updated in xenon_init_card(). + * + * It is only valid during initialization after it is updated. + * Do not access this variable in normal transfers after + * initialization completes. + */ + unsigned int init_card_type; + + /* + * The bus_width, timing, and clock fields in below + * record the current ios setting of Xenon SDHC. + * Driver will adjust PHY setting if any change to + * ios affects PHY timing. + */ + unsigned char bus_width; + unsigned char timing; + unsigned int clock; + struct clk *axi_clk; + + int phy_type; + /* + * Contains board-specific PHY parameters + * passed from device tree. + */ + void *phy_params; + struct xenon_emmc_phy_regs *emmc_phy_regs; + bool restore_needed; +}; + +int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); +int xenon_phy_parse_dt(struct device_node *np, + struct sdhci_host *host); +void xenon_soc_pad_ctrl(struct sdhci_host *host, + unsigned char signal_voltage); +#endif diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c new file mode 100644 index 000000000..bad01cc68 --- /dev/null +++ b/drivers/mmc/host/sdhci.c @@ -0,0 +1,4929 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver + * + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. + * + * Thanks to the following companies for their support: + * + * - JMicron (hardware and technical support) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "sdhci.h" + +#define DRIVER_NAME "sdhci" + +#define DBG(f, x...) 
\ + pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) + +#define SDHCI_DUMP(f, x...) \ + pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) + +#define MAX_TUNING_LOOP 40 + +static unsigned int debug_quirks = 0; +static unsigned int debug_quirks2; + +static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); + +static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); + +void sdhci_dumpregs(struct sdhci_host *host) +{ + SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); + + SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", + sdhci_readl(host, SDHCI_DMA_ADDRESS), + sdhci_readw(host, SDHCI_HOST_VERSION)); + SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", + sdhci_readw(host, SDHCI_BLOCK_SIZE), + sdhci_readw(host, SDHCI_BLOCK_COUNT)); + SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", + sdhci_readl(host, SDHCI_ARGUMENT), + sdhci_readw(host, SDHCI_TRANSFER_MODE)); + SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", + sdhci_readl(host, SDHCI_PRESENT_STATE), + sdhci_readb(host, SDHCI_HOST_CONTROL)); + SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", + sdhci_readb(host, SDHCI_POWER_CONTROL), + sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); + SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", + sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), + sdhci_readw(host, SDHCI_CLOCK_CONTROL)); + SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", + sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), + sdhci_readl(host, SDHCI_INT_STATUS)); + SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", + sdhci_readl(host, SDHCI_INT_ENABLE), + sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); + SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", + sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), + sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); + SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", + sdhci_readl(host, SDHCI_CAPABILITIES), + sdhci_readl(host, SDHCI_CAPABILITIES_1)); + SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", + sdhci_readw(host, SDHCI_COMMAND), + sdhci_readl(host, SDHCI_MAX_CURRENT)); + SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", + sdhci_readl(host, SDHCI_RESPONSE), + sdhci_readl(host, SDHCI_RESPONSE + 4)); + SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", + sdhci_readl(host, SDHCI_RESPONSE + 8), + sdhci_readl(host, SDHCI_RESPONSE + 12)); + SDHCI_DUMP("Host ctl2: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_CONTROL2)); + + if (host->flags & SDHCI_USE_ADMA) { + if (host->flags & SDHCI_USE_64_BIT_DMA) { + SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", + sdhci_readl(host, SDHCI_ADMA_ERROR), + sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), + sdhci_readl(host, SDHCI_ADMA_ADDRESS)); + } else { + SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", + sdhci_readl(host, SDHCI_ADMA_ERROR), + sdhci_readl(host, SDHCI_ADMA_ADDRESS)); + } + } + + if (host->ops->dump_vendor_regs) + host->ops->dump_vendor_regs(host); + + SDHCI_DUMP("============================================\n"); +} +EXPORT_SYMBOL_GPL(sdhci_dumpregs); + +/*****************************************************************************\ + * * + * Low level functions * + * * +\*****************************************************************************/ + +static void sdhci_do_enable_v4_mode(struct sdhci_host *host) +{ + u16 ctrl2; + + ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ctrl2 & SDHCI_CTRL_V4_MODE) + return; + + ctrl2 |= SDHCI_CTRL_V4_MODE; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); +} + +/* + * This can be called before sdhci_add_host() by Vendor's host controller + * driver 
to enable v4 mode if supported. + */ +void sdhci_enable_v4_mode(struct sdhci_host *host) +{ + host->v4_mode = true; + sdhci_do_enable_v4_mode(host); +} +EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); + +static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) +{ + return cmd->data || cmd->flags & MMC_RSP_BUSY; +} + +static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) +{ + u32 present; + + if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || + !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) + return; + + if (enable) { + present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; + + host->ier |= present ? SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + } else { + host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + } + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} + +static void sdhci_enable_card_detection(struct sdhci_host *host) +{ + sdhci_set_card_detection(host, true); +} + +static void sdhci_disable_card_detection(struct sdhci_host *host) +{ + sdhci_set_card_detection(host, false); +} + +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) +{ + if (host->bus_on) + return; + host->bus_on = true; + pm_runtime_get_noresume(host->mmc->parent); +} + +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) +{ + if (!host->bus_on) + return; + host->bus_on = false; + pm_runtime_put_noidle(host->mmc->parent); +} + +void sdhci_reset(struct sdhci_host *host, u8 mask) +{ + ktime_t timeout; + + sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); + + if (mask & SDHCI_RESET_ALL) { + host->clock = 0; + /* Reset-all turns off SD Bus Power */ + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + sdhci_runtime_pm_bus_off(host); + } + + /* Wait max 100 ms */ + timeout = ktime_add_ms(ktime_get(), 100); + + /* hw clears the bit when it's done */ + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) + break; + if (timedout) { + pr_err("%s: Reset 0x%x never completed.\n", + mmc_hostname(host->mmc), (int)mask); + sdhci_dumpregs(host); + return; + } + udelay(10); + } +} +EXPORT_SYMBOL_GPL(sdhci_reset); + +static void sdhci_do_reset(struct sdhci_host *host, u8 mask) +{ + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { + struct mmc_host *mmc = host->mmc; + + if (!mmc->ops->get_cd(mmc)) + return; + } + + host->ops->reset(host, mask); + + if (mask & SDHCI_RESET_ALL) { + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } + + /* Resetting the controller clears many */ + host->preset_enabled = false; + } +} + +static void sdhci_set_default_irqs(struct sdhci_host *host) +{ + host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | + SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | + SDHCI_INT_RESPONSE; + + if (host->tuning_mode == SDHCI_TUNING_MODE_2 || + host->tuning_mode == SDHCI_TUNING_MODE_3) + host->ier |= SDHCI_INT_RETUNE; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} + +static void sdhci_config_dma(struct sdhci_host *host) +{ + u8 ctrl; + u16 ctrl2; + + if (host->version < SDHCI_SPEC_200) + return; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + + /* + * Always adjust the DMA selection as some controllers + * (e.g. 
JMicron) can't do PIO properly when the selection + * is ADMA. + */ + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if (!(host->flags & SDHCI_REQ_USE_DMA)) + goto out; + + /* Note if DMA Select is zero then SDMA is selected */ + if (host->flags & SDHCI_USE_ADMA) + ctrl |= SDHCI_CTRL_ADMA32; + + if (host->flags & SDHCI_USE_64_BIT_DMA) { + /* + * If v4 mode, all supported DMA can be 64-bit addressing if + * controller supports 64-bit system address, otherwise only + * ADMA can support 64-bit addressing. + */ + if (host->v4_mode) { + ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl2 |= SDHCI_CTRL_64BIT_ADDR; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); + } else if (host->flags & SDHCI_USE_ADMA) { + /* + * Don't need to undo SDHCI_CTRL_ADMA32 in order to + * set SDHCI_CTRL_ADMA64. + */ + ctrl |= SDHCI_CTRL_ADMA64; + } + } + +out: + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + +static void sdhci_init(struct sdhci_host *host, int soft) +{ + struct mmc_host *mmc = host->mmc; + unsigned long flags; + + if (soft) + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + else + sdhci_do_reset(host, SDHCI_RESET_ALL); + + if (host->v4_mode) + sdhci_do_enable_v4_mode(host); + + spin_lock_irqsave(&host->lock, flags); + sdhci_set_default_irqs(host); + spin_unlock_irqrestore(&host->lock, flags); + + host->cqe_on = false; + + if (soft) { + /* force clock reconfiguration */ + host->clock = 0; + host->reinit_uhs = true; + mmc->ops->set_ios(mmc, &mmc->ios); + } +} + +static void sdhci_reinit(struct sdhci_host *host) +{ + u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + + sdhci_init(host, 0); + sdhci_enable_card_detection(host); + + /* + * A change to the card detect bits indicates a change in present state, + * refer sdhci_set_card_detection(). A card detect interrupt might have + * been missed while the host controller was being reset, so trigger a + * rescan to check. 
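 * (mmc_detect_change() below schedules that rescan with a 200 ms
 * debounce delay.)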
+ */ + if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); +} + +static void __sdhci_led_activate(struct sdhci_host *host) +{ + u8 ctrl; + + if (host->quirks & SDHCI_QUIRK_NO_LED) + return; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl |= SDHCI_CTRL_LED; + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + +static void __sdhci_led_deactivate(struct sdhci_host *host) +{ + u8 ctrl; + + if (host->quirks & SDHCI_QUIRK_NO_LED) + return; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_LED; + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + +#if IS_REACHABLE(CONFIG_LEDS_CLASS) +static void sdhci_led_control(struct led_classdev *led, + enum led_brightness brightness) +{ + struct sdhci_host *host = container_of(led, struct sdhci_host, led); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (host->runtime_suspended) + goto out; + + if (brightness == LED_OFF) + __sdhci_led_deactivate(host); + else + __sdhci_led_activate(host); +out: + spin_unlock_irqrestore(&host->lock, flags); +} + +static int sdhci_led_register(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + + if (host->quirks & SDHCI_QUIRK_NO_LED) + return 0; + + snprintf(host->led_name, sizeof(host->led_name), + "%s::", mmc_hostname(mmc)); + + host->led.name = host->led_name; + host->led.brightness = LED_OFF; + host->led.default_trigger = mmc_hostname(mmc); + host->led.brightness_set = sdhci_led_control; + + return led_classdev_register(mmc_dev(mmc), &host->led); +} + +static void sdhci_led_unregister(struct sdhci_host *host) +{ + if (host->quirks & SDHCI_QUIRK_NO_LED) + return; + + led_classdev_unregister(&host->led); +} + +static inline void sdhci_led_activate(struct sdhci_host *host) +{ +} + +static inline void sdhci_led_deactivate(struct sdhci_host *host) +{ +} + +#else + +static inline int sdhci_led_register(struct sdhci_host *host) +{ + return 0; +} + +static inline void sdhci_led_unregister(struct sdhci_host *host) +{ +} + +static inline void sdhci_led_activate(struct sdhci_host *host) +{ + __sdhci_led_activate(host); +} + +static inline void sdhci_led_deactivate(struct sdhci_host *host) +{ + __sdhci_led_deactivate(host); +} + +#endif + +static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, + unsigned long timeout) +{ + if (sdhci_data_line_cmd(mrq->cmd)) + mod_timer(&host->data_timer, timeout); + else + mod_timer(&host->timer, timeout); +} + +static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) +{ + if (sdhci_data_line_cmd(mrq->cmd)) + del_timer(&host->data_timer); + else + del_timer(&host->timer); +} + +static inline bool sdhci_has_requests(struct sdhci_host *host) +{ + return host->cmd || host->data_cmd; +} + +/*****************************************************************************\ + * * + * Core functions * + * * +\*****************************************************************************/ + +static void sdhci_read_block_pio(struct sdhci_host *host) +{ + unsigned long flags; + size_t blksize, len, chunk; + u32 scratch; + u8 *buf; + + DBG("PIO reading\n"); + + blksize = host->data->blksz; + chunk = 0; + + local_irq_save(flags); + + while (blksize) { + BUG_ON(!sg_miter_next(&host->sg_miter)); + + len = min(host->sg_miter.length, blksize); + + blksize -= len; + host->sg_miter.consumed = len; + + buf = host->sg_miter.addr; + + while (len) { + if (chunk == 0) { + scratch = sdhci_readl(host, SDHCI_BUFFER); + chunk = 4; + } + + *buf = 
scratch & 0xFF; + + buf++; + scratch >>= 8; + chunk--; + len--; + } + } + + sg_miter_stop(&host->sg_miter); + + local_irq_restore(flags); +} + +static void sdhci_write_block_pio(struct sdhci_host *host) +{ + unsigned long flags; + size_t blksize, len, chunk; + u32 scratch; + u8 *buf; + + DBG("PIO writing\n"); + + blksize = host->data->blksz; + chunk = 0; + scratch = 0; + + local_irq_save(flags); + + while (blksize) { + BUG_ON(!sg_miter_next(&host->sg_miter)); + + len = min(host->sg_miter.length, blksize); + + blksize -= len; + host->sg_miter.consumed = len; + + buf = host->sg_miter.addr; + + while (len) { + scratch |= (u32)*buf << (chunk * 8); + + buf++; + chunk++; + len--; + + if ((chunk == 4) || ((len == 0) && (blksize == 0))) { + sdhci_writel(host, scratch, SDHCI_BUFFER); + chunk = 0; + scratch = 0; + } + } + } + + sg_miter_stop(&host->sg_miter); + + local_irq_restore(flags); +} + +static void sdhci_transfer_pio(struct sdhci_host *host) +{ + u32 mask; + + if (host->blocks == 0) + return; + + if (host->data->flags & MMC_DATA_READ) + mask = SDHCI_DATA_AVAILABLE; + else + mask = SDHCI_SPACE_AVAILABLE; + + /* + * Some controllers (JMicron JMB38x) mess up the buffer bits + * for transfers < 4 bytes. As long as it is just one block, + * we can ignore the bits. + */ + if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && + (host->data->blocks == 1)) + mask = ~0; + + while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { + if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) + udelay(100); + + if (host->data->flags & MMC_DATA_READ) + sdhci_read_block_pio(host); + else + sdhci_write_block_pio(host); + + host->blocks--; + if (host->blocks == 0) + break; + } + + DBG("PIO transfer complete.\n"); +} + +static int sdhci_pre_dma_transfer(struct sdhci_host *host, + struct mmc_data *data, int cookie) +{ + int sg_count; + + /* + * If the data buffers are already mapped, return the previous + * dma_map_sg() result. 
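 * (COOKIE_PRE_MAPPED indicates the request was already mapped ahead of
 * time through the host's pre_req() hook, so the dma_map_sg() below can
 * be skipped.)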
+ */ + if (data->host_cookie == COOKIE_PRE_MAPPED) + return data->sg_count; + + /* Bounce write requests to the bounce buffer */ + if (host->bounce_buffer) { + unsigned int length = data->blksz * data->blocks; + + if (length > host->bounce_buffer_size) { + pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", + mmc_hostname(host->mmc), length, + host->bounce_buffer_size); + return -EIO; + } + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + /* Copy the data to the bounce buffer */ + if (host->ops->copy_to_bounce_buffer) { + host->ops->copy_to_bounce_buffer(host, + data, length); + } else { + sg_copy_to_buffer(data->sg, data->sg_len, + host->bounce_buffer, length); + } + } + /* Switch ownership to the DMA */ + dma_sync_single_for_device(host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + /* Just a dummy value */ + sg_count = 1; + } else { + /* Just access the data directly from memory */ + sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } + + if (sg_count == 0) + return -ENOSPC; + + data->sg_count = sg_count; + data->host_cookie = cookie; + + return sg_count; +} + +static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) +{ + local_irq_save(*flags); + return kmap_atomic(sg_page(sg)) + sg->offset; +} + +static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) +{ + kunmap_atomic(buffer); + local_irq_restore(*flags); +} + +void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd) +{ + struct sdhci_adma2_64_desc *dma_desc = *desc; + + /* 32-bit and 64-bit descriptors have these members in same position */ + dma_desc->cmd = cpu_to_le16(cmd); + dma_desc->len = cpu_to_le16(len); + dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); + + if (host->flags & SDHCI_USE_64_BIT_DMA) + dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); + + *desc += host->desc_sz; +} +EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); + +static inline void __sdhci_adma_write_desc(struct sdhci_host *host, + void **desc, dma_addr_t addr, + int len, unsigned int cmd) +{ + if (host->ops->adma_write_desc) + host->ops->adma_write_desc(host, desc, addr, len, cmd); + else + sdhci_adma_write_desc(host, desc, addr, len, cmd); +} + +static void sdhci_adma_mark_end(void *desc) +{ + struct sdhci_adma2_64_desc *dma_desc = desc; + + /* 32-bit and 64-bit descriptors have 'cmd' in same position */ + dma_desc->cmd |= cpu_to_le16(ADMA2_END); +} + +static void sdhci_adma_table_pre(struct sdhci_host *host, + struct mmc_data *data, int sg_count) +{ + struct scatterlist *sg; + unsigned long flags; + dma_addr_t addr, align_addr; + void *desc, *align; + char *buffer; + int len, offset, i; + + /* + * The spec does not specify endianness of descriptor table. + * We currently guess that it is LE. + */ + + host->sg_count = sg_count; + + desc = host->adma_table; + align = host->align_buffer; + + align_addr = host->align_addr; + + for_each_sg(data->sg, sg, host->sg_count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + + /* + * The SDHCI specification states that ADMA addresses must + * be 32-bit aligned. If they aren't, then we use a bounce + * buffer for the (up to three) bytes that screw up the + * alignment. 
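 * For example, an sg entry mapped at DMA address 0x1003 yields
 * offset = (4 - (0x1003 & 3)) & 3 = 1, so that single byte goes through
 * the align (bounce) buffer and the main descriptor starts at 0x1004.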
+ */ + offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & + SDHCI_ADMA2_MASK; + if (offset) { + if (data->flags & MMC_DATA_WRITE) { + buffer = sdhci_kmap_atomic(sg, &flags); + memcpy(align, buffer, offset); + sdhci_kunmap_atomic(buffer, &flags); + } + + /* tran, valid */ + __sdhci_adma_write_desc(host, &desc, align_addr, + offset, ADMA2_TRAN_VALID); + + BUG_ON(offset > 65536); + + align += SDHCI_ADMA2_ALIGN; + align_addr += SDHCI_ADMA2_ALIGN; + + addr += offset; + len -= offset; + } + + /* + * The block layer forces a minimum segment size of PAGE_SIZE, + * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write + * multiple descriptors, noting that the ADMA table is sized + * for 4KiB chunks anyway, so it will be big enough. + */ + while (len > host->max_adma) { + int n = 32 * 1024; /* 32KiB*/ + + __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); + addr += n; + len -= n; + } + + /* tran, valid */ + if (len) + __sdhci_adma_write_desc(host, &desc, addr, len, + ADMA2_TRAN_VALID); + + /* + * If this triggers then we have a calculation bug + * somewhere. :/ + */ + WARN_ON((desc - host->adma_table) >= host->adma_table_sz); + } + + if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { + /* Mark the last descriptor as the terminating descriptor */ + if (desc != host->adma_table) { + desc -= host->desc_sz; + sdhci_adma_mark_end(desc); + } + } else { + /* Add a terminating entry - nop, end, valid */ + __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); + } +} + +static void sdhci_adma_table_post(struct sdhci_host *host, + struct mmc_data *data) +{ + struct scatterlist *sg; + int i, size; + void *align; + char *buffer; + unsigned long flags; + + if (data->flags & MMC_DATA_READ) { + bool has_unaligned = false; + + /* Do a quick scan of the SG list for any unaligned mappings */ + for_each_sg(data->sg, sg, host->sg_count, i) + if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { + has_unaligned = true; + break; + } + + if (has_unaligned) { + dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, + data->sg_len, DMA_FROM_DEVICE); + + align = host->align_buffer; + + for_each_sg(data->sg, sg, host->sg_count, i) { + if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { + size = SDHCI_ADMA2_ALIGN - + (sg_dma_address(sg) & SDHCI_ADMA2_MASK); + + buffer = sdhci_kmap_atomic(sg, &flags); + memcpy(buffer, align, size); + sdhci_kunmap_atomic(buffer, &flags); + + align += SDHCI_ADMA2_ALIGN; + } + } + } + } +} + +static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) +{ + sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); + if (host->flags & SDHCI_USE_64_BIT_DMA) + sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); +} + +static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) +{ + if (host->bounce_buffer) + return host->bounce_addr; + else + return sg_dma_address(host->data->sg); +} + +static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) +{ + if (host->v4_mode) + sdhci_set_adma_addr(host, addr); + else + sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); +} + +static unsigned int sdhci_target_timeout(struct sdhci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + unsigned int target_timeout; + + /* timeout in us */ + if (!data) { + target_timeout = cmd->busy_timeout * 1000; + } else { + target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); + if (host->clock && data->timeout_clks) { + unsigned long long val; + + /* + * data->timeout_clks is in units of clock cycles. + * host->clock is in Hz. target_timeout is in us. 
+ * Hence, us = 1000000 * cycles / Hz. Round up. + */ + val = 1000000ULL * data->timeout_clks; + if (do_div(val, host->clock)) + target_timeout++; + target_timeout += val; + } + } + + return target_timeout; +} + +static void sdhci_calc_sw_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + struct mmc_host *mmc = host->mmc; + struct mmc_ios *ios = &mmc->ios; + unsigned char bus_width = 1 << ios->bus_width; + unsigned int blksz; + unsigned int freq; + u64 target_timeout; + u64 transfer_time; + + target_timeout = sdhci_target_timeout(host, cmd, data); + target_timeout *= NSEC_PER_USEC; + + if (data) { + blksz = data->blksz; + freq = host->mmc->actual_clock ? : host->clock; + transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); + do_div(transfer_time, freq); + /* multiply by '2' to account for any unknowns */ + transfer_time = transfer_time * 2; + /* calculate timeout for the entire data */ + host->data_timeout = data->blocks * target_timeout + + transfer_time; + } else { + host->data_timeout = target_timeout; + } + + if (host->data_timeout) + host->data_timeout += MMC_CMD_TRANSFER_TIME; +} + +static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, + bool *too_big) +{ + u8 count; + struct mmc_data *data; + unsigned target_timeout, current_timeout; + + *too_big = true; + + /* + * If the host controller provides us with an incorrect timeout + * value, just skip the check and use 0xE. The hardware may take + * longer to time out, but that's much better than having a too-short + * timeout value. + */ + if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) + return 0xE; + + /* Unspecified command, assume max */ + if (cmd == NULL) + return 0xE; + + data = cmd->data; + /* Unspecified timeout, assume max */ + if (!data && !cmd->busy_timeout) + return 0xE; + + /* timeout in us */ + target_timeout = sdhci_target_timeout(host, cmd, data); + + /* + * Figure out needed cycles. + * We do this in steps in order to fit inside a 32 bit int.
+ * The first step is the minimum timeout, which will have a + * minimum resolution of 6 bits: + * (1) 2^13*1000 > 2^22, + * (2) host->timeout_clk < 2^16 + * => + * (1) / (2) > 2^6 + */ + count = 0; + current_timeout = (1 << 13) * 1000 / host->timeout_clk; + while (current_timeout < target_timeout) { + count++; + current_timeout <<= 1; + if (count >= 0xF) + break; + } + + if (count >= 0xF) { + if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) + DBG("Too large timeout 0x%x requested for CMD%d!\n", + count, cmd->opcode); + count = 0xE; + } else { + *too_big = false; + } + + return count; +} + +static void sdhci_set_transfer_irqs(struct sdhci_host *host) +{ + u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; + u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; + + if (host->flags & SDHCI_REQ_USE_DMA) + host->ier = (host->ier & ~pio_irqs) | dma_irqs; + else + host->ier = (host->ier & ~dma_irqs) | pio_irqs; + + if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) + host->ier |= SDHCI_INT_AUTO_CMD_ERR; + else + host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} + +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) +{ + if (enable) + host->ier |= SDHCI_INT_DATA_TIMEOUT; + else + host->ier &= ~SDHCI_INT_DATA_TIMEOUT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); + +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + bool too_big = false; + u8 count = sdhci_calc_timeout(host, cmd, &too_big); + + if (too_big && + host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { + sdhci_calc_sw_timeout(host, cmd); + sdhci_set_data_timeout_irq(host, false); + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { + sdhci_set_data_timeout_irq(host, true); + } + + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); +} +EXPORT_SYMBOL_GPL(__sdhci_set_timeout); + +static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + if (host->ops->set_timeout) + host->ops->set_timeout(host, cmd); + else + __sdhci_set_timeout(host, cmd); +} + +static void sdhci_initialize_data(struct sdhci_host *host, + struct mmc_data *data) +{ + WARN_ON(host->data); + + /* Sanity checks */ + BUG_ON(data->blksz * data->blocks > 524288); + BUG_ON(data->blksz > host->mmc->max_blk_size); + BUG_ON(data->blocks > 65535); + + host->data = data; + host->data_early = 0; + host->data->bytes_xfered = 0; +} + +static inline void sdhci_set_block_info(struct sdhci_host *host, + struct mmc_data *data) +{ + /* Set the DMA boundary value and block size */ + sdhci_writew(host, + SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); + /* + * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count + * can be supported, in that case 16-bit block count register must be 0. 
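 * (Hence the code below zeroes SDHCI_BLOCK_COUNT, if needed, before
 * writing data->blocks to SDHCI_32BIT_BLK_CNT.)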
+ */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { + if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) + sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); + sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); + } else { + sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + } +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); + + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + struct scatterlist *sg; + unsigned int length_mask, offset_mask; + int i; + + host->flags |= SDHCI_REQ_USE_DMA; + + /* + * FIXME: This doesn't account for merging when mapping the + * scatterlist. + * + * The assumption here being that alignment and lengths are + * the same after DMA mapping to device address space. + */ + length_mask = 0; + offset_mask = 0; + if (host->flags & SDHCI_USE_ADMA) { + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { + length_mask = 3; + /* + * As we use up to 3 byte chunks to work + * around alignment problems, we need to + * check the offset as well. + */ + offset_mask = 3; + } + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) + length_mask = 3; + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) + offset_mask = 3; + } + + if (unlikely(length_mask | offset_mask)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->length & length_mask) { + DBG("Reverting to PIO because of transfer size (%d)\n", + sg->length); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + if (sg->offset & offset_mask) { + DBG("Reverting to PIO because of bad alignment\n"); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } + } + + sdhci_config_dma(host); + + if (host->flags & SDHCI_REQ_USE_DMA) { + int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); + + if (sg_cnt <= 0) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_REQ_USE_DMA; + } else if (host->flags & SDHCI_USE_ADMA) { + sdhci_adma_table_pre(host, data, sg_cnt); + sdhci_set_adma_addr(host, host->adma_addr); + } else { + WARN_ON(sg_cnt != 1); + sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); + } + } + + if (!(host->flags & SDHCI_REQ_USE_DMA)) { + int flags; + + flags = SG_MITER_ATOMIC; + if (host->data->flags & MMC_DATA_READ) + flags |= SG_MITER_TO_SG; + else + flags |= SG_MITER_FROM_SG; + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); + host->blocks = data->blocks; + } + + sdhci_set_transfer_irqs(host); + + sdhci_set_block_info(host, data); +} + +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + +static int sdhci_external_dma_init(struct sdhci_host *host) +{ + int ret = 0; + struct mmc_host *mmc = host->mmc; + + host->tx_chan = dma_request_chan(mmc->parent, "tx"); + if (IS_ERR(host->tx_chan)) { + ret = PTR_ERR(host->tx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request TX DMA channel.\n"); + host->tx_chan = NULL; + return ret; + } + + host->rx_chan = dma_request_chan(mmc->parent, "rx"); + if (IS_ERR(host->rx_chan)) { + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + ret = PTR_ERR(host->rx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request RX DMA channel.\n"); + host->rx_chan = NULL; + } + + return ret; +} + +static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? 
host->tx_chan : host->rx_chan; +} + +static int sdhci_external_dma_setup(struct sdhci_host *host, + struct mmc_command *cmd) +{ + int ret, i; + enum dma_transfer_direction dir; + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = cmd->data; + struct dma_chan *chan; + struct dma_slave_config cfg; + dma_cookie_t cookie; + int sg_cnt; + + if (!host->mapbase) + return -EINVAL; + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = host->mapbase + SDHCI_BUFFER; + cfg.dst_addr = host->mapbase + SDHCI_BUFFER; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + /* Sanity check: all the SG entries must be aligned by block size. */ + for (i = 0; i < data->sg_len; i++) { + if ((data->sg + i)->length % data->blksz) + return -EINVAL; + } + + chan = sdhci_external_dma_channel(host, data); + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) + return ret; + + sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); + if (sg_cnt <= 0) + return -EINVAL; + + dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + desc->callback = NULL; + desc->callback_param = NULL; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + ret = cookie; + + return ret; +} + +static void sdhci_external_dma_release(struct sdhci_host *host) +{ + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + if (host->rx_chan) { + dma_release_channel(host->rx_chan); + host->rx_chan = NULL; + } + + sdhci_switch_external_dma(host, false); +} + +static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); + + host->flags |= SDHCI_REQ_USE_DMA; + sdhci_set_transfer_irqs(host); + + sdhci_set_block_info(host, data); +} + +static void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (!sdhci_external_dma_setup(host, cmd)) { + __sdhci_external_dma_prepare_data(host, cmd); + } else { + sdhci_external_dma_release(host); + pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", + mmc_hostname(host->mmc)); + sdhci_prepare_data(host, cmd); + } +} + +static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct dma_chan *chan; + + if (!cmd->data) + return; + + chan = sdhci_external_dma_channel(host, cmd->data); + if (chan) + dma_async_issue_pending(chan); +} + +#else + +static inline int sdhci_external_dma_init(struct sdhci_host *host) +{ + return -EOPNOTSUPP; +} + +static inline void sdhci_external_dma_release(struct sdhci_host *host) +{ +} + +static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + /* This should never happen */ + WARN_ON_ONCE(1); +} + +static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ +} + +static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return NULL; +} + +#endif + +void sdhci_switch_external_dma(struct sdhci_host *host, bool en) +{ + host->use_external_dma = en; +} +EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); + +static inline bool sdhci_auto_cmd12(struct sdhci_host *host, + 
struct mmc_request *mrq)
+{
+	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
+	       !mrq->cap_cmd_during_tfr;
+}
+
+static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
+				    struct mmc_request *mrq)
+{
+	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
+}
+
+static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
+				      struct mmc_request *mrq)
+{
+	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
+}
+
+static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
+					 struct mmc_command *cmd,
+					 u16 *mode)
+{
+	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
+			 (cmd->opcode != SD_IO_RW_EXTENDED);
+	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
+	u16 ctrl2;
+
+	/*
+	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
+	 * Select' is recommended rather than use of 'Auto CMD12
+	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
+	 * here because some controllers (e.g. sdhci-of-dwcmshc) expect it.
+	 */
+	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+	    (use_cmd12 || use_cmd23)) {
+		*mode |= SDHCI_TRNS_AUTO_SEL;
+
+		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (use_cmd23)
+			ctrl2 |= SDHCI_CMD23_ENABLE;
+		else
+			ctrl2 &= ~SDHCI_CMD23_ENABLE;
+		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+
+		return;
+	}
+
+	/*
+	 * If we are sending CMD23, CMD12 never gets sent
+	 * on successful completion (so no Auto-CMD12).
+	 */
+	if (use_cmd12)
+		*mode |= SDHCI_TRNS_AUTO_CMD12;
+	else if (use_cmd23)
+		*mode |= SDHCI_TRNS_AUTO_CMD23;
+}
+
+static void sdhci_set_transfer_mode(struct sdhci_host *host,
+	struct mmc_command *cmd)
+{
+	u16 mode = 0;
+	struct mmc_data *data = cmd->data;
+
+	if (data == NULL) {
+		if (host->quirks2 &
+			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
+			/* must not clear SDHCI_TRANSFER_MODE when tuning */
+			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
+				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
+		} else {
+			/* clear Auto CMD settings for no data CMDs */
+			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
+				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
+		}
+		return;
+	}
+
+	WARN_ON(!host->data);
+
+	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
+		mode = SDHCI_TRNS_BLK_CNT_EN;
+
+	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
+		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
+		sdhci_auto_cmd_select(host, cmd, &mode);
+		if (sdhci_auto_cmd23(host, cmd->mrq))
+			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
+	}
+
+	if (data->flags & MMC_DATA_READ)
+		mode |= SDHCI_TRNS_READ;
+	if (host->flags & SDHCI_REQ_USE_DMA)
+		mode |= SDHCI_TRNS_DMA;
+
+	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+}
+
+static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+{
+	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
+		((mrq->cmd && mrq->cmd->error) ||
+		 (mrq->sbc && mrq->sbc->error) ||
+		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
+		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
+}
+
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
+{
+	int i;
+
+	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+		if (host->mrqs_done[i] == mrq) {
+			WARN_ON(1);
+			return;
+		}
+	}
+
+	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+		if (!host->mrqs_done[i]) {
+			host->mrqs_done[i] = mrq;
+			break;
+		}
+	}
+
+	WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+	if (host->cmd && host->cmd->mrq == mrq)
+		host->cmd = NULL;
+
+	if (host->data_cmd &&
host->data_cmd->mrq == mrq) + host->data_cmd = NULL; + + if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) + host->deferred_cmd = NULL; + + if (host->data && host->data->mrq == mrq) + host->data = NULL; + + if (sdhci_needs_reset(host, mrq)) + host->pending_reset = true; + + sdhci_set_mrq_done(host, mrq); + + sdhci_del_timer(host, mrq); + + if (!sdhci_has_requests(host)) + sdhci_led_deactivate(host); +} + +static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +{ + __sdhci_finish_mrq(host, mrq); + + queue_work(host->complete_wq, &host->complete_work); +} + +static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) +{ + struct mmc_command *data_cmd = host->data_cmd; + struct mmc_data *data = host->data; + + host->data = NULL; + host->data_cmd = NULL; + + /* + * The controller needs a reset of internal state machines upon error + * conditions. + */ + if (data->error) { + if (!host->cmd || host->cmd == data_cmd) + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + } + + if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == + (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) + sdhci_adma_table_post(host, data); + + /* + * The specification states that the block count register must + * be updated, but it does not specify at what point in the + * data flow. That makes the register entirely useless to read + * back so we have to assume that nothing made it to the card + * in the event of an error. + */ + if (data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blksz * data->blocks; + + /* + * Need to send CMD12 if - + * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) + * b) error in multiblock transfer + */ + if (data->stop && + ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || + data->error)) { + /* + * 'cap_cmd_during_tfr' request must not use the command line + * after mmc_command_done() has been called. It is upper layer's + * responsibility to send the stop command if required. + */ + if (data->mrq->cap_cmd_during_tfr) { + __sdhci_finish_mrq(host, data->mrq); + } else { + /* Avoid triggering warning in sdhci_send_command() */ + host->cmd = NULL; + if (!sdhci_send_command(host, data->stop)) { + if (sw_data_timeout) { + /* + * This is anyway a sw data timeout, so + * give up now. 
+					 */
+					data->stop->error = -EIO;
+					__sdhci_finish_mrq(host, data->mrq);
+				} else {
+					WARN_ON(host->deferred_cmd);
+					host->deferred_cmd = data->stop;
+				}
+			}
+		}
+	} else {
+		__sdhci_finish_mrq(host, data->mrq);
+	}
+}
+
+static void sdhci_finish_data(struct sdhci_host *host)
+{
+	__sdhci_finish_data(host, false);
+}
+
+static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+{
+	int flags;
+	u32 mask;
+	unsigned long timeout;
+
+	WARN_ON(host->cmd);
+
+	/* Initially, a command has no error */
+	cmd->error = 0;
+
+	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+	    cmd->opcode == MMC_STOP_TRANSMISSION)
+		cmd->flags |= MMC_RSP_BUSY;
+
+	mask = SDHCI_CMD_INHIBIT;
+	if (sdhci_data_line_cmd(cmd))
+		mask |= SDHCI_DATA_INHIBIT;
+
+	/* We shouldn't wait for data inhibit for stop commands, even
+	   though they might use busy signaling */
+	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
+		mask &= ~SDHCI_DATA_INHIBIT;
+
+	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
+		return false;
+
+	host->cmd = cmd;
+	host->data_timeout = 0;
+	if (sdhci_data_line_cmd(cmd)) {
+		WARN_ON(host->data_cmd);
+		host->data_cmd = cmd;
+		sdhci_set_timeout(host, cmd);
+	}
+
+	if (cmd->data) {
+		if (host->use_external_dma)
+			sdhci_external_dma_prepare_data(host, cmd);
+		else
+			sdhci_prepare_data(host, cmd);
+	}
+
+	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
+
+	sdhci_set_transfer_mode(host, cmd);
+
+	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+		WARN_ONCE(1, "Unsupported response type!\n");
+		/*
+		 * This does not happen in practice because 136-bit response
+		 * commands never have busy waiting, so rather than complicate
+		 * the error path, just remove busy waiting and continue.
+		 */
+		cmd->flags &= ~MMC_RSP_BUSY;
+	}
+
+	if (!(cmd->flags & MMC_RSP_PRESENT))
+		flags = SDHCI_CMD_RESP_NONE;
+	else if (cmd->flags & MMC_RSP_136)
+		flags = SDHCI_CMD_RESP_LONG;
+	else if (cmd->flags & MMC_RSP_BUSY)
+		flags = SDHCI_CMD_RESP_SHORT_BUSY;
+	else
+		flags = SDHCI_CMD_RESP_SHORT;
+
+	if (cmd->flags & MMC_RSP_CRC)
+		flags |= SDHCI_CMD_CRC;
+	if (cmd->flags & MMC_RSP_OPCODE)
+		flags |= SDHCI_CMD_INDEX;
+
+	/* CMD19 is special in that the Data Present Select should be set */
+	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+		flags |= SDHCI_CMD_DATA;
+
+	timeout = jiffies;
+	if (host->data_timeout)
+		timeout += nsecs_to_jiffies(host->data_timeout);
+	else if (!cmd->data && cmd->busy_timeout > 9000)
+		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
+	else
+		timeout += 10 * HZ;
+	sdhci_mod_timer(host, cmd->mrq, timeout);
+
+	if (host->use_external_dma)
+		sdhci_external_dma_pre_transfer(host, cmd);
+
+	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+
+	return true;
+}
+
+static bool sdhci_present_error(struct sdhci_host *host,
+				struct mmc_command *cmd, bool present)
+{
+	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
+		cmd->error = -ENOMEDIUM;
+		return true;
+	}
+
+	return false;
+}
+
+static bool sdhci_send_command_retry(struct sdhci_host *host,
+				     struct mmc_command *cmd,
+				     unsigned long flags)
+	__releases(host->lock)
+	__acquires(host->lock)
+{
+	struct mmc_command *deferred_cmd = host->deferred_cmd;
+	int timeout = 10; /* Approx. 10 ms */
+	bool present;
+
+	while (!sdhci_send_command(host, cmd)) {
+		if (!timeout--) {
+			pr_err("%s: Controller never released inhibit bit(s).\n",
+			       mmc_hostname(host->mmc));
+			sdhci_dumpregs(host);
+			cmd->error = -EIO;
+			return false;
+		}
+
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		usleep_range(1000, 1250);
+
+		present = host->mmc->ops->get_cd(host->mmc);
+
+		spin_lock_irqsave(&host->lock, flags);
+
+		/* A deferred command might disappear, handle that */
+		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
+			return true;
+
+		if (sdhci_present_error(host, cmd, present))
+			return false;
+	}
+
+	if (cmd == host->deferred_cmd)
+		host->deferred_cmd = NULL;
+
+	return true;
+}
+
+static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
+{
+	int i, reg;
+
+	for (i = 0; i < 4; i++) {
+		reg = SDHCI_RESPONSE + (3 - i) * 4;
+		cmd->resp[i] = sdhci_readl(host, reg);
+	}
+
+	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
+		return;
+
+	/* CRC is stripped so we need to do some shifting */
+	for (i = 0; i < 4; i++) {
+		cmd->resp[i] <<= 8;
+		if (i != 3)
+			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
+	}
+}
+
+static void sdhci_finish_command(struct sdhci_host *host)
+{
+	struct mmc_command *cmd = host->cmd;
+
+	host->cmd = NULL;
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			sdhci_read_rsp_136(host, cmd);
+		} else {
+			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+		}
+	}
+
+	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
+		mmc_command_done(host->mmc, cmd->mrq);
+
+	/*
+	 * The host can send an interrupt when the busy state has
+	 * ended, allowing us to wait without wasting CPU cycles.
+	 * The busy signal uses DAT0 so this is similar to waiting
+	 * for data to complete.
+	 *
+	 * Note: The 1.0 specification is a bit ambiguous about this
+	 * feature so there might be some problems with older
+	 * controllers.
+	 */
+	if (cmd->flags & MMC_RSP_BUSY) {
+		if (cmd->data) {
+			DBG("Cannot wait for busy signal when also doing a data transfer");
+		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
+			   cmd == host->data_cmd) {
+			/* Command complete before busy is ended */
+			return;
+		}
+	}
+
+	/* Finished CMD23, now send actual command. */
+	if (cmd == cmd->mrq->sbc) {
+		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
+			WARN_ON(host->deferred_cmd);
+			host->deferred_cmd = cmd->mrq->cmd;
+		}
+	} else {
+
+		/* Processed actual command.
*/ + if (host->data && host->data_early) + sdhci_finish_data(host); + + if (!cmd->data) + __sdhci_finish_mrq(host, cmd->mrq); + } +} + +static u16 sdhci_get_preset_value(struct sdhci_host *host) +{ + u16 preset = 0; + + switch (host->timing) { + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); + break; + case MMC_TIMING_UHS_SDR12: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); + break; + case MMC_TIMING_UHS_SDR25: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); + break; + case MMC_TIMING_UHS_SDR50: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); + break; + case MMC_TIMING_MMC_HS400: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); + break; + default: + pr_warn("%s: Invalid UHS-I mode selected\n", + mmc_hostname(host->mmc)); + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); + break; + } + return preset; +} + +u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, + unsigned int *actual_clock) +{ + int div = 0; /* Initialized for compiler warning */ + int real_div = div, clk_mul = 1; + u16 clk = 0; + bool switch_base_clk = false; + + if (host->version >= SDHCI_SPEC_300) { + if (host->preset_enabled) { + u16 pre_val; + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + pre_val = sdhci_get_preset_value(host); + div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); + if (host->clk_mul && + (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { + clk = SDHCI_PROG_CLOCK_MODE; + real_div = div + 1; + clk_mul = host->clk_mul; + } else { + real_div = max_t(int, 1, div << 1); + } + goto clock_set; + } + + /* + * Check if the Host Controller supports Programmable Clock + * Mode. + */ + if (host->clk_mul) { + for (div = 1; div <= 1024; div++) { + if ((host->max_clk * host->clk_mul / div) + <= clock) + break; + } + if ((host->max_clk * host->clk_mul / div) <= clock) { + /* + * Set Programmable Clock Mode in the Clock + * Control register. + */ + clk = SDHCI_PROG_CLOCK_MODE; + real_div = div; + clk_mul = host->clk_mul; + div--; + } else { + /* + * Divisor can be too small to reach clock + * speed requirement. Then use the base clock. + */ + switch_base_clk = true; + } + } + + if (!host->clk_mul || switch_base_clk) { + /* Version 3.00 divisors must be a multiple of 2. */ + if (host->max_clk <= clock) + div = 1; + else { + for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; + div += 2) { + if ((host->max_clk / div) <= clock) + break; + } + } + real_div = div; + div >>= 1; + if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) + && !div && host->max_clk <= 25000000) + div = 1; + } + } else { + /* Version 2.00 divisors must be a power of 2. 
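+		 *
+		 * Worked example (illustrative numbers): with max_clk =
+		 * 200 MHz and a 25 MHz target, the loop below stops at
+		 * div = 8, real_div = 8 gives the true rate, and div >> 1 = 4
+		 * is programmed into the divider field (SDCLK = base clock /
+		 * (2 * 4) = 25 MHz).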
*/ + for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { + if ((host->max_clk / div) <= clock) + break; + } + real_div = div; + div >>= 1; + } + +clock_set: + if (real_div) + *actual_clock = (host->max_clk * clk_mul) / real_div; + clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; + clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) + << SDHCI_DIVIDER_HI_SHIFT; + + return clk; +} +EXPORT_SYMBOL_GPL(sdhci_calc_clk); + +void sdhci_enable_clk(struct sdhci_host *host, u16 clk) +{ + ktime_t timeout; + + clk |= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Wait max 150 ms */ + timeout = ktime_add_ms(ktime_get(), 150); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + return; + } + udelay(10); + } + + if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { + clk |= SDHCI_CLOCK_PLL_EN; + clk &= ~SDHCI_CLOCK_INT_STABLE; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Wait max 150 ms */ + timeout = ktime_add_ms(ktime_get(), 150); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { + pr_err("%s: PLL clock never stabilised.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + return; + } + udelay(10); + } + } + + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_enable_clk); + +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_enable_clk(host, clk); +} +EXPORT_SYMBOL_GPL(sdhci_set_clock); + +static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + + if (mode != MMC_POWER_OFF) + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); + else + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); +} + +void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + u8 pwr = 0; + + if (mode != MMC_POWER_OFF) { + switch (1 << vdd) { + case MMC_VDD_165_195: + /* + * Without a regulator, SDHCI does not support 2.0v + * so we only get here if the driver deliberately + * added the 2.0v range to ocr_avail. Map it to 1.8v + * for the purpose of turning on the power. + */ + case MMC_VDD_20_21: + pwr = SDHCI_POWER_180; + break; + case MMC_VDD_29_30: + case MMC_VDD_30_31: + pwr = SDHCI_POWER_300; + break; + case MMC_VDD_32_33: + case MMC_VDD_33_34: + /* + * 3.4 ~ 3.6V are valid only for those platforms where it's + * known that the voltage range is supported by hardware. + */ + case MMC_VDD_34_35: + case MMC_VDD_35_36: + pwr = SDHCI_POWER_330; + break; + default: + WARN(1, "%s: Invalid vdd %#x\n", + mmc_hostname(host->mmc), vdd); + break; + } + } + + if (host->pwr == pwr) + return; + + host->pwr = pwr; + + if (pwr == 0) { + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + sdhci_runtime_pm_bus_off(host); + } else { + /* + * Spec says that we should clear the power reg before setting + * a new value. 
Some controllers don't seem to like this though.
+		 */
+		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+		/*
+		 * At least the Marvell CaFe chip gets confused if we set the
+		 * voltage and turn on power at the same time, so set the
+		 * voltage first.
+		 */
+		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+		pwr |= SDHCI_POWER_ON;
+
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+			sdhci_runtime_pm_bus_on(host);
+
+		/*
+		 * Some controllers need an extra 10 ms delay after applying
+		 * power before they can apply clock.
+		 */
+		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+			mdelay(10);
+	}
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
+
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+		     unsigned short vdd)
+{
+	if (IS_ERR(host->mmc->supply.vmmc))
+		sdhci_set_power_noreg(host, mode, vdd);
+	else
+		sdhci_set_power_reg(host, mode, vdd);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
+
+/*
+ * Some controllers need to configure a valid bus voltage on their power
+ * register regardless of whether an external regulator is taking care of power
+ * supply. This helper function takes care of it if set as the controller's
+ * sdhci_ops.set_power callback.
+ */
+void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
+				     unsigned char mode,
+				     unsigned short vdd)
+{
+	if (!IS_ERR(host->mmc->supply.vmmc)) {
+		struct mmc_host *mmc = host->mmc;
+
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+	}
+	sdhci_set_power_noreg(host, mode, vdd);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
+
+/*****************************************************************************\
+ *                                                                           *
+ *                              MMC callbacks                                *
+ *                                                                           *
+\*****************************************************************************/
+
+void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	struct mmc_command *cmd;
+	unsigned long flags;
+	bool present;
+
+	/* Firstly check card presence */
+	present = mmc->ops->get_cd(mmc);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	sdhci_led_activate(host);
+
+	if (sdhci_present_error(host, mrq->cmd, present))
+		goto out_finish;
+
+	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
+
+	if (!sdhci_send_command_retry(host, cmd, flags))
+		goto out_finish;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return;
+
+out_finish:
+	sdhci_finish_mrq(host, mrq);
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sdhci_request);
+
+int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	struct mmc_command *cmd;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (sdhci_present_error(host, mrq->cmd, true)) {
+		sdhci_finish_mrq(host, mrq);
+		goto out_finish;
+	}
+
+	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
+
+	/*
+	 * The HSQ may send a command in interrupt context without polling
+	 * the busy signaling, which means we should return -EBUSY if the
+	 * controller has not released the inhibit bits, so that HSQ can try
+	 * to send the request again in non-atomic context. So we should not
+	 * finish this request here.
+ */ + if (!sdhci_send_command(host, cmd)) + ret = -EBUSY; + else + sdhci_led_activate(host); + +out_finish: + spin_unlock_irqrestore(&host->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(sdhci_request_atomic); + +void sdhci_set_bus_width(struct sdhci_host *host, int width) +{ + u8 ctrl; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + if (width == MMC_BUS_WIDTH_8) { + ctrl &= ~SDHCI_CTRL_4BITBUS; + ctrl |= SDHCI_CTRL_8BITBUS; + } else { + if (host->mmc->caps & MMC_CAP_8_BIT_DATA) + ctrl &= ~SDHCI_CTRL_8BITBUS; + if (width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_set_bus_width); + +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) +{ + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} +EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); + +static bool sdhci_timing_has_preset(unsigned char timing) +{ + switch (timing) { + case MMC_TIMING_UHS_SDR12: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_SDR50: + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + return true; + }; + return false; +} + +static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) +{ + return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && + sdhci_timing_has_preset(timing); +} + +static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) +{ + /* + * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK + * Frequency. Check if preset values need to be enabled, or the Driver + * Strength needs updating. Note, clock changes are handled separately. + */ + return !host->preset_enabled && + (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); +} + +void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + bool reinit_uhs = host->reinit_uhs; + bool turning_on_clk = false; + u8 ctrl; + + host->reinit_uhs = false; + + if (ios->power_mode == MMC_POWER_UNDEFINED) + return; + + if (host->flags & SDHCI_DEVICE_DEAD) { + if (!IS_ERR(mmc->supply.vmmc) && + ios->power_mode == MMC_POWER_OFF) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + return; + } + + /* + * Reset the chip on each power off. + * Should clear out any weird states. 
+	 */
+	if (ios->power_mode == MMC_POWER_OFF) {
+		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+		sdhci_reinit(host);
+	}
+
+	if (host->version >= SDHCI_SPEC_300 &&
+		(ios->power_mode == MMC_POWER_UP) &&
+		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
+		sdhci_enable_preset_value(host, false);
+
+	if (!ios->clock || ios->clock != host->clock) {
+		turning_on_clk = ios->clock && !host->clock;
+
+		host->ops->set_clock(host, ios->clock);
+		host->clock = ios->clock;
+
+		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+		    host->clock) {
+			host->timeout_clk = host->mmc->actual_clock ?
+						host->mmc->actual_clock / 1000 :
+						host->clock / 1000;
+			host->mmc->max_busy_timeout =
+				host->ops->get_max_timeout_count ?
+				host->ops->get_max_timeout_count(host) :
+				1 << 27;
+			host->mmc->max_busy_timeout /= host->timeout_clk;
+		}
+	}
+
+	if (host->ops->set_power)
+		host->ops->set_power(host, ios->power_mode, ios->vdd);
+	else
+		sdhci_set_power(host, ios->power_mode, ios->vdd);
+
+	if (host->ops->platform_send_init_74_clocks)
+		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+	host->ops->set_bus_width(host, ios->bus_width);
+
+	/*
+	 * Special case to avoid multiple clock changes during voltage
+	 * switching.
+	 */
+	if (!reinit_uhs &&
+	    turning_on_clk &&
+	    host->timing == ios->timing &&
+	    host->version >= SDHCI_SPEC_300 &&
+	    !sdhci_presetable_values_change(host, ios))
+		return;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
+		if (ios->timing == MMC_TIMING_SD_HS ||
+		    ios->timing == MMC_TIMING_MMC_HS ||
+		    ios->timing == MMC_TIMING_MMC_HS400 ||
+		    ios->timing == MMC_TIMING_MMC_HS200 ||
+		    ios->timing == MMC_TIMING_MMC_DDR52 ||
+		    ios->timing == MMC_TIMING_UHS_SDR50 ||
+		    ios->timing == MMC_TIMING_UHS_SDR104 ||
+		    ios->timing == MMC_TIMING_UHS_DDR50 ||
+		    ios->timing == MMC_TIMING_UHS_SDR25)
+			ctrl |= SDHCI_CTRL_HISPD;
+		else
+			ctrl &= ~SDHCI_CTRL_HISPD;
+	}
+
+	if (host->version >= SDHCI_SPEC_300) {
+		u16 clk, ctrl_2;
+
+		if (!host->preset_enabled) {
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+			/*
+			 * We only need to set Driver Strength if the
+			 * preset value enable is not set.
+			 */
+			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
+			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
+			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
+			else {
+				pr_warn("%s: invalid driver type, default to driver type B\n",
+					mmc_hostname(mmc));
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
+			}
+
+			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+			host->drv_type = ios->drv_type;
+		} else {
+			/*
+			 * According to SDHC Spec v3.00, if the Preset Value
+			 * Enable in the Host Control 2 register is set, we
+			 * need to reset SD Clock Enable before changing High
+			 * Speed Enable to avoid generating clock glitches.
+			 */
+
+			/* Reset SD Clock Enable */
+			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+			clk &= ~SDHCI_CLOCK_CARD_EN;
+			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+			/* Re-enable SD Clock */
+			host->ops->set_clock(host, host->clock);
+		}
+
+		/* Reset SD Clock Enable */
+		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+		clk &= ~SDHCI_CLOCK_CARD_EN;
+		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+		host->ops->set_uhs_signaling(host, ios->timing);
+		host->timing = ios->timing;
+
+		if (sdhci_preset_needed(host, ios->timing)) {
+			u16 preset;
+
+			sdhci_enable_preset_value(host, true);
+			preset = sdhci_get_preset_value(host);
+			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
+						  preset);
+			host->drv_type = ios->drv_type;
+		}
+
+		/* Re-enable SD Clock */
+		host->ops->set_clock(host, host->clock);
+	} else
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	/*
+	 * Some (ENE) controllers go apeshit on some ios operation,
+	 * signalling timeout and CRC errors even on CMD0. Resetting
+	 * it on each ios seems to solve the problem.
+	 */
+	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_ios);
+
+static int sdhci_get_cd(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	int gpio_cd = mmc_gpio_get_cd(mmc);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		return 0;
+
+	/* If nonremovable, assume that the card is always present. */
+	if (!mmc_card_is_removable(host->mmc))
+		return 1;
+
+	/*
+	 * Try slot gpio detect; if defined, it takes precedence
+	 * over built-in controller functionality
+	 */
+	if (gpio_cd >= 0)
+		return !!gpio_cd;
+
+	/* If polling, assume that the card is always present. */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		return 1;
+
+	/* Host native card detect */
+	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
+static int sdhci_check_ro(struct sdhci_host *host)
+{
+	unsigned long flags;
+	int is_readonly;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		is_readonly = 0;
+	else if (host->ops->get_ro)
+		is_readonly = host->ops->get_ro(host);
+	else if (mmc_can_gpio_ro(host->mmc))
+		is_readonly = mmc_gpio_get_ro(host->mmc);
+	else
+		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+				& SDHCI_WRITE_PROTECT);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* This quirk needs to be replaced by a callback-function later */
+	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+ !is_readonly : is_readonly; +} + +#define SAMPLE_COUNT 5 + +static int sdhci_get_ro(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + int i, ro_count; + + if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) + return sdhci_check_ro(host); + + ro_count = 0; + for (i = 0; i < SAMPLE_COUNT; i++) { + if (sdhci_check_ro(host)) { + if (++ro_count > SAMPLE_COUNT / 2) + return 1; + } + msleep(30); + } + return 0; +} + +static void sdhci_hw_reset(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + if (host->ops && host->ops->hw_reset) + host->ops->hw_reset(host); +} + +static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) +{ + if (!(host->flags & SDHCI_DEVICE_DEAD)) { + if (enable) + host->ier |= SDHCI_INT_CARD_INT; + else + host->ier &= ~SDHCI_INT_CARD_INT; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } +} + +void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + + if (enable) + pm_runtime_get_noresume(host->mmc->parent); + + spin_lock_irqsave(&host->lock, flags); + sdhci_enable_sdio_irq_nolock(host, enable); + spin_unlock_irqrestore(&host->lock, flags); + + if (!enable) + pm_runtime_put_noidle(host->mmc->parent); +} +EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); + +static void sdhci_ack_sdio_irq(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + sdhci_enable_sdio_irq_nolock(host, true); + spin_unlock_irqrestore(&host->lock, flags); +} + +int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u16 ctrl; + int ret; + + /* + * Signal Voltage Switching is only applicable for Host Controllers + * v3.00 and above. 
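+	 *
+	 * In short, each branch below follows the same pattern (sketch, not
+	 * normative): program the 1.8V signalling enable bit, ask the vqmmc
+	 * regulator (if present) for the matching voltage, allow ~5 ms for
+	 * the supply to settle, then read the bit back; a bit that did not
+	 * stick is reported as -EAGAIN so the core can retry or power cycle.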
+	 */
+	if (host->version < SDHCI_SPEC_300)
+		return 0;
+
+	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+	switch (ios->signal_voltage) {
+	case MMC_SIGNAL_VOLTAGE_330:
+		if (!(host->flags & SDHCI_SIGNALING_330))
+			return -EINVAL;
+		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
+		ctrl &= ~SDHCI_CTRL_VDD_180;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+		if (!IS_ERR(mmc->supply.vqmmc)) {
+			ret = mmc_regulator_set_vqmmc(mmc, ios);
+			if (ret < 0) {
+				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
+					mmc_hostname(mmc));
+				return -EIO;
+			}
+		}
+		/* Wait for 5 ms */
+		usleep_range(5000, 5500);
+
+		/* 3.3V regulator output should be stable within 5 ms */
+		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (!(ctrl & SDHCI_CTRL_VDD_180))
+			return 0;
+
+		pr_warn("%s: 3.3V regulator output did not become stable\n",
+			mmc_hostname(mmc));
+
+		return -EAGAIN;
+	case MMC_SIGNAL_VOLTAGE_180:
+		if (!(host->flags & SDHCI_SIGNALING_180))
+			return -EINVAL;
+		if (!IS_ERR(mmc->supply.vqmmc)) {
+			ret = mmc_regulator_set_vqmmc(mmc, ios);
+			if (ret < 0) {
+				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
+					mmc_hostname(mmc));
+				return -EIO;
+			}
+		}
+
+		/*
+		 * Enable 1.8V Signal Enable in the Host Control2
+		 * register
+		 */
+		ctrl |= SDHCI_CTRL_VDD_180;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+		/* Some controllers need to do more when switching */
+		if (host->ops->voltage_switch)
+			host->ops->voltage_switch(host);
+
+		/* 1.8V regulator output should be stable within 5 ms */
+		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (ctrl & SDHCI_CTRL_VDD_180)
+			return 0;
+
+		pr_warn("%s: 1.8V regulator output did not become stable\n",
+			mmc_hostname(mmc));
+
+		return -EAGAIN;
+	case MMC_SIGNAL_VOLTAGE_120:
+		if (!(host->flags & SDHCI_SIGNALING_120))
+			return -EINVAL;
+		if (!IS_ERR(mmc->supply.vqmmc)) {
+			ret = mmc_regulator_set_vqmmc(mmc, ios);
+			if (ret < 0) {
+				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
+					mmc_hostname(mmc));
+				return -EIO;
+			}
+		}
+		return 0;
+	default:
+		/* No signal voltage switch required */
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
+
+static int sdhci_card_busy(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u32 present_state;
+
+	/* Check whether DAT[0] is 0 */
+	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+
+	return !(present_state & SDHCI_DATA_0_LVL_MASK);
+}
+
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->flags |= SDHCI_HS400_TUNING;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return 0;
+}
+
+void sdhci_start_tuning(struct sdhci_host *host)
+{
+	u16 ctrl;
+
+	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	ctrl |= SDHCI_CTRL_EXEC_TUNING;
+	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+		ctrl |= SDHCI_CTRL_TUNED_CLK;
+	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+	/*
+	 * As per the Host Controller spec v3.00, tuning command
+	 * generates Buffer Read Ready interrupt, so enable that.
+	 *
+	 * Note: The spec clearly says that when tuning sequence
+	 * is being performed, the controller does not generate
+	 * interrupts other than Buffer Read Ready interrupt. But
+	 * to make sure we don't hit a controller bug, we _only_
+	 * enable Buffer Read Ready interrupt here.
+ */ + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); +} +EXPORT_SYMBOL_GPL(sdhci_start_tuning); + +void sdhci_end_tuning(struct sdhci_host *host) +{ + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} +EXPORT_SYMBOL_GPL(sdhci_end_tuning); + +void sdhci_reset_tuning(struct sdhci_host *host) +{ + u16 ctrl; + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl &= ~SDHCI_CTRL_TUNED_CLK; + ctrl &= ~SDHCI_CTRL_EXEC_TUNING; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); +} +EXPORT_SYMBOL_GPL(sdhci_reset_tuning); + +void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) +{ + sdhci_reset_tuning(host); + + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + + sdhci_end_tuning(host); + + mmc_abort_tuning(host->mmc, opcode); +} +EXPORT_SYMBOL_GPL(sdhci_abort_tuning); + +/* + * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI + * tuning command does not have a data payload (or rather the hardware does it + * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command + * interrupt setup is different to other commands and there is no timeout + * interrupt so special handling is needed. + */ +void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) +{ + struct mmc_host *mmc = host->mmc; + struct mmc_command cmd = {}; + struct mmc_request mrq = {}; + unsigned long flags; + u32 b = host->sdma_boundary; + + spin_lock_irqsave(&host->lock, flags); + + cmd.opcode = opcode; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + cmd.mrq = &mrq; + + mrq.cmd = &cmd; + /* + * In response to CMD19, the card sends 64 bytes of tuning + * block to the Host Controller. So we set the block size + * to 64 here. + */ + if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && + mmc->ios.bus_width == MMC_BUS_WIDTH_8) + sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); + else + sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); + + /* + * The tuning block is sent by the card to the host controller. + * So we set the TRNS_READ bit in the Transfer Mode register. + * This also takes care of setting DMA Enable and Multi Block + * Select in the same register to 0. + */ + sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); + + if (!sdhci_send_command_retry(host, &cmd, flags)) { + spin_unlock_irqrestore(&host->lock, flags); + host->tuning_done = 0; + return; + } + + host->cmd = NULL; + + sdhci_del_timer(host, &mrq); + + host->tuning_done = 0; + + spin_unlock_irqrestore(&host->lock, flags); + + /* Wait for Buffer Read Ready interrupt */ + wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), + msecs_to_jiffies(50)); + +} +EXPORT_SYMBOL_GPL(sdhci_send_tuning); + +static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + int i; + + /* + * Issue opcode repeatedly till Execute Tuning is set to 0 or the number + * of loops reaches tuning loop count. 
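+	 *
+	 * A typical successful run (illustrative): after a few iterations the
+	 * controller clears SDHCI_CTRL_EXEC_TUNING and leaves
+	 * SDHCI_CTRL_TUNED_CLK set, which the loop treats as success. If
+	 * EXEC_TUNING is still set after tuning_loop_count iterations (40 by
+	 * default), the sequence never converged and we fall back to the
+	 * fixed sampling clock.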
+ */ + for (i = 0; i < host->tuning_loop_count; i++) { + u16 ctrl; + + sdhci_send_tuning(host, opcode); + + if (!host->tuning_done) { + pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); + sdhci_abort_tuning(host, opcode); + return -ETIMEDOUT; + } + + /* Spec does not require a delay between tuning cycles */ + if (host->tuning_delay > 0) + mdelay(host->tuning_delay); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { + if (ctrl & SDHCI_CTRL_TUNED_CLK) + return 0; /* Success! */ + break; + } + + } + + pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); + sdhci_reset_tuning(host); + return -EAGAIN; +} + +int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = 0; + unsigned int tuning_count = 0; + bool hs400_tuning; + + hs400_tuning = host->flags & SDHCI_HS400_TUNING; + + if (host->tuning_mode == SDHCI_TUNING_MODE_1) + tuning_count = host->tuning_count; + + /* + * The Host Controller needs tuning in case of SDR104 and DDR50 + * mode, and for SDR50 mode when Use Tuning for SDR50 is set in + * the Capabilities register. + * If the Host Controller supports the HS200 mode then the + * tuning function has to be executed. + */ + switch (host->timing) { + /* HS400 tuning is done in HS200 mode */ + case MMC_TIMING_MMC_HS400: + err = -EINVAL; + goto out; + + case MMC_TIMING_MMC_HS200: + /* + * Periodic re-tuning for HS400 is not expected to be needed, so + * disable it here. + */ + if (hs400_tuning) + tuning_count = 0; + break; + + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_DDR50: + break; + + case MMC_TIMING_UHS_SDR50: + if (host->flags & SDHCI_SDR50_NEEDS_TUNING) + break; + fallthrough; + + default: + goto out; + } + + if (host->ops->platform_execute_tuning) { + err = host->ops->platform_execute_tuning(host, opcode); + goto out; + } + + host->mmc->retune_period = tuning_count; + + if (host->tuning_delay < 0) + host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; + + sdhci_start_tuning(host); + + host->tuning_err = __sdhci_execute_tuning(host, opcode); + + sdhci_end_tuning(host); +out: + host->flags &= ~SDHCI_HS400_TUNING; + + return err; +} +EXPORT_SYMBOL_GPL(sdhci_execute_tuning); + +static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) +{ + /* Host Controller v3.00 defines preset value registers */ + if (host->version < SDHCI_SPEC_300) + return; + + /* + * We only enable or disable Preset Value if they are not already + * enabled or disabled respectively. Otherwise, we bail out. 
+ */ + if (host->preset_enabled != enable) { + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + if (enable) + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; + else + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + if (enable) + host->flags |= SDHCI_PV_ENABLED; + else + host->flags &= ~SDHCI_PV_ENABLED; + + host->preset_enabled = enable; + } +} + +static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (data->host_cookie != COOKIE_UNMAPPED) + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + data->host_cookie = COOKIE_UNMAPPED; +} + +static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + + mrq->data->host_cookie = COOKIE_UNMAPPED; + + /* + * No pre-mapping in the pre hook if we're using the bounce buffer, + * for that we would need two bounce buffers since one buffer is + * in flight when this is getting called. + */ + if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) + sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); +} + +static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) +{ + if (host->data_cmd) { + host->data_cmd->error = err; + sdhci_finish_mrq(host, host->data_cmd->mrq); + } + + if (host->cmd) { + host->cmd->error = err; + sdhci_finish_mrq(host, host->cmd->mrq); + } +} + +static void sdhci_card_event(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + int present; + + /* First check if client has provided their own card event */ + if (host->ops->card_event) + host->ops->card_event(host); + + present = mmc->ops->get_cd(mmc); + + spin_lock_irqsave(&host->lock, flags); + + /* Check sdhci_has_requests() first in case we are runtime suspended */ + if (sdhci_has_requests(host) && !present) { + pr_err("%s: Card removed during transfer!\n", + mmc_hostname(host->mmc)); + pr_err("%s: Resetting controller.\n", + mmc_hostname(host->mmc)); + + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + + sdhci_error_out_mrqs(host, -ENOMEDIUM); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + +static const struct mmc_host_ops sdhci_ops = { + .request = sdhci_request, + .post_req = sdhci_post_req, + .pre_req = sdhci_pre_req, + .set_ios = sdhci_set_ios, + .get_cd = sdhci_get_cd, + .get_ro = sdhci_get_ro, + .hw_reset = sdhci_hw_reset, + .enable_sdio_irq = sdhci_enable_sdio_irq, + .ack_sdio_irq = sdhci_ack_sdio_irq, + .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, + .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, + .execute_tuning = sdhci_execute_tuning, + .card_event = sdhci_card_event, + .card_busy = sdhci_card_busy, +}; + +/*****************************************************************************\ + * * + * Request done * + * * +\*****************************************************************************/ + +static bool sdhci_request_done(struct sdhci_host *host) +{ + unsigned long flags; + struct mmc_request *mrq; + int i; + + spin_lock_irqsave(&host->lock, flags); + + for (i = 0; i < SDHCI_MAX_MRQS; i++) { + mrq = host->mrqs_done[i]; + if (mrq) + break; + } + + if (!mrq) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + + /* + * The controller needs a reset of internal state machines + * upon error conditions. 
+ */ + if (sdhci_needs_reset(host, mrq)) { + /* + * Do not finish until command and data lines are available for + * reset. Note there can only be one other mrq, so it cannot + * also be in mrqs_done, otherwise host->cmd and host->data_cmd + * would both be null. + */ + if (host->cmd || host->data_cmd) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + + /* Some controllers need this kick or reset won't work here */ + if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) + /* This is to force an update */ + host->ops->set_clock(host, host->clock); + + /* + * Spec says we should do both at the same time, but Ricoh + * controllers do not like that. + */ + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + + host->pending_reset = false; + } + + /* + * Always unmap the data buffers if they were mapped by + * sdhci_prepare_data() whenever we finish with a request. + * This avoids leaking DMA mappings on error. + */ + if (host->flags & SDHCI_REQ_USE_DMA) { + struct mmc_data *data = mrq->data; + + if (host->use_external_dma && data && + (mrq->cmd->error || data->error)) { + struct dma_chan *chan = sdhci_external_dma_channel(host, data); + + host->mrqs_done[i] = NULL; + spin_unlock_irqrestore(&host->lock, flags); + dmaengine_terminate_sync(chan); + spin_lock_irqsave(&host->lock, flags); + sdhci_set_mrq_done(host, mrq); + } + + if (data && data->host_cookie == COOKIE_MAPPED) { + if (host->bounce_buffer) { + /* + * On reads, copy the bounced data into the + * sglist + */ + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + unsigned int length = data->bytes_xfered; + + if (length > host->bounce_buffer_size) { + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", + mmc_hostname(host->mmc), + host->bounce_buffer_size, + data->bytes_xfered); + /* Cap it down and continue */ + length = host->bounce_buffer_size; + } + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + DMA_FROM_DEVICE); + sg_copy_from_buffer(data->sg, + data->sg_len, + host->bounce_buffer, + length); + } else { + /* No copying, just switch ownership */ + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + } + } else { + /* Unmap the raw data */ + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + } + data->host_cookie = COOKIE_UNMAPPED; + } + } + + host->mrqs_done[i] = NULL; + + spin_unlock_irqrestore(&host->lock, flags); + + if (host->ops->request_done) + host->ops->request_done(host, mrq); + else + mmc_request_done(host->mmc, mrq); + + return false; +} + +static void sdhci_complete_work(struct work_struct *work) +{ + struct sdhci_host *host = container_of(work, struct sdhci_host, + complete_work); + + while (!sdhci_request_done(host)) + ; +} + +static void sdhci_timeout_timer(struct timer_list *t) +{ + struct sdhci_host *host; + unsigned long flags; + + host = from_timer(host, t, timer); + + spin_lock_irqsave(&host->lock, flags); + + if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { + pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + + host->cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, host->cmd->mrq); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void sdhci_timeout_data_timer(struct timer_list *t) +{ + struct sdhci_host *host; + unsigned long flags; + + host = from_timer(host, t, data_timer); + + 
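+	/*
+	 * This timer fires only if the hardware never signalled completion
+	 * for a data command; everything below runs under host->lock so it
+	 * cannot race the interrupt handler.
+	 */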
spin_lock_irqsave(&host->lock, flags); + + if (host->data || host->data_cmd || + (host->cmd && sdhci_data_line_cmd(host->cmd))) { + pr_err("%s: Timeout waiting for hardware interrupt.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + + if (host->data) { + host->data->error = -ETIMEDOUT; + __sdhci_finish_data(host, true); + queue_work(host->complete_wq, &host->complete_work); + } else if (host->data_cmd) { + host->data_cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, host->data_cmd->mrq); + } else { + host->cmd->error = -ETIMEDOUT; + sdhci_finish_mrq(host, host->cmd->mrq); + } + } + + spin_unlock_irqrestore(&host->lock, flags); +} + +/*****************************************************************************\ + * * + * Interrupt handling * + * * +\*****************************************************************************/ + +static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) +{ + /* Handle auto-CMD12 error */ + if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { + struct mmc_request *mrq = host->data_cmd->mrq; + u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); + int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? + SDHCI_INT_DATA_TIMEOUT : + SDHCI_INT_DATA_CRC; + + /* Treat auto-CMD12 error the same as data error */ + if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { + *intmask_p |= data_err_bit; + return; + } + } + + if (!host->cmd) { + /* + * SDHCI recovers from errors by resetting the cmd and data + * circuits. Until that is done, there very well might be more + * interrupts, so ignore them in that case. + */ + if (host->pending_reset) + return; + pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", + mmc_hostname(host->mmc), (unsigned)intmask); + sdhci_dumpregs(host); + return; + } + + if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | + SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { + if (intmask & SDHCI_INT_TIMEOUT) + host->cmd->error = -ETIMEDOUT; + else + host->cmd->error = -EILSEQ; + + /* Treat data command CRC error the same as data CRC error */ + if (host->cmd->data && + (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == + SDHCI_INT_CRC) { + host->cmd = NULL; + *intmask_p |= SDHCI_INT_DATA_CRC; + return; + } + + __sdhci_finish_mrq(host, host->cmd->mrq); + return; + } + + /* Handle auto-CMD23 error */ + if (intmask & SDHCI_INT_AUTO_CMD_ERR) { + struct mmc_request *mrq = host->cmd->mrq; + u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); + int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
+ -ETIMEDOUT : + -EILSEQ; + + if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { + mrq->sbc->error = err; + __sdhci_finish_mrq(host, mrq); + return; + } + } + + if (intmask & SDHCI_INT_RESPONSE) + sdhci_finish_command(host); +} + +static void sdhci_adma_show_error(struct sdhci_host *host) +{ + void *desc = host->adma_table; + dma_addr_t dma = host->adma_addr; + + sdhci_dumpregs(host); + + while (true) { + struct sdhci_adma2_64_desc *dma_desc = desc; + + if (host->flags & SDHCI_USE_64_BIT_DMA) + SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", + (unsigned long long)dma, + le32_to_cpu(dma_desc->addr_hi), + le32_to_cpu(dma_desc->addr_lo), + le16_to_cpu(dma_desc->len), + le16_to_cpu(dma_desc->cmd)); + else + SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", + (unsigned long long)dma, + le32_to_cpu(dma_desc->addr_lo), + le16_to_cpu(dma_desc->len), + le16_to_cpu(dma_desc->cmd)); + + desc += host->desc_sz; + dma += host->desc_sz; + + if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) + break; + } +} + +static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) +{ + u32 command; + + /* CMD19 generates _only_ Buffer Read Ready interrupt */ + if (intmask & SDHCI_INT_DATA_AVAIL) { + command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); + if (command == MMC_SEND_TUNING_BLOCK || + command == MMC_SEND_TUNING_BLOCK_HS200) { + host->tuning_done = 1; + wake_up(&host->buf_ready_int); + return; + } + } + + if (!host->data) { + struct mmc_command *data_cmd = host->data_cmd; + + /* + * The "data complete" interrupt is also used to + * indicate that a busy state has ended. See comment + * above in sdhci_cmd_irq(). + */ + if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { + if (intmask & SDHCI_INT_DATA_TIMEOUT) { + host->data_cmd = NULL; + data_cmd->error = -ETIMEDOUT; + __sdhci_finish_mrq(host, data_cmd->mrq); + return; + } + if (intmask & SDHCI_INT_DATA_END) { + host->data_cmd = NULL; + /* + * Some cards handle busy-end interrupt + * before the command completed, so make + * sure we do things in the proper order. + */ + if (host->cmd == data_cmd) + return; + + __sdhci_finish_mrq(host, data_cmd->mrq); + return; + } + } + + /* + * SDHCI recovers from errors by resetting the cmd and data + * circuits. Until that is done, there very well might be more + * interrupts, so ignore them in that case. + */ + if (host->pending_reset) + return; + + pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", + mmc_hostname(host->mmc), (unsigned)intmask); + sdhci_dumpregs(host); + + return; + } + + if (intmask & SDHCI_INT_DATA_TIMEOUT) + host->data->error = -ETIMEDOUT; + else if (intmask & SDHCI_INT_DATA_END_BIT) + host->data->error = -EILSEQ; + else if ((intmask & SDHCI_INT_DATA_CRC) && + SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) + != MMC_BUS_TEST_R) + host->data->error = -EILSEQ; + else if (intmask & SDHCI_INT_ADMA_ERROR) { + pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), + intmask); + sdhci_adma_show_error(host); + host->data->error = -EIO; + if (host->ops->adma_workaround) + host->ops->adma_workaround(host, intmask); + } + + if (host->data->error) + sdhci_finish_data(host); + else { + if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) + sdhci_transfer_pio(host); + + /* + * We currently don't do anything fancy with DMA + * boundaries, but as we can't disable the feature + * we need to at least restart the transfer. 
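+		 *
+		 * Worked example (illustrative addresses): with the default
+		 * 512 KiB boundary, a transfer that started at dmastart =
+		 * 0x10000000 stops at the first boundary with bytes_xfered
+		 * still 0; the code below rounds dmanow up to 0x10080000,
+		 * records 0x80000 bytes as transferred and restarts SDMA at
+		 * that address.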
+ * + * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) + * should return a valid address to continue from, but as + * some controllers are faulty, don't trust them. + */ + if (intmask & SDHCI_INT_DMA_END) { + dma_addr_t dmastart, dmanow; + + dmastart = sdhci_sdma_address(host); + dmanow = dmastart + host->data->bytes_xfered; + /* + * Force update to the next DMA block boundary. + */ + dmanow = (dmanow & + ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + + SDHCI_DEFAULT_BOUNDARY_SIZE; + host->data->bytes_xfered = dmanow - dmastart; + DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", + &dmastart, host->data->bytes_xfered, &dmanow); + sdhci_set_sdma_addr(host, dmanow); + } + + if (intmask & SDHCI_INT_DATA_END) { + if (host->cmd == host->data_cmd) { + /* + * Data managed to finish before the + * command completed. Make sure we do + * things in the proper order. + */ + host->data_early = 1; + } else { + sdhci_finish_data(host); + } + } + } +} + +static inline bool sdhci_defer_done(struct sdhci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + return host->pending_reset || host->always_defer_done || + ((host->flags & SDHCI_REQ_USE_DMA) && data && + data->host_cookie == COOKIE_MAPPED); +} + +static irqreturn_t sdhci_irq(int irq, void *dev_id) +{ + struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; + irqreturn_t result = IRQ_NONE; + struct sdhci_host *host = dev_id; + u32 intmask, mask, unexpected = 0; + int max_loops = 16; + int i; + + spin_lock(&host->lock); + + if (host->runtime_suspended) { + spin_unlock(&host->lock); + return IRQ_NONE; + } + + intmask = sdhci_readl(host, SDHCI_INT_STATUS); + if (!intmask || intmask == 0xffffffff) { + result = IRQ_NONE; + goto out; + } + + do { + DBG("IRQ status 0x%08x\n", intmask); + + if (host->ops->irq) { + intmask = host->ops->irq(host, intmask); + if (!intmask) + goto cont; + } + + /* Clear selected interrupts. */ + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_BUS_POWER); + sdhci_writel(host, mask, SDHCI_INT_STATUS); + + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; + + /* + * There is an observation on i.MX eSDHC: the INSERT + * bit will be immediately set again when it gets + * cleared, if a card is inserted. We have to mask + * the irq to prevent an interrupt storm which would + * freeze the system. The REMOVE bit gets the + * same treatment. + * + * More testing is needed here to ensure it works + * for other platforms though. + */ + host->ier &= ~(SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE); + host->ier |= present ?
SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); + + host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE); + result = IRQ_WAKE_THREAD; + } + + if (intmask & SDHCI_INT_CMD_MASK) + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); + + if (intmask & SDHCI_INT_DATA_MASK) + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); + + if (intmask & SDHCI_INT_BUS_POWER) + pr_err("%s: Card is consuming too much power!\n", + mmc_hostname(host->mmc)); + + if (intmask & SDHCI_INT_RETUNE) + mmc_retune_needed(host->mmc); + + if ((intmask & SDHCI_INT_CARD_INT) && + (host->ier & SDHCI_INT_CARD_INT)) { + sdhci_enable_sdio_irq_nolock(host, false); + sdio_signal_irq(host->mmc); + } + + intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | + SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | + SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); + + if (intmask) { + unexpected |= intmask; + sdhci_writel(host, intmask, SDHCI_INT_STATUS); + } +cont: + if (result == IRQ_NONE) + result = IRQ_HANDLED; + + intmask = sdhci_readl(host, SDHCI_INT_STATUS); + } while (intmask && --max_loops); + + /* Determine if mrqs can be completed immediately */ + for (i = 0; i < SDHCI_MAX_MRQS; i++) { + struct mmc_request *mrq = host->mrqs_done[i]; + + if (!mrq) + continue; + + if (sdhci_defer_done(host, mrq)) { + result = IRQ_WAKE_THREAD; + } else { + mrqs_done[i] = mrq; + host->mrqs_done[i] = NULL; + } + } +out: + if (host->deferred_cmd) + result = IRQ_WAKE_THREAD; + + spin_unlock(&host->lock); + + /* Process mrqs ready for immediate completion */ + for (i = 0; i < SDHCI_MAX_MRQS; i++) { + if (!mrqs_done[i]) + continue; + + if (host->ops->request_done) + host->ops->request_done(host, mrqs_done[i]); + else + mmc_request_done(host->mmc, mrqs_done[i]); + } + + if (unexpected) { + pr_err("%s: Unexpected interrupt 0x%08x.\n", + mmc_hostname(host->mmc), unexpected); + sdhci_dumpregs(host); + } + + return result; +} + +static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) +{ + struct sdhci_host *host = dev_id; + struct mmc_command *cmd; + unsigned long flags; + u32 isr; + + while (!sdhci_request_done(host)) + ; + + spin_lock_irqsave(&host->lock, flags); + + isr = host->thread_isr; + host->thread_isr = 0; + + cmd = host->deferred_cmd; + if (cmd && !sdhci_send_command_retry(host, cmd, flags)) + sdhci_finish_mrq(host, cmd->mrq); + + spin_unlock_irqrestore(&host->lock, flags); + + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + struct mmc_host *mmc = host->mmc; + + mmc->ops->card_event(mmc); + mmc_detect_change(mmc, msecs_to_jiffies(200)); + } + + return IRQ_HANDLED; +} + +/*****************************************************************************\ + * * + * Suspend/resume * + * * +\*****************************************************************************/ + +#ifdef CONFIG_PM + +static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) +{ + return mmc_card_is_removable(host->mmc) && + !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && + !mmc_can_gpio_cd(host->mmc); +} + +/* + * To enable wakeup events, the corresponding events have to be enabled in + * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal + * Table' in the SD Host Controller Standard Specification. 
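+ * In the function below that means SDHCI_WAKE_ON_INSERT and + * SDHCI_WAKE_ON_REMOVE are paired with SDHCI_INT_CARD_INSERT and + * SDHCI_INT_CARD_REMOVE, and SDHCI_WAKE_ON_INT is paired with + * SDHCI_INT_CARD_INT.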
+ * It is useless to restore SDHCI_INT_ENABLE state in + * sdhci_disable_irq_wakeups() since it will be set by + * sdhci_enable_card_detection() or sdhci_init(). + */ +static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) +{ + u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | + SDHCI_WAKE_ON_INT; + u32 irq_val = 0; + u8 wake_val = 0; + u8 val; + + if (sdhci_cd_irq_can_wakeup(host)) { + wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; + irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; + } + + if (mmc_card_wake_sdio_irq(host->mmc)) { + wake_val |= SDHCI_WAKE_ON_INT; + irq_val |= SDHCI_INT_CARD_INT; + } + + if (!irq_val) + return false; + + val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); + val &= ~mask; + val |= wake_val; + sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); + + sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); + + host->irq_wake_enabled = !enable_irq_wake(host->irq); + + return host->irq_wake_enabled; +} + +static void sdhci_disable_irq_wakeups(struct sdhci_host *host) +{ + u8 val; + u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE + | SDHCI_WAKE_ON_INT; + + val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); + val &= ~mask; + sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); + + disable_irq_wake(host->irq); + + host->irq_wake_enabled = false; +} + +int sdhci_suspend_host(struct sdhci_host *host) +{ + sdhci_disable_card_detection(host); + + mmc_retune_timer_stop(host->mmc); + + if (!device_may_wakeup(mmc_dev(host->mmc)) || + !sdhci_enable_irq_wakeups(host)) { + host->ier = 0; + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); + free_irq(host->irq, host); + } + + return 0; +} + +EXPORT_SYMBOL_GPL(sdhci_suspend_host); + +int sdhci_resume_host(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + int ret = 0; + + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } + + if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && + (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { + /* Card keeps power but host controller does not */ + sdhci_init(host, 0); + host->pwr = 0; + host->clock = 0; + host->reinit_uhs = true; + mmc->ops->set_ios(mmc, &mmc->ios); + } else { + sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); + } + + if (host->irq_wake_enabled) { + sdhci_disable_irq_wakeups(host); + } else { + ret = request_threaded_irq(host->irq, sdhci_irq, + sdhci_thread_irq, IRQF_SHARED, + mmc_hostname(host->mmc), host); + if (ret) + return ret; + } + + sdhci_enable_card_detection(host); + + return ret; +} + +EXPORT_SYMBOL_GPL(sdhci_resume_host); + +int sdhci_runtime_suspend_host(struct sdhci_host *host) +{ + unsigned long flags; + + mmc_retune_timer_stop(host->mmc); + + spin_lock_irqsave(&host->lock, flags); + host->ier &= SDHCI_INT_CARD_INT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + spin_unlock_irqrestore(&host->lock, flags); + + synchronize_hardirq(host->irq); + + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = true; + spin_unlock_irqrestore(&host->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); + +int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) +{ + struct mmc_host *mmc = host->mmc; + unsigned long flags; + int host_flags = host->flags; + + if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } + + sdhci_init(host, soft_reset); + + if 
(mmc->ios.power_mode != MMC_POWER_UNDEFINED && + mmc->ios.power_mode != MMC_POWER_OFF) { + /* Force clock and power re-program */ + host->pwr = 0; + host->clock = 0; + host->reinit_uhs = true; + mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); + mmc->ops->set_ios(mmc, &mmc->ios); + + if ((host_flags & SDHCI_PV_ENABLED) && + !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { + spin_lock_irqsave(&host->lock, flags); + sdhci_enable_preset_value(host, true); + spin_unlock_irqrestore(&host->lock, flags); + } + + if ((mmc->caps2 & MMC_CAP2_HS400_ES) && + mmc->ops->hs400_enhanced_strobe) + mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); + } + + spin_lock_irqsave(&host->lock, flags); + + host->runtime_suspended = false; + + /* Enable SDIO IRQ */ + if (sdio_irq_claimed(mmc)) + sdhci_enable_sdio_irq_nolock(host, true); + + /* Enable Card Detection */ + sdhci_enable_card_detection(host); + + spin_unlock_irqrestore(&host->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); + +#endif /* CONFIG_PM */ + +/*****************************************************************************\ + * * + * Command Queue Engine (CQE) helpers * + * * +\*****************************************************************************/ + +void sdhci_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + u8 ctrl; + + spin_lock_irqsave(&host->lock, flags); + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + /* + * Hosts from v4.10 support the ADMA3 DMA type. + * ADMA3 uses an integrated descriptor, which is better suited + * to command queuing since it fetches both the command and the + * transfer descriptors. + */ + if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) + ctrl |= SDHCI_CTRL_ADMA3; + else if (host->flags & SDHCI_USE_64_BIT_DMA) + ctrl |= SDHCI_CTRL_ADMA64; + else + ctrl |= SDHCI_CTRL_ADMA32; + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + + sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), + SDHCI_BLOCK_SIZE); + + /* Set maximum timeout */ + sdhci_set_timeout(host, NULL); + + host->ier = host->cqe_ier; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + host->cqe_on = true; + + pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", + mmc_hostname(mmc), host->ier, + sdhci_readl(host, SDHCI_INT_STATUS)); + + spin_unlock_irqrestore(&host->lock, flags); +} +EXPORT_SYMBOL_GPL(sdhci_cqe_enable); + +void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + sdhci_set_default_irqs(host); + + host->cqe_on = false; + + if (recovery) { + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + } + + pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", + mmc_hostname(mmc), host->ier, + sdhci_readl(host, SDHCI_INT_STATUS)); + + spin_unlock_irqrestore(&host->lock, flags); +} +EXPORT_SYMBOL_GPL(sdhci_cqe_disable); + +bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, + int *data_error) +{ + u32 mask; + + if (!host->cqe_on) + return false; + + if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) + *cmd_error = -EILSEQ; + else if (intmask & SDHCI_INT_TIMEOUT) + *cmd_error = -ETIMEDOUT; + else + *cmd_error = 0; + + if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) + *data_error = -EILSEQ; + else if (intmask & SDHCI_INT_DATA_TIMEOUT) + *data_error = -ETIMEDOUT;
+ else if (intmask & SDHCI_INT_ADMA_ERROR) + *data_error = -EIO; + else + *data_error = 0; + + /* Clear selected interrupts. */ + mask = intmask & host->cqe_ier; + sdhci_writel(host, mask, SDHCI_INT_STATUS); + + if (intmask & SDHCI_INT_BUS_POWER) + pr_err("%s: Card is consuming too much power!\n", + mmc_hostname(host->mmc)); + + intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); + if (intmask) { + sdhci_writel(host, intmask, SDHCI_INT_STATUS); + pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", + mmc_hostname(host->mmc), intmask); + sdhci_dumpregs(host); + } + + return true; +} +EXPORT_SYMBOL_GPL(sdhci_cqe_irq); + +/*****************************************************************************\ + * * + * Device allocation/registration * + * * +\*****************************************************************************/ + +struct sdhci_host *sdhci_alloc_host(struct device *dev, + size_t priv_size) +{ + struct mmc_host *mmc; + struct sdhci_host *host; + + WARN_ON(dev == NULL); + + mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); + if (!mmc) + return ERR_PTR(-ENOMEM); + + host = mmc_priv(mmc); + host->mmc = mmc; + host->mmc_host_ops = sdhci_ops; + mmc->ops = &host->mmc_host_ops; + + host->flags = SDHCI_SIGNALING_330; + + host->cqe_ier = SDHCI_CQE_INT_MASK; + host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; + + host->tuning_delay = -1; + host->tuning_loop_count = MAX_TUNING_LOOP; + + host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; + + /* + * The DMA table descriptor count is calculated as the maximum + * number of segments times 2, to allow for an alignment + * descriptor for each segment, plus 1 for a nop end descriptor. + */ + host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; + host->max_adma = 65536; + + return host; +} + +EXPORT_SYMBOL_GPL(sdhci_alloc_host); + +static int sdhci_set_dma_mask(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *dev = mmc_dev(mmc); + int ret = -EINVAL; + + if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) + host->flags &= ~SDHCI_USE_64_BIT_DMA; + + /* Try 64-bit mask if hardware is capable of it */ + if (host->flags & SDHCI_USE_64_BIT_DMA) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) { + pr_warn("%s: Failed to set 64-bit DMA mask.\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_64_BIT_DMA; + } + } + + /* 32-bit mask as default & fallback */ + if (ret) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + pr_warn("%s: Failed to set 32-bit DMA mask.\n", + mmc_hostname(mmc)); + } + + return ret; +} + +void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, + const u32 *caps, const u32 *caps1) +{ + u16 v; + u64 dt_caps_mask = 0; + u64 dt_caps = 0; + + if (host->read_caps) + return; + + host->read_caps = true; + + if (debug_quirks) + host->quirks = debug_quirks; + + if (debug_quirks2) + host->quirks2 = debug_quirks2; + + sdhci_do_reset(host, SDHCI_RESET_ALL); + + if (host->v4_mode) + sdhci_do_enable_v4_mode(host); + + device_property_read_u64_array(mmc_dev(host->mmc), + "sdhci-caps-mask", &dt_caps_mask, 1); + device_property_read_u64_array(mmc_dev(host->mmc), + "sdhci-caps", &dt_caps, 1); + + v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); + host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; + + if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) + return; + + if (caps) { + host->caps = *caps; + } else { + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + host->caps &= ~lower_32_bits(dt_caps_mask); + host->caps |= lower_32_bits(dt_caps); + } + + if (host->version < SDHCI_SPEC_300) + return; + + if (caps1) { + host->caps1 = *caps1; + } else { + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + host->caps1 &= ~upper_32_bits(dt_caps_mask); + host->caps1 |= upper_32_bits(dt_caps); + } +} +EXPORT_SYMBOL_GPL(__sdhci_read_caps); + +static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + unsigned int max_blocks; + unsigned int bounce_size; + int ret; + + /* + * Cap the bounce buffer at 64KB. Using a bigger bounce buffer + * has diminishing returns; this is probably because SD/MMC + * cards are usually optimized to handle this size of requests. + */ + bounce_size = SZ_64K; + /* + * Adjust downwards to maximum request size if this is less + * than our segment size, else hammer down the maximum + * request size to the maximum buffer size. + */ + if (mmc->max_req_size < bounce_size) + bounce_size = mmc->max_req_size; + max_blocks = bounce_size / 512; + + /* + * When we just support one segment, we can get significant + * speedups with the help of a bounce buffer to group scattered + * reads/writes together. + */ + host->bounce_buffer = devm_kmalloc(mmc->parent, + bounce_size, + GFP_KERNEL); + if (!host->bounce_buffer) { + pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", + mmc_hostname(mmc), + bounce_size); + /* + * Returning early here makes sure we proceed with + * mmc->max_segs == 1. + */ + return; + } + + host->bounce_addr = dma_map_single(mmc->parent, + host->bounce_buffer, + bounce_size, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(mmc->parent, host->bounce_addr); + if (ret) + /* Again fall back to max_segs == 1 */ + return; + host->bounce_buffer_size = bounce_size; + + /* Lie about this since we're bouncing */ + mmc->max_segs = max_blocks; + mmc->max_seg_size = bounce_size; + mmc->max_req_size = bounce_size; + + pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n", + mmc_hostname(mmc), max_blocks, bounce_size); +} + +static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) +{ + /* + * According to SD Host Controller spec v4.10, bit[27] added from + * version 4.10 in Capabilities Register is used as 64-bit System + * Address support for V4 mode. + */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode) + return host->caps & SDHCI_CAN_64BIT_V4; + + return host->caps & SDHCI_CAN_64BIT; +} + +int sdhci_setup_host(struct sdhci_host *host) +{ + struct mmc_host *mmc; + u32 max_current_caps; + unsigned int ocr_avail; + unsigned int override_timeout_clk; + u32 max_clk; + int ret = 0; + bool enable_vqmmc = false; + + WARN_ON(host == NULL); + if (host == NULL) + return -EINVAL; + + mmc = host->mmc; + + /* + * If there are external regulators, get them. Note this must be done + * early before resetting the host and reading the capabilities so that + * the host can take the appropriate action if regulators are not + * available.
+ */ + if (!mmc->supply.vqmmc) { + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + enable_vqmmc = true; + } + + DBG("Version: 0x%08x | Present: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_VERSION), + sdhci_readl(host, SDHCI_PRESENT_STATE)); + DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", + sdhci_readl(host, SDHCI_CAPABILITIES), + sdhci_readl(host, SDHCI_CAPABILITIES_1)); + + sdhci_read_caps(host); + + override_timeout_clk = host->timeout_clk; + + if (host->version > SDHCI_SPEC_420) { + pr_err("%s: Unknown controller version (%d). You may experience problems.\n", + mmc_hostname(mmc), host->version); + } + + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) + host->flags |= SDHCI_USE_SDMA; + else if (!(host->caps & SDHCI_CAN_DO_SDMA)) + DBG("Controller doesn't have SDMA capability\n"); + else + host->flags |= SDHCI_USE_SDMA; + + if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && + (host->flags & SDHCI_USE_SDMA)) { + DBG("Disabling DMA as it is marked broken\n"); + host->flags &= ~SDHCI_USE_SDMA; + } + + if ((host->version >= SDHCI_SPEC_200) && + (host->caps & SDHCI_CAN_DO_ADMA2)) + host->flags |= SDHCI_USE_ADMA; + + if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && + (host->flags & SDHCI_USE_ADMA)) { + DBG("Disabling ADMA as it is marked broken\n"); + host->flags &= ~SDHCI_USE_ADMA; + } + + if (sdhci_can_64bit_dma(host)) + host->flags |= SDHCI_USE_64_BIT_DMA; + + if (host->use_external_dma) { + ret = sdhci_external_dma_init(host); + if (ret == -EPROBE_DEFER) + goto unreg; + /* + * Fall back to use the DMA/PIO integrated in standard SDHCI + * instead of external DMA devices. + */ + else if (ret) + sdhci_switch_external_dma(host, false); + /* Disable internal DMA sources */ + else + host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + } + + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->set_dma_mask) + ret = host->ops->set_dma_mask(host); + else + ret = sdhci_set_dma_mask(host); + + if (!ret && host->ops->enable_dma) + ret = host->ops->enable_dma(host); + + if (ret) { + pr_warn("%s: No suitable DMA available - falling back to PIO\n", + mmc_hostname(mmc)); + host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + + ret = 0; + } + } + + /* SDMA does not support 64-bit DMA if v4 mode not set */ + if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) + host->flags &= ~SDHCI_USE_SDMA; + + if (host->flags & SDHCI_USE_ADMA) { + dma_addr_t dma; + void *buf; + + if (!(host->flags & SDHCI_USE_64_BIT_DMA)) + host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; + else if (!host->alloc_desc_sz) + host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); + + host->desc_sz = host->alloc_desc_sz; + host->adma_table_sz = host->adma_table_cnt * host->desc_sz; + + host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; + /* + * Use zalloc to zero the reserved high 32-bits of 128-bit + * descriptors so that they never need to be written. 
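+ * + * Sizing sketch, assuming the defaults set up earlier: adma_table_cnt + * is SDHCI_MAX_SEGS * 2 + 1 = 257 descriptors, so with 16-byte V4-mode + * descriptors adma_table_sz comes to 4112 bytes, allocated together + * with align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN = 512 bytes.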
+ */ + buf = dma_alloc_coherent(mmc_dev(mmc), + host->align_buffer_sz + host->adma_table_sz, + &dma, GFP_KERNEL); + if (!buf) { + pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + } else if ((dma + host->align_buffer_sz) & + (SDHCI_ADMA2_DESC_ALIGN - 1)) { + pr_warn("%s: unable to allocate aligned ADMA descriptor\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + + host->adma_table_sz, buf, dma); + } else { + host->align_buffer = buf; + host->align_addr = dma; + + host->adma_table = buf + host->align_buffer_sz; + host->adma_addr = dma + host->align_buffer_sz; + } + } + + /* + * If we use DMA, then it's up to the caller to set the DMA + * mask, but PIO does not need the hw shim so we set a new + * mask here in that case. + */ + if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { + host->dma_mask = DMA_BIT_MASK(64); + mmc_dev(mmc)->dma_mask = &host->dma_mask; + } + + if (host->version >= SDHCI_SPEC_300) + host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); + else + host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); + + host->max_clk *= 1000000; + if (host->max_clk == 0 || host->quirks & + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { + if (!host->ops->get_max_clock) { + pr_err("%s: Hardware doesn't specify base clock frequency.\n", + mmc_hostname(mmc)); + ret = -ENODEV; + goto undma; + } + host->max_clk = host->ops->get_max_clock(host); + } + + /* + * In case of Host Controller v3.00, find out whether clock + * multiplier is supported. + */ + host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); + + /* + * In case the value in Clock Multiplier is 0, then programmable + * clock mode is not supported, otherwise the actual clock + * multiplier is one more than the value of Clock Multiplier + * in the Capabilities Register. + */ + if (host->clk_mul) + host->clk_mul += 1; + + /* + * Set host parameters. + */ + max_clk = host->max_clk; + + if (host->ops->get_min_clock) + mmc->f_min = host->ops->get_min_clock(host); + else if (host->version >= SDHCI_SPEC_300) { + if (host->clk_mul) + max_clk = host->max_clk * host->clk_mul; + /* + * Divided Clock Mode minimum clock rate is always less than + * Programmable Clock Mode minimum clock rate. + */ + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + } else + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + + if (!mmc->f_max || mmc->f_max > max_clk) + mmc->f_max = max_clk; + + if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { + host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); + + if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) + host->timeout_clk *= 1000; + + if (host->timeout_clk == 0) { + if (!host->ops->get_timeout_clock) { + pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", + mmc_hostname(mmc)); + ret = -ENODEV; + goto undma; + } + + host->timeout_clk = + DIV_ROUND_UP(host->ops->get_timeout_clock(host), + 1000); + } + + if (override_timeout_clk) + host->timeout_clk = override_timeout_clk; + + mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
+ host->ops->get_max_timeout_count(host) : 1 << 27; + mmc->max_busy_timeout /= host->timeout_clk; + } + + if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && + !host->ops->get_max_timeout_count) + mmc->max_busy_timeout = 0; + + mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) + host->flags |= SDHCI_AUTO_CMD12; + + /* + * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. + * For v4 mode, SDMA may use Auto-CMD23 as well. + */ + if ((host->version >= SDHCI_SPEC_300) && + ((host->flags & SDHCI_USE_ADMA) || + !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && + !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { + host->flags |= SDHCI_AUTO_CMD23; + DBG("Auto-CMD23 available\n"); + } else { + DBG("Auto-CMD23 unavailable\n"); + } + + /* + * A controller may support 8-bit width, but the board itself + * might not have the pins brought out. Boards that support + * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in + * their platform code before calling sdhci_add_host(), and we + * won't assume 8-bit width for hosts without that CAP. + */ + if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) + mmc->caps |= MMC_CAP_4_BIT_DATA; + + if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) + mmc->caps &= ~MMC_CAP_CMD23; + + if (host->caps & SDHCI_CAN_DO_HISPD) + mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; + + if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && + mmc_card_is_removable(mmc) && + mmc_gpio_get_cd(host->mmc) < 0) + mmc->caps |= MMC_CAP_NEEDS_POLL; + + if (!IS_ERR(mmc->supply.vqmmc)) { + if (enable_vqmmc) { + ret = regulator_enable(mmc->supply.vqmmc); + host->sdhci_core_to_disable_vqmmc = !ret; + } + + /* If vqmmc provides no 1.8V signalling, then there's no UHS */ + if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, + 1950000)) + host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50); + + /* In eMMC case vqmmc might be a fixed 1.8V regulator */ + if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, + 3600000)) + host->flags &= ~SDHCI_SIGNALING_330; + + if (ret) { + pr_warn("%s: Failed to enable vqmmc regulator: %d\n", + mmc_hostname(mmc), ret); + mmc->supply.vqmmc = ERR_PTR(-EINVAL); + } + + } + + if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { + host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50); + /* + * The SDHCI controller in a SoC might support HS200/HS400 + * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), + * but if the board is modeled such that the IO lines are not + * connected to 1.8v then HS200/HS400 cannot be supported. + * Disable HS200/HS400 if the board does not have 1.8v connected + * to the IO lines. (Applicable for other modes in 1.8v) + */ + mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); + mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); + } + + /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ + if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50)) + mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + + /* SDR104 support also implies SDR50 support */ + if (host->caps1 & SDHCI_SUPPORT_SDR104) { + mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; + /* SD3.0: SDR104 is supported so (for eMMC) the caps2 + * field can be promoted to support HS200.
+ */ + if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) + mmc->caps2 |= MMC_CAP2_HS200; + } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { + mmc->caps |= MMC_CAP_UHS_SDR50; + } + + if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && + (host->caps1 & SDHCI_SUPPORT_HS400)) + mmc->caps2 |= MMC_CAP2_HS400; + + if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && + (IS_ERR(mmc->supply.vqmmc) || + !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, + 1300000))) + mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; + + if ((host->caps1 & SDHCI_SUPPORT_DDR50) && + !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) + mmc->caps |= MMC_CAP_UHS_DDR50; + + /* Does the host need tuning for SDR50? */ + if (host->caps1 & SDHCI_USE_SDR50_TUNING) + host->flags |= SDHCI_SDR50_NEEDS_TUNING; + + /* Driver Type(s) (A, C, D) supported by the host */ + if (host->caps1 & SDHCI_DRIVER_TYPE_A) + mmc->caps |= MMC_CAP_DRIVER_TYPE_A; + if (host->caps1 & SDHCI_DRIVER_TYPE_C) + mmc->caps |= MMC_CAP_DRIVER_TYPE_C; + if (host->caps1 & SDHCI_DRIVER_TYPE_D) + mmc->caps |= MMC_CAP_DRIVER_TYPE_D; + + /* Initial value for re-tuning timer count */ + host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, + host->caps1); + + /* + * In case Re-tuning Timer is not disabled, the actual value of + * re-tuning timer will be 2 ^ (n - 1). + */ + if (host->tuning_count) + host->tuning_count = 1 << (host->tuning_count - 1); + + /* Re-tuning mode supported by the Host Controller */ + host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); + + ocr_avail = 0; + + /* + * According to SD Host Controller spec v3.00, if the Host System + * can afford more than 150mA, Host Driver should set XPC to 1. Also + * the value is meaningful only if Voltage Support in the Capabilities + * register is set. The actual current value is 4 times the register + * value. + */ + max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); + if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { + int curr = regulator_get_current_limit(mmc->supply.vmmc); + if (curr > 0) { + + /* convert to SDHCI_MAX_CURRENT format */ + curr = curr/1000; /* convert to mA */ + curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; + + curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); + max_current_caps = + FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | + FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | + FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); + } + } + + if (host->caps & SDHCI_CAN_VDD_330) { + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, + max_current_caps) * + SDHCI_MAX_CURRENT_MULTIPLIER; + } + if (host->caps & SDHCI_CAN_VDD_300) { + ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; + + mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, + max_current_caps) * + SDHCI_MAX_CURRENT_MULTIPLIER; + } + if (host->caps & SDHCI_CAN_VDD_180) { + ocr_avail |= MMC_VDD_165_195; + + mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, + max_current_caps) * + SDHCI_MAX_CURRENT_MULTIPLIER; + } + + /* If OCR set by host, use it instead. */ + if (host->ocr_mask) + ocr_avail = host->ocr_mask; + + /* If OCR set by external regulators, give it highest prio. 
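+ * The resulting precedence, lowest to highest: voltage bits derived + * from the capability registers, then host->ocr_mask set by the host + * driver, then an ocr_avail supplied by external regulators.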
*/ + if (mmc->ocr_avail) + ocr_avail = mmc->ocr_avail; + + mmc->ocr_avail = ocr_avail; + mmc->ocr_avail_sdio = ocr_avail; + if (host->ocr_avail_sdio) + mmc->ocr_avail_sdio &= host->ocr_avail_sdio; + mmc->ocr_avail_sd = ocr_avail; + if (host->ocr_avail_sd) + mmc->ocr_avail_sd &= host->ocr_avail_sd; + else /* normal SD controllers don't support 1.8V */ + mmc->ocr_avail_sd &= ~MMC_VDD_165_195; + mmc->ocr_avail_mmc = ocr_avail; + if (host->ocr_avail_mmc) + mmc->ocr_avail_mmc &= host->ocr_avail_mmc; + + if (mmc->ocr_avail == 0) { + pr_err("%s: Hardware doesn't report any supported voltages.\n", + mmc_hostname(mmc)); + ret = -ENODEV; + goto unreg; + } + + if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | + MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || + (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) + host->flags |= SDHCI_SIGNALING_180; + + if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) + host->flags |= SDHCI_SIGNALING_120; + + spin_lock_init(&host->lock); + + /* + * Maximum number of sectors in one transfer. Limited by SDMA boundary + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this + * is less anyway. + */ + mmc->max_req_size = 524288; + + /* + * Maximum number of segments. Depends on whether the hardware + * can do scatter/gather or not. + */ + if (host->flags & SDHCI_USE_ADMA) { + mmc->max_segs = SDHCI_MAX_SEGS; + } else if (host->flags & SDHCI_USE_SDMA) { + mmc->max_segs = 1; + if (swiotlb_max_segment()) { + unsigned int max_req_size = (1 << IO_TLB_SHIFT) * + IO_TLB_SEGSIZE; + mmc->max_req_size = min(mmc->max_req_size, + max_req_size); + } + } else { /* PIO */ + mmc->max_segs = SDHCI_MAX_SEGS; + } + + /* + * Maximum segment size. Could be one segment with the maximum number + * of bytes. When doing hardware scatter/gather, each entry cannot + * be larger than 64 KiB though. + */ + if (host->flags & SDHCI_USE_ADMA) { + if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { + host->max_adma = 65532; /* 32-bit alignment */ + mmc->max_seg_size = 65535; + } else { + mmc->max_seg_size = 65536; + } + } else { + mmc->max_seg_size = mmc->max_req_size; + } + + /* + * Maximum block size. This varies from controller to controller and + * is specified in the capabilities register. + */ + if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { + mmc->max_blk_size = 2; + } else { + mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> + SDHCI_MAX_BLOCK_SHIFT; + if (mmc->max_blk_size >= 3) { + pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", + mmc_hostname(mmc)); + mmc->max_blk_size = 0; + } + } + + mmc->max_blk_size = 512 << mmc->max_blk_size; + + /* + * Maximum block count. + */ + mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ?
1 : 65535; + + if (mmc->max_segs == 1) + /* This may alter mmc->*_blk_* parameters */ + sdhci_allocate_bounce_buffer(host); + + return 0; + +unreg: + if (host->sdhci_core_to_disable_vqmmc) + regulator_disable(mmc->supply.vqmmc); +undma: + if (host->align_buffer) + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + + host->adma_table_sz, host->align_buffer, + host->align_addr); + host->adma_table = NULL; + host->align_buffer = NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(sdhci_setup_host); + +void sdhci_cleanup_host(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + + if (host->sdhci_core_to_disable_vqmmc) + regulator_disable(mmc->supply.vqmmc); + + if (host->align_buffer) + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + + host->adma_table_sz, host->align_buffer, + host->align_addr); + + if (host->use_external_dma) + sdhci_external_dma_release(host); + + host->adma_table = NULL; + host->align_buffer = NULL; +} +EXPORT_SYMBOL_GPL(sdhci_cleanup_host); + +int __sdhci_add_host(struct sdhci_host *host) +{ + unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; + struct mmc_host *mmc = host->mmc; + int ret; + + if ((mmc->caps2 & MMC_CAP2_CQE) && + (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { + mmc->caps2 &= ~MMC_CAP2_CQE; + mmc->cqe_ops = NULL; + } + + host->complete_wq = alloc_workqueue("sdhci", flags, 0); + if (!host->complete_wq) + return -ENOMEM; + + INIT_WORK(&host->complete_work, sdhci_complete_work); + + timer_setup(&host->timer, sdhci_timeout_timer, 0); + timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); + + init_waitqueue_head(&host->buf_ready_int); + + sdhci_init(host, 0); + + ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, + IRQF_SHARED, mmc_hostname(mmc), host); + if (ret) { + pr_err("%s: Failed to request IRQ %d: %d\n", + mmc_hostname(mmc), host->irq, ret); + goto unwq; + } + + ret = sdhci_led_register(host); + if (ret) { + pr_err("%s: Failed to register LED device: %d\n", + mmc_hostname(mmc), ret); + goto unirq; + } + + ret = mmc_add_host(mmc); + if (ret) + goto unled; + + pr_info("%s: SDHCI controller on %s [%s] using %s\n", + mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), + host->use_external_dma ? "External DMA" : + (host->flags & SDHCI_USE_ADMA) ? + (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : + (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); + + sdhci_enable_card_detection(host); + + return 0; + +unled: + sdhci_led_unregister(host); +unirq: + sdhci_do_reset(host, SDHCI_RESET_ALL); + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); + free_irq(host->irq, host); +unwq: + destroy_workqueue(host->complete_wq); + + return ret; +} +EXPORT_SYMBOL_GPL(__sdhci_add_host); + +int sdhci_add_host(struct sdhci_host *host) +{ + int ret; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + + return ret; +} +EXPORT_SYMBOL_GPL(sdhci_add_host); + +void sdhci_remove_host(struct sdhci_host *host, int dead) +{ + struct mmc_host *mmc = host->mmc; + unsigned long flags; + + if (dead) { + spin_lock_irqsave(&host->lock, flags); + + host->flags |= SDHCI_DEVICE_DEAD; + + if (sdhci_has_requests(host)) { + pr_err("%s: Controller removed during transfer!\n", + mmc_hostname(mmc)); + sdhci_error_out_mrqs(host, -ENOMEDIUM); + } + + spin_unlock_irqrestore(&host->lock, flags); + } + + sdhci_disable_card_detection(host); + + mmc_remove_host(mmc); + + sdhci_led_unregister(host); + + if (!dead) + sdhci_do_reset(host, SDHCI_RESET_ALL); + + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); + free_irq(host->irq, host); + + del_timer_sync(&host->timer); + del_timer_sync(&host->data_timer); + + destroy_workqueue(host->complete_wq); + + if (host->sdhci_core_to_disable_vqmmc) + regulator_disable(mmc->supply.vqmmc); + + if (host->align_buffer) + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + + host->adma_table_sz, host->align_buffer, + host->align_addr); + + if (host->use_external_dma) + sdhci_external_dma_release(host); + + host->adma_table = NULL; + host->align_buffer = NULL; +} + +EXPORT_SYMBOL_GPL(sdhci_remove_host); + +void sdhci_free_host(struct sdhci_host *host) +{ + mmc_free_host(host->mmc); +} + +EXPORT_SYMBOL_GPL(sdhci_free_host); + +/*****************************************************************************\ + * * + * Driver init/exit * + * * +\*****************************************************************************/ + +static int __init sdhci_drv_init(void) +{ + pr_info(DRIVER_NAME + ": Secure Digital Host Controller Interface driver\n"); + pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); + + return 0; +} + +static void __exit sdhci_drv_exit(void) +{ +} + +module_init(sdhci_drv_init); +module_exit(sdhci_drv_exit); + +module_param(debug_quirks, uint, 0444); +module_param(debug_quirks2, uint, 0444); + +MODULE_AUTHOR("Pierre Ossman "); +MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); +MODULE_LICENSE("GPL"); + +MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); +MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h new file mode 100644 index 000000000..4db57c3a8 --- /dev/null +++ b/drivers/mmc/host/sdhci.h @@ -0,0 +1,816 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver + * + * Header file for Host Controller registers and I/O accessors. + * + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ */ +#ifndef __SDHCI_HW_H +#define __SDHCI_HW_H + +#include <linux/bits.h> +#include <linux/scatterlist.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/leds.h> +#include <linux/interrupt.h> + +#include <linux/mmc/host.h> + +/* + * Controller registers + */ + +#define SDHCI_DMA_ADDRESS 0x00 +#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS +#define SDHCI_32BIT_BLK_CNT SDHCI_DMA_ADDRESS + +#define SDHCI_BLOCK_SIZE 0x04 +#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) + +#define SDHCI_BLOCK_COUNT 0x06 + +#define SDHCI_ARGUMENT 0x08 + +#define SDHCI_TRANSFER_MODE 0x0C +#define SDHCI_TRNS_DMA 0x01 +#define SDHCI_TRNS_BLK_CNT_EN 0x02 +#define SDHCI_TRNS_AUTO_CMD12 0x04 +#define SDHCI_TRNS_AUTO_CMD23 0x08 +#define SDHCI_TRNS_AUTO_SEL 0x0C +#define SDHCI_TRNS_READ 0x10 +#define SDHCI_TRNS_MULTI 0x20 + +#define SDHCI_COMMAND 0x0E +#define SDHCI_CMD_RESP_MASK 0x03 +#define SDHCI_CMD_CRC 0x08 +#define SDHCI_CMD_INDEX 0x10 +#define SDHCI_CMD_DATA 0x20 +#define SDHCI_CMD_ABORTCMD 0xC0 + +#define SDHCI_CMD_RESP_NONE 0x00 +#define SDHCI_CMD_RESP_LONG 0x01 +#define SDHCI_CMD_RESP_SHORT 0x02 +#define SDHCI_CMD_RESP_SHORT_BUSY 0x03 + +#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) +#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f) + +#define SDHCI_RESPONSE 0x10 + +#define SDHCI_BUFFER 0x20 + +#define SDHCI_PRESENT_STATE 0x24 +#define SDHCI_CMD_INHIBIT 0x00000001 +#define SDHCI_DATA_INHIBIT 0x00000002 +#define SDHCI_DOING_WRITE 0x00000100 +#define SDHCI_DOING_READ 0x00000200 +#define SDHCI_SPACE_AVAILABLE 0x00000400 +#define SDHCI_DATA_AVAILABLE 0x00000800 +#define SDHCI_CARD_PRESENT 0x00010000 +#define SDHCI_CARD_PRES_SHIFT 16 +#define SDHCI_CD_STABLE 0x00020000 +#define SDHCI_CD_LVL 0x00040000 +#define SDHCI_CD_LVL_SHIFT 18 +#define SDHCI_WRITE_PROTECT 0x00080000 +#define SDHCI_DATA_LVL_MASK 0x00F00000 +#define SDHCI_DATA_LVL_SHIFT 20 +#define SDHCI_DATA_0_LVL_MASK 0x00100000 +#define SDHCI_CMD_LVL 0x01000000 + +#define SDHCI_HOST_CONTROL 0x28 +#define SDHCI_CTRL_LED 0x01 +#define SDHCI_CTRL_4BITBUS 0x02 +#define SDHCI_CTRL_HISPD 0x04 +#define SDHCI_CTRL_DMA_MASK 0x18 +#define SDHCI_CTRL_SDMA 0x00 +#define SDHCI_CTRL_ADMA1 0x08 +#define SDHCI_CTRL_ADMA32 0x10 +#define SDHCI_CTRL_ADMA64 0x18 +#define SDHCI_CTRL_ADMA3 0x18 +#define SDHCI_CTRL_8BITBUS 0x20 +#define SDHCI_CTRL_CDTEST_INS 0x40 +#define SDHCI_CTRL_CDTEST_EN 0x80 + +#define SDHCI_POWER_CONTROL 0x29 +#define SDHCI_POWER_ON 0x01 +#define SDHCI_POWER_180 0x0A +#define SDHCI_POWER_300 0x0C +#define SDHCI_POWER_330 0x0E + +#define SDHCI_BLOCK_GAP_CONTROL 0x2A + +#define SDHCI_WAKE_UP_CONTROL 0x2B +#define SDHCI_WAKE_ON_INT 0x01 +#define SDHCI_WAKE_ON_INSERT 0x02 +#define SDHCI_WAKE_ON_REMOVE 0x04 + +#define SDHCI_CLOCK_CONTROL 0x2C +#define SDHCI_DIVIDER_SHIFT 8 +#define SDHCI_DIVIDER_HI_SHIFT 6 +#define SDHCI_DIV_MASK 0xFF +#define SDHCI_DIV_MASK_LEN 8 +#define SDHCI_DIV_HI_MASK 0x300 +#define SDHCI_PROG_CLOCK_MODE 0x0020 +#define SDHCI_CLOCK_CARD_EN 0x0004 +#define SDHCI_CLOCK_PLL_EN 0x0008 +#define SDHCI_CLOCK_INT_STABLE 0x0002 +#define SDHCI_CLOCK_INT_EN 0x0001 + +#define SDHCI_TIMEOUT_CONTROL 0x2E + +#define SDHCI_SOFTWARE_RESET 0x2F +#define SDHCI_RESET_ALL 0x01 +#define SDHCI_RESET_CMD 0x02 +#define SDHCI_RESET_DATA 0x04 + +#define SDHCI_INT_STATUS 0x30 +#define SDHCI_INT_ENABLE 0x34 +#define SDHCI_SIGNAL_ENABLE 0x38 +#define SDHCI_INT_RESPONSE 0x00000001 +#define SDHCI_INT_DATA_END 0x00000002 +#define SDHCI_INT_BLK_GAP 0x00000004 +#define SDHCI_INT_DMA_END 0x00000008 +#define SDHCI_INT_SPACE_AVAIL 0x00000010 +#define SDHCI_INT_DATA_AVAIL 0x00000020 +#define SDHCI_INT_CARD_INSERT 0x00000040 +#define
SDHCI_INT_CARD_REMOVE 0x00000080 +#define SDHCI_INT_CARD_INT 0x00000100 +#define SDHCI_INT_RETUNE 0x00001000 +#define SDHCI_INT_CQE 0x00004000 +#define SDHCI_INT_ERROR 0x00008000 +#define SDHCI_INT_TIMEOUT 0x00010000 +#define SDHCI_INT_CRC 0x00020000 +#define SDHCI_INT_END_BIT 0x00040000 +#define SDHCI_INT_INDEX 0x00080000 +#define SDHCI_INT_DATA_TIMEOUT 0x00100000 +#define SDHCI_INT_DATA_CRC 0x00200000 +#define SDHCI_INT_DATA_END_BIT 0x00400000 +#define SDHCI_INT_BUS_POWER 0x00800000 +#define SDHCI_INT_AUTO_CMD_ERR 0x01000000 +#define SDHCI_INT_ADMA_ERROR 0x02000000 + +#define SDHCI_INT_NORMAL_MASK 0x00007FFF +#define SDHCI_INT_ERROR_MASK 0xFFFF8000 + +#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \ + SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \ + SDHCI_INT_AUTO_CMD_ERR) +#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ + SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ + SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ + SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \ + SDHCI_INT_BLK_GAP) +#define SDHCI_INT_ALL_MASK ((unsigned int)-1) + +#define SDHCI_CQE_INT_ERR_MASK ( \ + SDHCI_INT_ADMA_ERROR | SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | \ + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | \ + SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT) + +#define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE) + +#define SDHCI_AUTO_CMD_STATUS 0x3C +#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002 +#define SDHCI_AUTO_CMD_CRC 0x00000004 +#define SDHCI_AUTO_CMD_END_BIT 0x00000008 +#define SDHCI_AUTO_CMD_INDEX 0x00000010 + +#define SDHCI_HOST_CONTROL2 0x3E +#define SDHCI_CTRL_UHS_MASK 0x0007 +#define SDHCI_CTRL_UHS_SDR12 0x0000 +#define SDHCI_CTRL_UHS_SDR25 0x0001 +#define SDHCI_CTRL_UHS_SDR50 0x0002 +#define SDHCI_CTRL_UHS_SDR104 0x0003 +#define SDHCI_CTRL_UHS_DDR50 0x0004 +#define SDHCI_CTRL_HS400 0x0005 /* Non-standard */ +#define SDHCI_CTRL_VDD_180 0x0008 +#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 +#define SDHCI_CTRL_DRV_TYPE_B 0x0000 +#define SDHCI_CTRL_DRV_TYPE_A 0x0010 +#define SDHCI_CTRL_DRV_TYPE_C 0x0020 +#define SDHCI_CTRL_DRV_TYPE_D 0x0030 +#define SDHCI_CTRL_EXEC_TUNING 0x0040 +#define SDHCI_CTRL_TUNED_CLK 0x0080 +#define SDHCI_CMD23_ENABLE 0x0800 +#define SDHCI_CTRL_V4_MODE 0x1000 +#define SDHCI_CTRL_64BIT_ADDR 0x2000 +#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000 + +#define SDHCI_CAPABILITIES 0x40 +#define SDHCI_TIMEOUT_CLK_MASK GENMASK(5, 0) +#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080 +#define SDHCI_CLOCK_BASE_MASK GENMASK(13, 8) +#define SDHCI_CLOCK_V3_BASE_MASK GENMASK(15, 8) +#define SDHCI_MAX_BLOCK_MASK 0x00030000 +#define SDHCI_MAX_BLOCK_SHIFT 16 +#define SDHCI_CAN_DO_8BIT 0x00040000 +#define SDHCI_CAN_DO_ADMA2 0x00080000 +#define SDHCI_CAN_DO_ADMA1 0x00100000 +#define SDHCI_CAN_DO_HISPD 0x00200000 +#define SDHCI_CAN_DO_SDMA 0x00400000 +#define SDHCI_CAN_DO_SUSPEND 0x00800000 +#define SDHCI_CAN_VDD_330 0x01000000 +#define SDHCI_CAN_VDD_300 0x02000000 +#define SDHCI_CAN_VDD_180 0x04000000 +#define SDHCI_CAN_64BIT_V4 0x08000000 +#define SDHCI_CAN_64BIT 0x10000000 + +#define SDHCI_CAPABILITIES_1 0x44 +#define SDHCI_SUPPORT_SDR50 0x00000001 +#define SDHCI_SUPPORT_SDR104 0x00000002 +#define SDHCI_SUPPORT_DDR50 0x00000004 +#define SDHCI_DRIVER_TYPE_A 0x00000010 +#define SDHCI_DRIVER_TYPE_C 0x00000020 +#define SDHCI_DRIVER_TYPE_D 0x00000040 +#define SDHCI_RETUNING_TIMER_COUNT_MASK GENMASK(11, 8) +#define SDHCI_USE_SDR50_TUNING 0x00002000 +#define SDHCI_RETUNING_MODE_MASK GENMASK(15, 14) +#define 
SDHCI_CLOCK_MUL_MASK GENMASK(23, 16) +#define SDHCI_CAN_DO_ADMA3 0x08000000 +#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */ + +#define SDHCI_MAX_CURRENT 0x48 +#define SDHCI_MAX_CURRENT_LIMIT GENMASK(7, 0) +#define SDHCI_MAX_CURRENT_330_MASK GENMASK(7, 0) +#define SDHCI_MAX_CURRENT_300_MASK GENMASK(15, 8) +#define SDHCI_MAX_CURRENT_180_MASK GENMASK(23, 16) +#define SDHCI_MAX_CURRENT_MULTIPLIER 4 + +/* 4C-4F reserved for more max current */ + +#define SDHCI_SET_ACMD12_ERROR 0x50 +#define SDHCI_SET_INT_ERROR 0x52 + +#define SDHCI_ADMA_ERROR 0x54 + +/* 55-57 reserved */ + +#define SDHCI_ADMA_ADDRESS 0x58 +#define SDHCI_ADMA_ADDRESS_HI 0x5C + +/* 60-FB reserved */ + +#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64 +#define SDHCI_PRESET_FOR_SDR12 0x66 +#define SDHCI_PRESET_FOR_SDR25 0x68 +#define SDHCI_PRESET_FOR_SDR50 0x6A +#define SDHCI_PRESET_FOR_SDR104 0x6C +#define SDHCI_PRESET_FOR_DDR50 0x6E +#define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */ +#define SDHCI_PRESET_DRV_MASK GENMASK(15, 14) +#define SDHCI_PRESET_CLKGEN_SEL BIT(10) +#define SDHCI_PRESET_SDCLK_FREQ_MASK GENMASK(9, 0) + +#define SDHCI_SLOT_INT_STATUS 0xFC + +#define SDHCI_HOST_VERSION 0xFE +#define SDHCI_VENDOR_VER_MASK 0xFF00 +#define SDHCI_VENDOR_VER_SHIFT 8 +#define SDHCI_SPEC_VER_MASK 0x00FF +#define SDHCI_SPEC_VER_SHIFT 0 +#define SDHCI_SPEC_100 0 +#define SDHCI_SPEC_200 1 +#define SDHCI_SPEC_300 2 +#define SDHCI_SPEC_400 3 +#define SDHCI_SPEC_410 4 +#define SDHCI_SPEC_420 5 + +/* + * End of controller registers. + */ + +#define SDHCI_MAX_DIV_SPEC_200 256 +#define SDHCI_MAX_DIV_SPEC_300 2046 + +/* + * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. + */ +#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) +#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) + +/* ADMA2 32-bit DMA descriptor size */ +#define SDHCI_ADMA2_32_DESC_SZ 8 + +/* ADMA2 32-bit descriptor */ +struct sdhci_adma2_32_desc { + __le16 cmd; + __le16 len; + __le32 addr; +} __packed __aligned(4); + +/* ADMA2 data alignment */ +#define SDHCI_ADMA2_ALIGN 4 +#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1) + +/* + * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte + * alignment for the descriptor table even in 32-bit DMA mode. Memory + * allocation is at least 8 byte aligned anyway, so just stipulate 8 always. + */ +#define SDHCI_ADMA2_DESC_ALIGN 8 + +/* + * ADMA2 64-bit DMA descriptor size + * According to SD Host Controller spec v4.10, there are two kinds of + * descriptors for 64-bit addressing mode: 96-bit Descriptor and 128-bit + * Descriptor. If Host Version 4 Enable is set in the Host Control 2 + * register, the 128-bit Descriptor will be selected. + */ +#define SDHCI_ADMA2_64_DESC_SZ(host) ((host)->v4_mode ? 16 : 12) + +/* + * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte + * aligned. + */ +struct sdhci_adma2_64_desc { + __le16 cmd; + __le16 len; + __le32 addr_lo; + __le32 addr_hi; +} __packed __aligned(4); + +#define ADMA2_TRAN_VALID 0x21 +#define ADMA2_NOP_END_VALID 0x3 +#define ADMA2_END 0x2 + +/* + * Maximum segments assuming a 512KiB maximum request size and a minimum + * 4KiB page size. Note this also allows enough for multiple descriptors in + * case of PAGE_SIZE >= 64KiB. + */ +#define SDHCI_MAX_SEGS 128 + +/* Allow for a command request and a data request at the same time */ +#define SDHCI_MAX_MRQS 2 + +/* + * A 48-bit command and a 136-bit response at a 100 kHz clock could take up to 2.48 ms.
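 + * (That figure is 48 + 136 = 184 bit periods, presumably plus the + * nominal up-to-64-clock gap between command and response: 248 cycles + * at 100 kHz = 2.48 ms.)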
+ * However since the start time of the command, the time between + * command and response, and the time between response and start of data is + * not known, set the command transfer time to 10ms. + */ +#define MMC_CMD_TRANSFER_TIME (10 * NSEC_PER_MSEC) /* max 10 ms */ + +enum sdhci_cookie { + COOKIE_UNMAPPED, + COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */ + COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */ +}; + +struct sdhci_host { + /* Data set by hardware interface driver */ + const char *hw_name; /* Hardware bus name */ + + unsigned int quirks; /* Deviations from spec. */ + +/* Controller doesn't honor resets unless we touch the clock register */ +#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) +/* Controller has bad caps bits, but really supports DMA */ +#define SDHCI_QUIRK_FORCE_DMA (1<<1) +/* Controller doesn't like to be reset when there is no card inserted. */ +#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) +/* Controller doesn't like clearing the power reg before a change */ +#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) +/* Controller has flaky internal state so reset it on each ios change */ +#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) +/* Controller has an unusable DMA engine */ +#define SDHCI_QUIRK_BROKEN_DMA (1<<5) +/* Controller has an unusable ADMA engine */ +#define SDHCI_QUIRK_BROKEN_ADMA (1<<6) +/* Controller can only DMA from 32-bit aligned addresses */ +#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7) +/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8) +/* Controller can only ADMA chunks that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9) +/* Controller needs to be reset after each request to stay stable */ +#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10) +/* Controller needs voltage and power writes to happen separately */ +#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11) +/* Controller provides an incorrect timeout value for transfers */ +#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) +/* Controller has an issue with buffer bits for small transfers */ +#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13) +/* Controller does not provide transfer-complete interrupt when not busy */ +#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14) +/* Controller has unreliable card detection */ +#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) +/* Controller reports inverted write-protect state */ +#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) +/* Controller has unusable command queue engine */ +#define SDHCI_QUIRK_BROKEN_CQE (1<<17) +/* Controller does not like fast PIO transfers */ +#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) +/* Controller does not have a LED */ +#define SDHCI_QUIRK_NO_LED (1<<19) +/* Controller has to be forced to use block size of 2048 bytes */ +#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) +/* Controller cannot do multi-block transfers */ +#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21) +/* Controller can only handle 1-bit data transfers */ +#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22) +/* Controller needs 10ms delay between applying power and clock */ +#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) +/* Controller uses SDCLK instead of TMCLK for data timeouts */ +#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) +/* Controller reports wrong base clock capability */ +#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25) +/* Controller cannot support End Attribute in NOP ADMA descriptor */ +#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26) +/* Controller is missing device caps. 
Use caps provided by host */ +#define SDHCI_QUIRK_MISSING_CAPS (1<<27) +/* Controller uses Auto CMD12 command to stop the transfer */ +#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) +/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ +#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) +/* Controller treats ADMA descriptors with length 0000h incorrectly */ +#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30) +/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */ +#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31) + + unsigned int quirks2; /* More deviations from spec. */ + +#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0) +#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1) +/* The system physically doesn't support 1.8v, even if the host does */ +#define SDHCI_QUIRK2_NO_1_8_V (1<<2) +#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3) +#define SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1<<4) +/* Controller has a non-standard host control register */ +#define SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1<<5) +/* Controller does not support HS200 */ +#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6) +/* Controller does not support DDR50 */ +#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) +/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ +#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) +/* Controller does not support 64-bit DMA */ +#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) +/* Need to clear the transfer mode register before sending a command */ +#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10) +/* Capability register bit-63 indicates HS400 support */ +#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11) +/* Force the use of the tuned clock */ +#define SDHCI_QUIRK2_TUNING_WORK_AROUND (1<<12) +/* Disable the block count for single block transactions */ +#define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13) +/* Controller is broken when using ACMD23 */ +#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) +/* Clock divider zero is broken in the controller */ +#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* Controller has CRC in 136 bit Command Response */ +#define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16) +/* + * Disable HW timeout if the requested timeout is more than the maximum + * obtainable timeout. + */ +#define SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1<<17) +/* + * A 32-bit block count may not support eMMC, where the upper bits of CMD23 + * are used for other purposes. Consequently we support a 16-bit block count + * by default. Otherwise, SDHCI_QUIRK2_USE_32BIT_BLK_CNT can be selected to + * use the 32-bit block count. + */ +#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18) + + int irq; /* Device IRQ */ + void __iomem *ioaddr; /* Mapped address */ + phys_addr_t mapbase; /* physical address base */ + char *bounce_buffer; /* For packing SDMA reads/writes */ + dma_addr_t bounce_addr; + unsigned int bounce_buffer_size; + + const struct sdhci_ops *ops; /* Low level hw interface */ + + /* Internal data */ + struct mmc_host *mmc; /* MMC structure */ + struct mmc_host_ops mmc_host_ops; /* MMC host ops */ + u64 dma_mask; /* custom DMA mask */ + +#if IS_ENABLED(CONFIG_LEDS_CLASS) + struct led_classdev led; /* LED control */ + char led_name[32]; +#endif + + spinlock_t lock; /* Mutex */ + + int flags; /* Host attributes */ +#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */ +#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ +#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req.
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+ phys_addr_t mapbase; /* physical address base */
+ char *bounce_buffer; /* For packing SDMA reads/writes */
+ dma_addr_t bounce_addr;
+ unsigned int bounce_buffer_size;
+
+ const struct sdhci_ops *ops; /* Low level hw interface */
+
+ /* Internal data */
+ struct mmc_host *mmc; /* MMC structure */
+ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
+ u64 dma_mask; /* custom DMA mask */
+
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+ struct led_classdev led; /* LED control */
+ char led_name[32];
+#endif
+
+ spinlock_t lock; /* Mutex */
+
+ int flags; /* Host attributes */
+#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
+#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
+#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
+#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
+#define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */
+#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */
+#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
+#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
+#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
+#define SDHCI_SIGNALING_330 (1<<14) /* Host is capable of 3.3V signaling */
+#define SDHCI_SIGNALING_180 (1<<15) /* Host is capable of 1.8V signaling */
+#define SDHCI_SIGNALING_120 (1<<16) /* Host is capable of 1.2V signaling */
+
+ unsigned int version; /* SDHCI spec. version */
+
+ unsigned int max_clk; /* Max possible freq (Hz) */
+ unsigned int timeout_clk; /* Timeout freq (kHz) */
+ unsigned int clk_mul; /* Clock Multiplier value */
+
+ unsigned int clock; /* Current clock (Hz) */
+ u8 pwr; /* Current voltage */
+ u8 drv_type; /* Current UHS-I driver type */
+ bool reinit_uhs; /* Force UHS-related re-initialization */
+
+ bool runtime_suspended; /* Host is runtime suspended */
+ bool bus_on; /* Bus power prevents runtime suspend */
+ bool preset_enabled; /* Preset is enabled */
+ bool pending_reset; /* Cmd/data reset is pending */
+ bool irq_wake_enabled; /* IRQ wakeup is enabled */
+ bool v4_mode; /* Host Version 4 Enable */
+ bool use_external_dma; /* Host selects to use external DMA */
+ bool always_defer_done; /* Always defer to complete requests */
+
+ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
+ struct mmc_command *cmd; /* Current command */
+ struct mmc_command *data_cmd; /* Current data command */
+ struct mmc_command *deferred_cmd; /* Deferred command */
+ struct mmc_data *data; /* Current data request */
+ unsigned int data_early:1; /* Data finished before cmd */
+
+ struct sg_mapping_iter sg_miter; /* SG state for PIO */
+ unsigned int blocks; /* remaining PIO blocks */
+
+ int sg_count; /* Mapped sg entries */
+ int max_adma; /* Max. length in ADMA descriptor */
+
+ void *adma_table; /* ADMA descriptor table */
+ void *align_buffer; /* Bounce buffer */
+
+ size_t adma_table_sz; /* ADMA descriptor table size */
+ size_t align_buffer_sz; /* Bounce buffer size */
+
+ dma_addr_t adma_addr; /* Mapped ADMA descr. table */
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+ unsigned int desc_sz; /* ADMA current descriptor size */
+ unsigned int alloc_desc_sz; /* ADMA descr.
max size host supports */ + + struct workqueue_struct *complete_wq; /* Request completion wq */ + struct work_struct complete_work; /* Request completion work */ + + struct timer_list timer; /* Timer for timeouts */ + struct timer_list data_timer; /* Timer for data timeouts */ + +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; +#endif + + u32 caps; /* CAPABILITY_0 */ + u32 caps1; /* CAPABILITY_1 */ + bool read_caps; /* Capability flags have been read */ + + bool sdhci_core_to_disable_vqmmc; /* sdhci core can disable vqmmc */ + unsigned int ocr_avail_sdio; /* OCR bit masks */ + unsigned int ocr_avail_sd; + unsigned int ocr_avail_mmc; + u32 ocr_mask; /* available voltages */ + + unsigned timing; /* Current timing */ + + u32 thread_isr; + + /* cached registers */ + u32 ier; + + bool cqe_on; /* CQE is operating */ + u32 cqe_ier; /* CQE interrupt mask */ + u32 cqe_err_ier; /* CQE error interrupt mask */ + + wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ + unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */ + + unsigned int tuning_count; /* Timer count for re-tuning */ + unsigned int tuning_mode; /* Re-tuning mode supported by host */ + unsigned int tuning_err; /* Error code for re-tuning */ +#define SDHCI_TUNING_MODE_1 0 +#define SDHCI_TUNING_MODE_2 1 +#define SDHCI_TUNING_MODE_3 2 + /* Delay (ms) between tuning commands */ + int tuning_delay; + int tuning_loop_count; + + /* Host SDMA buffer boundary. */ + u32 sdma_boundary; + + /* Host ADMA table count */ + u32 adma_table_cnt; + + u64 data_timeout; + + unsigned long private[] ____cacheline_aligned; +}; + +struct sdhci_ops { +#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS + u32 (*read_l)(struct sdhci_host *host, int reg); + u16 (*read_w)(struct sdhci_host *host, int reg); + u8 (*read_b)(struct sdhci_host *host, int reg); + void (*write_l)(struct sdhci_host *host, u32 val, int reg); + void (*write_w)(struct sdhci_host *host, u16 val, int reg); + void (*write_b)(struct sdhci_host *host, u8 val, int reg); +#endif + + void (*set_clock)(struct sdhci_host *host, unsigned int clock); + void (*set_power)(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); + + u32 (*irq)(struct sdhci_host *host, u32 intmask); + + int (*set_dma_mask)(struct sdhci_host *host); + int (*enable_dma)(struct sdhci_host *host); + unsigned int (*get_max_clock)(struct sdhci_host *host); + unsigned int (*get_min_clock)(struct sdhci_host *host); + /* get_timeout_clock should return clk rate in unit of Hz */ + unsigned int (*get_timeout_clock)(struct sdhci_host *host); + unsigned int (*get_max_timeout_count)(struct sdhci_host *host); + void (*set_timeout)(struct sdhci_host *host, + struct mmc_command *cmd); + void (*set_bus_width)(struct sdhci_host *host, int width); + void (*platform_send_init_74_clocks)(struct sdhci_host *host, + u8 power_mode); + unsigned int (*get_ro)(struct sdhci_host *host); + void (*reset)(struct sdhci_host *host, u8 mask); + int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); + void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); + void (*hw_reset)(struct sdhci_host *host); + void (*adma_workaround)(struct sdhci_host *host, u32 intmask); + void (*card_event)(struct sdhci_host *host); + void (*voltage_switch)(struct sdhci_host *host); + void (*adma_write_desc)(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd); + void (*copy_to_bounce_buffer)(struct sdhci_host *host, + struct 
mmc_data *data, + unsigned int length); + void (*request_done)(struct sdhci_host *host, + struct mmc_request *mrq); + void (*dump_vendor_regs)(struct sdhci_host *host); +}; + +#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS + +static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) +{ + if (unlikely(host->ops->write_l)) + host->ops->write_l(host, val, reg); + else + writel(val, host->ioaddr + reg); +} + +static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ + if (unlikely(host->ops->write_w)) + host->ops->write_w(host, val, reg); + else + writew(val, host->ioaddr + reg); +} + +static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) +{ + if (unlikely(host->ops->write_b)) + host->ops->write_b(host, val, reg); + else + writeb(val, host->ioaddr + reg); +} + +static inline u32 sdhci_readl(struct sdhci_host *host, int reg) +{ + if (unlikely(host->ops->read_l)) + return host->ops->read_l(host, reg); + else + return readl(host->ioaddr + reg); +} + +static inline u16 sdhci_readw(struct sdhci_host *host, int reg) +{ + if (unlikely(host->ops->read_w)) + return host->ops->read_w(host, reg); + else + return readw(host->ioaddr + reg); +} + +static inline u8 sdhci_readb(struct sdhci_host *host, int reg) +{ + if (unlikely(host->ops->read_b)) + return host->ops->read_b(host, reg); + else + return readb(host->ioaddr + reg); +} + +#else + +static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) +{ + writel(val, host->ioaddr + reg); +} + +static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ + writew(val, host->ioaddr + reg); +} + +static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) +{ + writeb(val, host->ioaddr + reg); +} + +static inline u32 sdhci_readl(struct sdhci_host *host, int reg) +{ + return readl(host->ioaddr + reg); +} + +static inline u16 sdhci_readw(struct sdhci_host *host, int reg) +{ + return readw(host->ioaddr + reg); +} + +static inline u8 sdhci_readb(struct sdhci_host *host, int reg) +{ + return readb(host->ioaddr + reg); +} + +#endif /* CONFIG_MMC_SDHCI_IO_ACCESSORS */ + +struct sdhci_host *sdhci_alloc_host(struct device *dev, size_t priv_size); +void sdhci_free_host(struct sdhci_host *host); + +static inline void *sdhci_priv(struct sdhci_host *host) +{ + return host->private; +} + +void sdhci_card_detect(struct sdhci_host *host); +void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, + const u32 *caps, const u32 *caps1); +int sdhci_setup_host(struct sdhci_host *host); +void sdhci_cleanup_host(struct sdhci_host *host); +int __sdhci_add_host(struct sdhci_host *host); +int sdhci_add_host(struct sdhci_host *host); +void sdhci_remove_host(struct sdhci_host *host, int dead); + +static inline void sdhci_read_caps(struct sdhci_host *host) +{ + __sdhci_read_caps(host, NULL, NULL, NULL); +} + +u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, + unsigned int *actual_clock); +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); +void sdhci_enable_clk(struct sdhci_host *host, u16 clk); +void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); +void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, + unsigned char mode, + unsigned short vdd); +void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); +void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq); +int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq); +void sdhci_set_bus_width(struct 
sdhci_host *host, int width); +void sdhci_reset(struct sdhci_host *host, u8 mask); +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); +int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); +void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); +int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios); +void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable); +void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd); + +#ifdef CONFIG_PM +int sdhci_suspend_host(struct sdhci_host *host); +int sdhci_resume_host(struct sdhci_host *host); +int sdhci_runtime_suspend_host(struct sdhci_host *host); +int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset); +#endif + +void sdhci_cqe_enable(struct mmc_host *mmc); +void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery); +bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, + int *data_error); + +void sdhci_dumpregs(struct sdhci_host *host); +void sdhci_enable_v4_mode(struct sdhci_host *host); + +void sdhci_start_tuning(struct sdhci_host *host); +void sdhci_end_tuning(struct sdhci_host *host); +void sdhci_reset_tuning(struct sdhci_host *host); +void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_switch_external_dma(struct sdhci_host *host, bool en); +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); + +#endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c new file mode 100644 index 000000000..eb52e0c5a --- /dev/null +++ b/drivers/mmc/host/sdhci_am654.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sdhci_am654.c - SDHCI driver for TI's AM654 SOCs + * + * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cqhci.h" +#include "sdhci-cqhci.h" +#include "sdhci-pltfm.h" + +/* CTL_CFG Registers */ +#define CTL_CFG_2 0x14 +#define CTL_CFG_3 0x18 + +#define SLOTTYPE_MASK GENMASK(31, 30) +#define SLOTTYPE_EMBEDDED BIT(30) +#define TUNINGFORSDR50_MASK BIT(13) + +/* PHY Registers */ +#define PHY_CTRL1 0x100 +#define PHY_CTRL2 0x104 +#define PHY_CTRL3 0x108 +#define PHY_CTRL4 0x10C +#define PHY_CTRL5 0x110 +#define PHY_CTRL6 0x114 +#define PHY_STAT1 0x130 +#define PHY_STAT2 0x134 + +#define IOMUX_ENABLE_SHIFT 31 +#define IOMUX_ENABLE_MASK BIT(IOMUX_ENABLE_SHIFT) +#define OTAPDLYENA_SHIFT 20 +#define OTAPDLYENA_MASK BIT(OTAPDLYENA_SHIFT) +#define OTAPDLYSEL_SHIFT 12 +#define OTAPDLYSEL_MASK GENMASK(15, 12) +#define STRBSEL_SHIFT 24 +#define STRBSEL_4BIT_MASK GENMASK(27, 24) +#define STRBSEL_8BIT_MASK GENMASK(31, 24) +#define SEL50_SHIFT 8 +#define SEL50_MASK BIT(SEL50_SHIFT) +#define SEL100_SHIFT 9 +#define SEL100_MASK BIT(SEL100_SHIFT) +#define FREQSEL_SHIFT 8 +#define FREQSEL_MASK GENMASK(10, 8) +#define CLKBUFSEL_SHIFT 0 +#define CLKBUFSEL_MASK GENMASK(2, 0) +#define DLL_TRIM_ICP_SHIFT 4 +#define DLL_TRIM_ICP_MASK GENMASK(7, 4) +#define DR_TY_SHIFT 20 +#define DR_TY_MASK GENMASK(22, 20) +#define ENDLL_SHIFT 1 +#define ENDLL_MASK BIT(ENDLL_SHIFT) +#define DLLRDY_SHIFT 0 +#define DLLRDY_MASK BIT(DLLRDY_SHIFT) +#define PDB_SHIFT 0 +#define PDB_MASK BIT(PDB_SHIFT) +#define CALDONE_SHIFT 1 +#define CALDONE_MASK BIT(CALDONE_SHIFT) +#define 
RETRIM_SHIFT 17 +#define RETRIM_MASK BIT(RETRIM_SHIFT) +#define SELDLYTXCLK_SHIFT 17 +#define SELDLYTXCLK_MASK BIT(SELDLYTXCLK_SHIFT) +#define SELDLYRXCLK_SHIFT 16 +#define SELDLYRXCLK_MASK BIT(SELDLYRXCLK_SHIFT) +#define ITAPDLYSEL_SHIFT 0 +#define ITAPDLYSEL_MASK GENMASK(4, 0) +#define ITAPDLYENA_SHIFT 8 +#define ITAPDLYENA_MASK BIT(ITAPDLYENA_SHIFT) +#define ITAPCHGWIN_SHIFT 9 +#define ITAPCHGWIN_MASK BIT(ITAPCHGWIN_SHIFT) + +#define DRIVER_STRENGTH_50_OHM 0x0 +#define DRIVER_STRENGTH_33_OHM 0x1 +#define DRIVER_STRENGTH_66_OHM 0x2 +#define DRIVER_STRENGTH_100_OHM 0x3 +#define DRIVER_STRENGTH_40_OHM 0x4 + +#define CLOCK_TOO_SLOW_HZ 50000000 + +/* Command Queue Host Controller Interface Base address */ +#define SDHCI_AM654_CQE_BASE_ADDR 0x200 + +static struct regmap_config sdhci_am654_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .fast_io = true, +}; + +struct timing_data { + const char *otap_binding; + const char *itap_binding; + u32 capability; +}; + +static const struct timing_data td[] = { + [MMC_TIMING_LEGACY] = {"ti,otap-del-sel-legacy", + "ti,itap-del-sel-legacy", + 0}, + [MMC_TIMING_MMC_HS] = {"ti,otap-del-sel-mmc-hs", + "ti,itap-del-sel-mmc-hs", + MMC_CAP_MMC_HIGHSPEED}, + [MMC_TIMING_SD_HS] = {"ti,otap-del-sel-sd-hs", + "ti,itap-del-sel-sd-hs", + MMC_CAP_SD_HIGHSPEED}, + [MMC_TIMING_UHS_SDR12] = {"ti,otap-del-sel-sdr12", + "ti,itap-del-sel-sdr12", + MMC_CAP_UHS_SDR12}, + [MMC_TIMING_UHS_SDR25] = {"ti,otap-del-sel-sdr25", + "ti,itap-del-sel-sdr25", + MMC_CAP_UHS_SDR25}, + [MMC_TIMING_UHS_SDR50] = {"ti,otap-del-sel-sdr50", + NULL, + MMC_CAP_UHS_SDR50}, + [MMC_TIMING_UHS_SDR104] = {"ti,otap-del-sel-sdr104", + NULL, + MMC_CAP_UHS_SDR104}, + [MMC_TIMING_UHS_DDR50] = {"ti,otap-del-sel-ddr50", + NULL, + MMC_CAP_UHS_DDR50}, + [MMC_TIMING_MMC_DDR52] = {"ti,otap-del-sel-ddr52", + "ti,itap-del-sel-ddr52", + MMC_CAP_DDR}, + [MMC_TIMING_MMC_HS200] = {"ti,otap-del-sel-hs200", + NULL, + MMC_CAP2_HS200}, + [MMC_TIMING_MMC_HS400] = {"ti,otap-del-sel-hs400", + NULL, + MMC_CAP2_HS400}, +}; + +struct sdhci_am654_data { + struct regmap *base; + bool legacy_otapdly; + int otap_del_sel[ARRAY_SIZE(td)]; + int itap_del_sel[ARRAY_SIZE(td)]; + int clkbuf_sel; + int trm_icp; + int drv_strength; + int strb_sel; + u32 flags; + u32 quirks; + +#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) +}; + +struct sdhci_am654_driver_data { + const struct sdhci_pltfm_data *pdata; + u32 flags; +#define IOMUX_PRESENT (1 << 0) +#define FREQSEL_2_BIT (1 << 1) +#define STRBSEL_4_BIT (1 << 2) +#define DLL_PRESENT (1 << 3) +#define DLL_CALIB (1 << 4) +}; + +static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + int sel50, sel100, freqsel; + u32 mask, val; + int ret; + + /* Disable delay chain mode */ + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, + SELDLYTXCLK_MASK | SELDLYRXCLK_MASK, 0); + + if (sdhci_am654->flags & FREQSEL_2_BIT) { + switch (clock) { + case 200000000: + sel50 = 0; + sel100 = 0; + break; + case 100000000: + sel50 = 0; + sel100 = 1; + break; + default: + sel50 = 1; + sel100 = 0; + } + + /* Configure PHY DLL frequency */ + mask = SEL50_MASK | SEL100_MASK; + val = (sel50 << SEL50_SHIFT) | (sel100 << SEL100_SHIFT); + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val); + + } else { + switch (clock) { + case 200000000: + freqsel = 0x0; + break; + default: + freqsel = 0x4; + } + + regmap_update_bits(sdhci_am654->base, 
PHY_CTRL5, FREQSEL_MASK, + freqsel << FREQSEL_SHIFT); + } + /* Configure DLL TRIM */ + mask = DLL_TRIM_ICP_MASK; + val = sdhci_am654->trm_icp << DLL_TRIM_ICP_SHIFT; + + /* Configure DLL driver strength */ + mask |= DR_TY_MASK; + val |= sdhci_am654->drv_strength << DR_TY_SHIFT; + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, mask, val); + + /* Enable DLL */ + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, + 0x1 << ENDLL_SHIFT); + /* + * Poll for DLL ready. Use a one second timeout. + * Works in all experiments done so far + */ + ret = regmap_read_poll_timeout(sdhci_am654->base, PHY_STAT1, val, + val & DLLRDY_MASK, 1000, 1000000); + if (ret) { + dev_err(mmc_dev(host->mmc), "DLL failed to relock\n"); + return; + } +} + +static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654, + u32 itapdly) +{ + /* Set ITAPCHGWIN before writing to ITAPDLY */ + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, + 1 << ITAPCHGWIN_SHIFT); + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK, + itapdly << ITAPDLYSEL_SHIFT); + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0); +} + +static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654, + unsigned char timing) +{ + u32 mask, val; + + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0); + + val = 1 << SELDLYTXCLK_SHIFT | 1 << SELDLYRXCLK_SHIFT; + mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK; + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val); + + sdhci_am654_write_itapdly(sdhci_am654, + sdhci_am654->itap_del_sel[timing]); +} + +static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + unsigned char timing = host->mmc->ios.timing; + u32 otap_del_sel; + u32 otap_del_ena; + u32 mask, val; + + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0); + + sdhci_set_clock(host, clock); + + /* Setup DLL Output TAP delay */ + if (sdhci_am654->legacy_otapdly) + otap_del_sel = sdhci_am654->otap_del_sel[0]; + else + otap_del_sel = sdhci_am654->otap_del_sel[timing]; + + otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 
1 : 0; + + mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; + val = (otap_del_ena << OTAPDLYENA_SHIFT) | + (otap_del_sel << OTAPDLYSEL_SHIFT); + + /* Write to STRBSEL for HS400 speed mode */ + if (timing == MMC_TIMING_MMC_HS400) { + if (sdhci_am654->flags & STRBSEL_4_BIT) + mask |= STRBSEL_4BIT_MASK; + else + mask |= STRBSEL_8BIT_MASK; + + val |= sdhci_am654->strb_sel << STRBSEL_SHIFT; + } + + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); + + if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) + sdhci_am654_setup_dll(host, clock); + else + sdhci_am654_setup_delay_chain(sdhci_am654, timing); + + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK, + sdhci_am654->clkbuf_sel); +} + +static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, + unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + unsigned char timing = host->mmc->ios.timing; + u32 otap_del_sel; + u32 mask, val; + + /* Setup DLL Output TAP delay */ + if (sdhci_am654->legacy_otapdly) + otap_del_sel = sdhci_am654->otap_del_sel[0]; + else + otap_del_sel = sdhci_am654->otap_del_sel[timing]; + + mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; + val = (0x1 << OTAPDLYENA_SHIFT) | + (otap_del_sel << OTAPDLYSEL_SHIFT); + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); + + regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK, + sdhci_am654->clkbuf_sel); + + sdhci_set_clock(host, clock); +} + +static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg) +{ + writeb(val, host->ioaddr + reg); + usleep_range(1000, 10000); + return readb(host->ioaddr + reg); +} + +#define MAX_POWER_ON_TIMEOUT 1500000 /* us */ +static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) +{ + unsigned char timing = host->mmc->ios.timing; + u8 pwr; + int ret; + + if (reg == SDHCI_HOST_CONTROL) { + switch (timing) { + /* + * According to the data manual, HISPD bit + * should not be set in these speed modes. + */ + case MMC_TIMING_SD_HS: + case MMC_TIMING_MMC_HS: + val &= ~SDHCI_CTRL_HISPD; + } + } + + writeb(val, host->ioaddr + reg); + if (reg == SDHCI_POWER_CONTROL && (val & SDHCI_POWER_ON)) { + /* + * Power on will not happen until the card detect debounce + * timer expires. Wait at least 1.5 seconds for the power on + * bit to be set + */ + ret = read_poll_timeout(sdhci_am654_write_power_on, pwr, + pwr & SDHCI_POWER_ON, 0, + MAX_POWER_ON_TIMEOUT, false, host, val, + reg); + if (ret) + dev_info(mmc_dev(host->mmc), "Power on failed\n"); + } +} + +static void sdhci_am654_reset(struct sdhci_host *host, u8 mask) +{ + u8 ctrl; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + + sdhci_and_cqhci_reset(host, mask); + + if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) { + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN; + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } +} + +static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = sdhci_execute_tuning(mmc, opcode); + + if (err) + return err; + /* + * Tuning data remains in the buffer after tuning. 
+ * Do a command and data reset to get rid of it.
+ */
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+#define ITAP_MAX 32
+static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
+ u32 itap;
+
+ /* Enable ITAPDLY */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+ 1 << ITAPDLYENA_SHIFT);
+
+ for (itap = 0; itap < ITAP_MAX; itap++) {
+ sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
+ if (cur_val && !prev_val)
+ pass_window = itap;
+
+ if (!cur_val)
+ fail_len++;
+
+ prev_val = cur_val;
+ }
+ /*
+ * Having determined the length of the failing window and the start of
+ * the passing window, calculate the length of the passing window and
+ * set the final value halfway through it, treating the tap range as
+ * circular. For example, with ITAP_MAX = 32, fail_len = 12 and
+ * pass_window = 26, pass_len is 20 and the final value wraps around to
+ * (26 + 10) % 32 = 4.
+ */
+ pass_len = ITAP_MAX - fail_len;
+ itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
+ sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ return 0;
+}
+
+static struct sdhci_ops sdhci_am654_ops = {
+ .platform_execute_tuning = sdhci_am654_platform_execute_tuning,
+ .get_max_clock =
sdhci_pltfm_clk_get_max_clock, + .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .set_bus_width = sdhci_set_bus_width, + .set_power = sdhci_set_power_and_bus_voltage, + .set_clock = sdhci_j721e_4bit_set_clock, + .write_b = sdhci_am654_write_b, + .irq = sdhci_am654_cqhci_irq, + .reset = sdhci_am654_reset, +}; + +static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { + .ops = &sdhci_j721e_4bit_ops, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + +static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { + .pdata = &sdhci_j721e_4bit_pdata, + .flags = IOMUX_PRESENT, +}; + +static const struct soc_device_attribute sdhci_am654_devices[] = { + { .family = "AM65X", + .revision = "SR1.0", + .data = &sdhci_am654_sr1_drvdata + }, + {/* sentinel */} +}; + +static void sdhci_am654_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static const struct cqhci_host_ops sdhci_am654_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_am654_dumpregs, +}; + +static int sdhci_am654_cqe_add_host(struct sdhci_host *host) +{ + struct cqhci_host *cq_host; + int ret; + + cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host), + GFP_KERNEL); + if (!cq_host) + return -ENOMEM; + + cq_host->mmio = host->ioaddr + SDHCI_AM654_CQE_BASE_ADDR; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + cq_host->ops = &sdhci_am654_cqhci_ops; + + host->mmc->caps2 |= MMC_CAP2_CQE; + + ret = cqhci_init(cq_host, host->mmc, 1); + + return ret; +} + +static int sdhci_am654_get_otap_delay(struct sdhci_host *host, + struct sdhci_am654_data *sdhci_am654) +{ + struct device *dev = mmc_dev(host->mmc); + int i; + int ret; + + ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding, + &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]); + if (ret) { + /* + * ti,otap-del-sel-legacy is mandatory, look for old binding + * if not found. 
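+ * As an illustration (the delay values below are made up), a new-style
+ * board node carries one property per timing, e.g.:
+ *
+ *	ti,otap-del-sel-legacy = <0x2>;
+ *	ti,otap-del-sel-mmc-hs = <0x2>;
+ *	ti,otap-del-sel-ddr52 = <0x5>;
+ *
+ * whereas an old-style node only has a single "ti,otap-del-sel" value.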
+ */ + ret = device_property_read_u32(dev, "ti,otap-del-sel", + &sdhci_am654->otap_del_sel[0]); + if (ret) { + dev_err(dev, "Couldn't find otap-del-sel\n"); + + return ret; + } + + dev_info(dev, "Using legacy binding ti,otap-del-sel\n"); + sdhci_am654->legacy_otapdly = true; + + return 0; + } + + for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) { + + ret = device_property_read_u32(dev, td[i].otap_binding, + &sdhci_am654->otap_del_sel[i]); + if (ret) { + dev_dbg(dev, "Couldn't find %s\n", + td[i].otap_binding); + /* + * Remove the corresponding capability + * if an otap-del-sel value is not found + */ + if (i <= MMC_TIMING_MMC_DDR52) + host->mmc->caps &= ~td[i].capability; + else + host->mmc->caps2 &= ~td[i].capability; + } + + if (td[i].itap_binding) + device_property_read_u32(dev, td[i].itap_binding, + &sdhci_am654->itap_del_sel[i]); + } + + return 0; +} + +static int sdhci_am654_init(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + u32 ctl_cfg_2 = 0; + u32 mask; + u32 val; + int ret; + + /* Reset OTAP to default value */ + mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, 0x0); + + if (sdhci_am654->flags & DLL_CALIB) { + regmap_read(sdhci_am654->base, PHY_STAT1, &val); + if (~val & CALDONE_MASK) { + /* Calibrate IO lines */ + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, + PDB_MASK, PDB_MASK); + ret = regmap_read_poll_timeout(sdhci_am654->base, + PHY_STAT1, val, + val & CALDONE_MASK, + 1, 20); + if (ret) + return ret; + } + } + + /* Enable pins by setting IO mux to 0 */ + if (sdhci_am654->flags & IOMUX_PRESENT) + regmap_update_bits(sdhci_am654->base, PHY_CTRL1, + IOMUX_ENABLE_MASK, 0); + + /* Set slot type based on SD or eMMC */ + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + ctl_cfg_2 = SLOTTYPE_EMBEDDED; + + regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK, + ctl_cfg_2); + + /* Enable tuning for SDR50 */ + regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK, + TUNINGFORSDR50_MASK); + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + ret = sdhci_am654_cqe_add_host(host); + if (ret) + goto err_cleanup_host; + + ret = sdhci_am654_get_otap_delay(host, sdhci_am654); + if (ret) + goto err_cleanup_host; + + ret = __sdhci_add_host(host); + if (ret) + goto err_cleanup_host; + + return 0; + +err_cleanup_host: + sdhci_cleanup_host(host); + return ret; +} + +static int sdhci_am654_get_of_property(struct platform_device *pdev, + struct sdhci_am654_data *sdhci_am654) +{ + struct device *dev = &pdev->dev; + int drv_strength; + int ret; + + if (sdhci_am654->flags & DLL_PRESENT) { + ret = device_property_read_u32(dev, "ti,trm-icp", + &sdhci_am654->trm_icp); + if (ret) + return ret; + + ret = device_property_read_u32(dev, "ti,driver-strength-ohm", + &drv_strength); + if (ret) + return ret; + + switch (drv_strength) { + case 50: + sdhci_am654->drv_strength = DRIVER_STRENGTH_50_OHM; + break; + case 33: + sdhci_am654->drv_strength = DRIVER_STRENGTH_33_OHM; + break; + case 66: + sdhci_am654->drv_strength = DRIVER_STRENGTH_66_OHM; + break; + case 100: + sdhci_am654->drv_strength = DRIVER_STRENGTH_100_OHM; + break; + case 40: + sdhci_am654->drv_strength = DRIVER_STRENGTH_40_OHM; + break; + default: + dev_err(dev, "Invalid driver strength\n"); + return -EINVAL; + } + } + + device_property_read_u32(dev, "ti,strobe-sel", &sdhci_am654->strb_sel); + device_property_read_u32(dev, "ti,clkbuf-sel", + 
&sdhci_am654->clkbuf_sel); + + if (device_property_read_bool(dev, "ti,fails-without-test-cd")) + sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST; + + sdhci_get_of_property(pdev); + + return 0; +} + +static const struct of_device_id sdhci_am654_of_match[] = { + { + .compatible = "ti,am654-sdhci-5.1", + .data = &sdhci_am654_drvdata, + }, + { + .compatible = "ti,j721e-sdhci-8bit", + .data = &sdhci_j721e_8bit_drvdata, + }, + { + .compatible = "ti,j721e-sdhci-4bit", + .data = &sdhci_j721e_4bit_drvdata, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sdhci_am654_of_match); + +static int sdhci_am654_probe(struct platform_device *pdev) +{ + const struct sdhci_am654_driver_data *drvdata; + const struct soc_device_attribute *soc; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_am654_data *sdhci_am654; + const struct of_device_id *match; + struct sdhci_host *host; + struct clk *clk_xin; + struct device *dev = &pdev->dev; + void __iomem *base; + int ret; + + match = of_match_node(sdhci_am654_of_match, pdev->dev.of_node); + drvdata = match->data; + + /* Update drvdata based on SoC revision */ + soc = soc_device_match(sdhci_am654_devices); + if (soc && soc->data) + drvdata = soc->data; + + host = sdhci_pltfm_init(pdev, drvdata->pdata, sizeof(*sdhci_am654)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + sdhci_am654->flags = drvdata->flags; + + clk_xin = devm_clk_get(dev, "clk_xin"); + if (IS_ERR(clk_xin)) { + dev_err(dev, "clk_xin clock not found.\n"); + ret = PTR_ERR(clk_xin); + goto err_pltfm_free; + } + + pltfm_host->clk = clk_xin; + + /* Clocks are enabled using pm_runtime */ + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + goto pm_runtime_disable; + } + + base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto pm_runtime_put; + } + + sdhci_am654->base = devm_regmap_init_mmio(dev, base, + &sdhci_am654_regmap_config); + if (IS_ERR(sdhci_am654->base)) { + dev_err(dev, "Failed to initialize regmap\n"); + ret = PTR_ERR(sdhci_am654->base); + goto pm_runtime_put; + } + + ret = sdhci_am654_get_of_property(pdev, sdhci_am654); + if (ret) + goto pm_runtime_put; + + ret = mmc_of_parse(host->mmc); + if (ret) { + dev_err(dev, "parsing dt failed (%d)\n", ret); + goto pm_runtime_put; + } + + host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; + + ret = sdhci_am654_init(host); + if (ret) + goto pm_runtime_put; + + return 0; + +pm_runtime_put: + pm_runtime_put_sync(dev); +pm_runtime_disable: + pm_runtime_disable(dev); +err_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_am654_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + int ret; + + sdhci_remove_host(host, true); + ret = pm_runtime_put_sync(&pdev->dev); + if (ret < 0) + return ret; + + pm_runtime_disable(&pdev->dev); + sdhci_pltfm_free(pdev); + + return 0; +} + +static struct platform_driver sdhci_am654_driver = { + .driver = { + .name = "sdhci-am654", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = sdhci_am654_of_match, + }, + .probe = sdhci_am654_probe, + .remove = sdhci_am654_remove, +}; + +module_platform_driver(sdhci_am654_driver); + +MODULE_DESCRIPTION("Driver for SDHCI Controller on TI's AM654 devices"); +MODULE_AUTHOR("Faiz Abbas "); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c new file mode 
100644 index 000000000..7ede74bf3 --- /dev/null +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/drivers/mmc/host/sdhci_f_sdh30.c + * + * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd + * Vincent Yang + * Copyright (C) 2015 Linaro Ltd Andy Green + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "sdhci-pltfm.h" +#include "sdhci_f_sdh30.h" + +struct f_sdhost_priv { + struct clk *clk_iface; + struct clk *clk; + u32 vendor_hs200; + struct device *dev; + bool enable_cmd_dat_delay; +}; + +static void *sdhci_f_sdhost_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + +static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) +{ + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); + u32 ctrl = 0; + + usleep_range(2500, 3000); + ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2); + ctrl |= F_SDH30_CRES_O_DN; + sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); + ctrl |= F_SDH30_MSEL_O_1_8; + sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); + + ctrl &= ~F_SDH30_CRES_O_DN; + sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2); + usleep_range(2500, 3000); + + if (priv->vendor_hs200) { + dev_info(priv->dev, "%s: setting hs200\n", __func__); + ctrl = sdhci_readl(host, F_SDH30_ESD_CONTROL); + ctrl |= priv->vendor_hs200; + sdhci_writel(host, ctrl, F_SDH30_ESD_CONTROL); + } + + ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING); + ctrl |= F_SDH30_CMD_CHK_DIS; + sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING); +} + +static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) +{ + return F_SDH30_MIN_CLOCK; +} + +static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) +{ + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); + u32 ctl; + + if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) + sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL); + + sdhci_reset(host, mask); + + if (priv->enable_cmd_dat_delay) { + ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL); + ctl |= F_SDH30_CMD_DAT_DELAY; + sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL); + } +} + +static const struct sdhci_ops sdhci_f_sdh30_ops = { + .voltage_switch = sdhci_f_sdh30_soft_voltage_switch, + .get_min_clock = sdhci_f_sdh30_get_min_clock, + .reset = sdhci_f_sdh30_reset, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = { + .ops = &sdhci_f_sdh30_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_INVERTED_WRITE_PROTECT, + .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE + | SDHCI_QUIRK2_TUNING_WORK_AROUND, +}; + +static int sdhci_f_sdh30_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct device *dev = &pdev->dev; + int ctrl = 0, ret = 0; + struct f_sdhost_priv *priv; + struct sdhci_pltfm_host *pltfm_host; + u32 reg = 0; + + host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data, + sizeof(struct f_sdhost_priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->dev = dev; + + priv->enable_cmd_dat_delay = device_property_read_bool(dev, + "fujitsu,cmd-dat-delay-select"); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + + if (dev_of_node(dev)) { + sdhci_get_of_property(pdev); + + priv->clk_iface = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(priv->clk_iface)) { + ret = 
PTR_ERR(priv->clk_iface); + goto err; + } + + ret = clk_prepare_enable(priv->clk_iface); + if (ret) + goto err; + + priv->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto err_clk; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + goto err_clk; + } + + /* init vendor specific regs */ + ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG); + ctrl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 | + F_SDH30_AHB_INCR_4; + ctrl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN); + sdhci_writew(host, ctrl, F_SDH30_AHB_CONFIG); + + reg = sdhci_readl(host, F_SDH30_ESD_CONTROL); + sdhci_writel(host, reg & ~F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL); + msleep(20); + sdhci_writel(host, reg | F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL); + + reg = sdhci_readl(host, SDHCI_CAPABILITIES); + if (reg & SDHCI_CAN_DO_8BIT) + priv->vendor_hs200 = F_SDH30_EMMC_HS200; + + if (!(reg & SDHCI_TIMEOUT_CLK_MASK)) + host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; + + ret = sdhci_add_host(host); + if (ret) + goto err_add_host; + + return 0; + +err_add_host: + clk_disable_unprepare(priv->clk); +err_clk: + clk_disable_unprepare(priv->clk_iface); +err: + sdhci_pltfm_free(pdev); + + return ret; +} + +static int sdhci_f_sdh30_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); + struct clk *clk_iface = priv->clk_iface; + struct clk *clk = priv->clk; + + sdhci_pltfm_unregister(pdev); + + clk_disable_unprepare(clk_iface); + clk_disable_unprepare(clk); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id f_sdh30_dt_ids[] = { + { .compatible = "fujitsu,mb86s70-sdhci-3.0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id f_sdh30_acpi_ids[] = { + { "SCX0002" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids); +#endif + +static struct platform_driver sdhci_f_sdh30_driver = { + .driver = { + .name = "f_sdh30", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(f_sdh30_dt_ids), + .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids), + .pm = &sdhci_pltfm_pmops, + }, + .probe = sdhci_f_sdh30_probe, + .remove = sdhci_f_sdh30_remove, +}; + +module_platform_driver(sdhci_f_sdh30_driver); + +MODULE_DESCRIPTION("F_SDH30 SD Card Controller driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("FUJITSU SEMICONDUCTOR LTD."); +MODULE_ALIAS("platform:f_sdh30"); diff --git a/drivers/mmc/host/sdhci_f_sdh30.h b/drivers/mmc/host/sdhci_f_sdh30.h new file mode 100644 index 000000000..fc1ad28f7 --- /dev/null +++ b/drivers/mmc/host/sdhci_f_sdh30.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd + * Vincent Yang + * Copyright (C) 2015 Linaro Ltd Andy Green + * Copyright (C) 2019 Socionext Inc. 
+ *
+ */
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED BIT(6)
+#define F_SDH30_BUSLOCK_DMA BIT(5)
+#define F_SDH30_BUSLOCK_EN BIT(4)
+#define F_SDH30_SIN BIT(3)
+#define F_SDH30_AHB_INCR_16 BIT(2)
+#define F_SDH30_AHB_INCR_8 BIT(1)
+#define F_SDH30_AHB_INCR_4 BIT(0)
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS BIT(16)
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN BIT(19)
+#define F_SDH30_MSEL_O_1_8 BIT(18)
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST BIT(1)
+#define F_SDH30_CMD_DAT_DELAY BIT(9)
+#define F_SDH30_EMMC_HS200 BIT(24)
+
+#define F_SDH30_MIN_CLOCK 400000
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
new file mode 100644
index 000000000..76a8cd3a1
--- /dev/null
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be
+ * found on some Ricoh RL5c476 II cardbus bridges
+ *
+ * Copyright (C) 2006 - 2008 Sascha Sommer
+ */
+
+/*
+#define DEBUG
+#define VERBOSE_DEBUG
+*/
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+#define DRIVER_NAME "sdricoh_cs"
+
+static unsigned int switchlocked;
+
+/* i/o region */
+#define SDRICOH_PCI_REGION 0
+#define SDRICOH_PCI_REGION_SIZE 0x1000
+
+/* registers */
+#define R104_VERSION 0x104
+#define R200_CMD 0x200
+#define R204_CMD_ARG 0x204
+#define R208_DATAIO 0x208
+#define R20C_RESP 0x20c
+#define R21C_STATUS 0x21c
+#define R2E0_INIT 0x2e0
+#define R2E4_STATUS_RESP 0x2e4
+#define R2F0_RESET 0x2f0
+#define R224_MODE 0x224
+#define R226_BLOCKSIZE 0x226
+#define R228_POWER 0x228
+#define R230_DATA 0x230
+
+/* flags for the R21C_STATUS register */
+#define STATUS_CMD_FINISHED 0x00000001
+#define STATUS_TRANSFER_FINISHED 0x00000004
+#define STATUS_CARD_INSERTED 0x00000020
+#define STATUS_CARD_LOCKED 0x00000080
+#define STATUS_CMD_TIMEOUT 0x00400000
+#define STATUS_READY_TO_READ 0x01000000
+#define STATUS_READY_TO_WRITE 0x02000000
+#define STATUS_BUSY 0x40000000
+
+/* timeouts */
+#define SDRICOH_CMD_TIMEOUT_US 1000000
+#define SDRICOH_DATA_TIMEOUT_US 1000000
+
+/* list of supported pcmcia devices */
+static const struct pcmcia_device_id pcmcia_ids[] = {
+ /* vendor and device strings followed by their crc32 hashes */
+ PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
+ 0xc3901202),
+ PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay Controller", 0xd9f522ed,
+ 0xace80909),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids);
+
+/* mmc privdata */
+struct sdricoh_host {
+ struct device *dev;
+ struct mmc_host *mmc; /* MMC structure */
+ unsigned char __iomem *iobase;
+ struct pci_dev *pci_dev;
+ int app_cmd;
+};
+
+/***************** register i/o helper functions *****************************/
+
+static inline unsigned int sdricoh_readl(struct sdricoh_host *host,
+ unsigned int reg)
+{
+ unsigned int value = readl(host->iobase + reg);
+ dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value);
+ return value;
+}
+
+static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg,
+ unsigned int value)
+{
+ writel(value, host->iobase + reg);
+ dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value);
+}
+
+static inline unsigned int sdricoh_readw(struct sdricoh_host *host,
+ unsigned int reg)
+{
+ unsigned int value = readw(host->iobase + reg);
+ dev_vdbg(host->dev, "rw %x 0x%x\n", reg, value);
+
return value; +} + +static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg, + unsigned short value) +{ + writew(value, host->iobase + reg); + dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value); +} + +static inline unsigned int sdricoh_readb(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readb(host->iobase + reg); + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); + return value; +} + +static bool sdricoh_status_ok(struct sdricoh_host *host, unsigned int status, + unsigned int wanted) +{ + sdricoh_writel(host, R2E4_STATUS_RESP, status); + return status & wanted; +} + +static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted) +{ + int ret; + unsigned int status = 0; + struct device *dev = host->dev; + + ret = read_poll_timeout(sdricoh_readl, status, + sdricoh_status_ok(host, status, wanted), + 32, SDRICOH_DATA_TIMEOUT_US, false, + host, R21C_STATUS); + if (ret) { + dev_err(dev, "query_status: timeout waiting for %x\n", wanted); + return -ETIMEDOUT; + } + + /* do not do this check in the loop as some commands fail otherwise */ + if (status & 0x7F0000) { + dev_err(dev, "waiting for status bit %x failed\n", wanted); + return -EINVAL; + } + return 0; + +} + +static int sdricoh_mmc_cmd(struct sdricoh_host *host, struct mmc_command *cmd) +{ + unsigned int status, timeout_us; + int ret; + unsigned char opcode = cmd->opcode; + + /* reset status reg? */ + sdricoh_writel(host, R21C_STATUS, 0x18); + + /* MMC_APP_CMDs need some special handling */ + if (host->app_cmd) { + opcode |= 64; + host->app_cmd = 0; + } else if (opcode == MMC_APP_CMD) + host->app_cmd = 1; + + /* fill parameters */ + sdricoh_writel(host, R204_CMD_ARG, cmd->arg); + sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode); + + /* wait for command completion */ + if (!opcode) + return 0; + + timeout_us = cmd->busy_timeout ? cmd->busy_timeout * 1000 : + SDRICOH_CMD_TIMEOUT_US; + + ret = read_poll_timeout(sdricoh_readl, status, + sdricoh_status_ok(host, status, STATUS_CMD_FINISHED), + 32, timeout_us, false, + host, R21C_STATUS); + + /* + * Don't check for timeout status in the loop, as it's not always reset + * correctly. + */ + if (ret || status & STATUS_CMD_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int sdricoh_reset(struct sdricoh_host *host) +{ + dev_dbg(host->dev, "reset\n"); + sdricoh_writel(host, R2F0_RESET, 0x10001); + sdricoh_writel(host, R2E0_INIT, 0x10000); + if (sdricoh_readl(host, R2E0_INIT) != 0x10000) + return -EIO; + sdricoh_writel(host, R2E0_INIT, 0x10007); + + sdricoh_writel(host, R224_MODE, 0x2000000); + sdricoh_writel(host, R228_POWER, 0xe0); + + + /* status register ? 
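+ * (the same 0x18 write recurs before every command in sdricoh_mmc_cmd()
+ * and around block transfers, apparently to acknowledge stale status
+ * bits)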
*/ + sdricoh_writel(host, R21C_STATUS, 0x18); + + return 0; +} + +static int sdricoh_blockio(struct sdricoh_host *host, int read, + u8 *buf, int len) +{ + int size; + u32 data = 0; + /* wait until the data is available */ + if (read) { + if (sdricoh_query_status(host, STATUS_READY_TO_READ)) + return -ETIMEDOUT; + sdricoh_writel(host, R21C_STATUS, 0x18); + /* read data */ + while (len) { + data = sdricoh_readl(host, R230_DATA); + size = min(len, 4); + len -= size; + while (size) { + *buf = data & 0xFF; + buf++; + data >>= 8; + size--; + } + } + } else { + if (sdricoh_query_status(host, STATUS_READY_TO_WRITE)) + return -ETIMEDOUT; + sdricoh_writel(host, R21C_STATUS, 0x18); + /* write data */ + while (len) { + size = min(len, 4); + len -= size; + while (size) { + data >>= 8; + data |= (u32)*buf << 24; + buf++; + size--; + } + sdricoh_writel(host, R230_DATA, data); + } + } + + return 0; +} + +static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdricoh_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = cmd->data; + struct device *dev = host->dev; + int i; + + dev_dbg(dev, "=============================\n"); + dev_dbg(dev, "sdricoh_request opcode=%i\n", cmd->opcode); + + sdricoh_writel(host, R21C_STATUS, 0x18); + + /* read/write commands seem to require this */ + if (data) { + sdricoh_writew(host, R226_BLOCKSIZE, data->blksz); + sdricoh_writel(host, R208_DATAIO, 0); + } + + cmd->error = sdricoh_mmc_cmd(host, cmd); + + /* read response buffer */ + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* CRC is stripped so we need to do some shifting. */ + for (i = 0; i < 4; i++) { + cmd->resp[i] = + sdricoh_readl(host, + R20C_RESP + (3 - i) * 4) << 8; + if (i != 3) + cmd->resp[i] |= + sdricoh_readb(host, R20C_RESP + + (3 - i) * 4 - 1); + } + } else + cmd->resp[0] = sdricoh_readl(host, R20C_RESP); + } + + /* transfer data */ + if (data && cmd->error == 0) { + dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i " + "sg length %i\n", data->blksz, data->blocks, + data->sg_len, data->sg->length); + + /* enter data reading mode */ + sdricoh_writel(host, R21C_STATUS, 0x837f031e); + for (i = 0; i < data->blocks; i++) { + size_t len = data->blksz; + u8 *buf; + struct page *page; + int result; + page = sg_page(data->sg); + + buf = kmap(page) + data->sg->offset + (len * i); + result = + sdricoh_blockio(host, + data->flags & MMC_DATA_READ, buf, len); + kunmap(page); + flush_dcache_page(page); + if (result) { + dev_err(dev, "sdricoh_request: cmd %i " + "block transfer failed\n", cmd->opcode); + cmd->error = result; + break; + } else + data->bytes_xfered += len; + } + + sdricoh_writel(host, R208_DATAIO, 1); + + if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED)) { + dev_err(dev, "sdricoh_request: transfer end error\n"); + cmd->error = -EINVAL; + } + } + /* FIXME check busy flag */ + + mmc_request_done(mmc, mrq); + dev_dbg(dev, "=============================\n"); +} + +static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdricoh_host *host = mmc_priv(mmc); + dev_dbg(host->dev, "set_ios\n"); + + if (ios->power_mode == MMC_POWER_ON) { + sdricoh_writel(host, R228_POWER, 0xc0e0); + + if (ios->bus_width == MMC_BUS_WIDTH_4) { + sdricoh_writel(host, R224_MODE, 0x2000300); + sdricoh_writel(host, R228_POWER, 0x40e0); + } else { + sdricoh_writel(host, R224_MODE, 0x2000340); + } + + } else if (ios->power_mode == MMC_POWER_UP) { + sdricoh_writel(host, R224_MODE, 0x2000320); + 
sdricoh_writel(host, R228_POWER, 0xe0); + } +} + +static int sdricoh_get_ro(struct mmc_host *mmc) +{ + struct sdricoh_host *host = mmc_priv(mmc); + unsigned int status; + + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + + /* some notebooks seem to have the locked flag switched */ + if (switchlocked) + return !(status & STATUS_CARD_LOCKED); + + return (status & STATUS_CARD_LOCKED); +} + +static const struct mmc_host_ops sdricoh_ops = { + .request = sdricoh_request, + .set_ios = sdricoh_set_ios, + .get_ro = sdricoh_get_ro, +}; + +/* initialize the control and register it to the mmc framework */ +static int sdricoh_init_mmc(struct pci_dev *pci_dev, + struct pcmcia_device *pcmcia_dev) +{ + int result; + void __iomem *iobase; + struct mmc_host *mmc; + struct sdricoh_host *host; + struct device *dev = &pcmcia_dev->dev; + /* map iomem */ + if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) != + SDRICOH_PCI_REGION_SIZE) { + dev_dbg(dev, "unexpected pci resource len\n"); + return -ENODEV; + } + iobase = + pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE); + if (!iobase) { + dev_err(dev, "unable to map iobase\n"); + return -ENODEV; + } + /* check version? */ + if (readl(iobase + R104_VERSION) != 0x4000) { + dev_dbg(dev, "no supported mmc controller found\n"); + result = -ENODEV; + goto unmap_io; + } + /* allocate privdata */ + mmc = pcmcia_dev->priv = + mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev); + if (!mmc) { + dev_err(dev, "mmc_alloc_host failed\n"); + result = -ENOMEM; + goto unmap_io; + } + host = mmc_priv(mmc); + + host->iobase = iobase; + host->dev = dev; + host->pci_dev = pci_dev; + + mmc->ops = &sdricoh_ops; + + /* FIXME: frequency and voltage handling is done by the controller + */ + mmc->f_min = 450000; + mmc->f_max = 24000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->max_seg_size = 1024 * 512; + mmc->max_blk_size = 512; + + /* reset the controller */ + if (sdricoh_reset(host)) { + dev_dbg(dev, "could not reset\n"); + result = -EIO; + goto free_host; + } + + result = mmc_add_host(mmc); + + if (!result) { + dev_dbg(dev, "mmc host registered\n"); + return 0; + } +free_host: + mmc_free_host(mmc); +unmap_io: + pci_iounmap(pci_dev, iobase); + return result; +} + +/* search for supported mmc controllers */ +static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev) +{ + struct pci_dev *pci_dev = NULL; + + dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" + " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); + + /* search pci cardbus bridge that contains the mmc controller */ + /* the io region is already claimed by yenta_socket... 
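+ * Note that pci_get_device() takes a reference on each device it
+ * returns; the reference kept on a matched bridge is dropped again
+ * via pci_dev_put() in sdricoh_pcmcia_detach().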
*/
+ while ((pci_dev =
+ pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
+ pci_dev))) {
+ /* try to init the device */
+ if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) {
+ dev_info(&pcmcia_dev->dev, "MMC controller found\n");
+ return 0;
+ }
+ }
+ dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n");
+ return -ENODEV;
+}
+
+static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
+{
+ struct mmc_host *mmc = link->priv;
+
+ dev_dbg(&link->dev, "detach\n");
+
+ /* remove mmc host */
+ if (mmc) {
+ struct sdricoh_host *host = mmc_priv(mmc);
+ mmc_remove_host(mmc);
+ pci_iounmap(host->pci_dev, host->iobase);
+ pci_dev_put(host->pci_dev);
+ mmc_free_host(mmc);
+ }
+ pcmcia_disable_device(link);
+}
+
+#ifdef CONFIG_PM
+static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "suspend\n");
+ return 0;
+}
+
+static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
+{
+ struct mmc_host *mmc = link->priv;
+ dev_dbg(&link->dev, "resume\n");
+ sdricoh_reset(mmc_priv(mmc));
+ return 0;
+}
+#else
+#define sdricoh_pcmcia_suspend NULL
+#define sdricoh_pcmcia_resume NULL
+#endif
+
+static struct pcmcia_driver sdricoh_driver = {
+ .name = DRIVER_NAME,
+ .probe = sdricoh_pcmcia_probe,
+ .remove = sdricoh_pcmcia_detach,
+ .id_table = pcmcia_ids,
+ .suspend = sdricoh_pcmcia_suspend,
+ .resume = sdricoh_pcmcia_resume,
+};
+module_pcmcia_driver(sdricoh_driver);
+
+module_param(switchlocked, uint, 0444);
+
+MODULE_AUTHOR("Sascha Sommer ");
+MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM_DESC(switchlocked, "Switch the card's locked status. "
+ "Use this when unlocked cards are shown read-only (default 0)");
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000..5dec9e239
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,1576 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MMCIF eMMC driver.
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Yusuke Goda
+ */
+
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing, each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In the case of the command stage, this immediately
+ * returns control to the caller, leaving all further processing to run
+ * asynchronously. All further request processing is performed by the bottom
+ * halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing
+ * is finished. In case processing has to continue, i.e., if data has to be
+ * read from or written to the card, or if a stop command has to be sent, the
+ * next top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This puts the driver state machine back into
+ * the bottom-half waiting state.
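+ *
+ * As a rough sketch (not literal code; see the wait_for states defined
+ * below), a multi-block read would typically progress as:
+ *
+ *	request()          -> MMCIF_WAIT_FOR_CMD    (command top half)
+ *	IRQ: response done -> MMCIF_WAIT_FOR_MREAD  (data top half)
+ *	IRQ thread: PIO or DMA transfer of the blocks
+ *	IRQ: transfer done -> MMCIF_WAIT_FOR_READ_END
+ *	stop top half      -> MMCIF_WAIT_FOR_STOP   (CMD12, if required)
+ *	IRQ: stop response -> mmc_request_done()
+ *
+ * with the timeout work rearmed by each top half along the way.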
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sh_mmcif.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "sh_mmcif"
+
+/* CE_CMD_SET */
+#define CMD_MASK 0x3f000000
+#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
+#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
+#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
+#define CMD_SET_RBSY (1 << 21) /* R1b */
+#define CMD_SET_CCSEN (1 << 20)
+#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
+#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
+#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
+#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
+#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
+#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
+#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
+#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check */
+#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check */
+#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check */
+#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check */
+#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
+#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
+#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
+#define CMD_SET_CCSH (1 << 5)
+#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
+#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
+#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
+#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
+
+/* CE_CMD_CTRL */
+#define CMD_CTRL_BREAK (1 << 0)
+
+/* CE_BLOCK_SET */
+#define BLOCK_SIZE_MASK 0x0000ffff
+
+/* CE_INT */
+#define INT_CCSDE (1 << 29)
+#define INT_CMD12DRE (1 << 26)
+#define INT_CMD12RBE (1 << 25)
+#define INT_CMD12CRE (1 << 24)
+#define INT_DTRANE (1 << 23)
+#define INT_BUFRE (1 << 22)
+#define INT_BUFWEN (1 << 21)
+#define INT_BUFREN (1 << 20)
+#define INT_CCSRCV (1 << 19)
+#define INT_RBSYE (1 << 17)
+#define INT_CRSPE (1 << 16)
+#define INT_CMDVIO (1 << 15)
+#define INT_BUFVIO (1 << 14)
+#define INT_WDATERR (1 << 11)
+#define INT_RDATERR (1 << 10)
+#define INT_RIDXERR (1 << 9)
+#define INT_RSPERR (1 << 8)
+#define INT_CCSTO (1 << 5)
+#define INT_CRCSTO (1 << 4)
+#define INT_WDATTO (1 << 3)
+#define INT_RDATTO (1 << 2)
+#define INT_RBSYTO (1 << 1)
+#define INT_RSPTO (1 << 0)
+#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
+ INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
+ INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
+ INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+
+#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
+ INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
+ INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+
+#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
+
+/* CE_INT_MASK */
+#define MASK_ALL 0x00000000
+#define MASK_MCCSDE (1 << 29)
+#define MASK_MCMD12DRE (1 << 26)
+#define MASK_MCMD12RBE (1 << 25)
+#define MASK_MCMD12CRE (1 << 24)
+#define MASK_MDTRANE (1 << 23)
+#define MASK_MBUFRE (1 << 22)
+#define MASK_MBUFWEN (1 << 21)
+#define MASK_MBUFREN (1 << 20)
+#define MASK_MCCSRCV (1 << 19)
+#define MASK_MRBSYE (1 << 17)
+#define MASK_MCRSPE (1 << 16)
+#define MASK_MCMDVIO (1 << 15)
+#define MASK_MBUFVIO (1 << 14)
+#define MASK_MWDATERR (1 << 11)
+#define MASK_MRDATERR (1 << 10)
+#define MASK_MRIDXERR (1 << 9)
+#define MASK_MRSPERR (1 << 8)
+#define
MASK_MCCSTO (1 << 5) +#define MASK_MCRCSTO (1 << 4) +#define MASK_MWDATTO (1 << 3) +#define MASK_MRDATTO (1 << 2) +#define MASK_MRBSYTO (1 << 1) +#define MASK_MRSPTO (1 << 0) + +#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ + MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ + MASK_MCRCSTO | MASK_MWDATTO | \ + MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) + +#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \ + MASK_MBUFREN | MASK_MBUFWEN | \ + MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \ + MASK_MCMD12RBE | MASK_MCMD12CRE) + +/* CE_HOST_STS1 */ +#define STS1_CMDSEQ (1 << 31) + +/* CE_HOST_STS2 */ +#define STS2_CRCSTE (1 << 31) +#define STS2_CRC16E (1 << 30) +#define STS2_AC12CRCE (1 << 29) +#define STS2_RSPCRC7E (1 << 28) +#define STS2_CRCSTEBE (1 << 27) +#define STS2_RDATEBE (1 << 26) +#define STS2_AC12REBE (1 << 25) +#define STS2_RSPEBE (1 << 24) +#define STS2_AC12IDXE (1 << 23) +#define STS2_RSPIDXE (1 << 22) +#define STS2_CCSTO (1 << 15) +#define STS2_RDATTO (1 << 14) +#define STS2_DATBSYTO (1 << 13) +#define STS2_CRCSTTO (1 << 12) +#define STS2_AC12BSYTO (1 << 11) +#define STS2_RSPBSYTO (1 << 10) +#define STS2_AC12RSPTO (1 << 9) +#define STS2_RSPTO (1 << 8) +#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \ + STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE) +#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \ + STS2_DATBSYTO | STS2_CRCSTTO | \ + STS2_AC12BSYTO | STS2_RSPBSYTO | \ + STS2_AC12RSPTO | STS2_RSPTO) + +#define CLKDEV_EMMC_DATA 52000000 /* 52 MHz */ +#define CLKDEV_MMC_DATA 20000000 /* 20 MHz */ +#define CLKDEV_INIT 400000 /* 400 kHz */ + +enum sh_mmcif_state { + STATE_IDLE, + STATE_REQUEST, + STATE_IOS, + STATE_TIMEOUT, +}; + +enum sh_mmcif_wait_for { + MMCIF_WAIT_FOR_REQUEST, + MMCIF_WAIT_FOR_CMD, + MMCIF_WAIT_FOR_MREAD, + MMCIF_WAIT_FOR_MWRITE, + MMCIF_WAIT_FOR_READ, + MMCIF_WAIT_FOR_WRITE, + MMCIF_WAIT_FOR_READ_END, + MMCIF_WAIT_FOR_WRITE_END, + MMCIF_WAIT_FOR_STOP, +}; + +/* + * difference for each SoC + */ +struct sh_mmcif_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + struct platform_device *pd; + struct clk *clk; + int bus_width; + unsigned char timing; + bool sd_error; + bool dying; + long timeout; + void __iomem *addr; + u32 *pio_ptr; + spinlock_t lock; /* protect sh_mmcif_host::state */ + enum sh_mmcif_state state; + enum sh_mmcif_wait_for wait_for; + struct delayed_work timeout_work; + size_t blocksize; + int sg_idx; + int sg_blkidx; + bool power; + bool ccs_enable; /* Command Completion Signal support */ + bool clk_ctrl2_enable; + struct mutex thread_lock; + u32 clkdiv_map; /* see CE_CLK_CTRL::CLKDIV */ + + /* DMA support */ + struct dma_chan *chan_rx; + struct dma_chan *chan_tx; + struct completion dma_complete; + bool dma_active; +}; + +static const struct of_device_id sh_mmcif_of_match[] = { + { .compatible = "renesas,sh-mmcif" }, + { } +}; +MODULE_DEVICE_TABLE(of, sh_mmcif_of_match); + +#define sh_mmcif_host_to_dev(host) (&host->pd->dev) + +static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(val | readl(host->addr + reg), host->addr + reg); +} + +static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(~val & readl(host->addr + reg), host->addr + reg); +} + +static void sh_mmcif_dma_complete(void *arg) +{ + struct sh_mmcif_host *host = arg; + struct mmc_request *mrq = host->mrq; + struct device *dev = sh_mmcif_host_to_dev(host); + + dev_dbg(dev, "Command completed\n"); + + if (WARN(!mrq || !mrq->data, "%s: 
NULL data in DMA completion!\n", + dev_name(dev))) + return; + + complete(&host->dma_complete); +} + +static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) +{ + struct mmc_data *data = host->mrq->data; + struct scatterlist *sg = data->sg; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_rx; + struct device *dev = sh_mmcif_host_to_dev(host); + dma_cookie_t cookie = -EINVAL; + int ret; + + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, + DMA_FROM_DEVICE); + if (ret > 0) { + host->dma_active = true; + desc = dmaengine_prep_slave_sg(chan, sg, ret, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (desc) { + desc->callback = sh_mmcif_dma_complete; + desc->callback_param = host; + cookie = dmaengine_submit(desc); + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); + dma_async_issue_pending(chan); + } + dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n", + __func__, data->sg_len, ret, cookie); + + if (!desc) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = -EIO; + host->chan_rx = NULL; + host->dma_active = false; + dma_release_channel(chan); + /* Free the Tx channel too */ + chan = host->chan_tx; + if (chan) { + host->chan_tx = NULL; + dma_release_channel(chan); + } + dev_warn(dev, + "DMA failed: %d, falling back to PIO\n", ret); + sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); + } + + dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, + desc, cookie, data->sg_len); +} + +static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) +{ + struct mmc_data *data = host->mrq->data; + struct scatterlist *sg = data->sg; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_tx; + struct device *dev = sh_mmcif_host_to_dev(host); + dma_cookie_t cookie = -EINVAL; + int ret; + + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, + DMA_TO_DEVICE); + if (ret > 0) { + host->dma_active = true; + desc = dmaengine_prep_slave_sg(chan, sg, ret, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (desc) { + desc->callback = sh_mmcif_dma_complete; + desc->callback_param = host; + cookie = dmaengine_submit(desc); + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); + dma_async_issue_pending(chan); + } + dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n", + __func__, data->sg_len, ret, cookie); + + if (!desc) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = -EIO; + host->chan_tx = NULL; + host->dma_active = false; + dma_release_channel(chan); + /* Free the Rx channel too */ + chan = host->chan_rx; + if (chan) { + host->chan_rx = NULL; + dma_release_channel(chan); + } + dev_warn(dev, + "DMA failed: %d, falling back to PIO\n", ret); + sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); + } + + dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__, + desc, cookie); +} + +static struct dma_chan * +sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + if (slave_id <= 0) + return NULL; + + return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id); +} + +static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host, + struct dma_chan *chan, + enum dma_transfer_direction direction) +{ + struct resource *res; + struct dma_slave_config cfg = { 0, }; + + res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); + cfg.direction = direction; + + if (direction == DMA_DEV_TO_MEM) { + cfg.src_addr = res->start + 
MMCIF_CE_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } else { + cfg.dst_addr = res->start + MMCIF_CE_DATA; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } + + return dmaengine_slave_config(chan, &cfg); +} + +static void sh_mmcif_request_dma(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + host->dma_active = false; + + /* We can only either use DMA for both Tx and Rx or not use it at all */ + if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) { + struct sh_mmcif_plat_data *pdata = dev->platform_data; + + host->chan_tx = sh_mmcif_request_dma_pdata(host, + pdata->slave_id_tx); + host->chan_rx = sh_mmcif_request_dma_pdata(host, + pdata->slave_id_rx); + } else { + host->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->chan_tx)) + host->chan_tx = NULL; + host->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->chan_rx)) + host->chan_rx = NULL; + } + dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, + host->chan_rx); + + if (!host->chan_tx || !host->chan_rx || + sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) || + sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM)) + goto error; + + return; + +error: + if (host->chan_tx) + dma_release_channel(host->chan_tx); + if (host->chan_rx) + dma_release_channel(host->chan_rx); + host->chan_tx = host->chan_rx = NULL; +} + +static void sh_mmcif_release_dma(struct sh_mmcif_host *host) +{ + sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); + /* Descriptors are freed automatically */ + if (host->chan_tx) { + struct dma_chan *chan = host->chan_tx; + host->chan_tx = NULL; + dma_release_channel(chan); + } + if (host->chan_rx) { + struct dma_chan *chan = host->chan_rx; + host->chan_rx = NULL; + dma_release_channel(chan); + } + + host->dma_active = false; +} + +static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct sh_mmcif_plat_data *p = dev->platform_data; + bool sup_pclk = p ? p->sup_pclk : false; + unsigned int current_clk = clk_get_rate(host->clk); + unsigned int clkdiv; + + sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); + sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); + + if (!clk) + return; + + if (host->clkdiv_map) { + unsigned int freq, best_freq, myclk, div, diff_min, diff; + int i; + + clkdiv = 0; + diff_min = ~0; + best_freq = 0; + for (i = 31; i >= 0; i--) { + if (!((1 << i) & host->clkdiv_map)) + continue; + + /* + * clk = parent_freq / div + * -> parent_freq = clk x div + */ + + div = 1 << (i + 1); + freq = clk_round_rate(host->clk, clk * div); + myclk = freq / div; + diff = (myclk > clk) ? 
myclk - clk : clk - myclk; + + if (diff <= diff_min) { + best_freq = freq; + clkdiv = i; + diff_min = diff; + } + } + + dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n", + (best_freq / (1 << (clkdiv + 1))), clk, + best_freq, clkdiv); + + clk_set_rate(host->clk, best_freq); + clkdiv = clkdiv << 16; + } else if (sup_pclk && clk == current_clk) { + clkdiv = CLK_SUP_PCLK; + } else { + clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16; + } + + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv); + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); +} + +static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) +{ + u32 tmp; + + tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); + + sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); + sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); + if (host->ccs_enable) + tmp |= SCCSTO_29; + if (host->clk_ctrl2_enable) + sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000); + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | + SRSPTO_256 | SRBSYTO_29 | SRWDTO_29); + /* byte swap on */ + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); +} + +static int sh_mmcif_error_manage(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + u32 state1, state2; + int ret, timeout; + + host->sd_error = false; + + state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); + state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); + dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1); + dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2); + + if (state1 & STS1_CMDSEQ) { + sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); + sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); + for (timeout = 10000; timeout; timeout--) { + if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) + & STS1_CMDSEQ)) + break; + mdelay(1); + } + if (!timeout) { + dev_err(dev, + "Forced end of command sequence timeout err\n"); + return -EIO; + } + sh_mmcif_sync_reset(host); + dev_dbg(dev, "Forced end of command sequence\n"); + return -EIO; + } + + if (state2 & STS2_CRC_ERR) { + dev_err(dev, " CRC error: state %u, wait %u\n", + host->state, host->wait_for); + ret = -EIO; + } else if (state2 & STS2_TIMEOUT_ERR) { + dev_err(dev, " Timeout: state %u, wait %u\n", + host->state, host->wait_for); + ret = -ETIMEDOUT; + } else { + dev_dbg(dev, " End/Index error: state %u, wait %u\n", + host->state, host->wait_for); + ret = -EIO; + } + return ret; +} + +static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) +{ + struct mmc_data *data = host->mrq->data; + + host->sg_blkidx += host->blocksize; + + /* data->sg->length must be a multiple of host->blocksize? 
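+	 * The BUG_ON below turns a violation of that assumption into a hard
+	 * failure: sg_blkidx only advances in blocksize steps, so it can
+	 * overshoot sg->length only for a misaligned scatterlist.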
*/ + BUG_ON(host->sg_blkidx > data->sg->length); + + if (host->sg_blkidx == data->sg->length) { + host->sg_blkidx = 0; + if (++host->sg_idx < data->sg_len) + host->pio_ptr = sg_virt(++data->sg); + } else { + host->pio_ptr = p; + } + + return host->sg_idx != data->sg_len; +} + +static void sh_mmcif_single_read(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & + BLOCK_SIZE_MASK) + 3; + + host->wait_for = MMCIF_WAIT_FOR_READ; + + /* buf read enable */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); +} + +static bool sh_mmcif_read_block(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct mmc_data *data = host->mrq->data; + u32 *p = sg_virt(data->sg); + int i; + + if (host->sd_error) { + data->error = sh_mmcif_error_manage(host); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); + return false; + } + + for (i = 0; i < host->blocksize / 4; i++) + *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); + + /* buffer read end */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); + host->wait_for = MMCIF_WAIT_FOR_READ_END; + + return true; +} + +static void sh_mmcif_multi_read(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!data->sg_len || !data->sg->length) + return; + + host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & + BLOCK_SIZE_MASK; + + host->wait_for = MMCIF_WAIT_FOR_MREAD; + host->sg_idx = 0; + host->sg_blkidx = 0; + host->pio_ptr = sg_virt(data->sg); + + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); +} + +static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct mmc_data *data = host->mrq->data; + u32 *p = host->pio_ptr; + int i; + + if (host->sd_error) { + data->error = sh_mmcif_error_manage(host); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); + return false; + } + + BUG_ON(!data->sg->length); + + for (i = 0; i < host->blocksize / 4; i++) + *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); + + if (!sh_mmcif_next_block(host, p)) + return false; + + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + + return true; +} + +static void sh_mmcif_single_write(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & + BLOCK_SIZE_MASK) + 3; + + host->wait_for = MMCIF_WAIT_FOR_WRITE; + + /* buf write enable */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); +} + +static bool sh_mmcif_write_block(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct mmc_data *data = host->mrq->data; + u32 *p = sg_virt(data->sg); + int i; + + if (host->sd_error) { + data->error = sh_mmcif_error_manage(host); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); + return false; + } + + for (i = 0; i < host->blocksize / 4; i++) + sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); + + /* buffer write end */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); + host->wait_for = MMCIF_WAIT_FOR_WRITE_END; + + return true; +} + +static void sh_mmcif_multi_write(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!data->sg_len || !data->sg->length) + return; + + host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & + BLOCK_SIZE_MASK; + + host->wait_for = MMCIF_WAIT_FOR_MWRITE; + host->sg_idx = 0; + host->sg_blkidx = 0; + host->pio_ptr = sg_virt(data->sg); 
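+
+	/* arm the buffer-write-enable IRQ: sh_mmcif_mwrite_block() will then
+	 * push one block per interrupt from the PIO cursor set up above */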
+ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); +} + +static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct mmc_data *data = host->mrq->data; + u32 *p = host->pio_ptr; + int i; + + if (host->sd_error) { + data->error = sh_mmcif_error_manage(host); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); + return false; + } + + BUG_ON(!data->sg->length); + + for (i = 0; i < host->blocksize / 4; i++) + sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); + + if (!sh_mmcif_next_block(host, p)) + return false; + + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + + return true; +} + +static void sh_mmcif_get_response(struct sh_mmcif_host *host, + struct mmc_command *cmd) +{ + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); + cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); + cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); + cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); + } else + cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); +} + +static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, + struct mmc_command *cmd) +{ + cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); +} + +static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct mmc_data *data = mrq->data; + struct mmc_command *cmd = mrq->cmd; + u32 opc = cmd->opcode; + u32 tmp = 0; + + /* Response Type check */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + tmp |= CMD_SET_RTYP_NO; + break; + case MMC_RSP_R1: + case MMC_RSP_R3: + tmp |= CMD_SET_RTYP_6B; + break; + case MMC_RSP_R1B: + tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B; + break; + case MMC_RSP_R2: + tmp |= CMD_SET_RTYP_17B; + break; + default: + dev_err(dev, "Unsupported response type.\n"); + break; + } + + /* WDAT / DATW */ + if (data) { + tmp |= CMD_SET_WDAT; + switch (host->bus_width) { + case MMC_BUS_WIDTH_1: + tmp |= CMD_SET_DATW_1; + break; + case MMC_BUS_WIDTH_4: + tmp |= CMD_SET_DATW_4; + break; + case MMC_BUS_WIDTH_8: + tmp |= CMD_SET_DATW_8; + break; + default: + dev_err(dev, "Unsupported bus width.\n"); + break; + } + switch (host->timing) { + case MMC_TIMING_MMC_DDR52: + /* + * MMC core will only set this timing, if the host + * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR + * capability. MMCIF implementations with this + * capability, e.g. sh73a0, will have to set it + * in their platform data. 
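+			 *
+			 * A minimal sketch of such platform data (the
+			 * variable name is illustrative only):
+			 *
+			 *	static struct sh_mmcif_plat_data mmcif_pdata = {
+			 *		.caps = MMC_CAP_8_BIT_DATA |
+			 *			MMC_CAP_1_8V_DDR,
+			 *	};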
+ */ + tmp |= CMD_SET_DARS; + break; + } + } + /* DWEN */ + if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) + tmp |= CMD_SET_DWEN; + /* CMLTE/CMD12EN */ + if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { + tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; + sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, + data->blocks << 16); + } + /* RIDXC[1:0] check bits */ + if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || + opc == MMC_SEND_CSD || opc == MMC_SEND_CID) + tmp |= CMD_SET_RIDXC_BITS; + /* RCRC7C[1:0] check bits */ + if (opc == MMC_SEND_OP_COND) + tmp |= CMD_SET_CRC7C_BITS; + /* RCRC7C[1:0] internal CRC7 */ + if (opc == MMC_ALL_SEND_CID || + opc == MMC_SEND_CSD || opc == MMC_SEND_CID) + tmp |= CMD_SET_CRC7C_INTERNAL; + + return (opc << 24) | tmp; +} + +static int sh_mmcif_data_trans(struct sh_mmcif_host *host, + struct mmc_request *mrq, u32 opc) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + + switch (opc) { + case MMC_READ_MULTIPLE_BLOCK: + sh_mmcif_multi_read(host, mrq); + return 0; + case MMC_WRITE_MULTIPLE_BLOCK: + sh_mmcif_multi_write(host, mrq); + return 0; + case MMC_WRITE_BLOCK: + sh_mmcif_single_write(host, mrq); + return 0; + case MMC_READ_SINGLE_BLOCK: + case MMC_SEND_EXT_CSD: + sh_mmcif_single_read(host, mrq); + return 0; + default: + dev_err(dev, "Unsupported CMD%d\n", opc); + return -EINVAL; + } +} + +static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_command *cmd = mrq->cmd; + u32 opc; + u32 mask = 0; + unsigned long flags; + + if (cmd->flags & MMC_RSP_BUSY) + mask = MASK_START_CMD | MASK_MRBSYE; + else + mask = MASK_START_CMD | MASK_MCRSPE; + + if (host->ccs_enable) + mask |= MASK_MCCSTO; + + if (mrq->data) { + sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); + sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, + mrq->data->blksz); + } + opc = sh_mmcif_set_cmd(host, mrq); + + if (host->ccs_enable) + sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); + else + sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); + /* set arg */ + sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); + /* set cmd */ + spin_lock_irqsave(&host->lock, flags); + sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); + + host->wait_for = MMCIF_WAIT_FOR_CMD; + schedule_delayed_work(&host->timeout_work, host->timeout); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + + switch (mrq->cmd->opcode) { + case MMC_READ_MULTIPLE_BLOCK: + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); + break; + case MMC_WRITE_MULTIPLE_BLOCK: + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); + break; + default: + dev_err(dev, "unsupported stop cmd\n"); + mrq->stop->error = sh_mmcif_error_manage(host); + return; + } + + host->wait_for = MMCIF_WAIT_FOR_STOP; +} + +static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sh_mmcif_host *host = mmc_priv(mmc); + struct device *dev = sh_mmcif_host_to_dev(host); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->state != STATE_IDLE) { + dev_dbg(dev, "%s() rejected, state %u\n", + __func__, host->state); + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EAGAIN; + mmc_request_done(mmc, mrq); + return; + } + + host->state = STATE_REQUEST; + spin_unlock_irqrestore(&host->lock, flags); + + 
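/* the controller is ours now: stash the request for the
+	 * bottom halves and kick off the command stage (first top half) */
+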
host->mrq = mrq; + + sh_mmcif_start_cmd(host, mrq); +} + +static void sh_mmcif_clk_setup(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + + if (host->mmc->f_max) { + unsigned int f_max, f_min = 0, f_min_old; + + f_max = host->mmc->f_max; + for (f_min_old = f_max; f_min_old > 2;) { + f_min = clk_round_rate(host->clk, f_min_old / 2); + if (f_min == f_min_old) + break; + f_min_old = f_min; + } + + /* + * This driver assumes this SoC is R-Car Gen2 or later + */ + host->clkdiv_map = 0x3ff; + + host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map)); + host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map)); + } else { + unsigned int clk = clk_get_rate(host->clk); + + host->mmc->f_max = clk / 2; + host->mmc->f_min = clk / 512; + } + + dev_dbg(dev, "clk max/min = %d/%d\n", + host->mmc->f_max, host->mmc->f_min); +} + +static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sh_mmcif_host *host = mmc_priv(mmc); + struct device *dev = sh_mmcif_host_to_dev(host); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->state != STATE_IDLE) { + dev_dbg(dev, "%s() rejected, state %u\n", + __func__, host->state); + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + host->state = STATE_IOS; + spin_unlock_irqrestore(&host->lock, flags); + + switch (ios->power_mode) { + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + if (!host->power) { + clk_prepare_enable(host->clk); + pm_runtime_get_sync(dev); + sh_mmcif_sync_reset(host); + sh_mmcif_request_dma(host); + host->power = true; + } + break; + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + if (host->power) { + sh_mmcif_clock_control(host, 0); + sh_mmcif_release_dma(host); + pm_runtime_put(dev); + clk_disable_unprepare(host->clk); + host->power = false; + } + break; + case MMC_POWER_ON: + sh_mmcif_clock_control(host, ios->clock); + break; + } + + host->timing = ios->timing; + host->bus_width = ios->bus_width; + host->state = STATE_IDLE; +} + +static const struct mmc_host_ops sh_mmcif_ops = { + .request = sh_mmcif_request, + .set_ios = sh_mmcif_set_ios, + .get_cd = mmc_gpio_get_cd, +}; + +static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) +{ + struct mmc_command *cmd = host->mrq->cmd; + struct mmc_data *data = host->mrq->data; + struct device *dev = sh_mmcif_host_to_dev(host); + long time; + + if (host->sd_error) { + switch (cmd->opcode) { + case MMC_ALL_SEND_CID: + case MMC_SELECT_CARD: + case MMC_APP_CMD: + cmd->error = -ETIMEDOUT; + break; + default: + cmd->error = sh_mmcif_error_manage(host); + break; + } + dev_dbg(dev, "CMD%d error %d\n", + cmd->opcode, cmd->error); + host->sd_error = false; + return false; + } + if (!(cmd->flags & MMC_RSP_PRESENT)) { + cmd->error = 0; + return false; + } + + sh_mmcif_get_response(host, cmd); + + if (!data) + return false; + + /* + * Completion can be signalled from DMA callback and error, so, have to + * reset here, before setting .dma_active + */ + init_completion(&host->dma_complete); + + if (data->flags & MMC_DATA_READ) { + if (host->chan_rx) + sh_mmcif_start_dma_rx(host); + } else { + if (host->chan_tx) + sh_mmcif_start_dma_tx(host); + } + + if (!host->dma_active) { + data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); + return !data->error; + } + + /* Running in the IRQ thread, can sleep */ + time = wait_for_completion_interruptible_timeout(&host->dma_complete, + host->timeout); + + if 
(data->flags & MMC_DATA_READ) + dma_unmap_sg(host->chan_rx->device->dev, + data->sg, data->sg_len, + DMA_FROM_DEVICE); + else + dma_unmap_sg(host->chan_tx->device->dev, + data->sg, data->sg_len, + DMA_TO_DEVICE); + + if (host->sd_error) { + dev_err(host->mmc->parent, + "Error IRQ while waiting for DMA completion!\n"); + /* Woken up by an error IRQ: abort DMA */ + data->error = sh_mmcif_error_manage(host); + } else if (!time) { + dev_err(host->mmc->parent, "DMA timeout!\n"); + data->error = -ETIMEDOUT; + } else if (time < 0) { + dev_err(host->mmc->parent, + "wait_for_completion_...() error %ld!\n", time); + data->error = time; + } + sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, + BUF_ACC_DMAREN | BUF_ACC_DMAWEN); + host->dma_active = false; + + if (data->error) { + data->bytes_xfered = 0; + /* Abort DMA */ + if (data->flags & MMC_DATA_READ) + dmaengine_terminate_all(host->chan_rx); + else + dmaengine_terminate_all(host->chan_tx); + } + + return false; +} + +static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) +{ + struct sh_mmcif_host *host = dev_id; + struct mmc_request *mrq; + struct device *dev = sh_mmcif_host_to_dev(host); + bool wait = false; + unsigned long flags; + int wait_work; + + spin_lock_irqsave(&host->lock, flags); + wait_work = host->wait_for; + spin_unlock_irqrestore(&host->lock, flags); + + cancel_delayed_work_sync(&host->timeout_work); + + mutex_lock(&host->thread_lock); + + mrq = host->mrq; + if (!mrq) { + dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n", + host->state, host->wait_for); + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; + } + + /* + * All handlers return true, if processing continues, and false, if the + * request has to be completed - successfully or not + */ + switch (wait_work) { + case MMCIF_WAIT_FOR_REQUEST: + /* We're too late, the timeout has already kicked in */ + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; + case MMCIF_WAIT_FOR_CMD: + /* Wait for data? */ + wait = sh_mmcif_end_cmd(host); + break; + case MMCIF_WAIT_FOR_MREAD: + /* Wait for more data? */ + wait = sh_mmcif_mread_block(host); + break; + case MMCIF_WAIT_FOR_READ: + /* Wait for data end? */ + wait = sh_mmcif_read_block(host); + break; + case MMCIF_WAIT_FOR_MWRITE: + /* Wait data to write? */ + wait = sh_mmcif_mwrite_block(host); + break; + case MMCIF_WAIT_FOR_WRITE: + /* Wait for data end? 
*/ + wait = sh_mmcif_write_block(host); + break; + case MMCIF_WAIT_FOR_STOP: + if (host->sd_error) { + mrq->stop->error = sh_mmcif_error_manage(host); + dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error); + break; + } + sh_mmcif_get_cmd12response(host, mrq->stop); + mrq->stop->error = 0; + break; + case MMCIF_WAIT_FOR_READ_END: + case MMCIF_WAIT_FOR_WRITE_END: + if (host->sd_error) { + mrq->data->error = sh_mmcif_error_manage(host); + dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error); + } + break; + default: + BUG(); + } + + if (wait) { + schedule_delayed_work(&host->timeout_work, host->timeout); + /* Wait for more data */ + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; + } + + if (host->wait_for != MMCIF_WAIT_FOR_STOP) { + struct mmc_data *data = mrq->data; + if (!mrq->cmd->error && data && !data->error) + data->bytes_xfered = + data->blocks * data->blksz; + + if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { + sh_mmcif_stop_cmd(host, mrq); + if (!mrq->stop->error) { + schedule_delayed_work(&host->timeout_work, host->timeout); + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; + } + } + } + + host->wait_for = MMCIF_WAIT_FOR_REQUEST; + host->state = STATE_IDLE; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); + + mutex_unlock(&host->thread_lock); + + return IRQ_HANDLED; +} + +static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) +{ + struct sh_mmcif_host *host = dev_id; + struct device *dev = sh_mmcif_host_to_dev(host); + u32 state, mask; + + state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); + mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); + if (host->ccs_enable) + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); + else + sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); + + if (state & ~MASK_CLEAN) + dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n", + state); + + if (state & INT_ERR_STS || state & ~INT_ALL) { + host->sd_error = true; + dev_dbg(dev, "int err state = 0x%08x\n", state); + } + if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { + if (!host->mrq) + dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state); + if (!host->dma_active) + return IRQ_WAKE_THREAD; + else if (host->sd_error) + sh_mmcif_dma_complete(host); + } else { + dev_dbg(dev, "Unexpected IRQ 0x%x\n", state); + } + + return IRQ_HANDLED; +} + +static void sh_mmcif_timeout_work(struct work_struct *work) +{ + struct delayed_work *d = to_delayed_work(work); + struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); + struct mmc_request *mrq = host->mrq; + struct device *dev = sh_mmcif_host_to_dev(host); + unsigned long flags; + + if (host->dying) + /* Don't run after mmc_remove_host() */ + return; + + spin_lock_irqsave(&host->lock, flags); + if (host->state == STATE_IDLE) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + dev_err(dev, "Timeout waiting for %u on CMD%u\n", + host->wait_for, mrq->cmd->opcode); + + host->state = STATE_TIMEOUT; + spin_unlock_irqrestore(&host->lock, flags); + + /* + * Handle races with cancel_delayed_work(), unless + * cancel_delayed_work_sync() is used + */ + switch (host->wait_for) { + case MMCIF_WAIT_FOR_CMD: + mrq->cmd->error = sh_mmcif_error_manage(host); + break; + case MMCIF_WAIT_FOR_STOP: + mrq->stop->error = sh_mmcif_error_manage(host); + break; + case MMCIF_WAIT_FOR_MREAD: + case MMCIF_WAIT_FOR_MWRITE: + case MMCIF_WAIT_FOR_READ: + case MMCIF_WAIT_FOR_WRITE: + case MMCIF_WAIT_FOR_READ_END: + case 
MMCIF_WAIT_FOR_WRITE_END: + mrq->data->error = sh_mmcif_error_manage(host); + break; + default: + BUG(); + } + + host->state = STATE_IDLE; + host->wait_for = MMCIF_WAIT_FOR_REQUEST; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + +static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) +{ + struct device *dev = sh_mmcif_host_to_dev(host); + struct sh_mmcif_plat_data *pd = dev->platform_data; + struct mmc_host *mmc = host->mmc; + + mmc_regulator_get_supply(mmc); + + if (!pd) + return; + + if (!mmc->ocr_avail) + mmc->ocr_avail = pd->ocr; + else if (pd->ocr) + dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); +} + +static int sh_mmcif_probe(struct platform_device *pdev) +{ + int ret = 0, irq[2]; + struct mmc_host *mmc; + struct sh_mmcif_host *host; + struct device *dev = &pdev->dev; + struct sh_mmcif_plat_data *pd = dev->platform_data; + void __iomem *reg; + const char *name; + + irq[0] = platform_get_irq(pdev, 0); + irq[1] = platform_get_irq_optional(pdev, 1); + if (irq[0] < 0) + return irq[0]; + + reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev); + if (!mmc) + return -ENOMEM; + + ret = mmc_of_parse(mmc); + if (ret < 0) + goto err_host; + + host = mmc_priv(mmc); + host->mmc = mmc; + host->addr = reg; + host->timeout = msecs_to_jiffies(10000); + host->ccs_enable = true; + host->clk_ctrl2_enable = false; + + host->pd = pdev; + + spin_lock_init(&host->lock); + + mmc->ops = &sh_mmcif_ops; + sh_mmcif_init_ocr(host); + + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; + mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; + mmc->max_busy_timeout = 10000; + + if (pd && pd->caps) + mmc->caps |= pd->caps; + mmc->max_segs = 32; + mmc->max_blk_size = 512; + mmc->max_req_size = PAGE_SIZE * mmc->max_segs; + mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; + mmc->max_seg_size = mmc->max_req_size; + + platform_set_drvdata(pdev, host); + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + dev_err(dev, "cannot get clock: %d\n", ret); + goto err_host; + } + + ret = clk_prepare_enable(host->clk); + if (ret < 0) + goto err_host; + + sh_mmcif_clk_setup(host); + + pm_runtime_enable(dev); + host->power = false; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_clk; + + INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work); + + sh_mmcif_sync_reset(host); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + + name = irq[1] < 0 ? 
dev_name(dev) : "sh_mmc:error"; + ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr, + sh_mmcif_irqt, 0, name, host); + if (ret) { + dev_err(dev, "request_irq error (%s)\n", name); + goto err_clk; + } + if (irq[1] >= 0) { + ret = devm_request_threaded_irq(dev, irq[1], + sh_mmcif_intr, sh_mmcif_irqt, + 0, "sh_mmc:int", host); + if (ret) { + dev_err(dev, "request_irq error (sh_mmc:int)\n"); + goto err_clk; + } + } + + mutex_init(&host->thread_lock); + + ret = mmc_add_host(mmc); + if (ret < 0) + goto err_clk; + + dev_pm_qos_expose_latency_limit(dev, 100); + + dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n", + sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, + clk_get_rate(host->clk) / 1000000UL); + + pm_runtime_put(dev); + clk_disable_unprepare(host->clk); + return ret; + +err_clk: + clk_disable_unprepare(host->clk); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); +err_host: + mmc_free_host(mmc); + return ret; +} + +static int sh_mmcif_remove(struct platform_device *pdev) +{ + struct sh_mmcif_host *host = platform_get_drvdata(pdev); + + host->dying = true; + clk_prepare_enable(host->clk); + pm_runtime_get_sync(&pdev->dev); + + dev_pm_qos_hide_latency_limit(&pdev->dev); + + mmc_remove_host(host->mmc); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + + /* + * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the + * mmc_remove_host() call above. But swapping order doesn't help either + * (a query on the linux-mmc mailing list didn't bring any replies). + */ + cancel_delayed_work_sync(&host->timeout_work); + + clk_disable_unprepare(host->clk); + mmc_free_host(host->mmc); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sh_mmcif_suspend(struct device *dev) +{ + struct sh_mmcif_host *host = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + pm_runtime_put(dev); + + return 0; +} + +static int sh_mmcif_resume(struct device *dev) +{ + return 0; +} +#endif + +static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume) +}; + +static struct platform_driver sh_mmcif_driver = { + .probe = sh_mmcif_probe, + .remove = sh_mmcif_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &sh_mmcif_dev_pm_ops, + .of_match_table = sh_mmcif_of_match, + }, +}; + +module_platform_driver(sh_mmcif_driver); + +MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Yusuke Goda "); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c new file mode 100644 index 000000000..b834fde3f --- /dev/null +++ b/drivers/mmc/host/sunxi-mmc.c @@ -0,0 +1,1531 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for sunxi SD/MMC host controllers + * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd. 
+ * (C) Copyright 2007-2011 Aaron Maoye 
+ * (C) Copyright 2013-2014 O2S GmbH 
+ * (C) Copyright 2013-2014 David Lanzendörfer 
+ * (C) Copyright 2013-2014 Hans de Goede 
+ * (C) Copyright 2017 Sootech SA
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/sunxi-ng.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* register offset definitions */
+#define SDXC_REG_GCTRL (0x00) /* SMC Global Control Register */
+#define SDXC_REG_CLKCR (0x04) /* SMC Clock Control Register */
+#define SDXC_REG_TMOUT (0x08) /* SMC Time Out Register */
+#define SDXC_REG_WIDTH (0x0C) /* SMC Bus Width Register */
+#define SDXC_REG_BLKSZ (0x10) /* SMC Block Size Register */
+#define SDXC_REG_BCNTR (0x14) /* SMC Byte Count Register */
+#define SDXC_REG_CMDR (0x18) /* SMC Command Register */
+#define SDXC_REG_CARG (0x1C) /* SMC Argument Register */
+#define SDXC_REG_RESP0 (0x20) /* SMC Response Register 0 */
+#define SDXC_REG_RESP1 (0x24) /* SMC Response Register 1 */
+#define SDXC_REG_RESP2 (0x28) /* SMC Response Register 2 */
+#define SDXC_REG_RESP3 (0x2C) /* SMC Response Register 3 */
+#define SDXC_REG_IMASK (0x30) /* SMC Interrupt Mask Register */
+#define SDXC_REG_MISTA (0x34) /* SMC Masked Interrupt Status Register */
+#define SDXC_REG_RINTR (0x38) /* SMC Raw Interrupt Status Register */
+#define SDXC_REG_STAS (0x3C) /* SMC Status Register */
+#define SDXC_REG_FTRGL (0x40) /* SMC FIFO Threshold Watermark Register */
+#define SDXC_REG_FUNS (0x44) /* SMC Function Select Register */
+#define SDXC_REG_CBCR (0x48) /* SMC CIU Byte Count Register */
+#define SDXC_REG_BBCR (0x4C) /* SMC BIU Byte Count Register */
+#define SDXC_REG_DBGC (0x50) /* SMC Debug Enable Register */
+#define SDXC_REG_HWRST (0x78) /* SMC Card Hardware Reset Register */
+#define SDXC_REG_DMAC (0x80) /* SMC IDMAC Control Register */
+#define SDXC_REG_DLBA (0x84) /* SMC IDMAC Descriptor List Base Address */
+#define SDXC_REG_IDST (0x88) /* SMC IDMAC Status Register */
+#define SDXC_REG_IDIE (0x8C) /* SMC IDMAC Interrupt Enable Register */
+#define SDXC_REG_CHDA (0x90)
+#define SDXC_REG_CBDA (0x94)
+
+/* New registers introduced in A64 */
+#define SDXC_REG_A12A 0x058 /* SMC Auto Command 12 Register */
+#define SDXC_REG_SD_NTSR 0x05C /* SMC New Timing Set Register */
+#define SDXC_REG_DRV_DL 0x140 /* Drive Delay Control Register */
+#define SDXC_REG_SAMP_DL_REG 0x144 /* SMC sample delay control */
+#define SDXC_REG_DS_DL_REG 0x148 /* SMC data strobe delay control */
+
+#define mmc_readl(host, reg) \
+	readl((host)->reg_base + SDXC_##reg)
+#define mmc_writel(host, reg, value) \
+	writel((value), (host)->reg_base + SDXC_##reg)
+
+/* global control register bits */
+#define SDXC_SOFT_RESET BIT(0)
+#define SDXC_FIFO_RESET BIT(1)
+#define SDXC_DMA_RESET BIT(2)
+#define SDXC_INTERRUPT_ENABLE_BIT BIT(4)
+#define SDXC_DMA_ENABLE_BIT BIT(5)
+#define SDXC_DEBOUNCE_ENABLE_BIT BIT(8)
+#define SDXC_POSEDGE_LATCH_DATA BIT(9)
+#define SDXC_DDR_MODE BIT(10)
+#define SDXC_MEMORY_ACCESS_DONE BIT(29)
+#define SDXC_ACCESS_DONE_DIRECT BIT(30)
+#define SDXC_ACCESS_BY_AHB BIT(31)
+#define SDXC_ACCESS_BY_DMA (0 << 31)
+#define SDXC_HARDWARE_RESET \
+	(SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET)
+
+/* clock control bits */
+#define SDXC_MASK_DATA0 BIT(31)
+#define SDXC_CARD_CLOCK_ON BIT(16)
+#define SDXC_LOW_POWER_ON BIT(17)
+
+/* bus width */
+#define SDXC_WIDTH1 0
+#define SDXC_WIDTH4 1
+#define SDXC_WIDTH8 2
+
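+/*
+ * Side note (illustration): the SDXC_##reg token paste in the accessors
+ * above maps a short register name to its offset, so for example
+ * mmc_writel(host, REG_GCTRL, v) compiles to
+ * writel((v), (host)->reg_base + SDXC_REG_GCTRL).
+ */
+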
+/* smc command bits */ +#define SDXC_RESP_EXPIRE BIT(6) +#define SDXC_LONG_RESPONSE BIT(7) +#define SDXC_CHECK_RESPONSE_CRC BIT(8) +#define SDXC_DATA_EXPIRE BIT(9) +#define SDXC_WRITE BIT(10) +#define SDXC_SEQUENCE_MODE BIT(11) +#define SDXC_SEND_AUTO_STOP BIT(12) +#define SDXC_WAIT_PRE_OVER BIT(13) +#define SDXC_STOP_ABORT_CMD BIT(14) +#define SDXC_SEND_INIT_SEQUENCE BIT(15) +#define SDXC_UPCLK_ONLY BIT(21) +#define SDXC_READ_CEATA_DEV BIT(22) +#define SDXC_CCS_EXPIRE BIT(23) +#define SDXC_ENABLE_BIT_BOOT BIT(24) +#define SDXC_ALT_BOOT_OPTIONS BIT(25) +#define SDXC_BOOT_ACK_EXPIRE BIT(26) +#define SDXC_BOOT_ABORT BIT(27) +#define SDXC_VOLTAGE_SWITCH BIT(28) +#define SDXC_USE_HOLD_REGISTER BIT(29) +#define SDXC_START BIT(31) + +/* interrupt bits */ +#define SDXC_RESP_ERROR BIT(1) +#define SDXC_COMMAND_DONE BIT(2) +#define SDXC_DATA_OVER BIT(3) +#define SDXC_TX_DATA_REQUEST BIT(4) +#define SDXC_RX_DATA_REQUEST BIT(5) +#define SDXC_RESP_CRC_ERROR BIT(6) +#define SDXC_DATA_CRC_ERROR BIT(7) +#define SDXC_RESP_TIMEOUT BIT(8) +#define SDXC_DATA_TIMEOUT BIT(9) +#define SDXC_VOLTAGE_CHANGE_DONE BIT(10) +#define SDXC_FIFO_RUN_ERROR BIT(11) +#define SDXC_HARD_WARE_LOCKED BIT(12) +#define SDXC_START_BIT_ERROR BIT(13) +#define SDXC_AUTO_COMMAND_DONE BIT(14) +#define SDXC_END_BIT_ERROR BIT(15) +#define SDXC_SDIO_INTERRUPT BIT(16) +#define SDXC_CARD_INSERT BIT(30) +#define SDXC_CARD_REMOVE BIT(31) +#define SDXC_INTERRUPT_ERROR_BIT \ + (SDXC_RESP_ERROR | SDXC_RESP_CRC_ERROR | SDXC_DATA_CRC_ERROR | \ + SDXC_RESP_TIMEOUT | SDXC_DATA_TIMEOUT | SDXC_FIFO_RUN_ERROR | \ + SDXC_HARD_WARE_LOCKED | SDXC_START_BIT_ERROR | SDXC_END_BIT_ERROR) +#define SDXC_INTERRUPT_DONE_BIT \ + (SDXC_AUTO_COMMAND_DONE | SDXC_DATA_OVER | \ + SDXC_COMMAND_DONE | SDXC_VOLTAGE_CHANGE_DONE) + +/* status */ +#define SDXC_RXWL_FLAG BIT(0) +#define SDXC_TXWL_FLAG BIT(1) +#define SDXC_FIFO_EMPTY BIT(2) +#define SDXC_FIFO_FULL BIT(3) +#define SDXC_CARD_PRESENT BIT(8) +#define SDXC_CARD_DATA_BUSY BIT(9) +#define SDXC_DATA_FSM_BUSY BIT(10) +#define SDXC_DMA_REQUEST BIT(31) +#define SDXC_FIFO_SIZE 16 + +/* Function select */ +#define SDXC_CEATA_ON (0xceaa << 16) +#define SDXC_SEND_IRQ_RESPONSE BIT(0) +#define SDXC_SDIO_READ_WAIT BIT(1) +#define SDXC_ABORT_READ_DATA BIT(2) +#define SDXC_SEND_CCSD BIT(8) +#define SDXC_SEND_AUTO_STOPCCSD BIT(9) +#define SDXC_CEATA_DEV_IRQ_ENABLE BIT(10) + +/* IDMA controller bus mod bit field */ +#define SDXC_IDMAC_SOFT_RESET BIT(0) +#define SDXC_IDMAC_FIX_BURST BIT(1) +#define SDXC_IDMAC_IDMA_ON BIT(7) +#define SDXC_IDMAC_REFETCH_DES BIT(31) + +/* IDMA status bit field */ +#define SDXC_IDMAC_TRANSMIT_INTERRUPT BIT(0) +#define SDXC_IDMAC_RECEIVE_INTERRUPT BIT(1) +#define SDXC_IDMAC_FATAL_BUS_ERROR BIT(2) +#define SDXC_IDMAC_DESTINATION_INVALID BIT(4) +#define SDXC_IDMAC_CARD_ERROR_SUM BIT(5) +#define SDXC_IDMAC_NORMAL_INTERRUPT_SUM BIT(8) +#define SDXC_IDMAC_ABNORMAL_INTERRUPT_SUM BIT(9) +#define SDXC_IDMAC_HOST_ABORT_INTERRUPT BIT(10) +#define SDXC_IDMAC_IDLE (0 << 13) +#define SDXC_IDMAC_SUSPEND (1 << 13) +#define SDXC_IDMAC_DESC_READ (2 << 13) +#define SDXC_IDMAC_DESC_CHECK (3 << 13) +#define SDXC_IDMAC_READ_REQUEST_WAIT (4 << 13) +#define SDXC_IDMAC_WRITE_REQUEST_WAIT (5 << 13) +#define SDXC_IDMAC_READ (6 << 13) +#define SDXC_IDMAC_WRITE (7 << 13) +#define SDXC_IDMAC_DESC_CLOSE (8 << 13) + +/* +* If the idma-des-size-bits of property is ie 13, bufsize bits are: +* Bits 0-12: buf1 size +* Bits 13-25: buf2 size +* Bits 26-31: not used +* Since we only ever set buf1 size, we can simply store it directly. 
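+* With 13 size bits, for instance, a single descriptor covers at most
+* 2^13 = 8192 bytes; sunxi_mmc_init_idma_des() encodes a chunk of exactly
+* that size as buf_size = 0, which the IDMAC interprets as the maximum.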
+*/ +#define SDXC_IDMAC_DES0_DIC BIT(1) /* disable interrupt on completion */ +#define SDXC_IDMAC_DES0_LD BIT(2) /* last descriptor */ +#define SDXC_IDMAC_DES0_FD BIT(3) /* first descriptor */ +#define SDXC_IDMAC_DES0_CH BIT(4) /* chain mode */ +#define SDXC_IDMAC_DES0_ER BIT(5) /* end of ring */ +#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ +#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ + +#define SDXC_CLK_400K 0 +#define SDXC_CLK_25M 1 +#define SDXC_CLK_50M 2 +#define SDXC_CLK_50M_DDR 3 +#define SDXC_CLK_50M_DDR_8BIT 4 + +#define SDXC_2X_TIMING_MODE BIT(31) + +#define SDXC_CAL_START BIT(15) +#define SDXC_CAL_DONE BIT(14) +#define SDXC_CAL_DL_SHIFT 8 +#define SDXC_CAL_DL_SW_EN BIT(7) +#define SDXC_CAL_DL_SW_SHIFT 0 +#define SDXC_CAL_DL_MASK 0x3f + +#define SDXC_CAL_TIMEOUT 3 /* in seconds, 3s is enough*/ + +struct sunxi_mmc_clk_delay { + u32 output; + u32 sample; +}; + +struct sunxi_idma_des { + __le32 config; + __le32 buf_size; + __le32 buf_addr_ptr1; + __le32 buf_addr_ptr2; +}; + +struct sunxi_mmc_cfg { + u32 idma_des_size_bits; + const struct sunxi_mmc_clk_delay *clk_delays; + + /* does the IP block support autocalibration? */ + bool can_calibrate; + + /* Does DATA0 needs to be masked while the clock is updated */ + bool mask_data0; + + /* + * hardware only supports new timing mode, either due to lack of + * a mode switch in the clock controller, or the mmc controller + * is permanently configured in the new timing mode, without the + * NTSR mode switch. + */ + bool needs_new_timings; + + /* clock hardware can switch between old and new timing modes */ + bool ccu_has_timings_switch; +}; + +struct sunxi_mmc_host { + struct device *dev; + struct mmc_host *mmc; + struct reset_control *reset; + const struct sunxi_mmc_cfg *cfg; + + /* IO mapping base */ + void __iomem *reg_base; + + /* clock management */ + struct clk *clk_ahb; + struct clk *clk_mmc; + struct clk *clk_sample; + struct clk *clk_output; + + /* irq */ + spinlock_t lock; + int irq; + u32 int_sum; + u32 sdio_imask; + + /* dma */ + dma_addr_t sg_dma; + void *sg_cpu; + bool wait_dma; + + struct mmc_request *mrq; + struct mmc_request *manual_stop_mrq; + int ferror; + + /* vqmmc */ + bool vqmmc_enabled; + + /* timings */ + bool use_new_timings; +}; + +static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) +{ + unsigned long expire = jiffies + msecs_to_jiffies(250); + u32 rval; + + mmc_writel(host, REG_GCTRL, SDXC_HARDWARE_RESET); + do { + rval = mmc_readl(host, REG_GCTRL); + } while (time_before(jiffies, expire) && (rval & SDXC_HARDWARE_RESET)); + + if (rval & SDXC_HARDWARE_RESET) { + dev_err(mmc_dev(host->mmc), "fatal err reset timeout\n"); + return -EIO; + } + + return 0; +} + +static int sunxi_mmc_init_host(struct sunxi_mmc_host *host) +{ + u32 rval; + + if (sunxi_mmc_reset_host(host)) + return -EIO; + + /* + * Burst 8 transfers, RX trigger level: 7, TX trigger level: 8 + * + * TODO: sun9i has a larger FIFO and supports higher trigger values + */ + mmc_writel(host, REG_FTRGL, 0x20070008); + /* Maximum timeout value */ + mmc_writel(host, REG_TMOUT, 0xffffffff); + /* Unmask SDIO interrupt if needed */ + mmc_writel(host, REG_IMASK, host->sdio_imask); + /* Clear all pending interrupts */ + mmc_writel(host, REG_RINTR, 0xffffffff); + /* Debug register? 
undocumented */ + mmc_writel(host, REG_DBGC, 0xdeb); + /* Enable CEATA support */ + mmc_writel(host, REG_FUNS, SDXC_CEATA_ON); + /* Set DMA descriptor list base address */ + mmc_writel(host, REG_DLBA, host->sg_dma); + + rval = mmc_readl(host, REG_GCTRL); + rval |= SDXC_INTERRUPT_ENABLE_BIT; + /* Undocumented, but found in Allwinner code */ + rval &= ~SDXC_ACCESS_DONE_DIRECT; + mmc_writel(host, REG_GCTRL, rval); + + return 0; +} + +static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host, + struct mmc_data *data) +{ + struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu; + dma_addr_t next_desc = host->sg_dma; + int i, max_len = (1 << host->cfg->idma_des_size_bits); + + for (i = 0; i < data->sg_len; i++) { + pdes[i].config = cpu_to_le32(SDXC_IDMAC_DES0_CH | + SDXC_IDMAC_DES0_OWN | + SDXC_IDMAC_DES0_DIC); + + if (data->sg[i].length == max_len) + pdes[i].buf_size = 0; /* 0 == max_len */ + else + pdes[i].buf_size = cpu_to_le32(data->sg[i].length); + + next_desc += sizeof(struct sunxi_idma_des); + pdes[i].buf_addr_ptr1 = + cpu_to_le32(sg_dma_address(&data->sg[i])); + pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc); + } + + pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD); + pdes[i - 1].config |= cpu_to_le32(SDXC_IDMAC_DES0_LD | + SDXC_IDMAC_DES0_ER); + pdes[i - 1].config &= cpu_to_le32(~SDXC_IDMAC_DES0_DIC); + pdes[i - 1].buf_addr_ptr2 = 0; + + /* + * Avoid the io-store starting the idmac hitting io-mem before the + * descriptors hit the main-mem. + */ + wmb(); +} + +static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host, + struct mmc_data *data) +{ + u32 i, dma_len; + struct scatterlist *sg; + + dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (dma_len == 0) { + dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); + return -ENOMEM; + } + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 3 || sg->length & 3) { + dev_err(mmc_dev(host->mmc), + "unaligned scatterlist: os %x length %d\n", + sg->offset, sg->length); + return -EINVAL; + } + } + + return 0; +} + +static void sunxi_mmc_start_dma(struct sunxi_mmc_host *host, + struct mmc_data *data) +{ + u32 rval; + + sunxi_mmc_init_idma_des(host, data); + + rval = mmc_readl(host, REG_GCTRL); + rval |= SDXC_DMA_ENABLE_BIT; + mmc_writel(host, REG_GCTRL, rval); + rval |= SDXC_DMA_RESET; + mmc_writel(host, REG_GCTRL, rval); + + mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET); + + if (!(data->flags & MMC_DATA_WRITE)) + mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT); + + mmc_writel(host, REG_DMAC, + SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON); +} + +static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host, + struct mmc_request *req) +{ + u32 arg, cmd_val, ri; + unsigned long expire = jiffies + msecs_to_jiffies(1000); + + cmd_val = SDXC_START | SDXC_RESP_EXPIRE | + SDXC_STOP_ABORT_CMD | SDXC_CHECK_RESPONSE_CRC; + + if (req->cmd->opcode == SD_IO_RW_EXTENDED) { + cmd_val |= SD_IO_RW_DIRECT; + arg = (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | + ((req->cmd->arg >> 28) & 0x7); + } else { + cmd_val |= MMC_STOP_TRANSMISSION; + arg = 0; + } + + mmc_writel(host, REG_CARG, arg); + mmc_writel(host, REG_CMDR, cmd_val); + + do { + ri = mmc_readl(host, REG_RINTR); + } while (!(ri & (SDXC_COMMAND_DONE | SDXC_INTERRUPT_ERROR_BIT)) && + time_before(jiffies, expire)); + + if (!(ri & SDXC_COMMAND_DONE) || (ri & SDXC_INTERRUPT_ERROR_BIT)) { + dev_err(mmc_dev(host->mmc), "send stop command failed\n"); + if (req->stop) + req->stop->resp[0] = -ETIMEDOUT; + } else { + 
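/* the stop command completed: forward its response to the core */
+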
if (req->stop) + req->stop->resp[0] = mmc_readl(host, REG_RESP0); + } + + mmc_writel(host, REG_RINTR, 0xffff); +} + +static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host) +{ + struct mmc_command *cmd = host->mrq->cmd; + struct mmc_data *data = host->mrq->data; + + /* For some cmds timeout is normal with sd/mmc cards */ + if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) == + SDXC_RESP_TIMEOUT && (cmd->opcode == SD_IO_SEND_OP_COND || + cmd->opcode == SD_IO_RW_DIRECT)) + return; + + dev_dbg(mmc_dev(host->mmc), + "smc %d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n", + host->mmc->index, cmd->opcode, + data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "", + host->int_sum & SDXC_RESP_ERROR ? " RE" : "", + host->int_sum & SDXC_RESP_CRC_ERROR ? " RCE" : "", + host->int_sum & SDXC_DATA_CRC_ERROR ? " DCE" : "", + host->int_sum & SDXC_RESP_TIMEOUT ? " RTO" : "", + host->int_sum & SDXC_DATA_TIMEOUT ? " DTO" : "", + host->int_sum & SDXC_FIFO_RUN_ERROR ? " FE" : "", + host->int_sum & SDXC_HARD_WARE_LOCKED ? " HL" : "", + host->int_sum & SDXC_START_BIT_ERROR ? " SBE" : "", + host->int_sum & SDXC_END_BIT_ERROR ? " EBE" : "" + ); +} + +/* Called in interrupt context! */ +static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq->data; + u32 rval; + + mmc_writel(host, REG_IMASK, host->sdio_imask); + mmc_writel(host, REG_IDIE, 0); + + if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) { + sunxi_mmc_dump_errinfo(host); + mrq->cmd->error = -ETIMEDOUT; + + if (data) { + data->error = -ETIMEDOUT; + host->manual_stop_mrq = mrq; + } + + if (mrq->stop) + mrq->stop->error = -ETIMEDOUT; + } else { + if (mrq->cmd->flags & MMC_RSP_136) { + mrq->cmd->resp[0] = mmc_readl(host, REG_RESP3); + mrq->cmd->resp[1] = mmc_readl(host, REG_RESP2); + mrq->cmd->resp[2] = mmc_readl(host, REG_RESP1); + mrq->cmd->resp[3] = mmc_readl(host, REG_RESP0); + } else { + mrq->cmd->resp[0] = mmc_readl(host, REG_RESP0); + } + + if (data) + data->bytes_xfered = data->blocks * data->blksz; + } + + if (data) { + mmc_writel(host, REG_IDST, 0x337); + mmc_writel(host, REG_DMAC, 0); + rval = mmc_readl(host, REG_GCTRL); + rval |= SDXC_DMA_RESET; + mmc_writel(host, REG_GCTRL, rval); + rval &= ~SDXC_DMA_ENABLE_BIT; + mmc_writel(host, REG_GCTRL, rval); + rval |= SDXC_FIFO_RESET; + mmc_writel(host, REG_GCTRL, rval); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } + + mmc_writel(host, REG_RINTR, 0xffff); + + host->mrq = NULL; + host->int_sum = 0; + host->wait_dma = false; + + return host->manual_stop_mrq ? 
IRQ_WAKE_THREAD : IRQ_HANDLED; +} + +static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id) +{ + struct sunxi_mmc_host *host = dev_id; + struct mmc_request *mrq; + u32 msk_int, idma_int; + bool finalize = false; + bool sdio_int = false; + irqreturn_t ret = IRQ_HANDLED; + + spin_lock(&host->lock); + + idma_int = mmc_readl(host, REG_IDST); + msk_int = mmc_readl(host, REG_MISTA); + + dev_dbg(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n", + host->mrq, msk_int, idma_int); + + mrq = host->mrq; + if (mrq) { + if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT) + host->wait_dma = false; + + host->int_sum |= msk_int; + + /* Wait for COMMAND_DONE on RESPONSE_TIMEOUT before finalize */ + if ((host->int_sum & SDXC_RESP_TIMEOUT) && + !(host->int_sum & SDXC_COMMAND_DONE)) + mmc_writel(host, REG_IMASK, + host->sdio_imask | SDXC_COMMAND_DONE); + /* Don't wait for dma on error */ + else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) + finalize = true; + else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) && + !host->wait_dma) + finalize = true; + } + + if (msk_int & SDXC_SDIO_INTERRUPT) + sdio_int = true; + + mmc_writel(host, REG_RINTR, msk_int); + mmc_writel(host, REG_IDST, idma_int); + + if (finalize) + ret = sunxi_mmc_finalize_request(host); + + spin_unlock(&host->lock); + + if (finalize && ret == IRQ_HANDLED) + mmc_request_done(host->mmc, mrq); + + if (sdio_int) + mmc_signal_sdio_irq(host->mmc); + + return ret; +} + +static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id) +{ + struct sunxi_mmc_host *host = dev_id; + struct mmc_request *mrq; + unsigned long iflags; + + spin_lock_irqsave(&host->lock, iflags); + mrq = host->manual_stop_mrq; + spin_unlock_irqrestore(&host->lock, iflags); + + if (!mrq) { + dev_err(mmc_dev(host->mmc), "no request for manual stop\n"); + return IRQ_HANDLED; + } + + dev_err(mmc_dev(host->mmc), "data error, sending stop command\n"); + + /* + * We will never have more than one outstanding request, + * and we do not complete the request until after + * we've cleared host->manual_stop_mrq so we do not need to + * spin lock this function. + * Additionally we have wait states within this function + * so having it in a lock is a very bad idea. + */ + sunxi_mmc_send_manual_stop(host, mrq); + + spin_lock_irqsave(&host->lock, iflags); + host->manual_stop_mrq = NULL; + spin_unlock_irqrestore(&host->lock, iflags); + + mmc_request_done(host->mmc, mrq); + + return IRQ_HANDLED; +} + +static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en) +{ + unsigned long expire = jiffies + msecs_to_jiffies(750); + u32 rval; + + dev_dbg(mmc_dev(host->mmc), "%sabling the clock\n", + oclk_en ? 
"en" : "dis"); + + rval = mmc_readl(host, REG_CLKCR); + rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0); + + if (oclk_en) + rval |= SDXC_CARD_CLOCK_ON; + if (host->cfg->mask_data0) + rval |= SDXC_MASK_DATA0; + + mmc_writel(host, REG_CLKCR, rval); + + rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER; + mmc_writel(host, REG_CMDR, rval); + + do { + rval = mmc_readl(host, REG_CMDR); + } while (time_before(jiffies, expire) && (rval & SDXC_START)); + + /* clear irq status bits set by the command */ + mmc_writel(host, REG_RINTR, + mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT); + + if (rval & SDXC_START) { + dev_err(mmc_dev(host->mmc), "fatal err update clk timeout\n"); + return -EIO; + } + + if (host->cfg->mask_data0) { + rval = mmc_readl(host, REG_CLKCR); + mmc_writel(host, REG_CLKCR, rval & ~SDXC_MASK_DATA0); + } + + return 0; +} + +static int sunxi_mmc_calibrate(struct sunxi_mmc_host *host, int reg_off) +{ + if (!host->cfg->can_calibrate) + return 0; + + /* + * FIXME: + * This is not clear how the calibration is supposed to work + * yet. The best rate have been obtained by simply setting the + * delay to 0, as Allwinner does in its BSP. + * + * The only mode that doesn't have such a delay is HS400, that + * is in itself a TODO. + */ + writel(SDXC_CAL_DL_SW_EN, host->reg_base + reg_off); + + return 0; +} + +static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host, + struct mmc_ios *ios, u32 rate) +{ + int index; + + /* clk controller delays not used under new timings mode */ + if (host->use_new_timings) + return 0; + + /* some old controllers don't support delays */ + if (!host->cfg->clk_delays) + return 0; + + /* determine delays */ + if (rate <= 400000) { + index = SDXC_CLK_400K; + } else if (rate <= 25000000) { + index = SDXC_CLK_25M; + } else if (rate <= 52000000) { + if (ios->timing != MMC_TIMING_UHS_DDR50 && + ios->timing != MMC_TIMING_MMC_DDR52) { + index = SDXC_CLK_50M; + } else if (ios->bus_width == MMC_BUS_WIDTH_8) { + index = SDXC_CLK_50M_DDR_8BIT; + } else { + index = SDXC_CLK_50M_DDR; + } + } else { + dev_dbg(mmc_dev(host->mmc), "Invalid clock... returning\n"); + return -EINVAL; + } + + clk_set_phase(host->clk_sample, host->cfg->clk_delays[index].sample); + clk_set_phase(host->clk_output, host->cfg->clk_delays[index].output); + + return 0; +} + +static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, + struct mmc_ios *ios) +{ + struct mmc_host *mmc = host->mmc; + long rate; + u32 rval, clock = ios->clock, div = 1; + int ret; + + ret = sunxi_mmc_oclk_onoff(host, 0); + if (ret) + return ret; + + /* Our clock is gated now */ + mmc->actual_clock = 0; + + if (!ios->clock) + return 0; + + /* + * Under the old timing mode, 8 bit DDR requires the module + * clock to be double the card clock. Under the new timing + * mode, all DDR modes require a doubled module clock. + * + * We currently only support the standard MMC DDR52 mode. + * This block should be updated once support for other DDR + * modes is added. 
+ */ + if (ios->timing == MMC_TIMING_MMC_DDR52 && + (host->use_new_timings || + ios->bus_width == MMC_BUS_WIDTH_8)) { + div = 2; + clock <<= 1; + } + + if (host->use_new_timings && host->cfg->ccu_has_timings_switch) { + ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + if (ret) { + dev_err(mmc_dev(mmc), + "error setting new timing mode\n"); + return ret; + } + } + + rate = clk_round_rate(host->clk_mmc, clock); + if (rate < 0) { + dev_err(mmc_dev(mmc), "error rounding clk to %d: %ld\n", + clock, rate); + return rate; + } + dev_dbg(mmc_dev(mmc), "setting clk to %d, rounded %ld\n", + clock, rate); + + /* setting clock rate */ + ret = clk_set_rate(host->clk_mmc, rate); + if (ret) { + dev_err(mmc_dev(mmc), "error setting clk to %ld: %d\n", + rate, ret); + return ret; + } + + /* set internal divider */ + rval = mmc_readl(host, REG_CLKCR); + rval &= ~0xff; + rval |= div - 1; + mmc_writel(host, REG_CLKCR, rval); + + /* update card clock rate to account for internal divider */ + rate /= div; + + /* + * Configure the controller to use the new timing mode if needed. + * On controllers that only support the new timing mode, such as + * the eMMC controller on the A64, this register does not exist, + * and any writes to it are ignored. + */ + if (host->use_new_timings) { + /* Don't touch the delay bits */ + rval = mmc_readl(host, REG_SD_NTSR); + rval |= SDXC_2X_TIMING_MODE; + mmc_writel(host, REG_SD_NTSR, rval); + } + + /* sunxi_mmc_clk_set_phase expects the actual card clock rate */ + ret = sunxi_mmc_clk_set_phase(host, ios, rate); + if (ret) + return ret; + + ret = sunxi_mmc_calibrate(host, SDXC_REG_SAMP_DL_REG); + if (ret) + return ret; + + /* + * FIXME: + * + * In HS400 we'll also need to calibrate the data strobe + * signal. This should only happen on the MMC2 controller (at + * least on the A64). 
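The doubling rule the DDR comment describes is compact enough to model on its own. A minimal sketch in plain C with no kernel APIs, where the ddr parameter stands in for the DDR52-and-(new-timings-or-8-bit-bus) condition tested above:

#include <stdio.h>

/* Return the module clock rate to request and the internal divider
 * that brings it back down to the card clock: DDR runs the module
 * clock at twice the card clock and divides by two internally. */
static unsigned long module_rate(unsigned long card_clock, int ddr,
				 unsigned int *div)
{
	*div = ddr ? 2 : 1;
	return card_clock * *div;
}

int main(void)
{
	unsigned int div;
	unsigned long mod = module_rate(52000000UL, 1, &div);

	printf("module %lu Hz, divider %u, card %lu Hz\n",
	       mod, div, mod / div);
	return 0;
}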
+ */ + + ret = sunxi_mmc_oclk_onoff(host, 1); + if (ret) + return ret; + + /* And we just enabled our clock back */ + mmc->actual_clock = rate; + + return 0; +} + +static void sunxi_mmc_set_bus_width(struct sunxi_mmc_host *host, + unsigned char width) +{ + switch (width) { + case MMC_BUS_WIDTH_1: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH1); + break; + case MMC_BUS_WIDTH_4: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH4); + break; + case MMC_BUS_WIDTH_8: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH8); + break; + } +} + +static void sunxi_mmc_set_clk(struct sunxi_mmc_host *host, struct mmc_ios *ios) +{ + u32 rval; + + /* set ddr mode */ + rval = mmc_readl(host, REG_GCTRL); + if (ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_MMC_DDR52) + rval |= SDXC_DDR_MODE; + else + rval &= ~SDXC_DDR_MODE; + mmc_writel(host, REG_GCTRL, rval); + + host->ferror = sunxi_mmc_clk_set_rate(host, ios); + /* Android code had a usleep_range(50000, 55000); here */ +} + +static void sunxi_mmc_card_power(struct sunxi_mmc_host *host, + struct mmc_ios *ios) +{ + struct mmc_host *mmc = host->mmc; + + switch (ios->power_mode) { + case MMC_POWER_UP: + dev_dbg(mmc_dev(mmc), "Powering card up\n"); + + if (!IS_ERR(mmc->supply.vmmc)) { + host->ferror = mmc_regulator_set_ocr(mmc, + mmc->supply.vmmc, + ios->vdd); + if (host->ferror) + return; + } + + if (!IS_ERR(mmc->supply.vqmmc)) { + host->ferror = regulator_enable(mmc->supply.vqmmc); + if (host->ferror) { + dev_err(mmc_dev(mmc), + "failed to enable vqmmc\n"); + return; + } + host->vqmmc_enabled = true; + } + break; + + case MMC_POWER_OFF: + dev_dbg(mmc_dev(mmc), "Powering card off\n"); + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) + regulator_disable(mmc->supply.vqmmc); + + host->vqmmc_enabled = false; + break; + + default: + dev_dbg(mmc_dev(mmc), "Ignoring unknown card power state\n"); + break; + } +} + +static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + + sunxi_mmc_card_power(host, ios); + sunxi_mmc_set_bus_width(host, ios->bus_width); + sunxi_mmc_set_clk(host, ios); +} + +static int sunxi_mmc_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + int ret; + + /* vqmmc regulator is available */ + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + return ret < 0 ? 
ret : 0; + } + + /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ + if (mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) + return 0; + + return -EINVAL; +} + +static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + unsigned long flags; + u32 imask; + + if (enable) + pm_runtime_get_noresume(host->dev); + + spin_lock_irqsave(&host->lock, flags); + + imask = mmc_readl(host, REG_IMASK); + if (enable) { + host->sdio_imask = SDXC_SDIO_INTERRUPT; + imask |= SDXC_SDIO_INTERRUPT; + } else { + host->sdio_imask = 0; + imask &= ~SDXC_SDIO_INTERRUPT; + } + mmc_writel(host, REG_IMASK, imask); + spin_unlock_irqrestore(&host->lock, flags); + + if (!enable) + pm_runtime_put_noidle(host->mmc->parent); +} + +static void sunxi_mmc_hw_reset(struct mmc_host *mmc) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + mmc_writel(host, REG_HWRST, 0); + udelay(10); + mmc_writel(host, REG_HWRST, 1); + udelay(300); +} + +static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + unsigned long iflags; + u32 imask = SDXC_INTERRUPT_ERROR_BIT; + u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f); + bool wait_dma = host->wait_dma; + int ret; + + /* Check for set_ios errors (should never happen) */ + if (host->ferror) { + mrq->cmd->error = host->ferror; + mmc_request_done(mmc, mrq); + return; + } + + if (data) { + ret = sunxi_mmc_map_dma(host, data); + if (ret < 0) { + dev_err(mmc_dev(mmc), "map DMA failed\n"); + cmd->error = ret; + data->error = ret; + mmc_request_done(mmc, mrq); + return; + } + } + + if (cmd->opcode == MMC_GO_IDLE_STATE) { + cmd_val |= SDXC_SEND_INIT_SEQUENCE; + imask |= SDXC_COMMAND_DONE; + } + + if (cmd->flags & MMC_RSP_PRESENT) { + cmd_val |= SDXC_RESP_EXPIRE; + if (cmd->flags & MMC_RSP_136) + cmd_val |= SDXC_LONG_RESPONSE; + if (cmd->flags & MMC_RSP_CRC) + cmd_val |= SDXC_CHECK_RESPONSE_CRC; + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { + cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER; + + if (cmd->data->stop) { + imask |= SDXC_AUTO_COMMAND_DONE; + cmd_val |= SDXC_SEND_AUTO_STOP; + } else { + imask |= SDXC_DATA_OVER; + } + + if (cmd->data->flags & MMC_DATA_WRITE) + cmd_val |= SDXC_WRITE; + else + wait_dma = true; + } else { + imask |= SDXC_COMMAND_DONE; + } + } else { + imask |= SDXC_COMMAND_DONE; + } + + dev_dbg(mmc_dev(mmc), "cmd %d(%08x) arg %x ie 0x%08x len %d\n", + cmd_val & 0x3f, cmd_val, cmd->arg, imask, + mrq->data ? 
mrq->data->blksz * mrq->data->blocks : 0); + + spin_lock_irqsave(&host->lock, iflags); + + if (host->mrq || host->manual_stop_mrq) { + spin_unlock_irqrestore(&host->lock, iflags); + + if (data) + dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + + dev_err(mmc_dev(mmc), "request already pending\n"); + mrq->cmd->error = -EBUSY; + mmc_request_done(mmc, mrq); + return; + } + + if (data) { + mmc_writel(host, REG_BLKSZ, data->blksz); + mmc_writel(host, REG_BCNTR, data->blksz * data->blocks); + sunxi_mmc_start_dma(host, data); + } + + host->mrq = mrq; + host->wait_dma = wait_dma; + mmc_writel(host, REG_IMASK, host->sdio_imask | imask); + mmc_writel(host, REG_CARG, cmd->arg); + mmc_writel(host, REG_CMDR, cmd_val); + + spin_unlock_irqrestore(&host->lock, iflags); +} + +static int sunxi_mmc_card_busy(struct mmc_host *mmc) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + + return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY); +} + +static const struct mmc_host_ops sunxi_mmc_ops = { + .request = sunxi_mmc_request, + .set_ios = sunxi_mmc_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, + .enable_sdio_irq = sunxi_mmc_enable_sdio_irq, + .start_signal_voltage_switch = sunxi_mmc_volt_switch, + .hw_reset = sunxi_mmc_hw_reset, + .card_busy = sunxi_mmc_card_busy, +}; + +static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, + /* Value from A83T "new timing mode". Works but might not be right. */ + [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 180 }, +}; + +static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 }, + [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, +}; + +static const struct sunxi_mmc_cfg sun4i_a10_cfg = { + .idma_des_size_bits = 13, + .clk_delays = NULL, + .can_calibrate = false, +}; + +static const struct sunxi_mmc_cfg sun5i_a13_cfg = { + .idma_des_size_bits = 16, + .clk_delays = NULL, + .can_calibrate = false, +}; + +static const struct sunxi_mmc_cfg sun7i_a20_cfg = { + .idma_des_size_bits = 16, + .clk_delays = sunxi_mmc_clk_delays, + .can_calibrate = false, +}; + +static const struct sunxi_mmc_cfg sun8i_a83t_emmc_cfg = { + .idma_des_size_bits = 16, + .clk_delays = sunxi_mmc_clk_delays, + .can_calibrate = false, + .ccu_has_timings_switch = true, +}; + +static const struct sunxi_mmc_cfg sun9i_a80_cfg = { + .idma_des_size_bits = 16, + .clk_delays = sun9i_mmc_clk_delays, + .can_calibrate = false, +}; + +static const struct sunxi_mmc_cfg sun50i_a64_cfg = { + .idma_des_size_bits = 16, + .clk_delays = NULL, + .can_calibrate = true, + .mask_data0 = true, + .needs_new_timings = true, +}; + +static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = { + .idma_des_size_bits = 13, + .clk_delays = NULL, + .can_calibrate = true, + .needs_new_timings = true, +}; + +static const struct of_device_id sunxi_mmc_of_match[] = { + { .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg }, + { .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg }, + { .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-emmc", .data = 
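Each compatible string in the of_device_id table being filled in here selects one sunxi_mmc_cfg, which of_device_get_match_data() later hands back in sunxi_mmc_resource_request(). A stand-alone model of that lookup, with a trimmed two-entry table and simplified cfg fields:

#include <stdio.h>
#include <string.h>

struct cfg { int des_bits; int can_calibrate; };

static const struct { const char *compatible; struct cfg cfg; } table[] = {
	{ "allwinner,sun4i-a10-mmc",  { 13, 0 } },
	{ "allwinner,sun50i-a64-mmc", { 16, 1 } },
	{ NULL, { 0, 0 } } /* sentinel, like the kernel table */
};

static const struct cfg *match(const char *compat)
{
	for (int i = 0; table[i].compatible; i++)
		if (!strcmp(table[i].compatible, compat))
			return &table[i].cfg;
	return NULL; /* corresponds to the -EINVAL path in the driver */
}

int main(void)
{
	const struct cfg *c = match("allwinner,sun50i-a64-mmc");

	if (c)
		printf("des_bits=%d can_calibrate=%d\n",
		       c->des_bits, c->can_calibrate);
	return 0;
}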
&sun8i_a83t_emmc_cfg }, + { .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg }, + { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); + +static int sunxi_mmc_enable(struct sunxi_mmc_host *host) +{ + int ret; + + if (!IS_ERR(host->reset)) { + ret = reset_control_reset(host->reset); + if (ret) { + dev_err(host->dev, "Couldn't reset the MMC controller (%d)\n", + ret); + return ret; + } + } + + ret = clk_prepare_enable(host->clk_ahb); + if (ret) { + dev_err(host->dev, "Couldn't enable the bus clocks (%d)\n", ret); + goto error_assert_reset; + } + + ret = clk_prepare_enable(host->clk_mmc); + if (ret) { + dev_err(host->dev, "Enable mmc clk err %d\n", ret); + goto error_disable_clk_ahb; + } + + ret = clk_prepare_enable(host->clk_output); + if (ret) { + dev_err(host->dev, "Enable output clk err %d\n", ret); + goto error_disable_clk_mmc; + } + + ret = clk_prepare_enable(host->clk_sample); + if (ret) { + dev_err(host->dev, "Enable sample clk err %d\n", ret); + goto error_disable_clk_output; + } + + /* + * Sometimes the controller asserts the irq on boot for some reason, + * make sure the controller is in a sane state before enabling irqs. + */ + ret = sunxi_mmc_reset_host(host); + if (ret) + goto error_disable_clk_sample; + + return 0; + +error_disable_clk_sample: + clk_disable_unprepare(host->clk_sample); +error_disable_clk_output: + clk_disable_unprepare(host->clk_output); +error_disable_clk_mmc: + clk_disable_unprepare(host->clk_mmc); +error_disable_clk_ahb: + clk_disable_unprepare(host->clk_ahb); +error_assert_reset: + if (!IS_ERR(host->reset)) + reset_control_assert(host->reset); + return ret; +} + +static void sunxi_mmc_disable(struct sunxi_mmc_host *host) +{ + sunxi_mmc_reset_host(host); + + clk_disable_unprepare(host->clk_sample); + clk_disable_unprepare(host->clk_output); + clk_disable_unprepare(host->clk_mmc); + clk_disable_unprepare(host->clk_ahb); + + if (!IS_ERR(host->reset)) + reset_control_assert(host->reset); +} + +static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, + struct platform_device *pdev) +{ + int ret; + + host->cfg = of_device_get_match_data(&pdev->dev); + if (!host->cfg) + return -EINVAL; + + ret = mmc_regulator_get_supply(host->mmc); + if (ret) + return ret; + + host->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->reg_base)) + return PTR_ERR(host->reg_base); + + host->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(host->clk_ahb)) { + dev_err(&pdev->dev, "Could not get ahb clock\n"); + return PTR_ERR(host->clk_ahb); + } + + host->clk_mmc = devm_clk_get(&pdev->dev, "mmc"); + if (IS_ERR(host->clk_mmc)) { + dev_err(&pdev->dev, "Could not get mmc clock\n"); + return PTR_ERR(host->clk_mmc); + } + + if (host->cfg->clk_delays) { + host->clk_output = devm_clk_get(&pdev->dev, "output"); + if (IS_ERR(host->clk_output)) { + dev_err(&pdev->dev, "Could not get output clock\n"); + return PTR_ERR(host->clk_output); + } + + host->clk_sample = devm_clk_get(&pdev->dev, "sample"); + if (IS_ERR(host->clk_sample)) { + dev_err(&pdev->dev, "Could not get sample clock\n"); + return PTR_ERR(host->clk_sample); + } + } + + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "ahb"); + if (PTR_ERR(host->reset) == -EPROBE_DEFER) + return PTR_ERR(host->reset); + + ret = sunxi_mmc_enable(host); + if (ret) + return ret; + + host->irq = platform_get_irq(pdev, 
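sunxi_mmc_enable() above is a textbook instance of the kernel's unwind idiom: one goto label per acquired resource, releasing in exactly the reverse order of acquisition so a failure at any step cleans up precisely what was taken. A compressed illustration with hypothetical acquire/release pairs:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int enable_all(void)
{
	int ret;

	ret = acquire("reset");
	if (ret)
		return ret;
	ret = acquire("ahb clk");
	if (ret)
		goto err_reset;
	ret = acquire("mmc clk");
	if (ret)
		goto err_ahb;
	return 0;

	/* unwind strictly in reverse acquisition order */
err_ahb:
	release("ahb clk");
err_reset:
	release("reset");
	return ret;
}

int main(void) { return enable_all(); }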
0); + if (host->irq < 0) { + ret = host->irq; + goto error_disable_mmc; + } + + return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq, + sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host); + +error_disable_mmc: + sunxi_mmc_disable(host); + return ret; +} + +static int sunxi_mmc_probe(struct platform_device *pdev) +{ + struct sunxi_mmc_host *host; + struct mmc_host *mmc; + int ret; + + mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "mmc alloc host failed\n"); + return -ENOMEM; + } + platform_set_drvdata(pdev, mmc); + + host = mmc_priv(mmc); + host->dev = &pdev->dev; + host->mmc = mmc; + spin_lock_init(&host->lock); + + ret = sunxi_mmc_resource_request(host, pdev); + if (ret) + goto error_free_host; + + host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &host->sg_dma, GFP_KERNEL); + if (!host->sg_cpu) { + dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n"); + ret = -ENOMEM; + goto error_free_host; + } + + if (host->cfg->ccu_has_timings_switch) { + /* + * Supports both old and new timing modes. + * Try setting the clk to new timing mode. + */ + sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + + /* And check the result */ + ret = sunxi_ccu_get_mmc_timing_mode(host->clk_mmc); + if (ret < 0) { + /* + * For whatever reason we were not able to get + * the current active mode. Default to old mode. + */ + dev_warn(&pdev->dev, "MMC clk timing mode unknown\n"); + host->use_new_timings = false; + } else { + host->use_new_timings = !!ret; + } + } else if (host->cfg->needs_new_timings) { + /* Supports new timing mode only */ + host->use_new_timings = true; + } + + mmc->ops = &sunxi_mmc_ops; + mmc->max_blk_count = 8192; + mmc->max_blk_size = 4096; + mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des); + mmc->max_seg_size = (1 << host->cfg->idma_des_size_bits); + mmc->max_req_size = mmc->max_seg_size * mmc->max_segs; + /* 400kHz ~ 52MHz */ + mmc->f_min = 400000; + mmc->f_max = 52000000; + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_SDIO_IRQ; + + /* + * Some H5 devices do not have signal traces precise enough to + * use HS DDR mode for their eMMC chips. + * + * We still enable HS DDR modes for all the other controller + * variants that support them. + */ + if ((host->cfg->clk_delays || host->use_new_timings) && + !of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun50i-h5-emmc")) + mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; + + ret = mmc_of_parse(mmc); + if (ret) + goto error_free_dma; + + /* + * If we don't support delay chains in the SoC, we can't use any + * of the higher speed modes. Mask them out in case the device + * tree specifies the properties for them, which gets added to + * the caps by mmc_of_parse() above. + */ + if (!(host->cfg->clk_delays || host->use_new_timings)) { + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | + MMC_CAP_1_2V_DDR | MMC_CAP_UHS); + mmc->caps2 &= ~MMC_CAP2_HS200; + } + + /* TODO: This driver doesn't support HS400 mode yet */ + mmc->caps2 &= ~MMC_CAP2_HS400; + + ret = sunxi_mmc_init_host(host); + if (ret) + goto error_free_dma; + + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = mmc_add_host(mmc); + if (ret) + goto error_free_dma; + + dev_info(&pdev->dev, "initialized, max. request size: %u KB%s\n", + mmc->max_req_size >> 10, + host->use_new_timings ? 
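All of the transfer limits set in probe fall out of the single PAGE_SIZE descriptor page: max_segs is how many idma descriptors fit in that page, max_seg_size comes from idma_des_size_bits, and max_req_size is their product. Redoing the arithmetic in plain C, assuming a 4 KiB page and the 16-byte descriptor (four u32 fields):

#include <stdio.h>

#define PAGE_SIZE_BYTES 4096u /* assumed page size */
#define DES_SIZE        16u   /* sizeof(struct sunxi_idma_des): 4 x u32 */

int main(void)
{
	unsigned int des_size_bits = 16; /* e.g. the A64 config */
	unsigned int max_segs = PAGE_SIZE_BYTES / DES_SIZE;
	unsigned int max_seg_size = 1u << des_size_bits;
	unsigned long max_req = (unsigned long)max_segs * max_seg_size;

	printf("segs=%u seg_size=%u req=%lu KiB\n",
	       max_segs, max_seg_size, max_req >> 10);
	return 0;
}

For a 16-bit descriptor size this prints 16384 KiB, the figure the dev_info() at the end of probe reports.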
", uses new timings mode" : ""); + + return 0; + +error_free_dma: + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); +error_free_host: + mmc_free_host(mmc); + return ret; +} + +static int sunxi_mmc_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct sunxi_mmc_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) { + disable_irq(host->irq); + sunxi_mmc_disable(host); + } + dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); + mmc_free_host(mmc); + + return 0; +} + +#ifdef CONFIG_PM +static int sunxi_mmc_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct sunxi_mmc_host *host = mmc_priv(mmc); + int ret; + + ret = sunxi_mmc_enable(host); + if (ret) + return ret; + + sunxi_mmc_init_host(host); + sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); + sunxi_mmc_set_clk(host, &mmc->ios); + enable_irq(host->irq); + + return 0; +} + +static int sunxi_mmc_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct sunxi_mmc_host *host = mmc_priv(mmc); + + /* + * When clocks are off, it is possible to receive fake + * interrupts, which will stall the system. Disabling + * the irq will prevent this. + */ + disable_irq(host->irq); + sunxi_mmc_reset_host(host); + sunxi_mmc_disable(host); + + return 0; +} +#endif + +static const struct dev_pm_ops sunxi_mmc_pm_ops = { + SET_RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend, + sunxi_mmc_runtime_resume, + NULL) +}; + +static struct platform_driver sunxi_mmc_driver = { + .driver = { + .name = "sunxi-mmc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(sunxi_mmc_of_match), + .pm = &sunxi_mmc_pm_ops, + }, + .probe = sunxi_mmc_probe, + .remove = sunxi_mmc_remove, +}; +module_platform_driver(sunxi_mmc_driver); + +MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>"); +MODULE_ALIAS("platform:sunxi-mmc"); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c new file mode 100644 index 000000000..fd8b72d3e --- /dev/null +++ b/drivers/mmc/host/tifm_sd.c @@ -0,0 +1,1092 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tifm_sd.c - TI FlashMedia driver + * + * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> + * + * Special thanks to Brad Campbell for extensive testing of this driver.
+ */ + + +#include <linux/tifm.h> +#include <linux/mmc/host.h> +#include <linux/highmem.h> +#include <linux/scatterlist.h> +#include <linux/module.h> +#include <asm/io.h> + +#define DRIVER_NAME "tifm_sd" +#define DRIVER_VERSION "0.8" + +static bool no_dma = 0; +static bool fixed_timeout = 0; +module_param(no_dma, bool, 0644); +module_param(fixed_timeout, bool, 0644); + +/* Constants here are mostly from OMAP5912 datasheet */ +#define TIFM_MMCSD_RESET 0x0002 +#define TIFM_MMCSD_CLKMASK 0x03ff +#define TIFM_MMCSD_POWER 0x0800 +#define TIFM_MMCSD_4BBUS 0x8000 +#define TIFM_MMCSD_RXDE 0x8000 /* rx dma enable */ +#define TIFM_MMCSD_TXDE 0x0080 /* tx dma enable */ +#define TIFM_MMCSD_BUFINT 0x0c00 /* set bits: AE, AF */ +#define TIFM_MMCSD_DPE 0x0020 /* data timeout counted in kilocycles */ +#define TIFM_MMCSD_INAB 0x0080 /* abort / initialize command */ +#define TIFM_MMCSD_READ 0x8000 + +#define TIFM_MMCSD_ERRMASK 0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */ +#define TIFM_MMCSD_EOC 0x0001 /* end of command phase */ +#define TIFM_MMCSD_CD 0x0002 /* card detect */ +#define TIFM_MMCSD_CB 0x0004 /* card enter busy state */ +#define TIFM_MMCSD_BRS 0x0008 /* block received/sent */ +#define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */ +#define TIFM_MMCSD_DTO 0x0020 /* data time-out */ +#define TIFM_MMCSD_DCRC 0x0040 /* data crc error */ +#define TIFM_MMCSD_CTO 0x0080 /* command time-out */ +#define TIFM_MMCSD_CCRC 0x0100 /* command crc error */ +#define TIFM_MMCSD_AF 0x0400 /* fifo almost full */ +#define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */ +#define TIFM_MMCSD_OCRB 0x1000 /* OCR busy */ +#define TIFM_MMCSD_CIRQ 0x2000 /* card irq (cmd40/sdio) */ +#define TIFM_MMCSD_CERR 0x4000 /* card status error */ + +#define TIFM_MMCSD_ODTO 0x0040 /* open drain / extended timeout */ +#define TIFM_MMCSD_CARD_RO 0x0200 /* card is read-only */ + +#define TIFM_MMCSD_FIFO_SIZE 0x0020 + +#define TIFM_MMCSD_RSP_R0 0x0000 +#define TIFM_MMCSD_RSP_R1 0x0100 +#define TIFM_MMCSD_RSP_R2 0x0200 +#define TIFM_MMCSD_RSP_R3 0x0300 +#define TIFM_MMCSD_RSP_R4 0x0400 +#define TIFM_MMCSD_RSP_R5 0x0500 +#define TIFM_MMCSD_RSP_R6 0x0600 + +#define TIFM_MMCSD_RSP_BUSY 0x0800 + +#define TIFM_MMCSD_CMD_BC 0x0000 +#define TIFM_MMCSD_CMD_BCR 0x1000 +#define TIFM_MMCSD_CMD_AC 0x2000 +#define TIFM_MMCSD_CMD_ADTC 0x3000 + +#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL + +#define TIFM_MMCSD_REQ_TIMEOUT_MS 1000 + +enum { + CMD_READY = 0x0001, + FIFO_READY = 0x0002, + BRS_READY = 0x0004, + SCMD_ACTIVE = 0x0008, + SCMD_READY = 0x0010, + CARD_BUSY = 0x0020, + DATA_CARRY = 0x0040 +}; + +struct tifm_sd { + struct tifm_dev *dev; + + unsigned short eject:1, + open_drain:1, + no_dma:1; + unsigned short cmd_flags; + + unsigned int clk_freq; + unsigned int clk_div; + unsigned long timeout_jiffies; + + struct tasklet_struct finish_tasklet; + struct timer_list timer; + struct mmc_request *req; + + int sg_len; + int sg_pos; + unsigned int block_pos; + struct scatterlist bounce_buf; + unsigned char bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE]; +}; + +/* for some reason, host won't respond correctly to readw/writew */ +static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg, + unsigned int off, unsigned int cnt) +{ + struct tifm_dev *sock = host->dev; + unsigned char *buf; + unsigned int pos = 0, val; + + buf = kmap_atomic(pg) + off; + if (host->cmd_flags & DATA_CARRY) { + buf[pos++] = host->bounce_buf_data[0]; + host->cmd_flags &= ~DATA_CARRY; + } + + while (pos < cnt) { + val = readl(sock->addr + SOCK_MMCSD_DATA); + buf[pos++] = val & 0xff; + if (pos == cnt) { + host->bounce_buf_data[0] = (val >> 8) & 0xff; + host->cmd_flags |= DATA_CARRY;
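The byte-carry dance in tifm_sd_read_fifo()/tifm_sd_write_fifo() is the trickiest part of this PIO path: the data port is 16 bits wide, so a chunk with an odd byte count parks its trailing byte in bounce_buf_data[0] and sets DATA_CARRY for the next chunk to consume. A self-contained model of the write direction, with printf standing in for the 16-bit register write:

#include <stdint.h>
#include <stdio.h>

struct carry_state {
	int has_carry;
	uint8_t byte;
};

/* Emit buf[] to a 16-bit little-endian port, keeping a parked byte
 * across calls just like the DATA_CARRY logic in the driver. */
static void write_words(struct carry_state *st, const uint8_t *buf,
			unsigned int cnt)
{
	unsigned int pos = 0;

	if (st->has_carry && cnt) {
		printf("port <- %04x\n", st->byte | (buf[pos] << 8));
		pos++;
		st->has_carry = 0;
	}
	while (pos < cnt) {
		if (pos + 1 == cnt) {   /* odd byte left over: park it */
			st->byte = buf[pos];
			st->has_carry = 1;
			return;
		}
		printf("port <- %04x\n", buf[pos] | (buf[pos + 1] << 8));
		pos += 2;
	}
}

int main(void)
{
	struct carry_state st = { 0, 0 };
	const uint8_t a[3] = { 0x11, 0x22, 0x33 }; /* odd length: 0x33 parked */
	const uint8_t b[1] = { 0x44 };             /* completes the word */

	write_words(&st, a, 3);
	write_words(&st, b, 1);
	return 0;
}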
+ break; + } + buf[pos++] = (val >> 8) & 0xff; + } + kunmap_atomic(buf - off); +} + +static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, + unsigned int off, unsigned int cnt) +{ + struct tifm_dev *sock = host->dev; + unsigned char *buf; + unsigned int pos = 0, val; + + buf = kmap_atomic(pg) + off; + if (host->cmd_flags & DATA_CARRY) { + val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00); + writel(val, sock->addr + SOCK_MMCSD_DATA); + host->cmd_flags &= ~DATA_CARRY; + } + + while (pos < cnt) { + val = buf[pos++]; + if (pos == cnt) { + host->bounce_buf_data[0] = val & 0xff; + host->cmd_flags |= DATA_CARRY; + break; + } + val |= (buf[pos++] << 8) & 0xff00; + writel(val, sock->addr + SOCK_MMCSD_DATA); + } + kunmap_atomic(buf - off); +} + +static void tifm_sd_transfer_data(struct tifm_sd *host) +{ + struct mmc_data *r_data = host->req->cmd->data; + struct scatterlist *sg = r_data->sg; + unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2; + unsigned int p_off, p_cnt; + struct page *pg; + + if (host->sg_pos == host->sg_len) + return; + while (t_size) { + cnt = sg[host->sg_pos].length - host->block_pos; + if (!cnt) { + host->block_pos = 0; + host->sg_pos++; + if (host->sg_pos == host->sg_len) { + if ((r_data->flags & MMC_DATA_WRITE) + && (host->cmd_flags & DATA_CARRY)) + writel(host->bounce_buf_data[0], + host->dev->addr + + SOCK_MMCSD_DATA); + + return; + } + cnt = sg[host->sg_pos].length; + } + off = sg[host->sg_pos].offset + host->block_pos; + + pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT); + p_off = offset_in_page(off); + p_cnt = PAGE_SIZE - p_off; + p_cnt = min(p_cnt, cnt); + p_cnt = min(p_cnt, t_size); + + if (r_data->flags & MMC_DATA_READ) + tifm_sd_read_fifo(host, pg, p_off, p_cnt); + else if (r_data->flags & MMC_DATA_WRITE) + tifm_sd_write_fifo(host, pg, p_off, p_cnt); + + t_size -= p_cnt; + host->block_pos += p_cnt; + } +} + +static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off, + struct page *src, unsigned int src_off, + unsigned int count) +{ + unsigned char *src_buf = kmap_atomic(src) + src_off; + unsigned char *dst_buf = kmap_atomic(dst) + dst_off; + + memcpy(dst_buf, src_buf, count); + + kunmap_atomic(dst_buf - dst_off); + kunmap_atomic(src_buf - src_off); +} + +static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data) +{ + struct scatterlist *sg = r_data->sg; + unsigned int t_size = r_data->blksz; + unsigned int off, cnt; + unsigned int p_off, p_cnt; + struct page *pg; + + dev_dbg(&host->dev->dev, "bouncing block\n"); + while (t_size) { + cnt = sg[host->sg_pos].length - host->block_pos; + if (!cnt) { + host->block_pos = 0; + host->sg_pos++; + if (host->sg_pos == host->sg_len) + return; + cnt = sg[host->sg_pos].length; + } + off = sg[host->sg_pos].offset + host->block_pos; + + pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT); + p_off = offset_in_page(off); + p_cnt = PAGE_SIZE - p_off; + p_cnt = min(p_cnt, cnt); + p_cnt = min(p_cnt, t_size); + + if (r_data->flags & MMC_DATA_WRITE) + tifm_sd_copy_page(sg_page(&host->bounce_buf), + r_data->blksz - t_size, + pg, p_off, p_cnt); + else if (r_data->flags & MMC_DATA_READ) + tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf), + r_data->blksz - t_size, p_cnt); + + t_size -= p_cnt; + host->block_pos += p_cnt; + } +} + +static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data) +{ + struct tifm_dev *sock = host->dev; + unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz; + unsigned int dma_len, dma_blk_cnt, dma_off; + 
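These locals feed the decision that the rest of tifm_sd_set_dma_data() makes for every chunk: DMA whole blocks straight out of the scatterlist when possible, fall back to the one-block bounce buffer when only a partial block is contiguous, and report completion when nothing is left. The sketch below keeps only the shape of that decision; max_blocks plays the role of TIFM_DMA_TSIZE, and the sg_pos/block_pos bookkeeping of the real function is left out.

#include <stdio.h>

enum dma_plan { PLAN_DONE, PLAN_DIRECT, PLAN_BOUNCE };

/* Decide what to program next, given how many bytes of the current
 * scatterlist entry remain and the block size. */
static enum dma_plan plan_next(unsigned int bytes_left, unsigned int blksz,
			       unsigned int max_blocks, unsigned int *nblocks)
{
	if (!bytes_left)
		return PLAN_DONE;
	*nblocks = bytes_left / blksz;
	if (*nblocks == 0)
		return PLAN_BOUNCE;    /* partial block: go through bounce_buf */
	if (*nblocks > max_blocks)
		*nblocks = max_blocks; /* clamp to the engine's burst size */
	return PLAN_DIRECT;
}

int main(void)
{
	unsigned int n = 0;

	printf("%d\n", plan_next(3 * 512 + 100, 512, 64, &n)); /* direct, n=3 */
	printf("%d\n", plan_next(100, 512, 64, &n));           /* bounce */
	return 0;
}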
struct scatterlist *sg = NULL; + unsigned long flags; + + if (host->sg_pos == host->sg_len) + return 1; + + if (host->cmd_flags & DATA_CARRY) { + host->cmd_flags &= ~DATA_CARRY; + local_irq_save(flags); + tifm_sd_bounce_block(host, r_data); + local_irq_restore(flags); + if (host->sg_pos == host->sg_len) + return 1; + } + + dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos; + if (!dma_len) { + host->block_pos = 0; + host->sg_pos++; + if (host->sg_pos == host->sg_len) + return 1; + dma_len = sg_dma_len(&r_data->sg[host->sg_pos]); + } + + if (dma_len < t_size) { + dma_blk_cnt = dma_len / r_data->blksz; + dma_off = host->block_pos; + host->block_pos += dma_blk_cnt * r_data->blksz; + } else { + dma_blk_cnt = TIFM_DMA_TSIZE; + dma_off = host->block_pos; + host->block_pos += t_size; + } + + if (dma_blk_cnt) + sg = &r_data->sg[host->sg_pos]; + else if (dma_len) { + if (r_data->flags & MMC_DATA_WRITE) { + local_irq_save(flags); + tifm_sd_bounce_block(host, r_data); + local_irq_restore(flags); + } else + host->cmd_flags |= DATA_CARRY; + + sg = &host->bounce_buf; + dma_off = 0; + dma_blk_cnt = 1; + } else + return 1; + + dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt); + writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS); + if (r_data->flags & MMC_DATA_WRITE) + writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN, + sock->addr + SOCK_DMA_CONTROL); + else + writel((dma_blk_cnt << 8) | TIFM_DMA_EN, + sock->addr + SOCK_DMA_CONTROL); + + return 0; +} + +static unsigned int tifm_sd_op_flags(struct mmc_command *cmd) +{ + unsigned int rc = 0; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + rc |= TIFM_MMCSD_RSP_R0; + break; + case MMC_RSP_R1B: + rc |= TIFM_MMCSD_RSP_BUSY; + fallthrough; + case MMC_RSP_R1: + rc |= TIFM_MMCSD_RSP_R1; + break; + case MMC_RSP_R2: + rc |= TIFM_MMCSD_RSP_R2; + break; + case MMC_RSP_R3: + rc |= TIFM_MMCSD_RSP_R3; + break; + default: + BUG(); + } + + switch (mmc_cmd_type(cmd)) { + case MMC_CMD_BC: + rc |= TIFM_MMCSD_CMD_BC; + break; + case MMC_CMD_BCR: + rc |= TIFM_MMCSD_CMD_BCR; + break; + case MMC_CMD_AC: + rc |= TIFM_MMCSD_CMD_AC; + break; + case MMC_CMD_ADTC: + rc |= TIFM_MMCSD_CMD_ADTC; + break; + default: + BUG(); + } + return rc; +} + +static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd) +{ + struct tifm_dev *sock = host->dev; + unsigned int cmd_mask = tifm_sd_op_flags(cmd); + + if (host->open_drain) + cmd_mask |= TIFM_MMCSD_ODTO; + + if (cmd->data && (cmd->data->flags & MMC_DATA_READ)) + cmd_mask |= TIFM_MMCSD_READ; + + dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", + cmd->opcode, cmd->arg, cmd_mask); + + writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); + writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); + writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND); +} + +static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock) +{ + cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16) + | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18); + cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16) + | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10); + cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16) + | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08); + cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16) + | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00); +} + +static void tifm_sd_check_status(struct tifm_sd *host) +{ + struct tifm_dev *sock = host->dev; + 
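tifm_sd_check_status(), which starts here, acts as a completion gate: a request may only finish once every outstanding leg has raised its bit in cmd_flags. The miniature below keeps just the gating idea for the command, block and FIFO legs; the stop-command sequencing and busy handling of the real function are omitted.

#include <stdio.h>

#define CMD_READY_BIT  0x01 /* command phase finished */
#define BRS_READY_BIT  0x02 /* all blocks received/sent */
#define FIFO_READY_BIT 0x04 /* DMA/FIFO drained */

static int request_complete(unsigned int flags, int has_data)
{
	if (!(flags & CMD_READY_BIT))
		return 0;
	if (has_data && (flags & (BRS_READY_BIT | FIFO_READY_BIT))
			!= (BRS_READY_BIT | FIFO_READY_BIT))
		return 0;
	return 1;
}

int main(void)
{
	unsigned int flags = 0;

	flags |= CMD_READY_BIT;  /* EOC interrupt */
	printf("%d\n", request_complete(flags, 1)); /* 0: data outstanding */
	flags |= BRS_READY_BIT;  /* BRS interrupt */
	flags |= FIFO_READY_BIT; /* FIFO event */
	printf("%d\n", request_complete(flags, 1)); /* 1: finalize now */
	return 0;
}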
struct mmc_command *cmd = host->req->cmd; + + if (cmd->error) + goto finish_request; + + if (!(host->cmd_flags & CMD_READY)) + return; + + if (cmd->data) { + if (cmd->data->error) { + if ((host->cmd_flags & SCMD_ACTIVE) + && !(host->cmd_flags & SCMD_READY)) + return; + + goto finish_request; + } + + if (!(host->cmd_flags & BRS_READY)) + return; + + if (!(host->no_dma || (host->cmd_flags & FIFO_READY))) + return; + + if (cmd->data->flags & MMC_DATA_WRITE) { + if (host->req->stop) { + if (!(host->cmd_flags & SCMD_ACTIVE)) { + host->cmd_flags |= SCMD_ACTIVE; + writel(TIFM_MMCSD_EOFB + | readl(sock->addr + + SOCK_MMCSD_INT_ENABLE), + sock->addr + + SOCK_MMCSD_INT_ENABLE); + tifm_sd_exec(host, host->req->stop); + return; + } else { + if (!(host->cmd_flags & SCMD_READY) + || (host->cmd_flags & CARD_BUSY)) + return; + writel((~TIFM_MMCSD_EOFB) + & readl(sock->addr + + SOCK_MMCSD_INT_ENABLE), + sock->addr + + SOCK_MMCSD_INT_ENABLE); + } + } else { + if (host->cmd_flags & CARD_BUSY) + return; + writel((~TIFM_MMCSD_EOFB) + & readl(sock->addr + + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); + } + } else { + if (host->req->stop) { + if (!(host->cmd_flags & SCMD_ACTIVE)) { + host->cmd_flags |= SCMD_ACTIVE; + tifm_sd_exec(host, host->req->stop); + return; + } else { + if (!(host->cmd_flags & SCMD_READY)) + return; + } + } + } + } +finish_request: + tasklet_schedule(&host->finish_tasklet); +} + +/* Called from interrupt handler */ +static void tifm_sd_data_event(struct tifm_dev *sock) +{ + struct tifm_sd *host; + unsigned int fifo_status = 0; + struct mmc_data *r_data = NULL; + + spin_lock(&sock->lock); + host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); + fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); + dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n", + fifo_status, host->cmd_flags); + + if (host->req) { + r_data = host->req->cmd->data; + + if (r_data && (fifo_status & TIFM_FIFO_READY)) { + if (tifm_sd_set_dma_data(host, r_data)) { + host->cmd_flags |= FIFO_READY; + tifm_sd_check_status(host); + } + } + } + + writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS); + spin_unlock(&sock->lock); +} + +/* Called from interrupt handler */ +static void tifm_sd_card_event(struct tifm_dev *sock) +{ + struct tifm_sd *host; + unsigned int host_status = 0; + int cmd_error = 0; + struct mmc_command *cmd = NULL; + unsigned long flags; + + spin_lock(&sock->lock); + host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); + host_status = readl(sock->addr + SOCK_MMCSD_STATUS); + dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n", + host_status, host->cmd_flags); + + if (host->req) { + cmd = host->req->cmd; + + if (host_status & TIFM_MMCSD_ERRMASK) { + writel(host_status & TIFM_MMCSD_ERRMASK, + sock->addr + SOCK_MMCSD_STATUS); + if (host_status & TIFM_MMCSD_CTO) + cmd_error = -ETIMEDOUT; + else if (host_status & TIFM_MMCSD_CCRC) + cmd_error = -EILSEQ; + + if (cmd->data) { + if (host_status & TIFM_MMCSD_DTO) + cmd->data->error = -ETIMEDOUT; + else if (host_status & TIFM_MMCSD_DCRC) + cmd->data->error = -EILSEQ; + } + + writel(TIFM_FIFO_INT_SETALL, + sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); + writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); + + if (host->req->stop) { + if (host->cmd_flags & SCMD_ACTIVE) { + host->req->stop->error = cmd_error; + host->cmd_flags |= SCMD_READY; + } else { + cmd->error = cmd_error; + host->cmd_flags |= SCMD_ACTIVE; + tifm_sd_exec(host, host->req->stop); + goto done; + } + } else + cmd->error = cmd_error; + } else { + if 
(host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) { + if (!(host->cmd_flags & CMD_READY)) { + host->cmd_flags |= CMD_READY; + tifm_sd_fetch_resp(cmd, sock); + } else if (host->cmd_flags & SCMD_ACTIVE) { + host->cmd_flags |= SCMD_READY; + tifm_sd_fetch_resp(host->req->stop, + sock); + } + } + if (host_status & TIFM_MMCSD_BRS) + host->cmd_flags |= BRS_READY; + } + + if (host->no_dma && cmd->data) { + if (host_status & TIFM_MMCSD_AE) + writel(host_status & TIFM_MMCSD_AE, + sock->addr + SOCK_MMCSD_STATUS); + + if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF + | TIFM_MMCSD_BRS)) { + local_irq_save(flags); + tifm_sd_transfer_data(host); + local_irq_restore(flags); + host_status &= ~TIFM_MMCSD_AE; + } + } + + if (host_status & TIFM_MMCSD_EOFB) + host->cmd_flags &= ~CARD_BUSY; + else if (host_status & TIFM_MMCSD_CB) + host->cmd_flags |= CARD_BUSY; + + tifm_sd_check_status(host); + } +done: + writel(host_status, sock->addr + SOCK_MMCSD_STATUS); + spin_unlock(&sock->lock); +} + +static void tifm_sd_set_data_timeout(struct tifm_sd *host, + struct mmc_data *data) +{ + struct tifm_dev *sock = host->dev; + unsigned int data_timeout = data->timeout_clks; + + if (fixed_timeout) + return; + + data_timeout += data->timeout_ns / + ((1000000000UL / host->clk_freq) * host->clk_div); + + if (data_timeout < 0xffff) { + writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); + writel((~TIFM_MMCSD_DPE) + & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), + sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); + } else { + data_timeout = (data_timeout >> 10) + 1; + if (data_timeout > 0xffff) + data_timeout = 0; /* set to unlimited */ + writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); + writel(TIFM_MMCSD_DPE + | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG), + sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG); + } +} + +static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct tifm_sd *host = mmc_priv(mmc); + struct tifm_dev *sock = host->dev; + unsigned long flags; + struct mmc_data *r_data = mrq->cmd->data; + + spin_lock_irqsave(&sock->lock, flags); + if (host->eject) { + mrq->cmd->error = -ENOMEDIUM; + goto err_out; + } + + if (host->req) { + pr_err("%s : unfinished request detected\n", + dev_name(&sock->dev)); + mrq->cmd->error = -ETIMEDOUT; + goto err_out; + } + + host->cmd_flags = 0; + host->block_pos = 0; + host->sg_pos = 0; + + if (mrq->data && !is_power_of_2(mrq->data->blksz)) + host->no_dma = 1; + else + host->no_dma = no_dma ? 1 : 0; + + if (r_data) { + tifm_sd_set_data_timeout(host, r_data); + + if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop) + writel(TIFM_MMCSD_EOFB + | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); + + if (host->no_dma) { + writel(TIFM_MMCSD_BUFINT + | readl(sock->addr + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); + writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) + | (TIFM_MMCSD_FIFO_SIZE - 1), + sock->addr + SOCK_MMCSD_BUFFER_CONFIG); + + host->sg_len = r_data->sg_len; + } else { + sg_init_one(&host->bounce_buf, host->bounce_buf_data, + r_data->blksz); + + if(1 != tifm_map_sg(sock, &host->bounce_buf, 1, + r_data->flags & MMC_DATA_WRITE + ? PCI_DMA_TODEVICE + : PCI_DMA_FROMDEVICE)) { + pr_err("%s : scatterlist map failed\n", + dev_name(&sock->dev)); + mrq->cmd->error = -ENOMEM; + goto err_out; + } + host->sg_len = tifm_map_sg(sock, r_data->sg, + r_data->sg_len, + r_data->flags + & MMC_DATA_WRITE + ? 
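tifm_sd_set_data_timeout() above turns the core's two-part timeout (cycles plus nanoseconds) into card-clock cycles and, when the result overflows the 16-bit register, switches to the kilocycle mode selected by TIFM_MMCSD_DPE. The same arithmetic as a runnable sketch:

#include <stdio.h>

/* Convert an MMC data timeout into card-clock cycles; if it does not
 * fit in 16 bits, count in units of 1024 cycles instead (the DPE bit).
 * Returns the register value and sets *kilocycles accordingly. */
static unsigned int timeout_to_reg(unsigned int timeout_clks,
				   unsigned long timeout_ns,
				   unsigned long clk_freq,
				   unsigned int clk_div,
				   int *kilocycles)
{
	unsigned long cycles = timeout_clks +
		timeout_ns / ((1000000000UL / clk_freq) * clk_div);

	if (cycles < 0xffff) {
		*kilocycles = 0;
		return (unsigned int)cycles;
	}
	cycles = (cycles >> 10) + 1;
	*kilocycles = 1;
	return cycles > 0xffff ? 0 /* 0 == unlimited */ : (unsigned int)cycles;
}

int main(void)
{
	int kc;
	unsigned int reg = timeout_to_reg(0, 100000000UL /* 100 ms */,
					  20000000UL, 1, &kc);

	printf("reg=%u kilocycles=%d\n", reg, kc); /* reg=1954 kilocycles=1 */
	return 0;
}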
PCI_DMA_TODEVICE + : PCI_DMA_FROMDEVICE); + if (host->sg_len < 1) { + pr_err("%s : scatterlist map failed\n", + dev_name(&sock->dev)); + tifm_unmap_sg(sock, &host->bounce_buf, 1, + r_data->flags & MMC_DATA_WRITE + ? PCI_DMA_TODEVICE + : PCI_DMA_FROMDEVICE); + mrq->cmd->error = -ENOMEM; + goto err_out; + } + + writel(TIFM_FIFO_INT_SETALL, + sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); + writel(ilog2(r_data->blksz) - 2, + sock->addr + SOCK_FIFO_PAGE_SIZE); + writel(TIFM_FIFO_ENABLE, + sock->addr + SOCK_FIFO_CONTROL); + writel(TIFM_FIFO_INTMASK, + sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); + + if (r_data->flags & MMC_DATA_WRITE) + writel(TIFM_MMCSD_TXDE, + sock->addr + SOCK_MMCSD_BUFFER_CONFIG); + else + writel(TIFM_MMCSD_RXDE, + sock->addr + SOCK_MMCSD_BUFFER_CONFIG); + + tifm_sd_set_dma_data(host, r_data); + } + + writel(r_data->blocks - 1, + sock->addr + SOCK_MMCSD_NUM_BLOCKS); + writel(r_data->blksz - 1, + sock->addr + SOCK_MMCSD_BLOCK_LEN); + } + + host->req = mrq; + mod_timer(&host->timer, jiffies + host->timeout_jiffies); + writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), + sock->addr + SOCK_CONTROL); + tifm_sd_exec(host, mrq->cmd); + spin_unlock_irqrestore(&sock->lock, flags); + return; + +err_out: + spin_unlock_irqrestore(&sock->lock, flags); + mmc_request_done(mmc, mrq); +} + +static void tifm_sd_end_cmd(unsigned long data) +{ + struct tifm_sd *host = (struct tifm_sd*)data; + struct tifm_dev *sock = host->dev; + struct mmc_host *mmc = tifm_get_drvdata(sock); + struct mmc_request *mrq; + struct mmc_data *r_data = NULL; + unsigned long flags; + + spin_lock_irqsave(&sock->lock, flags); + + del_timer(&host->timer); + mrq = host->req; + host->req = NULL; + + if (!mrq) { + pr_err(" %s : no request to complete?\n", + dev_name(&sock->dev)); + spin_unlock_irqrestore(&sock->lock, flags); + return; + } + + r_data = mrq->cmd->data; + if (r_data) { + if (host->no_dma) { + writel((~TIFM_MMCSD_BUFINT) + & readl(sock->addr + SOCK_MMCSD_INT_ENABLE), + sock->addr + SOCK_MMCSD_INT_ENABLE); + } else { + tifm_unmap_sg(sock, &host->bounce_buf, 1, + (r_data->flags & MMC_DATA_WRITE) + ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + tifm_unmap_sg(sock, r_data->sg, r_data->sg_len, + (r_data->flags & MMC_DATA_WRITE) + ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + } + + r_data->bytes_xfered = r_data->blocks + - readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; + r_data->bytes_xfered *= r_data->blksz; + r_data->bytes_xfered += r_data->blksz + - readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; + } + + writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), + sock->addr + SOCK_CONTROL); + + spin_unlock_irqrestore(&sock->lock, flags); + mmc_request_done(mmc, mrq); +} + +static void tifm_sd_abort(struct timer_list *t) +{ + struct tifm_sd *host = from_timer(host, t, timer); + + pr_err("%s : card failed to respond for a long period of time " + "(%x, %x)\n", + dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags); + + tifm_eject(host->dev); +} + +static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct tifm_sd *host = mmc_priv(mmc); + struct tifm_dev *sock = host->dev; + unsigned int clk_div1, clk_div2; + unsigned long flags; + + spin_lock_irqsave(&sock->lock, flags); + + dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, " + "chip_select = %x, power_mode = %x, bus_width = %x\n", + ios->clock, ios->vdd, ios->bus_mode, ios->chip_select, + ios->power_mode, ios->bus_width); + + if (ios->bus_width == MMC_BUS_WIDTH_4) { + writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), + sock->addr + SOCK_MMCSD_CONFIG); + } else { + writel((~TIFM_MMCSD_4BBUS) + & readl(sock->addr + SOCK_MMCSD_CONFIG), + sock->addr + SOCK_MMCSD_CONFIG); + } + + if (ios->clock) { + clk_div1 = 20000000 / ios->clock; + if (!clk_div1) + clk_div1 = 1; + + clk_div2 = 24000000 / ios->clock; + if (!clk_div2) + clk_div2 = 1; + + if ((20000000 / clk_div1) > ios->clock) + clk_div1++; + if ((24000000 / clk_div2) > ios->clock) + clk_div2++; + if ((20000000 / clk_div1) > (24000000 / clk_div2)) { + host->clk_freq = 20000000; + host->clk_div = clk_div1; + writel((~TIFM_CTRL_FAST_CLK) + & readl(sock->addr + SOCK_CONTROL), + sock->addr + SOCK_CONTROL); + } else { + host->clk_freq = 24000000; + host->clk_div = clk_div2; + writel(TIFM_CTRL_FAST_CLK + | readl(sock->addr + SOCK_CONTROL), + sock->addr + SOCK_CONTROL); + } + } else { + host->clk_div = 0; + } + host->clk_div &= TIFM_MMCSD_CLKMASK; + writel(host->clk_div + | ((~TIFM_MMCSD_CLKMASK) + & readl(sock->addr + SOCK_MMCSD_CONFIG)), + sock->addr + SOCK_MMCSD_CONFIG); + + host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN); + + /* chip_select : maybe later */ + //vdd + //power is set before probe / after remove + + spin_unlock_irqrestore(&sock->lock, flags); +} + +static int tifm_sd_ro(struct mmc_host *mmc) +{ + int rc = 0; + struct tifm_sd *host = mmc_priv(mmc); + struct tifm_dev *sock = host->dev; + unsigned long flags; + + spin_lock_irqsave(&sock->lock, flags); + if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE)) + rc = 1; + spin_unlock_irqrestore(&sock->lock, flags); + return rc; +} + +static const struct mmc_host_ops tifm_sd_ops = { + .request = tifm_sd_request, + .set_ios = tifm_sd_ios, + .get_ro = tifm_sd_ro +}; + +static int tifm_sd_initialize_host(struct tifm_sd *host) +{ + int rc; + unsigned int host_status = 0; + struct tifm_dev *sock = host->dev; + + writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); + host->clk_div = 61; + host->clk_freq = 20000000; + writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); + writel(host->clk_div | TIFM_MMCSD_POWER, + sock->addr + SOCK_MMCSD_CONFIG); + + /* wait up to 0.51 sec for reset */ + for (rc = 32; rc <= 256; rc <<= 1) { + if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { + rc 
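The clock selection in tifm_sd_ios() above tries both base clocks, 20 MHz and 24 MHz, computes for each a divider that does not overshoot the requested rate, and keeps whichever combination lands higher. Isolated into plain C:

#include <stdio.h>

static unsigned int div_for(unsigned long base, unsigned long target)
{
	unsigned int div = target ? base / target : 1;

	if (!div)
		div = 1;
	if (base / div > target) /* never exceed the requested rate */
		div++;
	return div;
}

int main(void)
{
	unsigned long target = 300000; /* 300 kHz, e.g. card enumeration */
	unsigned int d20 = div_for(20000000UL, target);
	unsigned int d24 = div_for(24000000UL, target);

	if (20000000UL / d20 > 24000000UL / d24)
		printf("use 20 MHz base, div %u -> %lu Hz\n",
		       d20, 20000000UL / d20);
	else
		printf("use 24 MHz base, div %u -> %lu Hz\n",
		       d24, 24000000UL / d24);
	return 0;
}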
= 0; + break; + } + msleep(rc); + } + + if (rc) { + pr_err("%s : controller failed to reset\n", + dev_name(&sock->dev)); + return -ENODEV; + } + + writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); + writel(host->clk_div | TIFM_MMCSD_POWER, + sock->addr + SOCK_MMCSD_CONFIG); + writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); + + // command timeout fixed to 64 clocks for now + writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); + writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); + + for (rc = 16; rc <= 64; rc <<= 1) { + host_status = readl(sock->addr + SOCK_MMCSD_STATUS); + writel(host_status, sock->addr + SOCK_MMCSD_STATUS); + if (!(host_status & TIFM_MMCSD_ERRMASK) + && (host_status & TIFM_MMCSD_EOC)) { + rc = 0; + break; + } + msleep(rc); + } + + if (rc) { + pr_err("%s : card not ready - probe failed on initialization\n", + dev_name(&sock->dev)); + return -ENODEV; + } + + writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC + | TIFM_MMCSD_ERRMASK, + sock->addr + SOCK_MMCSD_INT_ENABLE); + + return 0; +} + +static int tifm_sd_probe(struct tifm_dev *sock) +{ + struct mmc_host *mmc; + struct tifm_sd *host; + int rc = -EIO; + + if (!(TIFM_SOCK_STATE_OCCUPIED + & readl(sock->addr + SOCK_PRESENT_STATE))) { + pr_warn("%s : card gone, unexpectedly\n", + dev_name(&sock->dev)); + return rc; + } + + mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + tifm_set_drvdata(sock, mmc); + host->dev = sock; + host->timeout_jiffies = msecs_to_jiffies(TIFM_MMCSD_REQ_TIMEOUT_MS); + /* + * We use a fixed request timeout of 1s, hence inform the core about it. + * A future improvement should instead respect the cmd->busy_timeout. + */ + mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS; + + tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd, + (unsigned long)host); + timer_setup(&host->timer, tifm_sd_abort, 0); + + mmc->ops = &tifm_sd_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + mmc->f_min = 20000000 / 60; + mmc->f_max = 24000000; + + mmc->max_blk_count = 2048; + mmc->max_segs = mmc->max_blk_count; + mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE); + mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size; + mmc->max_req_size = mmc->max_seg_size; + + sock->card_event = tifm_sd_card_event; + sock->data_event = tifm_sd_data_event; + rc = tifm_sd_initialize_host(host); + + if (!rc) + rc = mmc_add_host(mmc); + if (!rc) + return 0; + + mmc_free_host(mmc); + return rc; +} + +static void tifm_sd_remove(struct tifm_dev *sock) +{ + struct mmc_host *mmc = tifm_get_drvdata(sock); + struct tifm_sd *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&sock->lock, flags); + host->eject = 1; + writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); + spin_unlock_irqrestore(&sock->lock, flags); + + tasklet_kill(&host->finish_tasklet); + + spin_lock_irqsave(&sock->lock, flags); + if (host->req) { + writel(TIFM_FIFO_INT_SETALL, + sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); + writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); + host->req->cmd->error = -ENOMEDIUM; + if (host->req->stop) + host->req->stop->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } + spin_unlock_irqrestore(&sock->lock, flags); + mmc_remove_host(mmc); + dev_dbg(&sock->dev, "after remove\n"); + + mmc_free_host(mmc); +} + +#ifdef CONFIG_PM + +static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) +{ + return 0; +} + +static int tifm_sd_resume(struct tifm_dev *sock) +{ + struct 
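Both polling loops in tifm_sd_initialize_host() reuse one small trick: the loop variable is simultaneously the sleep length, doubled each pass, and the error indicator, since it is only zeroed when the condition comes true. A generic user-space rendering, with usleep standing in for msleep and ready() for the status-register read:

#include <stdio.h>
#include <unistd.h>

/* Poll cond() with exponentially growing sleeps; returns 0 on success,
 * nonzero if the condition never became true. */
static int poll_backoff(int (*cond)(void), unsigned int first_ms,
			unsigned int last_ms)
{
	unsigned int ms;

	for (ms = first_ms; ms <= last_ms; ms <<= 1) {
		if (cond())
			return 0;
		usleep(ms * 1000);
	}
	return -1;
}

static int ready(void) { return 1; } /* stand-in for the hardware check */

int main(void)
{
	printf("%d\n", poll_backoff(ready, 32, 256));
	return 0;
}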
mmc_host *mmc = tifm_get_drvdata(sock); + struct tifm_sd *host = mmc_priv(mmc); + int rc; + + rc = tifm_sd_initialize_host(host); + dev_dbg(&sock->dev, "resume initialize %d\n", rc); + + if (rc) + host->eject = 1; + + return rc; +} + +#else + +#define tifm_sd_suspend NULL +#define tifm_sd_resume NULL + +#endif /* CONFIG_PM */ + +static struct tifm_device_id tifm_sd_id_tbl[] = { + { TIFM_TYPE_SD }, { } +}; + +static struct tifm_driver tifm_sd_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE + }, + .id_table = tifm_sd_id_tbl, + .probe = tifm_sd_probe, + .remove = tifm_sd_remove, + .suspend = tifm_sd_suspend, + .resume = tifm_sd_resume +}; + +static int __init tifm_sd_init(void) +{ + return tifm_register_driver(&tifm_sd_driver); +} + +static void __exit tifm_sd_exit(void) +{ + tifm_unregister_driver(&tifm_sd_driver); +} + +MODULE_AUTHOR("Alex Dubov"); +MODULE_DESCRIPTION("TI FlashMedia SD driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl); +MODULE_VERSION(DRIVER_VERSION); + +module_init(tifm_sd_init); +module_exit(tifm_sd_exit); diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c new file mode 100644 index 000000000..d2d3b8df1 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the MMC / SD / SDIO cell found in: + * + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 + * + * Copyright (C) 2017 Renesas Electronics Corporation + * Copyright (C) 2017 Horms Solutions, Simon Horman + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + */ + +#include <linux/device.h> +#include <linux/mfd/core.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include "tmio_mmc.h" + +/* Registers specific to this variant */ +#define CTL_SDIO_REGS 0x100 +#define CTL_CLK_AND_WAIT_CTL 0x138 +#define CTL_RESET_SDIO 0x1e0 + +static void tmio_mmc_clk_start(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + usleep_range(10000, 11000); + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); + usleep_range(10000, 11000); +} + +static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); + usleep_range(10000, 11000); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + + usleep_range(10000, 11000); +} + +static void tmio_mmc_set_clock(struct tmio_mmc_host *host, + unsigned int new_clock) +{ + unsigned int divisor; + u32 clk = 0; + int clk_sel; + + if (new_clock == 0) { + tmio_mmc_clk_stop(host); + return; + } + + divisor = host->pdata->hclk / new_clock; + + /* bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2 */ + clk_sel = (divisor <= 1); + clk = clk_sel ?
0 : (roundup_pow_of_two(divisor) >> 2); + + host->pdata->set_clk_div(host->pdev, clk_sel); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); + usleep_range(10000, 11000); + + tmio_mmc_clk_start(host); +} + +static void tmio_mmc_reset(struct tmio_mmc_host *host) +{ + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); + usleep_range(10000, 11000); + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); + usleep_range(10000, 11000); +} + +#ifdef CONFIG_PM_SLEEP +static int tmio_mmc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + const struct mfd_cell *cell = mfd_get_cell(pdev); + int ret; + + ret = pm_runtime_force_suspend(dev); + + /* Tell MFD core it can disable us now.*/ + if (!ret && cell->disable) + cell->disable(pdev); + + return ret; +} + +static int tmio_mmc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + const struct mfd_cell *cell = mfd_get_cell(pdev); + int ret = 0; + + /* Tell the MFD core we are ready to be enabled */ + if (cell->resume) + ret = cell->resume(pdev); + + if (!ret) + ret = pm_runtime_force_resume(dev); + + return ret; +} +#endif + +static int tmio_mmc_probe(struct platform_device *pdev) +{ + const struct mfd_cell *cell = mfd_get_cell(pdev); + struct tmio_mmc_data *pdata; + struct tmio_mmc_host *host; + struct resource *res; + int ret = -EINVAL, irq; + + if (pdev->num_resources != 2) + goto out; + + pdata = pdev->dev.platform_data; + if (!pdata || !pdata->hclk) + goto out; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out; + } + + /* Tell the MFD core we are ready to be enabled */ + if (cell->enable) { + ret = cell->enable(pdev); + if (ret) + goto out; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -EINVAL; + goto cell_disable; + } + + host = tmio_mmc_host_alloc(pdev, pdata); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + goto cell_disable; + } + + /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ + host->bus_shift = resource_size(res) >> 10; + host->set_clock = tmio_mmc_set_clock; + host->reset = tmio_mmc_reset; + + host->mmc->f_max = pdata->hclk; + host->mmc->f_min = pdata->hclk / 512; + + ret = tmio_mmc_host_probe(host); + if (ret) + goto host_free; + + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, + IRQF_TRIGGER_FALLING, + dev_name(&pdev->dev), host); + if (ret) + goto host_remove; + + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), + (unsigned long)host->ctl, irq); + + return 0; + +host_remove: + tmio_mmc_host_remove(host); +host_free: + tmio_mmc_host_free(host); +cell_disable: + if (cell->disable) + cell->disable(pdev); +out: + return ret; +} + +static int tmio_mmc_remove(struct platform_device *pdev) +{ + const struct mfd_cell *cell = mfd_get_cell(pdev); + struct tmio_mmc_host *host = platform_get_drvdata(pdev); + + tmio_mmc_host_remove(host); + if (cell->disable) + cell->disable(pdev); + + return 0; +} + +/* ------------------- device registration ----------------------- */ + +static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume) + SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, + tmio_mmc_host_runtime_resume, NULL) +}; + +static struct platform_driver tmio_mmc_driver = { + .driver = { + .name = "tmio-mmc", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &tmio_mmc_dev_pm_ops, + }, + .probe = 
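tmio_mmc_set_clock() above maps the requested divisor onto the divider register the way its comment sketches: an all-clear register divides by 2, and bit N divides by 2^(N+2), so the divisor is rounded up to a power of two and shifted right twice. In isolation, with the external clk_sel/set_clk_div leg left out:

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Return the CLK_CTL divider bits for a host clock and target rate:
 * 0 selects 1/2; bit0 = 1/4 ... bit7 = 1/512. */
static unsigned int tmio_div_bits(unsigned int hclk, unsigned int target)
{
	unsigned int divisor = hclk / target;

	if (divisor <= 1)
		return 0; /* plain 1/2 */
	return roundup_pow2(divisor) >> 2;
}

int main(void)
{
	/* 24 MHz host clock, 400 kHz target -> divisor 60 -> 1/64 -> bit 4 */
	printf("bits=0x%02x\n", tmio_div_bits(24000000, 400000));
	return 0;
}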
tmio_mmc_probe, + .remove = tmio_mmc_remove, +}; + +module_platform_driver(tmio_mmc_driver); + +MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver"); +MODULE_AUTHOR("Ian Molton "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:tmio-mmc"); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h new file mode 100644 index 000000000..330a17267 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver for the MMC / SD / SDIO cell found in: + * + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 + * + * Copyright (C) 2015-19 Renesas Electronics Corporation + * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang + * Copyright (C) 2016-17 Horms Solutions, Simon Horman + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + */ + +#ifndef TMIO_MMC_H +#define TMIO_MMC_H + +#include +#include +#include +#include +#include +#include +#include + +#define CTL_SD_CMD 0x00 +#define CTL_ARG_REG 0x04 +#define CTL_STOP_INTERNAL_ACTION 0x08 +#define CTL_XFER_BLK_COUNT 0xa +#define CTL_RESPONSE 0x0c +/* driver merges STATUS and following STATUS2 */ +#define CTL_STATUS 0x1c +/* driver merges IRQ_MASK and following IRQ_MASK2 */ +#define CTL_IRQ_MASK 0x20 +#define CTL_SD_CARD_CLK_CTL 0x24 +#define CTL_SD_XFER_LEN 0x26 +#define CTL_SD_MEM_CARD_OPT 0x28 +#define CTL_SD_ERROR_DETAIL_STATUS 0x2c +#define CTL_SD_DATA_PORT 0x30 +#define CTL_TRANSACTION_CTL 0x34 +#define CTL_SDIO_STATUS 0x36 +#define CTL_SDIO_IRQ_MASK 0x38 +#define CTL_DMA_ENABLE 0xd8 +#define CTL_RESET_SD 0xe0 +#define CTL_VERSION 0xe2 +#define CTL_SDIF_MODE 0xe6 + +/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */ +#define TMIO_STOP_STP BIT(0) +#define TMIO_STOP_SEC BIT(8) + +/* Definitions for values the CTL_STATUS register can take */ +#define TMIO_STAT_CMDRESPEND BIT(0) +#define TMIO_STAT_DATAEND BIT(2) +#define TMIO_STAT_CARD_REMOVE BIT(3) +#define TMIO_STAT_CARD_INSERT BIT(4) +#define TMIO_STAT_SIGSTATE BIT(5) +#define TMIO_STAT_WRPROTECT BIT(7) +#define TMIO_STAT_CARD_REMOVE_A BIT(8) +#define TMIO_STAT_CARD_INSERT_A BIT(9) +#define TMIO_STAT_SIGSTATE_A BIT(10) + +/* These belong technically to CTL_STATUS2, but the driver merges them */ +#define TMIO_STAT_CMD_IDX_ERR BIT(16) +#define TMIO_STAT_CRCFAIL BIT(17) +#define TMIO_STAT_STOPBIT_ERR BIT(18) +#define TMIO_STAT_DATATIMEOUT BIT(19) +#define TMIO_STAT_RXOVERFLOW BIT(20) +#define TMIO_STAT_TXUNDERRUN BIT(21) +#define TMIO_STAT_CMDTIMEOUT BIT(22) +#define TMIO_STAT_DAT0 BIT(23) /* only known on R-Car so far */ +#define TMIO_STAT_RXRDY BIT(24) +#define TMIO_STAT_TXRQ BIT(25) +#define TMIO_STAT_ALWAYS_SET_27 BIT(27) /* only known on R-Car 2+ so far */ +#define TMIO_STAT_ILL_FUNC BIT(29) /* only when !TMIO_MMC_HAS_IDLE_WAIT */ +#define TMIO_STAT_SCLKDIVEN BIT(29) /* only when TMIO_MMC_HAS_IDLE_WAIT */ +#define TMIO_STAT_CMD_BUSY BIT(30) +#define TMIO_STAT_ILL_ACCESS BIT(31) + +/* Definitions for values the CTL_SD_CARD_CLK_CTL register can take */ +#define CLK_CTL_DIV_MASK 0xff +#define CLK_CTL_SCLKEN BIT(8) + +/* Definitions for values the CTL_SD_MEM_CARD_OPT register can take */ +#define CARD_OPT_WIDTH8 BIT(13) +#define CARD_OPT_WIDTH BIT(15) + +/* Definitions for values the CTL_SDIO_STATUS register can take */ +#define TMIO_SDIO_STAT_IOIRQ 0x0001 +#define TMIO_SDIO_STAT_EXPUB52 0x4000 +#define TMIO_SDIO_STAT_EXWT 0x8000 +#define TMIO_SDIO_MASK_ALL 0xc007 + +#define TMIO_SDIO_SETBITS_MASK 0x0006 + +/* Definitions for values the CTL_DMA_ENABLE register can take */ +#define 
DMA_ENABLE_DMASDRW BIT(1) + +/* Define some IRQ masks */ +/* This is the mask used at reset by the chip */ +#define TMIO_MASK_ALL 0x837f031d +#define TMIO_MASK_ALL_RCAR2 0x8b7f031d +#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) +#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) +#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ + TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) +#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) + +#define TMIO_MAX_BLK_SIZE 512 + +struct tmio_mmc_data; +struct tmio_mmc_host; + +struct tmio_mmc_dma_ops { + void (*start)(struct tmio_mmc_host *host, struct mmc_data *data); + void (*enable)(struct tmio_mmc_host *host, bool enable); + void (*request)(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata); + void (*release)(struct tmio_mmc_host *host); + void (*abort)(struct tmio_mmc_host *host); + void (*dataend)(struct tmio_mmc_host *host); + + /* optional */ + void (*end)(struct tmio_mmc_host *host); /* held host->lock */ +}; + +struct tmio_mmc_host { + void __iomem *ctl; + struct mmc_command *cmd; + struct mmc_request *mrq; + struct mmc_data *data; + struct mmc_host *mmc; + struct mmc_host_ops ops; + + /* Callbacks for clock / power control */ + void (*set_pwr)(struct platform_device *host, int state); + + /* pio related stuff */ + struct scatterlist *sg_ptr; + struct scatterlist *sg_orig; + unsigned int sg_len; + unsigned int sg_off; + unsigned int bus_shift; + + struct platform_device *pdev; + struct tmio_mmc_data *pdata; + + /* DMA support */ + bool dma_on; + struct dma_chan *chan_rx; + struct dma_chan *chan_tx; + struct tasklet_struct dma_issue; + struct scatterlist bounce_sg; + u8 *bounce_buf; + + /* Track lost interrupts */ + struct delayed_work delayed_reset_work; + struct work_struct done; + + /* Cache */ + u32 sdcard_irq_mask; + u32 sdio_irq_mask; + unsigned int clk_cache; + u32 sdcard_irq_setbit_mask; + u32 sdcard_irq_mask_all; + + spinlock_t lock; /* protect host private data */ + unsigned long last_req_ts; + struct mutex ios_lock; /* protect set_ios() context */ + bool native_hotplug; + bool sdio_irq_enabled; + + /* Mandatory callback */ + int (*clk_enable)(struct tmio_mmc_host *host); + void (*set_clock)(struct tmio_mmc_host *host, unsigned int clock); + + /* Optional callbacks */ + void (*clk_disable)(struct tmio_mmc_host *host); + int (*multi_io_quirk)(struct mmc_card *card, + unsigned int direction, int blk_size); + int (*write16_hook)(struct tmio_mmc_host *host, int addr); + void (*reset)(struct tmio_mmc_host *host); + bool (*check_retune)(struct tmio_mmc_host *host); + void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq); + + void (*prepare_hs400_tuning)(struct tmio_mmc_host *host); + void (*hs400_downgrade)(struct tmio_mmc_host *host); + void (*hs400_complete)(struct tmio_mmc_host *host); + + const struct tmio_mmc_dma_ops *dma_ops; +}; + +struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, + struct tmio_mmc_data *pdata); +void tmio_mmc_host_free(struct tmio_mmc_host *host); +int tmio_mmc_host_probe(struct tmio_mmc_host *host); +void tmio_mmc_host_remove(struct tmio_mmc_host *host); +void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); + +void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); +void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); +irqreturn_t tmio_mmc_irq(int irq, void *devid); + +static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, + unsigned long *flags) +{ + 
local_irq_save(*flags); + return kmap_atomic(sg_page(sg)) + sg->offset; +} + +static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, + unsigned long *flags, void *virt) +{ + kunmap_atomic(virt - sg->offset); + local_irq_restore(*flags); +} + +#ifdef CONFIG_PM +int tmio_mmc_host_runtime_suspend(struct device *dev); +int tmio_mmc_host_runtime_resume(struct device *dev); +#endif + +static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) +{ + return ioread16(host->ctl + (addr << host->bus_shift)); +} + +static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, + u16 *buf, int count) +{ + ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count); +} + +static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, + int addr) +{ + return ioread16(host->ctl + (addr << host->bus_shift)) | + ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16; +} + +static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr, + u32 *buf, int count) +{ + ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count); +} + +static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, + u16 val) +{ + /* If there is a hook and it returns non-zero then there + * is an error and the write should be skipped + */ + if (host->write16_hook && host->write16_hook(host, addr)) + return; + iowrite16(val, host->ctl + (addr << host->bus_shift)); +} + +static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, + u16 *buf, int count) +{ + iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count); +} + +static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, + int addr, u32 val) +{ + if (addr == CTL_IRQ_MASK || addr == CTL_STATUS) + val |= host->sdcard_irq_setbit_mask; + + iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift)); + iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); +} + +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) +{ + iowrite32(val, host->ctl + (addr << host->bus_shift)); +} + +static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, + const u32 *buf, int count) +{ + iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count); +} + +#endif diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c new file mode 100644 index 000000000..abf36acb2 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -0,0 +1,1281 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the MMC / SD / SDIO IP found in: + * + * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs + * + * Copyright (C) 2015-19 Renesas Electronics Corporation + * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang + * Copyright (C) 2017 Horms Solutions, Simon Horman + * Copyright (C) 2011 Guennadi Liakhovetski + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + * + * This driver draws mainly on scattered spec sheets, Reverse engineering + * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit + * support). (Further 4 bit support from a later datasheet). 
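+ *
+ * Register access goes through the sd_ctrl_*() helpers in tmio_mmc.h,
+ * which scale every offset by host->bus_shift so that variants which
+ * space their 16-bit registers further apart work unchanged. A minimal
+ * sketch of the address math (illustrative only, not extra driver
+ * code):
+ *
+ *	void __iomem *reg(struct tmio_mmc_host *host, int addr)
+ *	{
+ *		return host->ctl + (addr << host->bus_shift);
+ *	}
+ *
+ * so with bus_shift == 1 (a 0x400 byte register window) CTL_ARG_REG
+ * (0x04) lands at byte offset 0x08.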
+ * + * TODO: + * Investigate using a workqueue for PIO transfers + * Eliminate FIXMEs + * Better Power management + * Handle MMC errors better + * double buffer support + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tmio_mmc.h" + +static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + if (host->dma_ops) + host->dma_ops->start(host, data); +} + +static inline void tmio_mmc_end_dma(struct tmio_mmc_host *host) +{ + if (host->dma_ops && host->dma_ops->end) + host->dma_ops->end(host); +} + +static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) +{ + if (host->dma_ops) + host->dma_ops->enable(host, enable); +} + +static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + if (host->dma_ops) { + host->dma_ops->request(host, pdata); + } else { + host->chan_tx = NULL; + host->chan_rx = NULL; + } +} + +static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) +{ + if (host->dma_ops) + host->dma_ops->release(host); +} + +static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) +{ + if (host->dma_ops) + host->dma_ops->abort(host); +} + +static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host) +{ + if (host->dma_ops) + host->dma_ops->dataend(host); +} + +void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ + host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); + sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask); +} +EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs); + +void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ + host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ); + sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask); +} +EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs); + +static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ + sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i); +} + +static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) +{ + host->sg_len = data->sg_len; + host->sg_ptr = data->sg; + host->sg_orig = data->sg; + host->sg_off = 0; +} + +static int tmio_mmc_next_sg(struct tmio_mmc_host *host) +{ + host->sg_ptr = sg_next(host->sg_ptr); + host->sg_off = 0; + return --host->sg_len; +} + +#define CMDREQ_TIMEOUT 5000 + +static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + if (enable && !host->sdio_irq_enabled) { + u16 sdio_status; + + /* Keep device active while SDIO irq is enabled */ + pm_runtime_get_sync(mmc_dev(mmc)); + + host->sdio_irq_enabled = true; + host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ; + + /* Clear obsolete interrupts before enabling */ + sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL; + if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS) + sdio_status |= TMIO_SDIO_SETBITS_MASK; + sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status); + + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); + } else if (!enable && host->sdio_irq_enabled) { + host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); + + host->sdio_irq_enabled = false; + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); + } +} + +static void 
tmio_mmc_reset(struct tmio_mmc_host *host) +{ + /* FIXME - should we set stop clock reg here */ + sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); + usleep_range(10000, 11000); + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); + usleep_range(10000, 11000); + + if (host->reset) + host->reset(host); + + tmio_mmc_abort_dma(host); + + if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) { + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); + } +} + +static void tmio_mmc_reset_work(struct work_struct *work) +{ + struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, + delayed_reset_work.work); + struct mmc_request *mrq; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + mrq = host->mrq; + + /* + * is request already finished? Since we use a non-blocking + * cancel_delayed_work(), it can happen, that a .set_ios() call preempts + * us, so, have to check for IS_ERR(host->mrq) + */ + if (IS_ERR_OR_NULL(mrq) || + time_is_after_jiffies(host->last_req_ts + + msecs_to_jiffies(CMDREQ_TIMEOUT))) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + dev_warn(&host->pdev->dev, + "timeout waiting for hardware interrupt (CMD%u)\n", + mrq->cmd->opcode); + + if (host->data) + host->data->error = -ETIMEDOUT; + else if (host->cmd) + host->cmd->error = -ETIMEDOUT; + else + mrq->cmd->error = -ETIMEDOUT; + + host->cmd = NULL; + host->data = NULL; + + spin_unlock_irqrestore(&host->lock, flags); + + tmio_mmc_reset(host); + + /* Ready for new calls */ + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + +/* These are the bitmasks the tmio chip requires to implement the MMC response + * types. Note that R1 and R6 are the same in this scheme. */ +#define APP_CMD 0x0040 +#define RESP_NONE 0x0300 +#define RESP_R1 0x0400 +#define RESP_R1B 0x0500 +#define RESP_R2 0x0600 +#define RESP_R3 0x0700 +#define DATA_PRESENT 0x0800 +#define TRANSFER_READ 0x1000 +#define TRANSFER_MULTI 0x2000 +#define SECURITY_CMD 0x4000 +#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */ + +static int tmio_mmc_start_command(struct tmio_mmc_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = host->data; + int c = cmd->opcode; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: c |= RESP_NONE; break; + case MMC_RSP_R1: + case MMC_RSP_R1_NO_CRC: + c |= RESP_R1; break; + case MMC_RSP_R1B: c |= RESP_R1B; break; + case MMC_RSP_R2: c |= RESP_R2; break; + case MMC_RSP_R3: c |= RESP_R3; break; + default: + pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); + return -EINVAL; + } + + host->cmd = cmd; + +/* FIXME - this seems to be ok commented out but the spec suggest this bit + * should be set when issuing app commands. 
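+ * (A plausible explanation: the MMC core always issues an explicit
+ * CMD55 before an application command, so the controller never needs
+ * the hint; MMC_FLAG_ACMD is not part of the current core API either,
+ * hence the dead code below.)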
+ * if(cmd->flags & MMC_FLAG_ACMD) + * c |= APP_CMD; + */ + if (data) { + c |= DATA_PRESENT; + if (data->blocks > 1) { + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC); + c |= TRANSFER_MULTI; + + /* + * Disable auto CMD12 at IO_RW_EXTENDED and + * SET_BLOCK_COUNT when doing multiple block transfer + */ + if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) && + (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc)) + c |= NO_CMD12_ISSUE; + } + if (data->flags & MMC_DATA_READ) + c |= TRANSFER_READ; + } + + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); + + /* Fire off the command */ + sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg); + sd_ctrl_write16(host, CTL_SD_CMD, c); + + return 0; +} + +static void tmio_mmc_transfer_data(struct tmio_mmc_host *host, + unsigned short *buf, + unsigned int count) +{ + int is_read = host->data->flags & MMC_DATA_READ; + u8 *buf8; + + /* + * Transfer the data + */ + if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) { + u32 data = 0; + u32 *buf32 = (u32 *)buf; + + if (is_read) + sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32, + count >> 2); + else + sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32, + count >> 2); + + /* if count was multiple of 4 */ + if (!(count & 0x3)) + return; + + buf32 += count >> 2; + count %= 4; + + if (is_read) { + sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1); + memcpy(buf32, &data, count); + } else { + memcpy(&data, buf32, count); + sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1); + } + + return; + } + + if (is_read) + sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); + else + sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); + + /* if count was even number */ + if (!(count & 0x1)) + return; + + /* if count was odd number */ + buf8 = (u8 *)(buf + (count >> 1)); + + /* + * FIXME + * + * driver and this function are assuming that + * it is used as little endian + */ + if (is_read) + *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff; + else + sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8); +} + +/* + * This chip always returns (at least?) as much data as you ask for. + * I'm unsure what happens if you ask for less than a block. This should be + * looked into to ensure that a funny length read doesn't hose the controller. 
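+ *
+ * What the handler below does guarantee is that it never moves more
+ * than one block per interrupt: the byte count is clamped to
+ * min(bytes left in the current scatterlist entry, data->blksz)
+ * before the data port is touched.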
+ */ +static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) +{ + struct mmc_data *data = host->data; + void *sg_virt; + unsigned short *buf; + unsigned int count; + unsigned long flags; + + if (host->dma_on) { + pr_err("PIO IRQ in DMA mode!\n"); + return; + } else if (!data) { + pr_debug("Spurious PIO IRQ\n"); + return; + } + + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); + buf = (unsigned short *)(sg_virt + host->sg_off); + + count = host->sg_ptr->length - host->sg_off; + if (count > data->blksz) + count = data->blksz; + + pr_debug("count: %08x offset: %08x flags %08x\n", + count, host->sg_off, data->flags); + + /* Transfer the data */ + tmio_mmc_transfer_data(host, buf, count); + + host->sg_off += count; + + tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); + + if (host->sg_off == host->sg_ptr->length) + tmio_mmc_next_sg(host); +} + +static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) +{ + if (host->sg_ptr == &host->bounce_sg) { + unsigned long flags; + void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); + + memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); + tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); + } +} + +/* needs to be called with host->lock held */ +void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) +{ + struct mmc_data *data = host->data; + struct mmc_command *stop; + + host->data = NULL; + + if (!data) { + dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); + return; + } + stop = data->stop; + + /* FIXME - return correct transfer count on errors */ + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + pr_debug("Completed data request\n"); + + /* + * FIXME: other drivers allow an optional stop command of any given type + * which we dont do, as the chip can auto generate them. + * Perhaps we can be smarter about when to use auto CMD12 and + * only issue the auto request when we know this is the desired + * stop command, allowing fallback to the stop command the + * upper layers expect. For now, we do what works. + */ + + if (data->flags & MMC_DATA_READ) { + if (host->dma_on) + tmio_mmc_check_bounce_buffer(host); + dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", + host->mrq); + } else { + dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", + host->mrq); + } + + if (stop && !host->mrq->sbc) { + if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg) + dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n", + stop->opcode, stop->arg); + + /* fill in response from auto CMD12 */ + stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE); + + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0); + } + + schedule_work(&host->done); +} +EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq); + +static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat) +{ + struct mmc_data *data; + + spin_lock(&host->lock); + data = host->data; + + if (!data) + goto out; + + if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR || + stat & TMIO_STAT_TXUNDERRUN) + data->error = -EILSEQ; + if (host->dma_on && (data->flags & MMC_DATA_WRITE)) { + u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); + bool done = false; + + /* + * Has all data been written out yet? 
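+ * (Which status bit reports "idle" is variant specific: cores with
+ * TMIO_MMC_HAS_IDLE_WAIT signal it via TMIO_STAT_SCLKDIVEN, older
+ * cores via TMIO_STAT_CMD_BUSY being clear; see the flag test below.)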
Testing on SuperH showed, + * that in most cases the first interrupt comes already with the + * BUSY status bit clear, but on some operations, like mount or + * in the beginning of a write / sync / umount, there is one + * DATAEND interrupt with the BUSY bit set, in this cases + * waiting for one more interrupt fixes the problem. + */ + if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) { + if (status & TMIO_STAT_SCLKDIVEN) + done = true; + } else { + if (!(status & TMIO_STAT_CMD_BUSY)) + done = true; + } + + if (done) { + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); + tmio_mmc_dataend_dma(host); + } + } else if (host->dma_on && (data->flags & MMC_DATA_READ)) { + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); + tmio_mmc_dataend_dma(host); + } else { + tmio_mmc_do_data_irq(host); + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); + } +out: + spin_unlock(&host->lock); +} + +static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat) +{ + struct mmc_command *cmd = host->cmd; + int i, addr; + + spin_lock(&host->lock); + + if (!host->cmd) { + pr_debug("Spurious CMD irq\n"); + goto out; + } + + /* This controller is sicker than the PXA one. Not only do we need to + * drop the top 8 bits of the first response word, we also need to + * modify the order of the response for short response command types. + */ + + for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) + cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr); + + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); + cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); + cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); + cmd->resp[3] <<= 8; + } else if (cmd->flags & MMC_RSP_R3) { + cmd->resp[0] = cmd->resp[3]; + } + + if (stat & TMIO_STAT_CMDTIMEOUT) + cmd->error = -ETIMEDOUT; + else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) || + stat & TMIO_STAT_STOPBIT_ERR || + stat & TMIO_STAT_CMD_IDX_ERR) + cmd->error = -EILSEQ; + + /* If there is data to handle we enable data IRQs here, and + * we will ultimatley finish the request in the data_end handler. + * If theres no data or we encountered an error, finish now. 
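+ * (cmd->error == -EILSEQ is deliberately treated like success at this
+ * point, presumably so a response CRC hiccup still lets the data phase
+ * run and report a coherent data error instead of aborting midway.)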
+ */ + if (host->data && (!cmd->error || cmd->error == -EILSEQ)) { + if (host->data->flags & MMC_DATA_READ) { + if (!host->dma_on) { + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); + } else { + tmio_mmc_disable_mmc_irqs(host, + TMIO_MASK_READOP); + tasklet_schedule(&host->dma_issue); + } + } else { + if (!host->dma_on) { + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); + } else { + tmio_mmc_disable_mmc_irqs(host, + TMIO_MASK_WRITEOP); + tasklet_schedule(&host->dma_issue); + } + } + } else { + schedule_work(&host->done); + } + +out: + spin_unlock(&host->lock); +} + +static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host, + int ireg, int status) +{ + struct mmc_host *mmc = host->mmc; + + /* Card insert / remove attempts */ + if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | + TMIO_STAT_CARD_REMOVE); + if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) || + ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) && + !work_pending(&mmc->detect.work)) + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); + return true; + } + + return false; +} + +static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg, + int status) +{ + /* Command completion */ + if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND | + TMIO_STAT_CMDTIMEOUT); + tmio_mmc_cmd_irq(host, status); + return true; + } + + /* Data transfer */ + if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); + tmio_mmc_pio_irq(host); + return true; + } + + /* Data transfer completion */ + if (ireg & TMIO_STAT_DATAEND) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); + tmio_mmc_data_irq(host, status); + return true; + } + + return false; +} + +static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct tmio_mmc_data *pdata = host->pdata; + unsigned int ireg, status; + unsigned int sdio_status; + + if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) + return false; + + status = sd_ctrl_read16(host, CTL_SDIO_STATUS); + ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask; + + sdio_status = status & ~TMIO_SDIO_MASK_ALL; + if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS) + sdio_status |= TMIO_SDIO_SETBITS_MASK; + + sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status); + + if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) + mmc_signal_sdio_irq(mmc); + + return ireg; +} + +irqreturn_t tmio_mmc_irq(int irq, void *devid) +{ + struct tmio_mmc_host *host = devid; + unsigned int ireg, status; + + status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); + ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; + + /* Clear the status except the interrupt status */ + sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); + + if (__tmio_mmc_card_detect_irq(host, ireg, status)) + return IRQ_HANDLED; + if (__tmio_mmc_sdcard_irq(host, ireg, status)) + return IRQ_HANDLED; + + if (__tmio_mmc_sdio_irq(host)) + return IRQ_HANDLED; + + return IRQ_NONE; +} +EXPORT_SYMBOL_GPL(tmio_mmc_irq); + +static int tmio_mmc_start_data(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct tmio_mmc_data *pdata = host->pdata; + + pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", + data->blksz, data->blocks); + + /* Some hardware cannot perform 2 byte requests in 4/8 bit mode */ + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 || + host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) { + int 
blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; + + if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { + pr_err("%s: %d byte block unsupported in 4/8 bit mode\n", + mmc_hostname(host->mmc), data->blksz); + return -EINVAL; + } + } + + tmio_mmc_init_sg(host, data); + host->data = data; + host->dma_on = false; + + /* Set transfer length / blocksize */ + sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); + if (host->mmc->max_blk_count >= SZ_64K) + sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks); + else + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + + tmio_mmc_start_dma(host, data); + + return 0; +} + +static void tmio_process_mrq(struct tmio_mmc_host *host, + struct mmc_request *mrq) +{ + struct mmc_command *cmd; + int ret; + + if (mrq->sbc && host->cmd != mrq->sbc) { + cmd = mrq->sbc; + } else { + cmd = mrq->cmd; + if (mrq->data) { + ret = tmio_mmc_start_data(host, mrq->data); + if (ret) + goto fail; + } + } + + ret = tmio_mmc_start_command(host, cmd); + if (ret) + goto fail; + + schedule_delayed_work(&host->delayed_reset_work, + msecs_to_jiffies(CMDREQ_TIMEOUT)); + return; + +fail: + host->mrq = NULL; + mrq->cmd->error = ret; + mmc_request_done(host->mmc, mrq); +} + +/* Process requests from the MMC layer */ +static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (host->mrq) { + pr_debug("request not null\n"); + if (IS_ERR(host->mrq)) { + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EAGAIN; + mmc_request_done(mmc, mrq); + return; + } + } + + host->last_req_ts = jiffies; + wmb(); + host->mrq = mrq; + + spin_unlock_irqrestore(&host->lock, flags); + + tmio_process_mrq(host, mrq); +} + +static void tmio_mmc_finish_request(struct tmio_mmc_host *host) +{ + struct mmc_request *mrq; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + tmio_mmc_end_dma(host); + + mrq = host->mrq; + if (IS_ERR_OR_NULL(mrq)) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + /* If not SET_BLOCK_COUNT, clear old data */ + if (host->cmd != mrq->sbc) { + host->cmd = NULL; + host->data = NULL; + host->mrq = NULL; + } + + cancel_delayed_work(&host->delayed_reset_work); + + spin_unlock_irqrestore(&host->lock, flags); + + if (mrq->cmd->error || (mrq->data && mrq->data->error)) + tmio_mmc_abort_dma(host); + + /* Error means retune, but executed command was still successful */ + if (host->check_retune && host->check_retune(host)) + mmc_retune_needed(host->mmc); + + /* If SET_BLOCK_COUNT, continue with main command */ + if (host->mrq && !mrq->cmd->error) { + tmio_process_mrq(host, mrq); + return; + } + + if (host->fixup_request) + host->fixup_request(host, mrq); + + mmc_request_done(host->mmc, mrq); +} + +static void tmio_mmc_done_work(struct work_struct *work) +{ + struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, + done); + tmio_mmc_finish_request(host); +} + +static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + int ret = 0; + + /* .set_ios() is returning void, so, no chance to report an error */ + + if (host->set_pwr) + host->set_pwr(host->pdev, 1); + + if (!IS_ERR(mmc->supply.vmmc)) { + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + /* + * Attention: empiric value. With a b43 WiFi SDIO card this + * delay proved necessary for reliable card-insertion probing. + * 100us were not enough. 
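+ * (If another card ever needs more margin, the usleep_range(200, 300)
+ * below is the value to grow; the min/max form leaves the timer code
+ * room to coalesce wakeups.)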
Is this the same 140us delay, as in + * tmio_mmc_set_ios()? + */ + usleep_range(200, 300); + } + /* + * It seems, VccQ should be switched on after Vcc, this is also what the + * omap_hsmmc.c driver does. + */ + if (!IS_ERR(mmc->supply.vqmmc) && !ret) { + ret = regulator_enable(mmc->supply.vqmmc); + usleep_range(200, 300); + } + + if (ret < 0) + dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n", + ret); +} + +static void tmio_mmc_power_off(struct tmio_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + + if (!IS_ERR(mmc->supply.vqmmc)) + regulator_disable(mmc->supply.vqmmc); + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (host->set_pwr) + host->set_pwr(host->pdev, 0); +} + +static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host, + unsigned char bus_width) +{ + u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT) + & ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8); + + /* reg now applies to MMC_BUS_WIDTH_4 */ + if (bus_width == MMC_BUS_WIDTH_1) + reg |= CARD_OPT_WIDTH; + else if (bus_width == MMC_BUS_WIDTH_8) + reg |= CARD_OPT_WIDTH8; + + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg); +} + +/* Set MMC clock / power. + * Note: This controller uses a simple divider scheme therefore it cannot + * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as + * MMC wont run that fast, it has to be clocked at 12MHz which is the next + * slowest setting. + */ +static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct device *dev = &host->pdev->dev; + unsigned long flags; + + mutex_lock(&host->ios_lock); + + spin_lock_irqsave(&host->lock, flags); + if (host->mrq) { + if (IS_ERR(host->mrq)) { + dev_dbg(dev, + "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", + current->comm, task_pid_nr(current), + ios->clock, ios->power_mode); + host->mrq = ERR_PTR(-EINTR); + } else { + dev_dbg(dev, + "%s.%d: CMD%u active since %lu, now %lu!\n", + current->comm, task_pid_nr(current), + host->mrq->cmd->opcode, host->last_req_ts, + jiffies); + } + spin_unlock_irqrestore(&host->lock, flags); + + mutex_unlock(&host->ios_lock); + return; + } + + host->mrq = ERR_PTR(-EBUSY); + + spin_unlock_irqrestore(&host->lock, flags); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + tmio_mmc_power_off(host); + /* For R-Car Gen2+, we need to reset SDHI specific SCC */ + if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) + host->reset(host); + host->set_clock(host, 0); + break; + case MMC_POWER_UP: + tmio_mmc_power_on(host, ios->vdd); + host->set_clock(host, ios->clock); + tmio_mmc_set_bus_width(host, ios->bus_width); + break; + case MMC_POWER_ON: + host->set_clock(host, ios->clock); + tmio_mmc_set_bus_width(host, ios->bus_width); + break; + } + + /* Let things settle. 
delay taken from winCE driver */ + usleep_range(140, 200); + if (PTR_ERR(host->mrq) == -EINTR) + dev_dbg(&host->pdev->dev, + "%s.%d: IOS interrupted: clk %u, mode %u", + current->comm, task_pid_nr(current), + ios->clock, ios->power_mode); + host->mrq = NULL; + + host->clk_cache = ios->clock; + + mutex_unlock(&host->ios_lock); +} + +static int tmio_mmc_get_ro(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & + TMIO_STAT_WRPROTECT); +} + +static int tmio_mmc_get_cd(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & + TMIO_STAT_SIGSTATE); +} + +static int tmio_multi_io_quirk(struct mmc_card *card, + unsigned int direction, int blk_size) +{ + struct tmio_mmc_host *host = mmc_priv(card->host); + + if (host->multi_io_quirk) + return host->multi_io_quirk(card, direction, blk_size); + + return blk_size; +} + +static struct mmc_host_ops tmio_mmc_ops = { + .request = tmio_mmc_request, + .set_ios = tmio_mmc_set_ios, + .get_ro = tmio_mmc_get_ro, + .get_cd = tmio_mmc_get_cd, + .enable_sdio_irq = tmio_mmc_enable_sdio_irq, + .multi_io_quirk = tmio_multi_io_quirk, +}; + +static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) +{ + struct tmio_mmc_data *pdata = host->pdata; + struct mmc_host *mmc = host->mmc; + int err; + + err = mmc_regulator_get_supply(mmc); + if (err) + return err; + + /* use ocr_mask if no regulator */ + if (!mmc->ocr_avail) + mmc->ocr_avail = pdata->ocr_mask; + + /* + * try again. + * There is possibility that regulator has not been probed + */ + if (!mmc->ocr_avail) + return -EPROBE_DEFER; + + return 0; +} + +static void tmio_mmc_of_parse(struct platform_device *pdev, + struct mmc_host *mmc) +{ + const struct device_node *np = pdev->dev.of_node; + + if (!np) + return; + + /* + * DEPRECATED: + * For new platforms, please use "disable-wp" instead of + * "toshiba,mmc-wrprotect-disable" + */ + if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL)) + mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; +} + +struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, + struct tmio_mmc_data *pdata) +{ + struct tmio_mmc_host *host; + struct mmc_host *mmc; + void __iomem *ctl; + int ret; + + ctl = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctl)) + return ERR_CAST(ctl); + + mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); + if (!mmc) + return ERR_PTR(-ENOMEM); + + host = mmc_priv(mmc); + host->ctl = ctl; + host->mmc = mmc; + host->pdev = pdev; + host->pdata = pdata; + host->ops = tmio_mmc_ops; + mmc->ops = &host->ops; + + ret = mmc_of_parse(host->mmc); + if (ret) { + host = ERR_PTR(ret); + goto free; + } + + tmio_mmc_of_parse(pdev, mmc); + + platform_set_drvdata(pdev, host); + + return host; +free: + mmc_free_host(mmc); + + return host; +} +EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc); + +void tmio_mmc_host_free(struct tmio_mmc_host *host) +{ + mmc_free_host(host->mmc); +} +EXPORT_SYMBOL_GPL(tmio_mmc_host_free); + +int tmio_mmc_host_probe(struct tmio_mmc_host *_host) +{ + struct platform_device *pdev = _host->pdev; + struct tmio_mmc_data *pdata = _host->pdata; + struct mmc_host *mmc = _host->mmc; + int ret; + + /* + * Check the sanity of mmc->f_min to prevent host->set_clock() from + * looping forever... 
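+ *
+ * The set_clock() implementations typically walk the divider up from
+ * f_min, along the lines of this sketch (the per-variant code differs
+ * in detail):
+ *
+ *	for (clock = mmc->f_min, clk = 0x80000080;
+ *	     target >= (clock << 1); clk >>= 1)
+ *		clock <<= 1;
+ *
+ * With f_min == 0 the shift can never advance clock, so the loop
+ * would spin forever.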
+ */ + if (mmc->f_min == 0) + return -EINVAL; + + if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) + _host->write16_hook = NULL; + + _host->set_pwr = pdata->set_pwr; + + ret = tmio_mmc_init_ocr(_host); + if (ret < 0) + return ret; + + /* + * Look for a card detect GPIO, if it fails with anything + * else than a probe deferral, just live without it. + */ + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); + if (ret == -EPROBE_DEFER) + return ret; + + mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; + mmc->caps2 |= pdata->capabilities2; + mmc->max_segs = pdata->max_segs ? : 32; + mmc->max_blk_size = TMIO_MAX_BLK_SIZE; + mmc->max_blk_count = pdata->max_blk_count ? : + (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs; + mmc->max_req_size = min_t(size_t, + mmc->max_blk_size * mmc->max_blk_count, + dma_max_mapping_size(&pdev->dev)); + mmc->max_seg_size = mmc->max_req_size; + + if (mmc_can_gpio_ro(mmc)) + _host->ops.get_ro = mmc_gpio_get_ro; + + if (mmc_can_gpio_cd(mmc)) + _host->ops.get_cd = mmc_gpio_get_cd; + + _host->native_hotplug = !(mmc_can_gpio_cd(mmc) || + mmc->caps & MMC_CAP_NEEDS_POLL || + !mmc_card_is_removable(mmc)); + + /* + * On Gen2+, eMMC with NONREMOVABLE currently fails because native + * hotplug gets disabled. It seems RuntimePM related yet we need further + * research. Since we are planning a PM overhaul anyway, let's enforce + * for now the device being active by enabling native hotplug always. + */ + if (pdata->flags & TMIO_MMC_MIN_RCAR2) + _host->native_hotplug = true; + + /* + * While using internal tmio hardware logic for card detection, we need + * to ensure it stays powered for it to work. + */ + if (_host->native_hotplug) + pm_runtime_get_noresume(&pdev->dev); + + _host->sdio_irq_enabled = false; + if (pdata->flags & TMIO_MMC_SDIO_IRQ) + _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; + + _host->set_clock(_host, 0); + tmio_mmc_reset(_host); + + _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK); + if (!_host->sdcard_irq_mask_all) + _host->sdcard_irq_mask_all = TMIO_MASK_ALL; + tmio_mmc_disable_mmc_irqs(_host, _host->sdcard_irq_mask_all); + + if (_host->native_hotplug) + tmio_mmc_enable_mmc_irqs(_host, + TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); + + spin_lock_init(&_host->lock); + mutex_init(&_host->ios_lock); + + /* Init delayed work for request timeouts */ + INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); + INIT_WORK(&_host->done, tmio_mmc_done_work); + + /* See if we also get DMA */ + tmio_mmc_request_dma(_host, pdata); + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = mmc_add_host(mmc); + if (ret) + goto remove_host; + + dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + pm_runtime_put(&pdev->dev); + + return 0; + +remove_host: + pm_runtime_put_noidle(&pdev->dev); + tmio_mmc_host_remove(_host); + return ret; +} +EXPORT_SYMBOL_GPL(tmio_mmc_host_probe); + +void tmio_mmc_host_remove(struct tmio_mmc_host *host) +{ + struct platform_device *pdev = host->pdev; + struct mmc_host *mmc = host->mmc; + + pm_runtime_get_sync(&pdev->dev); + + if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); + + dev_pm_qos_hide_latency_limit(&pdev->dev); + + mmc_remove_host(mmc); + cancel_work_sync(&host->done); + cancel_delayed_work_sync(&host->delayed_reset_work); + tmio_mmc_release_dma(host); + tmio_mmc_disable_mmc_irqs(host, 
host->sdcard_irq_mask_all); + + if (host->native_hotplug) + pm_runtime_put_noidle(&pdev->dev); + + pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); +} +EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); + +#ifdef CONFIG_PM +static int tmio_mmc_clk_enable(struct tmio_mmc_host *host) +{ + if (!host->clk_enable) + return -ENOTSUPP; + + return host->clk_enable(host); +} + +static void tmio_mmc_clk_disable(struct tmio_mmc_host *host) +{ + if (host->clk_disable) + host->clk_disable(host); +} + +int tmio_mmc_host_runtime_suspend(struct device *dev) +{ + struct tmio_mmc_host *host = dev_get_drvdata(dev); + + tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all); + + if (host->clk_cache) + host->set_clock(host, 0); + + tmio_mmc_clk_disable(host); + + return 0; +} +EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend); + +int tmio_mmc_host_runtime_resume(struct device *dev) +{ + struct tmio_mmc_host *host = dev_get_drvdata(dev); + + tmio_mmc_clk_enable(host); + tmio_mmc_reset(host); + + if (host->clk_cache) + host->set_clock(host, host->clk_cache); + + if (host->native_hotplug) + tmio_mmc_enable_mmc_irqs(host, + TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); + + tmio_mmc_enable_dma(host, true); + + mmc_retune_needed(host->mmc); + + return 0; +} +EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume); +#endif + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c new file mode 100644 index 000000000..497791ffa --- /dev/null +++ b/drivers/mmc/host/toshsd.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Toshiba PCI Secure Digital Host Controller Interface driver + * + * Copyright (C) 2014 Ondrej Zary + * Copyright (C) 2007 Richard Betts, All Rights Reserved. + * + * Based on asic3_mmc.c, copyright (c) 2005 SDG Systems, LLC and, + * sdhci.c, copyright (C) 2005-2006 Pierre Ossman + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "toshsd.h" + +#define DRIVER_NAME "toshsd" + +static const struct pci_device_id pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, 0x0805) }, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +static void toshsd_init(struct toshsd_host *host) +{ + /* enable clock */ + pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, + SD_PCICFG_CLKSTOP_ENABLE_ALL); + pci_write_config_byte(host->pdev, SD_PCICFG_CARDDETECT, 2); + + /* reset */ + iowrite16(0, host->ioaddr + SD_SOFTWARERESET); /* assert */ + mdelay(2); + iowrite16(1, host->ioaddr + SD_SOFTWARERESET); /* deassert */ + mdelay(2); + + /* Clear card registers */ + iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL); + iowrite32(0, host->ioaddr + SD_CARDSTATUS); + iowrite32(0, host->ioaddr + SD_ERRORSTATUS0); + iowrite16(0, host->ioaddr + SD_STOPINTERNAL); + + /* SDIO clock? */ + iowrite16(0x100, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL); + + /* enable LED */ + pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE1, + SD_PCICFG_LED_ENABLE1_START); + pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE2, + SD_PCICFG_LED_ENABLE2_START); + + /* set interrupt masks */ + iowrite32(~(u32)(SD_CARD_RESP_END | SD_CARD_RW_END + | SD_CARD_CARD_REMOVED_0 | SD_CARD_CARD_INSERTED_0 + | SD_BUF_READ_ENABLE | SD_BUF_WRITE_ENABLE + | SD_BUF_CMD_TIMEOUT), + host->ioaddr + SD_INTMASKCARD); + + iowrite16(0x1000, host->ioaddr + SD_TRANSACTIONCTRL); +} + +/* Set MMC clock / power. 
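+ * A worked example of the divider chosen below: asking for 25 MHz
+ * with HCLK = 33 MHz settles on div = 2 (16.5 MHz), and the register
+ * value div >> 2 = 0 is exactly SD_CARDCLK_CLK_DIV_2 from toshsd.h.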
+ * Note: This controller uses a simple divider scheme therefore it cannot run + * SD/MMC cards at full speed (24/20MHz). HCLK (=33MHz PCI clock?) is too high + * and the next slowest is 16MHz (div=2). + */ +static void __toshsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct toshsd_host *host = mmc_priv(mmc); + + if (ios->clock) { + u16 clk; + int div = 1; + + while (ios->clock < HCLK / div) + div *= 2; + + clk = div >> 2; + + if (div == 1) { /* disable the divider */ + pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE, + SD_PCICFG_CLKMODE_DIV_DISABLE); + clk |= SD_CARDCLK_DIV_DISABLE; + } else + pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE, 0); + + clk |= SD_CARDCLK_ENABLE_CLOCK; + iowrite16(clk, host->ioaddr + SD_CARDCLOCKCTRL); + + mdelay(10); + } else + iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, + SD_PCICFG_PWR1_OFF); + mdelay(1); + break; + case MMC_POWER_UP: + break; + case MMC_POWER_ON: + pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, + SD_PCICFG_PWR1_33V); + pci_write_config_byte(host->pdev, SD_PCICFG_POWER2, + SD_PCICFG_PWR2_AUTO); + mdelay(20); + break; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + iowrite16(SD_CARDOPT_REQUIRED | SD_CARDOPT_DATA_RESP_TIMEOUT(14) + | SD_CARDOPT_C2_MODULE_ABSENT + | SD_CARDOPT_DATA_XFR_WIDTH_1, + host->ioaddr + SD_CARDOPTIONSETUP); + break; + case MMC_BUS_WIDTH_4: + iowrite16(SD_CARDOPT_REQUIRED | SD_CARDOPT_DATA_RESP_TIMEOUT(14) + | SD_CARDOPT_C2_MODULE_ABSENT + | SD_CARDOPT_DATA_XFR_WIDTH_4, + host->ioaddr + SD_CARDOPTIONSETUP); + break; + } +} + +static void toshsd_set_led(struct toshsd_host *host, unsigned char state) +{ + iowrite16(state, host->ioaddr + SDIO_BASE + SDIO_LEDCTRL); +} + +static void toshsd_finish_request(struct toshsd_host *host) +{ + struct mmc_request *mrq = host->mrq; + + /* Write something to end the command */ + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + + toshsd_set_led(host, 0); + mmc_request_done(host->mmc, mrq); +} + +static irqreturn_t toshsd_thread_irq(int irq, void *dev_id) +{ + struct toshsd_host *host = dev_id; + struct mmc_data *data = host->data; + struct sg_mapping_iter *sg_miter = &host->sg_miter; + unsigned short *buf; + int count; + unsigned long flags; + + if (!data) { + dev_warn(&host->pdev->dev, "Spurious Data IRQ\n"); + if (host->cmd) { + host->cmd->error = -EIO; + toshsd_finish_request(host); + } + return IRQ_NONE; + } + spin_lock_irqsave(&host->lock, flags); + + if (!sg_miter_next(sg_miter)) + goto done; + + buf = sg_miter->addr; + + /* Ensure we dont read more than one block. The chip will interrupt us + * When the next block is available. 
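+ * (The FIFO is moved 32 bits at a time (count >> 2), so this path
+ * quietly assumes the block size is a multiple of 4, which holds for
+ * standard 512-byte blocks.)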
+ */ + count = sg_miter->length; + if (count > data->blksz) + count = data->blksz; + + dev_dbg(&host->pdev->dev, "count: %08x, flags %08x\n", count, + data->flags); + + /* Transfer the data */ + if (data->flags & MMC_DATA_READ) + ioread32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2); + else + iowrite32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2); + + sg_miter->consumed = count; + sg_miter_stop(sg_miter); + +done: + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_HANDLED; +} + +static void toshsd_cmd_irq(struct toshsd_host *host) +{ + struct mmc_command *cmd = host->cmd; + u8 *buf; + u16 data; + + if (!host->cmd) { + dev_warn(&host->pdev->dev, "Spurious CMD irq\n"); + return; + } + buf = (u8 *)cmd->resp; + host->cmd = NULL; + + if (cmd->flags & MMC_RSP_PRESENT && cmd->flags & MMC_RSP_136) { + /* R2 */ + buf[12] = 0xff; + data = ioread16(host->ioaddr + SD_RESPONSE0); + buf[13] = data & 0xff; + buf[14] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE1); + buf[15] = data & 0xff; + buf[8] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE2); + buf[9] = data & 0xff; + buf[10] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE3); + buf[11] = data & 0xff; + buf[4] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE4); + buf[5] = data & 0xff; + buf[6] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE5); + buf[7] = data & 0xff; + buf[0] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE6); + buf[1] = data & 0xff; + buf[2] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE7); + buf[3] = data & 0xff; + } else if (cmd->flags & MMC_RSP_PRESENT) { + /* R1, R1B, R3, R6, R7 */ + data = ioread16(host->ioaddr + SD_RESPONSE0); + buf[0] = data & 0xff; + buf[1] = data >> 8; + data = ioread16(host->ioaddr + SD_RESPONSE1); + buf[2] = data & 0xff; + buf[3] = data >> 8; + } + + dev_dbg(&host->pdev->dev, "Command IRQ complete %d %d %x\n", + cmd->opcode, cmd->error, cmd->flags); + + /* If there is data to handle we will + * finish the request in the mmc_data_end_irq handler.*/ + if (host->data) + return; + + toshsd_finish_request(host); +} + +static void toshsd_data_end_irq(struct toshsd_host *host) +{ + struct mmc_data *data = host->data; + + host->data = NULL; + + if (!data) { + dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); + return; + } + + if (data->error == 0) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + dev_dbg(&host->pdev->dev, "Completed data request xfr=%d\n", + data->bytes_xfered); + + iowrite16(0, host->ioaddr + SD_STOPINTERNAL); + + toshsd_finish_request(host); +} + +static irqreturn_t toshsd_irq(int irq, void *dev_id) +{ + struct toshsd_host *host = dev_id; + u32 int_reg, int_mask, int_status, detail; + int error = 0, ret = IRQ_HANDLED; + + spin_lock(&host->lock); + int_status = ioread32(host->ioaddr + SD_CARDSTATUS); + int_mask = ioread32(host->ioaddr + SD_INTMASKCARD); + int_reg = int_status & ~int_mask & ~IRQ_DONT_CARE_BITS; + + dev_dbg(&host->pdev->dev, "IRQ status:%x mask:%x\n", + int_status, int_mask); + + /* nothing to do: it's not our IRQ */ + if (!int_reg) { + ret = IRQ_NONE; + goto irq_end; + } + + if (int_reg & SD_BUF_CMD_TIMEOUT) { + error = -ETIMEDOUT; + dev_dbg(&host->pdev->dev, "Timeout\n"); + } else if (int_reg & SD_BUF_CRC_ERR) { + error = -EILSEQ; + dev_err(&host->pdev->dev, "BadCRC\n"); + } else if (int_reg & (SD_BUF_ILLEGAL_ACCESS + | SD_BUF_CMD_INDEX_ERR + | SD_BUF_STOP_BIT_END_ERR + | SD_BUF_OVERFLOW + | SD_BUF_UNDERFLOW + | SD_BUF_DATA_TIMEOUT)) { + 
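+		/*
+		 * Note: the 32-bit read of SD_ERRORSTATUS0 below also picks
+		 * up the adjacent SD_ERRORSTATUS1 register in its upper
+		 * half, which is presumably why the SD_ERR1_* masks can be
+		 * tested against the same value.
+		 */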
dev_err(&host->pdev->dev, "Buffer status error: { %s%s%s%s%s%s}\n", + int_reg & SD_BUF_ILLEGAL_ACCESS ? "ILLEGAL_ACC " : "", + int_reg & SD_BUF_CMD_INDEX_ERR ? "CMD_INDEX " : "", + int_reg & SD_BUF_STOP_BIT_END_ERR ? "STOPBIT_END " : "", + int_reg & SD_BUF_OVERFLOW ? "OVERFLOW " : "", + int_reg & SD_BUF_UNDERFLOW ? "UNDERFLOW " : "", + int_reg & SD_BUF_DATA_TIMEOUT ? "DATA_TIMEOUT " : ""); + + detail = ioread32(host->ioaddr + SD_ERRORSTATUS0); + dev_err(&host->pdev->dev, "detail error status { %s%s%s%s%s%s%s%s%s%s%s%s%s}\n", + detail & SD_ERR0_RESP_CMD_ERR ? "RESP_CMD " : "", + detail & SD_ERR0_RESP_NON_CMD12_END_BIT_ERR ? "RESP_END_BIT " : "", + detail & SD_ERR0_RESP_CMD12_END_BIT_ERR ? "RESP_END_BIT " : "", + detail & SD_ERR0_READ_DATA_END_BIT_ERR ? "READ_DATA_END_BIT " : "", + detail & SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR ? "WRITE_CMD_END_BIT " : "", + detail & SD_ERR0_RESP_NON_CMD12_CRC_ERR ? "RESP_CRC " : "", + detail & SD_ERR0_RESP_CMD12_CRC_ERR ? "RESP_CRC " : "", + detail & SD_ERR0_READ_DATA_CRC_ERR ? "READ_DATA_CRC " : "", + detail & SD_ERR0_WRITE_CMD_CRC_ERR ? "WRITE_CMD_CRC " : "", + detail & SD_ERR1_NO_CMD_RESP ? "NO_CMD_RESP " : "", + detail & SD_ERR1_TIMEOUT_READ_DATA ? "READ_DATA_TIMEOUT " : "", + detail & SD_ERR1_TIMEOUT_CRS_STATUS ? "CRS_STATUS_TIMEOUT " : "", + detail & SD_ERR1_TIMEOUT_CRC_BUSY ? "CRC_BUSY_TIMEOUT " : ""); + error = -EIO; + } + + if (error) { + if (host->cmd) + host->cmd->error = error; + + if (error == -ETIMEDOUT) { + iowrite32(int_status & + ~(SD_BUF_CMD_TIMEOUT | SD_CARD_RESP_END), + host->ioaddr + SD_CARDSTATUS); + } else { + toshsd_init(host); + __toshsd_set_ios(host->mmc, &host->mmc->ios); + goto irq_end; + } + } + + /* Card insert/remove. The mmc controlling code is stateless. */ + if (int_reg & (SD_CARD_CARD_INSERTED_0 | SD_CARD_CARD_REMOVED_0)) { + iowrite32(int_status & + ~(SD_CARD_CARD_REMOVED_0 | SD_CARD_CARD_INSERTED_0), + host->ioaddr + SD_CARDSTATUS); + + if (int_reg & SD_CARD_CARD_INSERTED_0) + toshsd_init(host); + + mmc_detect_change(host->mmc, 1); + } + + /* Data transfer */ + if (int_reg & (SD_BUF_READ_ENABLE | SD_BUF_WRITE_ENABLE)) { + iowrite32(int_status & + ~(SD_BUF_WRITE_ENABLE | SD_BUF_READ_ENABLE), + host->ioaddr + SD_CARDSTATUS); + + ret = IRQ_WAKE_THREAD; + goto irq_end; + } + + /* Command completion */ + if (int_reg & SD_CARD_RESP_END) { + iowrite32(int_status & ~(SD_CARD_RESP_END), + host->ioaddr + SD_CARDSTATUS); + toshsd_cmd_irq(host); + } + + /* Data transfer completion */ + if (int_reg & SD_CARD_RW_END) { + iowrite32(int_status & ~(SD_CARD_RW_END), + host->ioaddr + SD_CARDSTATUS); + toshsd_data_end_irq(host); + } +irq_end: + spin_unlock(&host->lock); + return ret; +} + +static void toshsd_start_cmd(struct toshsd_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = host->data; + int c = cmd->opcode; + + dev_dbg(&host->pdev->dev, "Command opcode: %d\n", cmd->opcode); + + if (cmd->opcode == MMC_STOP_TRANSMISSION) { + iowrite16(SD_STOPINT_ISSUE_CMD12, + host->ioaddr + SD_STOPINTERNAL); + + cmd->resp[0] = cmd->opcode; + cmd->resp[1] = 0; + cmd->resp[2] = 0; + cmd->resp[3] = 0; + + toshsd_finish_request(host); + return; + } + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + c |= SD_CMD_RESP_TYPE_NONE; + break; + + case MMC_RSP_R1: + c |= SD_CMD_RESP_TYPE_EXT_R1; + break; + case MMC_RSP_R1B: + c |= SD_CMD_RESP_TYPE_EXT_R1B; + break; + case MMC_RSP_R2: + c |= SD_CMD_RESP_TYPE_EXT_R2; + break; + case MMC_RSP_R3: + c |= SD_CMD_RESP_TYPE_EXT_R3; + break; + + default: + dev_err(&host->pdev->dev, "Unknown response 
type %d\n", + mmc_resp_type(cmd)); + break; + } + + host->cmd = cmd; + + if (cmd->opcode == MMC_APP_CMD) + c |= SD_CMD_TYPE_ACMD; + + if (cmd->opcode == MMC_GO_IDLE_STATE) + c |= (3 << 8); /* removed from ipaq-asic3.h for some reason */ + + if (data) { + c |= SD_CMD_DATA_PRESENT; + + if (data->blocks > 1) { + iowrite16(SD_STOPINT_AUTO_ISSUE_CMD12, + host->ioaddr + SD_STOPINTERNAL); + c |= SD_CMD_MULTI_BLOCK; + } + + if (data->flags & MMC_DATA_READ) + c |= SD_CMD_TRANSFER_READ; + + /* MMC_DATA_WRITE does not require a bit to be set */ + } + + /* Send the command */ + iowrite32(cmd->arg, host->ioaddr + SD_ARG0); + iowrite16(c, host->ioaddr + SD_CMD); +} + +static void toshsd_start_data(struct toshsd_host *host, struct mmc_data *data) +{ + unsigned int flags = SG_MITER_ATOMIC; + + dev_dbg(&host->pdev->dev, "setup data transfer: blocksize %08x nr_blocks %d, offset: %08x\n", + data->blksz, data->blocks, data->sg->offset); + + host->data = data; + + if (data->flags & MMC_DATA_READ) + flags |= SG_MITER_TO_SG; + else + flags |= SG_MITER_FROM_SG; + + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); + + /* Set transfer length and blocksize */ + iowrite16(data->blocks, host->ioaddr + SD_BLOCKCOUNT); + iowrite16(data->blksz, host->ioaddr + SD_CARDXFERDATALEN); +} + +/* Process requests from the MMC layer */ +static void toshsd_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct toshsd_host *host = mmc_priv(mmc); + unsigned long flags; + + /* abort if card not present */ + if (!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0)) { + mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + return; + } + + spin_lock_irqsave(&host->lock, flags); + + WARN_ON(host->mrq != NULL); + + host->mrq = mrq; + + if (mrq->data) + toshsd_start_data(host, mrq->data); + + toshsd_set_led(host, 1); + + toshsd_start_cmd(host, mrq->cmd); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void toshsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct toshsd_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + __toshsd_set_ios(mmc, ios); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int toshsd_get_ro(struct mmc_host *mmc) +{ + struct toshsd_host *host = mmc_priv(mmc); + + /* active low */ + return !(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_WRITE_PROTECT); +} + +static int toshsd_get_cd(struct mmc_host *mmc) +{ + struct toshsd_host *host = mmc_priv(mmc); + + return !!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0); +} + +static const struct mmc_host_ops toshsd_ops = { + .request = toshsd_request, + .set_ios = toshsd_set_ios, + .get_ro = toshsd_get_ro, + .get_cd = toshsd_get_cd, +}; + + +static void toshsd_powerdown(struct toshsd_host *host) +{ + /* mask all interrupts */ + iowrite32(0xffffffff, host->ioaddr + SD_INTMASKCARD); + /* disable card clock */ + iowrite16(0x000, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL); + iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL); + /* power down card */ + pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, SD_PCICFG_PWR1_OFF); + /* disable clock */ + pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int toshsd_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct toshsd_host *host = pci_get_drvdata(pdev); + + toshsd_powerdown(host); + + pci_save_state(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 
0; +} + +static int toshsd_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct toshsd_host *host = pci_get_drvdata(pdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + + toshsd_init(host); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int ret; + struct toshsd_host *host; + struct mmc_host *mmc; + resource_size_t base; + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + mmc = mmc_alloc_host(sizeof(struct toshsd_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto err; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + + host->pdev = pdev; + pci_set_drvdata(pdev, host); + + ret = pci_request_regions(pdev, DRIVER_NAME); + if (ret) + goto free; + + host->ioaddr = pci_iomap(pdev, 0, 0); + if (!host->ioaddr) { + ret = -ENOMEM; + goto release; + } + + /* Set MMC host parameters */ + mmc->ops = &toshsd_ops; + mmc->caps = MMC_CAP_4_BIT_DATA; + mmc->ocr_avail = MMC_VDD_32_33; + + mmc->f_min = HCLK / 512; + mmc->f_max = HCLK; + + spin_lock_init(&host->lock); + + toshsd_init(host); + + ret = request_threaded_irq(pdev->irq, toshsd_irq, toshsd_thread_irq, + IRQF_SHARED, DRIVER_NAME, host); + if (ret) + goto unmap; + + ret = mmc_add_host(mmc); + if (ret) + goto free_irq; + + base = pci_resource_start(pdev, 0); + dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq); + + pm_suspend_ignore_children(&pdev->dev, 1); + + return 0; + +free_irq: + free_irq(pdev->irq, host); +unmap: + pci_iounmap(pdev, host->ioaddr); +release: + pci_release_regions(pdev); +free: + mmc_free_host(mmc); + pci_set_drvdata(pdev, NULL); +err: + pci_disable_device(pdev); + return ret; +} + +static void toshsd_remove(struct pci_dev *pdev) +{ + struct toshsd_host *host = pci_get_drvdata(pdev); + + mmc_remove_host(host->mmc); + toshsd_powerdown(host); + free_irq(pdev->irq, host); + pci_iounmap(pdev, host->ioaddr); + pci_release_regions(pdev); + mmc_free_host(host->mmc); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); +} + +static const struct dev_pm_ops toshsd_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(toshsd_pm_suspend, toshsd_pm_resume) +}; + +static struct pci_driver toshsd_driver = { + .name = DRIVER_NAME, + .id_table = pci_ids, + .probe = toshsd_probe, + .remove = toshsd_remove, + .driver.pm = &toshsd_pm_ops, +}; + +module_pci_driver(toshsd_driver); + +MODULE_AUTHOR("Ondrej Zary, Richard Betts"); +MODULE_DESCRIPTION("Toshiba PCI Secure Digital Host Controller Interface driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/toshsd.h b/drivers/mmc/host/toshsd.h new file mode 100644 index 000000000..3ba876eaa --- /dev/null +++ b/drivers/mmc/host/toshsd.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Toshiba PCI Secure Digital Host Controller Interface driver + * + * Copyright (C) 2014 Ondrej Zary + * Copyright (C) 2007 Richard Betts, All Rights Reserved. 
+ * + * Based on asic3_mmc.c Copyright (c) 2005 SDG Systems, LLC + */ + +#define HCLK 33000000 /* 33 MHz (PCI clock) */ + +#define SD_PCICFG_CLKSTOP 0x40 /* 0x1f = clock controller, 0 = stop */ +#define SD_PCICFG_GATEDCLK 0x41 /* Gated clock */ +#define SD_PCICFG_CLKMODE 0x42 /* Control clock of SD controller */ +#define SD_PCICFG_PINSTATUS 0x44 /* R/O: read status of SD pins */ +#define SD_PCICFG_POWER1 0x48 +#define SD_PCICFG_POWER2 0x49 +#define SD_PCICFG_POWER3 0x4a +#define SD_PCICFG_CARDDETECT 0x4c +#define SD_PCICFG_SLOTS 0x50 /* R/O: define support slot number */ +#define SD_PCICFG_EXTGATECLK1 0xf0 /* Could be used for gated clock */ +#define SD_PCICFG_EXTGATECLK2 0xf1 /* Could be used for gated clock */ +#define SD_PCICFG_EXTGATECLK3 0xf9 /* Bit 1: double buffer/single buffer */ +#define SD_PCICFG_SDLED_ENABLE1 0xfa +#define SD_PCICFG_SDLED_ENABLE2 0xfe + +#define SD_PCICFG_CLKMODE_DIV_DISABLE BIT(0) +#define SD_PCICFG_CLKSTOP_ENABLE_ALL 0x1f +#define SD_PCICFG_LED_ENABLE1_START 0x12 +#define SD_PCICFG_LED_ENABLE2_START 0x80 + +#define SD_PCICFG_PWR1_33V 0x08 /* Set for 3.3 volts */ +#define SD_PCICFG_PWR1_OFF 0x00 /* Turn off power */ +#define SD_PCICFG_PWR2_AUTO 0x02 + +#define SD_CMD 0x00 /* also for SDIO */ +#define SD_ARG0 0x04 /* also for SDIO */ +#define SD_ARG1 0x06 /* also for SDIO */ +#define SD_STOPINTERNAL 0x08 +#define SD_BLOCKCOUNT 0x0a /* also for SDIO */ +#define SD_RESPONSE0 0x0c /* also for SDIO */ +#define SD_RESPONSE1 0x0e /* also for SDIO */ +#define SD_RESPONSE2 0x10 /* also for SDIO */ +#define SD_RESPONSE3 0x12 /* also for SDIO */ +#define SD_RESPONSE4 0x14 /* also for SDIO */ +#define SD_RESPONSE5 0x16 /* also for SDIO */ +#define SD_RESPONSE6 0x18 /* also for SDIO */ +#define SD_RESPONSE7 0x1a /* also for SDIO */ +#define SD_CARDSTATUS 0x1c /* also for SDIO */ +#define SD_BUFFERCTRL 0x1e /* also for SDIO */ +#define SD_INTMASKCARD 0x20 /* also for SDIO */ +#define SD_INTMASKBUFFER 0x22 /* also for SDIO */ +#define SD_CARDCLOCKCTRL 0x24 +#define SD_CARDXFERDATALEN 0x26 /* also for SDIO */ +#define SD_CARDOPTIONSETUP 0x28 /* also for SDIO */ +#define SD_ERRORSTATUS0 0x2c /* also for SDIO */ +#define SD_ERRORSTATUS1 0x2e /* also for SDIO */ +#define SD_DATAPORT 0x30 /* also for SDIO */ +#define SD_TRANSACTIONCTRL 0x34 /* also for SDIO */ +#define SD_SOFTWARERESET 0xe0 /* also for SDIO */ + +/* registers above marked "also for SDIO" and all SDIO registers below can be + * accessed at SDIO_BASE + reg address */ +#define SDIO_BASE 0x100 + +#define SDIO_CARDPORTSEL 0x02 +#define SDIO_CARDINTCTRL 0x36 +#define SDIO_CLOCKNWAITCTRL 0x38 +#define SDIO_HOSTINFORMATION 0x3a +#define SDIO_ERRORCTRL 0x3c +#define SDIO_LEDCTRL 0x3e + +#define SD_TRANSCTL_SET BIT(8) + +#define SD_CARDCLK_DIV_DISABLE BIT(15) +#define SD_CARDCLK_ENABLE_CLOCK BIT(8) +#define SD_CARDCLK_CLK_DIV_512 BIT(7) +#define SD_CARDCLK_CLK_DIV_256 BIT(6) +#define SD_CARDCLK_CLK_DIV_128 BIT(5) +#define SD_CARDCLK_CLK_DIV_64 BIT(4) +#define SD_CARDCLK_CLK_DIV_32 BIT(3) +#define SD_CARDCLK_CLK_DIV_16 BIT(2) +#define SD_CARDCLK_CLK_DIV_8 BIT(1) +#define SD_CARDCLK_CLK_DIV_4 BIT(0) +#define SD_CARDCLK_CLK_DIV_2 0 + +#define SD_CARDOPT_REQUIRED 0x000e +#define SD_CARDOPT_DATA_RESP_TIMEOUT(x) (((x) & 0x0f) << 4) /* 4 bits */ +#define SD_CARDOPT_C2_MODULE_ABSENT BIT(14) +#define SD_CARDOPT_DATA_XFR_WIDTH_1 (1 << 15) +#define SD_CARDOPT_DATA_XFR_WIDTH_4 (0 << 15) + +#define SD_CMD_TYPE_CMD (0 << 6) +#define SD_CMD_TYPE_ACMD (1 << 6) +#define SD_CMD_TYPE_AUTHEN (2 << 6) +#define SD_CMD_RESP_TYPE_NONE (3 << 8) 
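To illustrate how these response-type codes (the list continues just below) combine with the data-transfer bits defined further down, here is a minimal, standalone sketch computing the SD_CMD word for a multi-block read, CMD18 with an R1 response. The starting value assumes the opcode is OR-ed with a response-type code, as the (partly elided) response-type switch at the top of toshsd_start_cmd() suggests; BIT() is redefined locally:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* encodings from this header */
#define SD_CMD_RESP_TYPE_EXT_R1	(4 << 8)	/* defined just below */
#define SD_CMD_DATA_PRESENT	BIT(11)
#define SD_CMD_TRANSFER_READ	BIT(12)
#define SD_CMD_MULTI_BLOCK	BIT(13)

int main(void)
{
	/* CMD18 (READ_MULTIPLE_BLOCK) with an R1 response, assumed
	 * starting point; then the bits toshsd_start_cmd() ORs in
	 * for a multi-block read */
	uint16_t c = 18 | SD_CMD_RESP_TYPE_EXT_R1;

	c |= SD_CMD_DATA_PRESENT | SD_CMD_TRANSFER_READ | SD_CMD_MULTI_BLOCK;
	printf("SD_CMD word: 0x%04x\n", c);	/* 0x3c12 */
	return 0;
}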
+#define SD_CMD_RESP_TYPE_EXT_R1 (4 << 8) +#define SD_CMD_RESP_TYPE_EXT_R1B (5 << 8) +#define SD_CMD_RESP_TYPE_EXT_R2 (6 << 8) +#define SD_CMD_RESP_TYPE_EXT_R3 (7 << 8) +#define SD_CMD_RESP_TYPE_EXT_R6 (4 << 8) +#define SD_CMD_RESP_TYPE_EXT_R7 (4 << 8) +#define SD_CMD_DATA_PRESENT BIT(11) +#define SD_CMD_TRANSFER_READ BIT(12) +#define SD_CMD_MULTI_BLOCK BIT(13) +#define SD_CMD_SECURITY_CMD BIT(14) + +#define SD_STOPINT_ISSUE_CMD12 BIT(0) +#define SD_STOPINT_AUTO_ISSUE_CMD12 BIT(8) + +#define SD_CARD_RESP_END BIT(0) +#define SD_CARD_RW_END BIT(2) +#define SD_CARD_CARD_REMOVED_0 BIT(3) +#define SD_CARD_CARD_INSERTED_0 BIT(4) +#define SD_CARD_PRESENT_0 BIT(5) +#define SD_CARD_UNK6 BIT(6) +#define SD_CARD_WRITE_PROTECT BIT(7) +#define SD_CARD_CARD_REMOVED_3 BIT(8) +#define SD_CARD_CARD_INSERTED_3 BIT(9) +#define SD_CARD_PRESENT_3 BIT(10) + +#define SD_BUF_CMD_INDEX_ERR BIT(16) +#define SD_BUF_CRC_ERR BIT(17) +#define SD_BUF_STOP_BIT_END_ERR BIT(18) +#define SD_BUF_DATA_TIMEOUT BIT(19) +#define SD_BUF_OVERFLOW BIT(20) +#define SD_BUF_UNDERFLOW BIT(21) +#define SD_BUF_CMD_TIMEOUT BIT(22) +#define SD_BUF_UNK7 BIT(23) +#define SD_BUF_READ_ENABLE BIT(24) +#define SD_BUF_WRITE_ENABLE BIT(25) +#define SD_BUF_ILLEGAL_FUNCTION BIT(29) +#define SD_BUF_CMD_BUSY BIT(30) +#define SD_BUF_ILLEGAL_ACCESS BIT(31) + +#define SD_ERR0_RESP_CMD_ERR BIT(0) +#define SD_ERR0_RESP_NON_CMD12_END_BIT_ERR BIT(2) +#define SD_ERR0_RESP_CMD12_END_BIT_ERR BIT(3) +#define SD_ERR0_READ_DATA_END_BIT_ERR BIT(4) +#define SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR BIT(5) +#define SD_ERR0_RESP_NON_CMD12_CRC_ERR BIT(8) +#define SD_ERR0_RESP_CMD12_CRC_ERR BIT(9) +#define SD_ERR0_READ_DATA_CRC_ERR BIT(10) +#define SD_ERR0_WRITE_CMD_CRC_ERR BIT(11) + +#define SD_ERR1_NO_CMD_RESP BIT(16) +#define SD_ERR1_TIMEOUT_READ_DATA BIT(20) +#define SD_ERR1_TIMEOUT_CRS_STATUS BIT(21) +#define SD_ERR1_TIMEOUT_CRC_BUSY BIT(22) + +#define IRQ_DONT_CARE_BITS (SD_CARD_PRESENT_3 \ + | SD_CARD_WRITE_PROTECT \ + | SD_CARD_UNK6 \ + | SD_CARD_PRESENT_0 \ + | SD_BUF_UNK7 \ + | SD_BUF_CMD_BUSY) + +struct toshsd_host { + struct pci_dev *pdev; + struct mmc_host *mmc; + + spinlock_t lock; + + struct mmc_request *mrq;/* Current request */ + struct mmc_command *cmd;/* Current command */ + struct mmc_data *data; /* Current data request */ + + struct sg_mapping_iter sg_miter; /* for PIO */ + + void __iomem *ioaddr; /* mapped address */ +}; diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c new file mode 100644 index 000000000..196e94bf3 --- /dev/null +++ b/drivers/mmc/host/uniphier-sd.c @@ -0,0 +1,699 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2017-2018 Socionext Inc. 
+// Author: Masahiro Yamada
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "tmio_mmc.h"
+
+#define UNIPHIER_SD_CLK_CTL_DIV1024	BIT(16)
+#define UNIPHIER_SD_CLK_CTL_DIV1	BIT(10)
+#define UNIPHIER_SD_CLKCTL_OFFEN	BIT(9)	// auto SDCLK stop
+#define UNIPHIER_SD_CC_EXT_MODE		0x1b0
+#define UNIPHIER_SD_CC_EXT_MODE_DMA	BIT(1)
+#define UNIPHIER_SD_HOST_MODE		0x1c8
+#define UNIPHIER_SD_VOLT		0x1e4
+#define UNIPHIER_SD_VOLT_MASK		GENMASK(1, 0)
+#define UNIPHIER_SD_VOLT_OFF		0
+#define UNIPHIER_SD_VOLT_330		1	// 3.3V signal
+#define UNIPHIER_SD_VOLT_180		2	// 1.8V signal
+#define UNIPHIER_SD_DMA_MODE		0x410
+#define UNIPHIER_SD_DMA_MODE_DIR_MASK	GENMASK(17, 16)
+#define UNIPHIER_SD_DMA_MODE_DIR_TO_DEV	0
+#define UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV 1
+#define UNIPHIER_SD_DMA_MODE_WIDTH_MASK	GENMASK(5, 4)
+#define UNIPHIER_SD_DMA_MODE_WIDTH_8	0
+#define UNIPHIER_SD_DMA_MODE_WIDTH_16	1
+#define UNIPHIER_SD_DMA_MODE_WIDTH_32	2
+#define UNIPHIER_SD_DMA_MODE_WIDTH_64	3
+#define UNIPHIER_SD_DMA_MODE_ADDR_INC	BIT(0)	// 1: inc, 0: fixed
+#define UNIPHIER_SD_DMA_CTL		0x414
+#define UNIPHIER_SD_DMA_CTL_START	BIT(0)	// start DMA (auto cleared)
+#define UNIPHIER_SD_DMA_RST		0x418
+#define UNIPHIER_SD_DMA_RST_CH1		BIT(9)
+#define UNIPHIER_SD_DMA_RST_CH0		BIT(8)
+#define UNIPHIER_SD_DMA_ADDR_L		0x440
+#define UNIPHIER_SD_DMA_ADDR_H		0x444
+
+/*
+ * The IP is extended to support various features: built-in DMA engine,
+ * 1/1024 divisor, etc.
+ */
+#define UNIPHIER_SD_CAP_EXTENDED_IP	BIT(0)
+/* RX channel of the built-in DMA controller is broken (Pro5) */
+#define UNIPHIER_SD_CAP_BROKEN_DMA_RX	BIT(1)
+
+struct uniphier_sd_priv {
+	struct tmio_mmc_data tmio_data;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pinstate_uhs;
+	struct clk *clk;
+	struct reset_control *rst;
+	struct reset_control *rst_br;
+	struct reset_control *rst_hw;
+	struct dma_chan *chan;
+	enum dma_data_direction dma_dir;
+	unsigned long clk_rate;
+	unsigned long caps;
+};
+
+static void *uniphier_sd_priv(struct tmio_mmc_host *host)
+{
+	return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
+}
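uniphier_sd_priv() recovers the enclosing private structure from the embedded tmio_mmc_data by plain pointer arithmetic; no back-pointer is stored. A standalone sketch of the same container_of() idiom, with stub types standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tmio_mmc_data { int flags; };	/* stub */

struct uniphier_sd_priv {
	struct tmio_mmc_data tmio_data;	/* embedded member */
	unsigned long caps;
};

int main(void)
{
	struct uniphier_sd_priv priv = { .caps = 0x1 };
	struct tmio_mmc_data *pdata = &priv.tmio_data;

	/* recover the enclosing object, as uniphier_sd_priv() does
	 * with host->pdata: subtract the member's offset */
	struct uniphier_sd_priv *p =
		container_of(pdata, struct uniphier_sd_priv, tmio_data);

	printf("caps = 0x%lx\n", p->caps);
	return 0;
}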
+static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
+{
+	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
+}
+
+/* external DMA engine */
+static void uniphier_sd_external_dma_issue(unsigned long arg)
+{
+	struct tmio_mmc_host *host = (void *)arg;
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	uniphier_sd_dma_endisable(host, 1);
+	dma_async_issue_pending(priv->chan);
+}
+
+static void uniphier_sd_external_dma_callback(void *param,
+					const struct dmaengine_result *result)
+{
+	struct tmio_mmc_host *host = param;
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	unsigned long flags;
+
+	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
+		     priv->dma_dir);
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (result->result == DMA_TRANS_NOERROR) {
+		/*
+		 * When the external DMA engine is enabled, strangely enough,
+		 * the DATAEND flag can be asserted even if the DMA engine has
+		 * not been kicked yet. Enable the TMIO_STAT_DATAEND irq only
+		 * after we make sure the DMA engine finishes the transfer,
+		 * hence, in this callback.
+		 */
+		tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+	} else {
+		host->data->error = -ETIMEDOUT;
+		tmio_mmc_do_data_irq(host);
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
+					   struct mmc_data *data)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	enum dma_transfer_direction dma_tx_dir;
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+	int sg_len;
+
+	if (!priv->chan)
+		goto force_pio;
+
+	if (data->flags & MMC_DATA_READ) {
+		priv->dma_dir = DMA_FROM_DEVICE;
+		dma_tx_dir = DMA_DEV_TO_MEM;
+	} else {
+		priv->dma_dir = DMA_TO_DEVICE;
+		dma_tx_dir = DMA_MEM_TO_DEV;
+	}
+
+	sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
+			    priv->dma_dir);
+	if (sg_len == 0)
+		goto force_pio;
+
+	desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
+				       dma_tx_dir, DMA_CTRL_ACK);
+	if (!desc)
+		goto unmap_sg;
+
+	desc->callback_result = uniphier_sd_external_dma_callback;
+	desc->callback_param = host;
+
+	cookie = dmaengine_submit(desc);
+	if (cookie < 0)
+		goto unmap_sg;
+
+	host->dma_on = true;
+
+	return;
+
+unmap_sg:
+	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
+		     priv->dma_dir);
+force_pio:
+	uniphier_sd_dma_endisable(host, 0);
+}
+
+static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
+					    bool enable)
+{
+}
+
+static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
+					     struct tmio_mmc_data *pdata)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	struct dma_chan *chan;
+
+	chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
+	if (IS_ERR(chan)) {
+		dev_warn(mmc_dev(host->mmc),
+			 "failed to request DMA channel. falling back to PIO\n");
+		return;	/* just use PIO even for -EPROBE_DEFER */
+	}
+
+	/* this driver uses a single channel for both RX and TX */
+	priv->chan = chan;
+	host->chan_rx = chan;
+	host->chan_tx = chan;
+
+	tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
+		     (unsigned long)host);
+}
+
+static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	if (priv->chan)
+		dma_release_channel(priv->chan);
+}
+
+static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	uniphier_sd_dma_endisable(host, 0);
+
+	if (priv->chan)
+		dmaengine_terminate_sync(priv->chan);
+}
+
+static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
+{
+	uniphier_sd_dma_endisable(host, 0);
+
+	tmio_mmc_do_data_irq(host);
+}
+
+static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
+	.start = uniphier_sd_external_dma_start,
+	.enable = uniphier_sd_external_dma_enable,
+	.request = uniphier_sd_external_dma_request,
+	.release = uniphier_sd_external_dma_release,
+	.abort = uniphier_sd_external_dma_abort,
+	.dataend = uniphier_sd_external_dma_dataend,
+};
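The external-DMA path above follows the usual dmaengine slave sequence: map the scatterlist, prepare a slave descriptor, attach a completion callback, submit, then issue from the tasklet. A condensed, illustrative kernel-style sketch of that sequence for the device-to-memory direction; the helper name is invented, and error handling collapses to a PIO fallback just as in uniphier_sd_external_dma_start():

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative only: submit one DEV_TO_MEM slave transfer. A negative
 * return tells the caller to fall back to PIO. */
static int example_dma_submit(struct dma_chan *chan, struct device *dev,
			      struct scatterlist *sgl, unsigned int sg_len,
			      dma_async_tx_callback done, void *param)
{
	struct dma_async_tx_descriptor *desc;
	int mapped;

	mapped = dma_map_sg(dev, sgl, sg_len, DMA_FROM_DEVICE);
	if (!mapped)
		return -EIO;

	desc = dmaengine_prep_slave_sg(chan, sgl, mapped,
				       DMA_DEV_TO_MEM, DMA_CTRL_ACK);
	if (!desc)
		goto unmap;

	desc->callback = done;		/* runs when the transfer completes */
	desc->callback_param = param;

	if (dmaengine_submit(desc) < 0)
		goto unmap;

	dma_async_issue_pending(chan);	/* actually start the engine */
	return 0;

unmap:
	dma_unmap_sg(dev, sgl, sg_len, DMA_FROM_DEVICE);
	return -EIO;
}

Note that this driver defers dma_async_issue_pending() to the dma_issue tasklet rather than calling it at submit time, so the hardware is only kicked once the MMC core is ready for data.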
+static void uniphier_sd_internal_dma_issue(unsigned long arg)
+{
+	struct tmio_mmc_host *host = (void *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	uniphier_sd_dma_endisable(host, 1);
+	writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
+}
+
+static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
+					   struct mmc_data *data)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	struct scatterlist *sg = host->sg_ptr;
+	dma_addr_t dma_addr;
+	unsigned int dma_mode_dir;
+	u32 dma_mode;
+	int sg_len;
+
+	if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
+		goto force_pio;
+
+	if (WARN_ON(host->sg_len != 1))
+		goto force_pio;
+
+	if (!IS_ALIGNED(sg->offset, 8))
+		goto force_pio;
+
+	if (data->flags & MMC_DATA_READ) {
+		priv->dma_dir = DMA_FROM_DEVICE;
+		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
+	} else {
+		priv->dma_dir = DMA_TO_DEVICE;
+		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
+	}
+
+	sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
+	if (sg_len == 0)
+		goto force_pio;
+
+	dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
+	dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
+			       UNIPHIER_SD_DMA_MODE_WIDTH_64);
+	dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;
+
+	writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);
+
+	dma_addr = sg_dma_address(data->sg);
+	writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
+	writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);
+
+	host->dma_on = true;
+
+	return;
+force_pio:
+	uniphier_sd_dma_endisable(host, 0);
+}
+
+static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
+					    bool enable)
+{
+}
+
+static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
+					     struct tmio_mmc_data *pdata)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	/*
+	 * Due to a hardware bug, Pro5 cannot use DMA for RX.
+	 * We can still use DMA for TX, but PIO for RX.
+	 */
+	if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
+		host->chan_rx = (void *)0xdeadbeaf;
+
+	host->chan_tx = (void *)0xdeadbeaf;
+
+	tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
+		     (unsigned long)host);
+}
+
+static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
+{
+	/* Setting these to NULL marks DMA as disabled in both directions */
+	host->chan_rx = NULL;
+	host->chan_tx = NULL;
+}
+
+static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
+{
+	u32 tmp;
+
+	uniphier_sd_dma_endisable(host, 0);
+
+	tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
+	tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
+	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
+
+	tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
+	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
+}
+
+static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	uniphier_sd_dma_endisable(host, 0);
+	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);
+
+	tmio_mmc_do_data_irq(host);
+}
+
+static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
+	.start = uniphier_sd_internal_dma_start,
+	.enable = uniphier_sd_internal_dma_enable,
+	.request = uniphier_sd_internal_dma_request,
+	.release = uniphier_sd_internal_dma_release,
+	.abort = uniphier_sd_internal_dma_abort,
+	.dataend = uniphier_sd_internal_dma_dataend,
+};
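uniphier_sd_internal_dma_start() builds the UNIPHIER_SD_DMA_MODE word with FIELD_PREP() over the GENMASK()-defined fields from the top of the file. A standalone cross-check of the resulting register value, with minimal userspace stand-ins for the two kernel macros (GCC/Clang assumed for __builtin_ctz()):

#include <stdint.h>
#include <stdio.h>

/* 32-bit userspace stand-ins for the kernel's GENMASK()/FIELD_PREP() */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

#define DMA_MODE_DIR_MASK	GENMASK(17, 16)
#define DMA_MODE_WIDTH_MASK	GENMASK(5, 4)
#define DMA_MODE_ADDR_INC	(1u << 0)

int main(void)
{
	/* 64-bit-wide transfer from the device, incrementing address,
	 * as composed by uniphier_sd_internal_dma_start() for a read */
	uint32_t mode = FIELD_PREP(DMA_MODE_DIR_MASK, 1)	/* DIR_FROM_DEV */
		      | FIELD_PREP(DMA_MODE_WIDTH_MASK, 3)	/* WIDTH_64 */
		      | DMA_MODE_ADDR_INC;

	printf("DMA_MODE = 0x%08x\n", mode);	/* 0x00010031 */
	return 0;
}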
+static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	struct mmc_host *mmc = host->mmc;
+	int ret;
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	ret = clk_set_rate(priv->clk, ULONG_MAX);
+	if (ret)
+		goto disable_clk;
+
+	priv->clk_rate = clk_get_rate(priv->clk);
+
+	/* The max-frequency property, if set, takes precedence over this. */
+	if (!mmc->f_max)
+		mmc->f_max = priv->clk_rate;
+
+	/*
+	 * 1/512 is the finest divisor in the original IP. Newer versions
+	 * also support a 1/1024 divisor (a UniPhier-specific extension).
+	 */
+	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+		mmc->f_min = priv->clk_rate / 1024;
+	else
+		mmc->f_min = priv->clk_rate / 512;
+
+	ret = reset_control_deassert(priv->rst);
+	if (ret)
+		goto disable_clk;
+
+	ret = reset_control_deassert(priv->rst_br);
+	if (ret)
+		goto assert_rst;
+
+	return 0;
+
+assert_rst:
+	reset_control_assert(priv->rst);
+disable_clk:
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	reset_control_assert(priv->rst_br);
+	reset_control_assert(priv->rst);
+	clk_disable_unprepare(priv->clk);
+}
+
+static void uniphier_sd_hw_reset(struct mmc_host *mmc)
+{
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+	reset_control_assert(priv->rst_hw);
+	/* For eMMC, minimum is 1us but give it 9us for good measure */
+	udelay(9);
+	reset_control_deassert(priv->rst_hw);
+	/* For eMMC, minimum is 200us but give it 300us for good measure */
+	usleep_range(300, 1000);
+}
+
+static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
+				  unsigned int clock)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	unsigned long divisor;
+	u32 tmp;
+
+	tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+
+	/* stop the clock before changing its rate to avoid glitches */
+	tmp &= ~CLK_CTL_SCLKEN;
+	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+
+	if (clock == 0)
+		return;
+
+	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
+	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
+	tmp &= ~CLK_CTL_DIV_MASK;
+
+	divisor = priv->clk_rate / clock;
+
+	/*
+	 * In the original IP, bit[7:0] represents the divisor.
+	 * bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2
+	 *
+	 * The IP does not define a way to achieve 1/1. For UniPhier variants,
+	 * bit10 is used for 1/1. Newer versions of UniPhier variants use
+	 * bit16 for 1/1024.
+	 */
+	if (divisor <= 1)
+		tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
+	else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
+		tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
+	else
+		tmp |= roundup_pow_of_two(divisor) >> 2;
+
+	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+
+	tmp |= CLK_CTL_SCLKEN;
+	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+}
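A worked example for the divisor selection in uniphier_sd_set_clock() above: assuming, say, a 200 MHz source clock and a 25 MHz target, divisor = 8, and roundup_pow_of_two(8) >> 2 = 0x2, i.e. bit1 = 1/8 in the bit[7:0] encoding described in the comment. A standalone sketch, with a local stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>

/* stand-in for the kernel's roundup_pow_of_two() */
static unsigned long rup_pow2(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long clk_rate = 200000000, clock = 25000000;	/* hypothetical */
	unsigned long divisor = clk_rate / clock;		/* 8 */
	unsigned long bits = rup_pow2(divisor) >> 2;		/* 0x2: bit1 = 1/8 */

	printf("CLK_CTL divisor bits: 0x%lx\n", bits);
	return 0;
}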
+static void uniphier_sd_host_init(struct tmio_mmc_host *host)
+{
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	u32 val;
+
+	/*
+	 * Connected to 32bit AXI.
+	 * This register holds settings for SoC-specific internal bus
+	 * connection. Worse, the register spec was changed, breaking
+	 * backward compatibility. Write an appropriate value depending
+	 * on a flag associated with a compatible string.
+	 */
+	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+		val = 0x00000101;
+	else
+		val = 0x00000000;
+
+	writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);
+
+	val = 0;
+	/*
+	 * If supported, the controller can automatically
+	 * enable/disable the clock line to the card.
+	 */
+	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+		val |= UNIPHIER_SD_CLKCTL_OFFEN;
+
+	writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+}
+
+static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
+						   struct mmc_ios *ios)
+{
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+	struct pinctrl_state *pinstate = NULL;
+	u32 val, tmp;
+
+	switch (ios->signal_voltage) {
+	case MMC_SIGNAL_VOLTAGE_330:
+		val = UNIPHIER_SD_VOLT_330;
+		break;
+	case MMC_SIGNAL_VOLTAGE_180:
+		val = UNIPHIER_SD_VOLT_180;
+		pinstate = priv->pinstate_uhs;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
+	tmp &= ~UNIPHIER_SD_VOLT_MASK;
+	tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
+	writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
+
+	if (pinstate)
+		pinctrl_select_state(priv->pinctrl, pinstate);
+	else
+		pinctrl_select_default_state(mmc_dev(mmc));
+
+	return 0;
+}
+
+static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
+				struct uniphier_sd_priv *priv)
+{
+	priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
+	if (IS_ERR(priv->pinctrl))
+		return PTR_ERR(priv->pinctrl);
+
+	priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
+	if (IS_ERR(priv->pinstate_uhs))
+		return PTR_ERR(priv->pinstate_uhs);
+
+	host->ops.start_signal_voltage_switch =
+		uniphier_sd_start_signal_voltage_switch;
+
+	return 0;
+}
+
+static int uniphier_sd_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_sd_priv *priv;
+	struct tmio_mmc_data *tmio_data;
+	struct tmio_mmc_host *host;
+	int irq, ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->caps = (unsigned long)of_device_get_match_data(dev);
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to get clock\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	priv->rst = devm_reset_control_get_shared(dev, "host");
+	if (IS_ERR(priv->rst)) {
+		dev_err(dev, "failed to get host reset\n");
+		return PTR_ERR(priv->rst);
+	}
+
+	/* the old version has one more reset */
+	if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
+		priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
+		if (IS_ERR(priv->rst_br)) {
+			dev_err(dev, "failed to get bridge reset\n");
+			return PTR_ERR(priv->rst_br);
+		}
+	}
+
+	tmio_data = &priv->tmio_data;
+	tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;
+
+	host = tmio_mmc_host_alloc(pdev, tmio_data);
+	if (IS_ERR(host))
+		return PTR_ERR(host);
+
+	if (host->mmc->caps & MMC_CAP_HW_RESET) {
+		priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
+		if (IS_ERR(priv->rst_hw)) {
+			dev_err(dev, "failed to get hw reset\n");
+			ret = PTR_ERR(priv->rst_hw);
+			goto free_host;
+		}
+		host->ops.hw_reset = uniphier_sd_hw_reset;
+	}
+
+	if (host->mmc->caps & MMC_CAP_UHS) {
+		ret = uniphier_sd_uhs_init(host, priv);
+		if (ret) {
+			dev_warn(dev,
+				 "failed to setup UHS (error %d).
Disabling UHS.", + ret); + host->mmc->caps &= ~MMC_CAP_UHS; + } + } + + if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) + host->dma_ops = &uniphier_sd_internal_dma_ops; + else + host->dma_ops = &uniphier_sd_external_dma_ops; + + host->bus_shift = 1; + host->clk_enable = uniphier_sd_clk_enable; + host->clk_disable = uniphier_sd_clk_disable; + host->set_clock = uniphier_sd_set_clock; + + ret = uniphier_sd_clk_enable(host); + if (ret) + goto free_host; + + uniphier_sd_host_init(host); + + tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34; + if (host->mmc->caps & MMC_CAP_UHS) + tmio_data->ocr_mask |= MMC_VDD_165_195; + + tmio_data->max_segs = 1; + tmio_data->max_blk_count = U16_MAX; + + ret = tmio_mmc_host_probe(host); + if (ret) + goto disable_clk; + + ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, + dev_name(dev), host); + if (ret) + goto remove_host; + + return 0; + +remove_host: + tmio_mmc_host_remove(host); +disable_clk: + uniphier_sd_clk_disable(host); +free_host: + tmio_mmc_host_free(host); + + return ret; +} + +static int uniphier_sd_remove(struct platform_device *pdev) +{ + struct tmio_mmc_host *host = platform_get_drvdata(pdev); + + tmio_mmc_host_remove(host); + uniphier_sd_clk_disable(host); + tmio_mmc_host_free(host); + + return 0; +} + +static const struct of_device_id uniphier_sd_match[] = { + { + .compatible = "socionext,uniphier-sd-v2.91", + }, + { + .compatible = "socionext,uniphier-sd-v3.1", + .data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP | + UNIPHIER_SD_CAP_BROKEN_DMA_RX), + }, + { + .compatible = "socionext,uniphier-sd-v3.1.1", + .data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_sd_match); + +static struct platform_driver uniphier_sd_driver = { + .probe = uniphier_sd_probe, + .remove = uniphier_sd_remove, + .driver = { + .name = "uniphier-sd", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = uniphier_sd_match, + }, +}; +module_platform_driver(uniphier_sd_driver); + +MODULE_AUTHOR("Masahiro Yamada "); +MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c new file mode 100644 index 000000000..4f22ecef9 --- /dev/null +++ b/drivers/mmc/host/usdhi6rol0.c @@ -0,0 +1,1908 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. 
+ * Author: Guennadi Liakhovetski
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/virtio.h>
+#include <linux/workqueue.h>
+
+#define USDHI6_SD_CMD		0x0000
+#define USDHI6_SD_PORT_SEL	0x0004
+#define USDHI6_SD_ARG		0x0008
+#define USDHI6_SD_STOP		0x0010
+#define USDHI6_SD_SECCNT	0x0014
+#define USDHI6_SD_RSP10		0x0018
+#define USDHI6_SD_RSP32		0x0020
+#define USDHI6_SD_RSP54		0x0028
+#define USDHI6_SD_RSP76		0x0030
+#define USDHI6_SD_INFO1		0x0038
+#define USDHI6_SD_INFO2		0x003c
+#define USDHI6_SD_INFO1_MASK	0x0040
+#define USDHI6_SD_INFO2_MASK	0x0044
+#define USDHI6_SD_CLK_CTRL	0x0048
+#define USDHI6_SD_SIZE		0x004c
+#define USDHI6_SD_OPTION	0x0050
+#define USDHI6_SD_ERR_STS1	0x0058
+#define USDHI6_SD_ERR_STS2	0x005c
+#define USDHI6_SD_BUF0		0x0060
+#define USDHI6_SDIO_MODE	0x0068
+#define USDHI6_SDIO_INFO1	0x006c
+#define USDHI6_SDIO_INFO1_MASK	0x0070
+#define USDHI6_CC_EXT_MODE	0x01b0
+#define USDHI6_SOFT_RST		0x01c0
+#define USDHI6_VERSION		0x01c4
+#define USDHI6_HOST_MODE	0x01c8
+#define USDHI6_SDIF_MODE	0x01cc
+
+#define USDHI6_SD_CMD_APP		0x0040
+#define USDHI6_SD_CMD_MODE_RSP_AUTO	0x0000
+#define USDHI6_SD_CMD_MODE_RSP_NONE	0x0300
+#define USDHI6_SD_CMD_MODE_RSP_R1	0x0400	/* Also R5, R6, R7 */
+#define USDHI6_SD_CMD_MODE_RSP_R1B	0x0500	/* R1b */
+#define USDHI6_SD_CMD_MODE_RSP_R2	0x0600
+#define USDHI6_SD_CMD_MODE_RSP_R3	0x0700	/* Also R4 */
+#define USDHI6_SD_CMD_DATA		0x0800
+#define USDHI6_SD_CMD_READ		0x1000
+#define USDHI6_SD_CMD_MULTI		0x2000
+#define USDHI6_SD_CMD_CMD12_AUTO_OFF	0x4000
+
+#define USDHI6_CC_EXT_MODE_SDRW		BIT(1)
+
+#define USDHI6_SD_INFO1_RSP_END		BIT(0)
+#define USDHI6_SD_INFO1_ACCESS_END	BIT(2)
+#define USDHI6_SD_INFO1_CARD_OUT	BIT(3)
+#define USDHI6_SD_INFO1_CARD_IN		BIT(4)
+#define USDHI6_SD_INFO1_CD		BIT(5)
+#define USDHI6_SD_INFO1_WP		BIT(7)
+#define USDHI6_SD_INFO1_D3_CARD_OUT	BIT(8)
+#define USDHI6_SD_INFO1_D3_CARD_IN	BIT(9)
+
+#define USDHI6_SD_INFO2_CMD_ERR		BIT(0)
+#define USDHI6_SD_INFO2_CRC_ERR		BIT(1)
+#define USDHI6_SD_INFO2_END_ERR		BIT(2)
+#define USDHI6_SD_INFO2_TOUT		BIT(3)
+#define USDHI6_SD_INFO2_IWA_ERR		BIT(4)
+#define USDHI6_SD_INFO2_IRA_ERR		BIT(5)
+#define USDHI6_SD_INFO2_RSP_TOUT	BIT(6)
+#define USDHI6_SD_INFO2_SDDAT0		BIT(7)
+#define USDHI6_SD_INFO2_BRE		BIT(8)
+#define USDHI6_SD_INFO2_BWE		BIT(9)
+#define USDHI6_SD_INFO2_SCLKDIVEN	BIT(13)
+#define USDHI6_SD_INFO2_CBSY		BIT(14)
+#define USDHI6_SD_INFO2_ILA		BIT(15)
+
+#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
+#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
+#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
+#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)
+
+#define USDHI6_SD_INFO2_ERR	(USDHI6_SD_INFO2_CMD_ERR | \
+	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \
+	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \
+	USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \
+	USDHI6_SD_INFO2_ILA)
+
+#define USDHI6_SD_INFO1_IRQ	(USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
+				 USDHI6_SD_INFO1_CARD)
+
+#define USDHI6_SD_INFO2_IRQ	(USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
+				 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)
+
+#define USDHI6_SD_CLK_CTRL_SCLKEN	BIT(8)
+
+#define USDHI6_SD_STOP_STP		BIT(0)
+#define USDHI6_SD_STOP_SEC		BIT(8)
+
+#define USDHI6_SDIO_INFO1_IOIRQ		BIT(0)
+#define USDHI6_SDIO_INFO1_EXPUB52	BIT(14)
+#define USDHI6_SDIO_INFO1_EXWT		BIT(15)
+
+#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR	BIT(13)
+
+#define USDHI6_SOFT_RST_RESERVED	(BIT(1) | BIT(2))
+#define USDHI6_SOFT_RST_RESET		BIT(0)
+
+#define USDHI6_SD_OPTION_TIMEOUT_SHIFT	4
+#define USDHI6_SD_OPTION_TIMEOUT_MASK	(0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
+#define USDHI6_SD_OPTION_WIDTH_1	BIT(15)
+
+#define USDHI6_SD_PORT_SEL_PORTS_SHIFT	8
+
+#define USDHI6_SD_CLK_CTRL_DIV_MASK	0xff
+
+#define USDHI6_SDIO_INFO1_IRQ	(USDHI6_SDIO_INFO1_IOIRQ | 3 | \
+				 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)
+
+#define USDHI6_MIN_DMA		64
+
+#define USDHI6_REQ_TIMEOUT_MS	4000
+
+enum usdhi6_wait_for {
+	USDHI6_WAIT_FOR_REQUEST,
+	USDHI6_WAIT_FOR_CMD,
+	USDHI6_WAIT_FOR_MREAD,
+	USDHI6_WAIT_FOR_MWRITE,
+	USDHI6_WAIT_FOR_READ,
+	USDHI6_WAIT_FOR_WRITE,
+	USDHI6_WAIT_FOR_DATA_END,
+	USDHI6_WAIT_FOR_STOP,
+	USDHI6_WAIT_FOR_DMA,
+};
+
+struct usdhi6_page {
+	struct page *page;
+	void *mapped;		/* mapped page */
+};
+
+struct usdhi6_host {
+	struct mmc_host *mmc;
+	struct mmc_request *mrq;
+	void __iomem *base;
+	struct clk *clk;
+
+	/* SG memory handling */
+
+	/* Common for multiple and single block requests */
+	struct usdhi6_page pg;	/* current page from an SG */
+	void *blk_page;		/* either a mapped page, or the bounce buffer */
+	size_t offset;		/* offset within a page, including sg->offset */
+
+	/* Blocks crossing a page boundary */
+	size_t head_len;
+	struct usdhi6_page head_pg;
+
+	/* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
+	struct scatterlist bounce_sg;
+	u8 bounce_buf[512];
+
+	/* Multiple block requests only */
+	struct scatterlist *sg;	/* current SG segment */
+	int page_idx;		/* page index within an SG segment */
+
+	enum usdhi6_wait_for wait;
+	u32 status_mask;
+	u32 status2_mask;
+	u32 sdio_mask;
+	u32 io_error;
+	u32 irq_status;
+	unsigned long imclk;
+	unsigned long rate;
+	bool app_cmd;
+
+	/* Timeout handling */
+	struct delayed_work timeout_work;
+	unsigned long timeout;
+
+	/* DMA support */
+	struct dma_chan *chan_rx;
+	struct dma_chan *chan_tx;
+	bool dma_active;
+
+	/* Pin control */
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pins_uhs;
+};
+
+/* I/O primitives */
+
+static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
+{
+	iowrite32(data, host->base + reg);
+	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+		 host->base, reg, data);
+}
+
+static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
+{
+	iowrite16(data, host->base + reg);
+	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+		 host->base, reg, data);
+}
+
+static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
+{
+	u32 data = ioread32(host->base + reg);
+	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+		 host->base, reg, data);
+	return data;
+}
+
+static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
+{
+	u16 data = ioread16(host->base + reg);
+	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+		 host->base, reg, data);
+	return data;
+}
+
+static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
+{
+	host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
+	host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
+	usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
+	usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
+}
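Note the inverted sense in usdhi6_irq_enable() above: the INFO1_MASK/INFO2_MASK registers hold the *masked* (disabled) sources, so enabling an interrupt means clearing its bit from the all-events word, USDHI6_SD_INFO1_IRQ & ~info1. A small standalone cross-check with a subset of the INFO1 bits (the driver's full set also includes the D3 hotplug bits):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* subset of the INFO1 bits defined above */
#define INFO1_RSP_END		BIT(0)
#define INFO1_ACCESS_END	BIT(2)
#define INFO1_CARD_OUT		BIT(3)
#define INFO1_CARD_IN		BIT(4)
#define INFO1_IRQ_ALL		(INFO1_RSP_END | INFO1_ACCESS_END | \
				 INFO1_CARD_OUT | INFO1_CARD_IN)

int main(void)
{
	/* enable only RSP_END: mask out everything else */
	uint32_t info1 = INFO1_RSP_END;
	uint32_t mask = INFO1_IRQ_ALL & ~info1;

	printf("INFO1 mask word: 0x%08x\n", mask);	/* 0x0000001c */
	return 0;
}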
+static void usdhi6_wait_for_resp(struct usdhi6_host *host)
+{
+	usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
+			  USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
+			  USDHI6_SD_INFO2_ERR);
+}
+
+static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
+{
+	usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
+			  USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
+			  (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
+}
+
+static void usdhi6_only_cd(struct usdhi6_host *host)
+{
+	/* Mask all except card hotplug */
+	usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
+}
+
+static void usdhi6_mask_all(struct usdhi6_host *host)
+{
+	usdhi6_irq_enable(host, 0, 0);
+}
+
+static int usdhi6_error_code(struct usdhi6_host *host)
+{
+	u32 err;
+
+	usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
+
+	if (host->io_error &
+	    (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
+		u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
+		int opc = host->mrq ? host->mrq->cmd->opcode : -1;
+
+		err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
+		/* Response timeout is often normal, don't spam the log */
+		if (host->wait == USDHI6_WAIT_FOR_CMD)
+			dev_dbg(mmc_dev(host->mmc),
+				"T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
+				err, rsp54, host->wait, opc);
+		else
+			dev_warn(mmc_dev(host->mmc),
+				 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
+				 err, rsp54, host->wait, opc);
+		return -ETIMEDOUT;
+	}
+
+	err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
+	if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
+		dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
+			 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
+	if (host->io_error & USDHI6_SD_INFO2_ILA)
+		return -EILSEQ;
+
+	return -EIO;
+}
+
+/* Scatter-Gather management */
+
+/*
+ * In PIO mode we have to map each page separately, using kmap(). That way
+ * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
+ * have to use a bounce buffer for blocks crossing page boundaries. Such
+ * blocks have been observed with an SDIO WiFi card (b43 driver).
+ */
+static void usdhi6_blk_bounce(struct usdhi6_host *host,
+			      struct scatterlist *sg)
+{
+	struct mmc_data *data = host->mrq->data;
+	size_t blk_head = host->head_len;
+
+	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
+		__func__, host->mrq->cmd->opcode, data->sg_len,
+		data->blksz, data->blocks, sg->offset);
+
+	host->head_pg.page = host->pg.page;
+	host->head_pg.mapped = host->pg.mapped;
+	host->pg.page = nth_page(host->pg.page, 1);
+	host->pg.mapped = kmap(host->pg.page);
+
+	host->blk_page = host->bounce_buf;
+	host->offset = 0;
+
+	if (data->flags & MMC_DATA_READ)
+		return;
+
+	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
+	       blk_head);
+	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
+	       data->blksz - blk_head);
+}
+
+/* Only called for multiple block IO */
+static void usdhi6_sg_prep(struct usdhi6_host *host)
+{
+	struct mmc_request *mrq = host->mrq;
+	struct mmc_data *data = mrq->data;
+
+	usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
+
+	host->sg = data->sg;
+	/* TODO: if we always map, this is redundant */
+	host->offset = host->sg->offset;
+}
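The bounce path is taken whenever fewer bytes than one block remain on the current page. A worked example of the head arithmetic shared by usdhi6_blk_bounce() above and usdhi6_sg_map() below, with a hypothetical offset and a 4096-byte PAGE_SIZE assumed:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int offset = 3900, blksz = 512;	/* hypothetical */
	unsigned int head = PAGE_SIZE - offset;		/* 196 bytes left */
	unsigned int blk_head = head % blksz;		/* split-block head */

	if (head < blksz)
		printf("block crosses the page: %u bytes on page 1, %u on page 2\n",
		       blk_head, blksz - blk_head);
	return 0;
}

With these numbers the first block is split 196/316 across two kmap()ed pages, which is exactly the case the 512-byte bounce_buf covers.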
+/* Map the first page in an SG segment: common for multiple and single block IO */
+static void *usdhi6_sg_map(struct usdhi6_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
+	size_t head = PAGE_SIZE - sg->offset;
+	size_t blk_head = head % data->blksz;
+
+	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
+	if (WARN(sg_dma_len(sg) % data->blksz,
+		 "SG size %u isn't a multiple of block size %u\n",
+		 sg_dma_len(sg), data->blksz))
+		return NULL;
+
+	host->pg.page = sg_page(sg);
+	host->pg.mapped = kmap(host->pg.page);
+	host->offset = sg->offset;
+
+	/*
+	 * Block size must be a power of 2 for multi-block transfers,
+	 * therefore blk_head is equal for all pages in this SG
+	 */
+	host->head_len = blk_head;
+
+	if (head < data->blksz)
+		/*
+		 * The first block in the SG crosses a page boundary.
+		 * Max blksz = 512, so blocks can only span 2 pages
+		 */
+		usdhi6_blk_bounce(host, sg);
+	else
+		host->blk_page = host->pg.mapped;
+
+	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
+		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
+		sg->offset, host->mrq->cmd->opcode, host->mrq);
+
+	return host->blk_page + host->offset;
+}
+
+/* Unmap the current page: common for multiple and single block IO */
+static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
+{
+	struct mmc_data *data = host->mrq->data;
+	struct page *page = host->head_pg.page;
+
+	if (page) {
+		/* The previous block crossed a page boundary */
+		struct scatterlist *sg = data->sg_len > 1 ?
+			host->sg : data->sg;
+		size_t blk_head = host->head_len;
+
+		if (!data->error && data->flags & MMC_DATA_READ) {
+			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
+			       host->bounce_buf, blk_head);
+			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
+			       data->blksz - blk_head);
+		}
+
+		flush_dcache_page(page);
+		kunmap(page);
+
+		host->head_pg.page = NULL;
+
+		if (!force && sg_dma_len(sg) + sg->offset >
+		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
+			/* More blocks in this SG, don't unmap the next page */
+			return;
+	}
+
+	page = host->pg.page;
+	if (!page)
+		return;
+
+	flush_dcache_page(page);
+	kunmap(page);
+
+	host->pg.page = NULL;
+}
+
+/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
+static void usdhi6_sg_advance(struct usdhi6_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	size_t done, total;
+
+	/* New offset: set at the end of the previous block */
+	if (host->head_pg.page) {
+		/* Finished a cross-page block, jump to the new page */
+		host->page_idx++;
+		host->offset = data->blksz - host->head_len;
+		host->blk_page = host->pg.mapped;
+		usdhi6_sg_unmap(host, false);
+	} else {
+		host->offset += data->blksz;
+		/* The completed block didn't cross a page boundary */
+		if (host->offset == PAGE_SIZE) {
+			/* If required, we'll map the page below */
+			host->offset = 0;
+			host->page_idx++;
+		}
+	}
+
+	/*
+	 * Now host->blk_page + host->offset points at the end of our last
+	 * block and host->page_idx is the index of the page in which our
+	 * new block is located, if any
+	 */
+
+	done = (host->page_idx << PAGE_SHIFT) + host->offset;
+	total = host->sg->offset + sg_dma_len(host->sg);
+
+	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
+		done, total, host->offset);
+
+	if (done < total && host->offset) {
+		/* More blocks in this page */
+		if (host->offset + data->blksz > PAGE_SIZE)
+			/* We have reached a block that spans 2 pages */
+			usdhi6_blk_bounce(host, host->sg);
+
+		return;
+	}
+
+	/* Finished current page or an SG segment */
+	usdhi6_sg_unmap(host, false);
+
+	if (done == total) {
+		/*
+		 * End of an SG segment or the complete SG: jump to the next
+		 * segment, we'll map it
later in usdhi6_blk_read() or + * usdhi6_blk_write() + */ + struct scatterlist *next = sg_next(host->sg); + + host->page_idx = 0; + + if (!next) + host->wait = USDHI6_WAIT_FOR_DATA_END; + host->sg = next; + + if (WARN(next && sg_dma_len(next) % data->blksz, + "SG size %u isn't a multiple of block size %u\n", + sg_dma_len(next), data->blksz)) + data->error = -EINVAL; + + return; + } + + /* We cannot get here after crossing a page border */ + + /* Next page in the same SG */ + host->pg.page = nth_page(sg_page(host->sg), host->page_idx); + host->pg.mapped = kmap(host->pg.page); + host->blk_page = host->pg.mapped; + + dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", + host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, + host->mrq->cmd->opcode, host->mrq); +} + +/* DMA handling */ + +static void usdhi6_dma_release(struct usdhi6_host *host) +{ + host->dma_active = false; + if (host->chan_tx) { + struct dma_chan *chan = host->chan_tx; + host->chan_tx = NULL; + dma_release_channel(chan); + } + if (host->chan_rx) { + struct dma_chan *chan = host->chan_rx; + host->chan_rx = NULL; + dma_release_channel(chan); + } +} + +static void usdhi6_dma_stop_unmap(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + + if (!host->dma_active) + return; + + usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); + host->dma_active = false; + + if (data->flags & MMC_DATA_READ) + dma_unmap_sg(host->chan_rx->device->dev, data->sg, + data->sg_len, DMA_FROM_DEVICE); + else + dma_unmap_sg(host->chan_tx->device->dev, data->sg, + data->sg_len, DMA_TO_DEVICE); +} + +static void usdhi6_dma_complete(void *arg) +{ + struct usdhi6_host *host = arg; + struct mmc_request *mrq = host->mrq; + + if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n", + dev_name(mmc_dev(host->mmc)), mrq)) + return; + + dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, + mrq->cmd->opcode); + + usdhi6_dma_stop_unmap(host); + usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); +} + +static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan, + enum dma_transfer_direction dir) +{ + struct mmc_data *data = host->mrq->data; + struct scatterlist *sg = data->sg; + struct dma_async_tx_descriptor *desc = NULL; + dma_cookie_t cookie = -EINVAL; + enum dma_data_direction data_dir; + int ret; + + switch (dir) { + case DMA_MEM_TO_DEV: + data_dir = DMA_TO_DEVICE; + break; + case DMA_DEV_TO_MEM: + data_dir = DMA_FROM_DEVICE; + break; + default: + return -EINVAL; + } + + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); + if (ret > 0) { + host->dma_active = true; + desc = dmaengine_prep_slave_sg(chan, sg, ret, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (desc) { + desc->callback = usdhi6_dma_complete; + desc->callback_param = host; + cookie = dmaengine_submit(desc); + } + + dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", + __func__, data->sg_len, ret, cookie, desc); + + if (cookie < 0) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = cookie; + usdhi6_dma_release(host); + dev_warn(mmc_dev(host->mmc), + "DMA failed: %d, falling back to PIO\n", ret); + } + + return cookie; +} + +static int usdhi6_dma_start(struct usdhi6_host *host) +{ + if (!host->chan_rx || !host->chan_tx) + return -ENODEV; + + if (host->mrq->data->flags & MMC_DATA_READ) + return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM); + + return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); +} + +static void usdhi6_dma_kill(struct 
usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + + dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n", + __func__, data->sg_len, data->blocks, data->blksz); + /* Abort DMA */ + if (data->flags & MMC_DATA_READ) + dmaengine_terminate_all(host->chan_rx); + else + dmaengine_terminate_all(host->chan_tx); +} + +static void usdhi6_dma_check_error(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + + dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", + __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); + + if (host->io_error) { + data->error = usdhi6_error_code(host); + data->bytes_xfered = 0; + usdhi6_dma_kill(host); + usdhi6_dma_release(host); + dev_warn(mmc_dev(host->mmc), + "DMA failed: %d, falling back to PIO\n", data->error); + return; + } + + /* + * The datasheet tells us to check a response from the card, whereas + * responses only come after the command phase, not after the data + * phase. Let's check anyway. + */ + if (host->irq_status & USDHI6_SD_INFO1_RSP_END) + dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); +} + +static void usdhi6_dma_kick(struct usdhi6_host *host) +{ + if (host->mrq->data->flags & MMC_DATA_READ) + dma_async_issue_pending(host->chan_rx); + else + dma_async_issue_pending(host->chan_tx); +} + +static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) +{ + struct dma_slave_config cfg = { + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + int ret; + + host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); + dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, + host->chan_tx); + + if (IS_ERR(host->chan_tx)) { + host->chan_tx = NULL; + return; + } + + cfg.direction = DMA_MEM_TO_DEV; + cfg.dst_addr = start + USDHI6_SD_BUF0; + cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ + cfg.src_addr = 0; + ret = dmaengine_slave_config(host->chan_tx, &cfg); + if (ret < 0) + goto e_release_tx; + + host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); + dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, + host->chan_rx); + + if (IS_ERR(host->chan_rx)) { + host->chan_rx = NULL; + goto e_release_tx; + } + + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = cfg.dst_addr; + cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ + cfg.dst_addr = 0; + ret = dmaengine_slave_config(host->chan_rx, &cfg); + if (ret < 0) + goto e_release_rx; + + return; + +e_release_rx: + dma_release_channel(host->chan_rx); + host->chan_rx = NULL; +e_release_tx: + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; +} + +/* API helpers */ + +static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios) +{ + unsigned long rate = ios->clock; + u32 val; + unsigned int i; + + for (i = 1000; i; i--) { + if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) + break; + usleep_range(10, 100); + } + + if (!i) { + dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); + return; + } + + val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK; + + if (rate) { + unsigned long new_rate; + + if (host->imclk <= rate) { + if (ios->timing != MMC_TIMING_UHS_DDR50) { + /* Cannot have 1-to-1 clock in DDR mode */ + new_rate = host->imclk; + val |= 0xff; + } else { + new_rate = host->imclk / 2; + } + } else { + unsigned long div = + roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); + val |= div >> 2; + new_rate = host->imclk / div; + } + + if (host->rate == 
+new_rate)
+			return;
+
+		host->rate = new_rate;
+
+		dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
+			rate, (val & 0xff) << 2, new_rate);
+	}
+
+	/*
+	 * If either the old or the new rate equals the input rate, the clock
+	 * has to be switched off before the change and back on afterwards.
+	 */
+	if (host->imclk == rate || host->imclk == host->rate || !rate)
+		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
+			     val & ~USDHI6_SD_CLK_CTRL_SCLKEN);
+
+	if (!rate) {
+		host->rate = 0;
+		return;
+	}
+
+	usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
+
+	if (host->imclk == rate || host->imclk == host->rate ||
+	    !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
+		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
+			     val | USDHI6_SD_CLK_CTRL_SCLKEN);
+}
+
+static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	if (!IS_ERR(mmc->supply.vmmc))
+		/* Errors ignored... */
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+				      ios->power_mode ? ios->vdd : 0);
+}
+
+static int usdhi6_reset(struct usdhi6_host *host)
+{
+	int i;
+
+	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
+	cpu_relax();
+	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
+	for (i = 1000; i; i--)
+		if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
+			break;
+
+	return i ? 0 : -ETIMEDOUT;
+}
+
+static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct usdhi6_host *host = mmc_priv(mmc);
+	u32 option, mode;
+	int ret;
+
+	dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
+		ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);
+
+	switch (ios->power_mode) {
+	case MMC_POWER_OFF:
+		usdhi6_set_power(host, ios);
+		usdhi6_only_cd(host);
+		break;
+	case MMC_POWER_UP:
+		/*
+		 * USDHI6_SD_OPTION is otherwise only touched from .request(),
+		 * which cannot race with MMC_POWER_UP
+		 */
+		ret = usdhi6_reset(host);
+		if (ret < 0) {
+			dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
+		} else {
+			usdhi6_set_power(host, ios);
+			usdhi6_only_cd(host);
+		}
+		break;
+	case MMC_POWER_ON:
+		option = usdhi6_read(host, USDHI6_SD_OPTION);
+		/*
+		 * The eMMC standard only allows 4 or 8 bits in the DDR mode,
+		 * the same probably holds for SD cards. We check here anyway,
+		 * since the datasheet explicitly requires 4 bits for DDR.
+		 */
+		if (ios->bus_width == MMC_BUS_WIDTH_1) {
+			if (ios->timing == MMC_TIMING_UHS_DDR50)
+				dev_err(mmc_dev(mmc),
+					"4 bits are required for DDR\n");
+			option |= USDHI6_SD_OPTION_WIDTH_1;
+			mode = 0;
+		} else {
+			option &= ~USDHI6_SD_OPTION_WIDTH_1;
+			mode = ios->timing == MMC_TIMING_UHS_DDR50;
+		}
+		usdhi6_write(host, USDHI6_SD_OPTION, option);
+		usdhi6_write(host, USDHI6_SDIF_MODE, mode);
+		break;
+	}
+
+	if (host->rate != ios->clock)
+		usdhi6_clk_set(host, ios);
+}
+
+/* This sets the data timeout; the response timeout is fixed at 640 clock cycles */
+static void usdhi6_timeout_set(struct usdhi6_host *host)
+{
+	struct mmc_request *mrq = host->mrq;
+	u32 val;
+	unsigned long ticks;
+
+	if (!mrq->data)
+		ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
+	else
+		ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
+			mrq->data->timeout_clks;
+
+	if (!ticks || ticks > 1 << 27)
+		/* Max timeout */
+		val = 14;
+	else if (ticks < 1 << 13)
+		/* Min timeout */
+		val = 0;
+	else
+		val = order_base_2(ticks) - 13;
+
+	dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
+		mrq->data ? "data" : "cmd", ticks, host->rate);
+
+	/* Timeout Counter mask: 0xf0 */
+	usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
+		     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
+}
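A worked example for the timeout encoding above: at a hypothetical 52 MHz bus clock with a 100 ms data timeout, ticks is 5,200,000, order_base_2() returns 23, and the SD_OPTION timeout code becomes 10, i.e. a 2^23-clock timeout. A standalone cross-check, with a local re-implementation of the kernel's order_base_2():

#include <stdio.h>

/* order_base_2(n): smallest x with 2^x >= n, as in the kernel helper */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int x = 0;

	while ((1ul << x) < n)
		x++;
	return x;
}

int main(void)
{
	unsigned long rate = 52000000, timeout_ns = 100000000;	/* 100 ms */
	unsigned long ticks = rate / 1000000 * (timeout_ns / 1000);
	unsigned int val = order_base_2(ticks) - 13;	/* 23 - 13 = 10 */

	printf("SD_OPTION timeout code: %u (2^%u clocks)\n", val, val + 13);
	return 0;
}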
"data" : "cmd", ticks, host->rate); + + /* Timeout Counter mask: 0xf0 */ + usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | + (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); +} + +static void usdhi6_request_done(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq->data; + + if (WARN(host->pg.page || host->head_pg.page, + "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n", + host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, + data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', + data ? host->offset : 0, data ? data->blocks : 0, + data ? data->blksz : 0, data ? data->sg_len : 0)) + usdhi6_sg_unmap(host, true); + + if (mrq->cmd->error || + (data && data->error) || + (mrq->stop && mrq->stop->error)) + dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", + __func__, mrq->cmd->opcode, data ? data->blocks : 0, + data ? data->blksz : 0, + mrq->cmd->error, + data ? data->error : 1, + mrq->stop ? mrq->stop->error : 1); + + /* Disable DMA */ + usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); + host->wait = USDHI6_WAIT_FOR_REQUEST; + host->mrq = NULL; + + mmc_request_done(host->mmc, mrq); +} + +static int usdhi6_cmd_flags(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + u16 opc = cmd->opcode; + + if (host->app_cmd) { + host->app_cmd = false; + opc |= USDHI6_SD_CMD_APP; + } + + if (mrq->data) { + opc |= USDHI6_SD_CMD_DATA; + + if (mrq->data->flags & MMC_DATA_READ) + opc |= USDHI6_SD_CMD_READ; + + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + (cmd->opcode == SD_IO_RW_EXTENDED && + mrq->data->blocks > 1)) { + opc |= USDHI6_SD_CMD_MULTI; + if (!mrq->stop) + opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF; + } + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + opc |= USDHI6_SD_CMD_MODE_RSP_NONE; + break; + case MMC_RSP_R1: + opc |= USDHI6_SD_CMD_MODE_RSP_R1; + break; + case MMC_RSP_R1B: + opc |= USDHI6_SD_CMD_MODE_RSP_R1B; + break; + case MMC_RSP_R2: + opc |= USDHI6_SD_CMD_MODE_RSP_R2; + break; + case MMC_RSP_R3: + opc |= USDHI6_SD_CMD_MODE_RSP_R3; + break; + default: + dev_warn(mmc_dev(host->mmc), + "Unknown response type %d\n", + mmc_resp_type(cmd)); + return -EINVAL; + } + } + + return opc; +} + +static int usdhi6_rq_start(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + int opc = usdhi6_cmd_flags(host); + int i; + + if (opc < 0) + return opc; + + for (i = 1000; i; i--) { + if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) + break; + usleep_range(10, 100); + } + + if (!i) { + dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); + return -EAGAIN; + } + + if (data) { + bool use_dma; + int ret = 0; + + host->page_idx = 0; + + if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { + switch (data->blksz) { + case 512: + break; + case 32: + case 64: + case 128: + case 256: + if (mrq->stop) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && + data->blksz != 512) { + ret = -EINVAL; + } + + if (ret < 0) { + dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", + __func__, data->blocks, data->blksz); + return -EINVAL; + } + + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + 
+static int usdhi6_rq_start(struct usdhi6_host *host)
+{
+	struct mmc_request *mrq = host->mrq;
+	struct mmc_command *cmd = mrq->cmd;
+	struct mmc_data *data = mrq->data;
+	int opc = usdhi6_cmd_flags(host);
+	int i;
+
+	if (opc < 0)
+		return opc;
+
+	for (i = 1000; i; i--) {
+		if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
+			break;
+		usleep_range(10, 100);
+	}
+
+	if (!i) {
+		dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
+		return -EAGAIN;
+	}
+
+	if (data) {
+		bool use_dma;
+		int ret = 0;
+
+		host->page_idx = 0;
+
+		if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
+			switch (data->blksz) {
+			case 512:
+				break;
+			case 32:
+			case 64:
+			case 128:
+			case 256:
+				if (mrq->stop)
+					ret = -EINVAL;
+				break;
+			default:
+				ret = -EINVAL;
+			}
+		} else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+			    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
+			   data->blksz != 512) {
+			ret = -EINVAL;
+		}
+
+		if (ret < 0) {
+			dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
+				 __func__, data->blocks, data->blksz);
+			return -EINVAL;
+		}
+
+		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+		    (cmd->opcode == SD_IO_RW_EXTENDED &&
+		     data->blocks > 1))
+			usdhi6_sg_prep(host);
+
+		usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
+
+		if ((data->blksz >= USDHI6_MIN_DMA ||
+		     data->blocks > 1) &&
+		    (data->blksz % 4 ||
+		     data->sg->offset % 4))
+			dev_dbg(mmc_dev(host->mmc),
+				"Bad SG of %u: %ux%u @ %u\n", data->sg_len,
+				data->blksz, data->blocks, data->sg->offset);
+
+		/* Enable DMA for USDHI6_MIN_DMA bytes or more */
+		use_dma = data->blksz >= USDHI6_MIN_DMA &&
+			!(data->blksz % 4) &&
+			usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
+
+		if (use_dma)
+			usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
+
+		dev_dbg(mmc_dev(host->mmc),
+			"%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
+			__func__, cmd->opcode, data->blocks, data->blksz,
+			data->sg_len, use_dma ? "DMA" : "PIO",
+			data->flags & MMC_DATA_READ ? "read" : "write",
+			data->sg->offset, mrq->stop ? " + stop" : "");
+	} else {
+		dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
+			__func__, cmd->opcode);
+	}
+
+	/* We have to get a command completion interrupt with DMA too */
+	usdhi6_wait_for_resp(host);
+
+	host->wait = USDHI6_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+
+	/* SEC bit is required to enable block counting by the core */
+	usdhi6_write(host, USDHI6_SD_STOP,
+		     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
+	usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
+
+	/* Kick command execution */
+	usdhi6_write(host, USDHI6_SD_CMD, opc);
+
+	return 0;
+}
+
+static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct usdhi6_host *host = mmc_priv(mmc);
+	int ret;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	host->mrq = mrq;
+	host->sg = NULL;
+
+	usdhi6_timeout_set(host);
+	ret = usdhi6_rq_start(host);
+	if (ret < 0) {
+		mrq->cmd->error = ret;
+		usdhi6_request_done(host);
+	}
+}
+
+static int usdhi6_get_cd(struct mmc_host *mmc)
+{
+	struct usdhi6_host *host = mmc_priv(mmc);
+	/* Read is atomic, no need to lock */
+	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
+
+/*
+ * level	status.CD	CD_ACTIVE_HIGH	card present
+ *   1		    0		     0		      0
+ *   1		    0		     1		      1
+ *   0		    1		     0		      1
+ *   0		    1		     1		      0
+ */
+	return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+}
+
+static int usdhi6_get_ro(struct mmc_host *mmc)
+{
+	struct usdhi6_host *host = mmc_priv(mmc);
+	/* No locking as above */
+	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
+
+/*
+ * level	status.WP	RO_ACTIVE_HIGH	card read-only
+ *   1		    0		     0		      0
+ *   1		    0		     1		      1
+ *   0		    1		     0		      1
+ *   0		    1		     1		      0
+ */
+	return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+}
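Both getters above normalize a raw pin level against the active-high capability bit with the same !status ^ !cap expression. Enumerating it in a standalone loop reproduces the truth tables in the comments:

#include <stdio.h>

int main(void)
{
	int status, high;

	/* !status ^ !high, as in usdhi6_get_cd()/usdhi6_get_ro() */
	for (status = 0; status <= 1; status++)
		for (high = 0; high <= 1; high++)
			printf("status=%d active_high=%d -> result=%d\n",
			       status, high, !status ^ !high);
	return 0;
}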
"en" : "dis"); + + if (enable) { + host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; + usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); + usdhi6_write(host, USDHI6_SDIO_MODE, 1); + } else { + usdhi6_write(host, USDHI6_SDIO_MODE, 0); + usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); + host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; + } +} + +static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage) +{ + if (IS_ERR(host->pins_uhs)) + return 0; + + switch (voltage) { + case MMC_SIGNAL_VOLTAGE_180: + case MMC_SIGNAL_VOLTAGE_120: + return pinctrl_select_state(host->pinctrl, + host->pins_uhs); + + default: + return pinctrl_select_default_state(mmc_dev(host->mmc)); + } +} + +static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + int ret; + + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) + return ret; + + ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage); + if (ret) + dev_warn_once(mmc_dev(mmc), + "Failed to set pinstate err=%d\n", ret); + return ret; +} + +static const struct mmc_host_ops usdhi6_ops = { + .request = usdhi6_request, + .set_ios = usdhi6_set_ios, + .get_cd = usdhi6_get_cd, + .get_ro = usdhi6_get_ro, + .enable_sdio_irq = usdhi6_enable_sdio_irq, + .start_signal_voltage_switch = usdhi6_sig_volt_switch, +}; + +/* State machine handlers */ + +static void usdhi6_resp_cmd12(struct usdhi6_host *host) +{ + struct mmc_command *cmd = host->mrq->stop; + cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); +} + +static void usdhi6_resp_read(struct usdhi6_host *host) +{ + struct mmc_command *cmd = host->mrq->cmd; + u32 *rsp = cmd->resp, tmp = 0; + int i; + +/* + * RSP10 39-8 + * RSP32 71-40 + * RSP54 103-72 + * RSP76 127-104 + * R2-type response: + * resp[0] = r[127..96] + * resp[1] = r[95..64] + * resp[2] = r[63..32] + * resp[3] = r[31..0] + * Other responses: + * resp[0] = r[39..8] + */ + + if (mmc_resp_type(cmd) == MMC_RSP_NONE) + return; + + if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { + dev_err(mmc_dev(host->mmc), + "CMD%d: response expected but is missing!\n", cmd->opcode); + return; + } + + if (mmc_resp_type(cmd) & MMC_RSP_136) + for (i = 0; i < 4; i++) { + if (i) + rsp[3 - i] = tmp >> 24; + tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); + rsp[3 - i] |= tmp << 8; + } + else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) + /* Read RSP54 to avoid conflict with auto CMD12 */ + rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); + else + rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); + + dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); +} + +static int usdhi6_blk_read(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + u32 *p; + int i, rest; + + if (host->io_error) { + data->error = usdhi6_error_code(host); + goto error; + } + + if (host->pg.page) { + p = host->blk_page + host->offset; + } else { + p = usdhi6_sg_map(host); + if (!p) { + data->error = -ENOMEM; + goto error; + } + } + + for (i = 0; i < data->blksz / 4; i++, p++) + *p = usdhi6_read(host, USDHI6_SD_BUF0); + + rest = data->blksz % 4; + for (i = 0; i < (rest + 1) / 2; i++) { + u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); + ((u8 *)p)[2 * i] = ((u8 *)&d)[0]; + if (rest > 1 && !i) + ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1]; + } + + return 0; + +error: + dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); + host->wait = USDHI6_WAIT_FOR_REQUEST; + return data->error; +} + +static int usdhi6_blk_write(struct usdhi6_host *host) +{ + struct mmc_data *data = 
host->mrq->data; + u32 *p; + int i, rest; + + if (host->io_error) { + data->error = usdhi6_error_code(host); + goto error; + } + + if (host->pg.page) { + p = host->blk_page + host->offset; + } else { + p = usdhi6_sg_map(host); + if (!p) { + data->error = -ENOMEM; + goto error; + } + } + + for (i = 0; i < data->blksz / 4; i++, p++) + usdhi6_write(host, USDHI6_SD_BUF0, *p); + + rest = data->blksz % 4; + for (i = 0; i < (rest + 1) / 2; i++) { + u16 d; + ((u8 *)&d)[0] = ((u8 *)p)[2 * i]; + if (rest > 1 && !i) + ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1]; + else + ((u8 *)&d)[1] = 0; + usdhi6_write16(host, USDHI6_SD_BUF0, d); + } + + return 0; + +error: + dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); + host->wait = USDHI6_WAIT_FOR_REQUEST; + return data->error; +} + +static int usdhi6_stop_cmd(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + + switch (mrq->cmd->opcode) { + case MMC_READ_MULTIPLE_BLOCK: + case MMC_WRITE_MULTIPLE_BLOCK: + if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) { + host->wait = USDHI6_WAIT_FOR_STOP; + return 0; + } + fallthrough; /* Unsupported STOP command */ + default: + dev_err(mmc_dev(host->mmc), + "unsupported stop CMD%d for CMD%d\n", + mrq->stop->opcode, mrq->cmd->opcode); + mrq->stop->error = -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static bool usdhi6_end_cmd(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + + if (host->io_error) { + cmd->error = usdhi6_error_code(host); + return false; + } + + usdhi6_resp_read(host); + + if (!mrq->data) + return false; + + if (host->dma_active) { + usdhi6_dma_kick(host); + if (!mrq->stop) + host->wait = USDHI6_WAIT_FOR_DMA; + else if (usdhi6_stop_cmd(host) < 0) + return false; + } else if (mrq->data->flags & MMC_DATA_READ) { + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + (cmd->opcode == SD_IO_RW_EXTENDED && + mrq->data->blocks > 1)) + host->wait = USDHI6_WAIT_FOR_MREAD; + else + host->wait = USDHI6_WAIT_FOR_READ; + } else { + if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + (cmd->opcode == SD_IO_RW_EXTENDED && + mrq->data->blocks > 1)) + host->wait = USDHI6_WAIT_FOR_MWRITE; + else + host->wait = USDHI6_WAIT_FOR_WRITE; + } + + return true; +} + +static bool usdhi6_read_block(struct usdhi6_host *host) +{ + /* ACCESS_END IRQ is already unmasked */ + int ret = usdhi6_blk_read(host); + + /* + * Have to force unmapping both pages: the single block could have been + * cross-page, in which case for single-block IO host->page_idx == 0. + * So, if we don't force, the second page won't be unmapped. 
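+ * (A block that crosses a page boundary keeps its first page in host->head_pg + * and the following page in host->pg; forcing the unmap releases both.)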
+ */ + usdhi6_sg_unmap(host, true); + + if (ret < 0) + return false; + + host->wait = USDHI6_WAIT_FOR_DATA_END; + return true; +} + +static bool usdhi6_mread_block(struct usdhi6_host *host) +{ + int ret = usdhi6_blk_read(host); + + if (ret < 0) + return false; + + usdhi6_sg_advance(host); + + return !host->mrq->data->error && + (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); +} + +static bool usdhi6_write_block(struct usdhi6_host *host) +{ + int ret = usdhi6_blk_write(host); + + /* See comment in usdhi6_read_block() */ + usdhi6_sg_unmap(host, true); + + if (ret < 0) + return false; + + host->wait = USDHI6_WAIT_FOR_DATA_END; + return true; +} + +static bool usdhi6_mwrite_block(struct usdhi6_host *host) +{ + int ret = usdhi6_blk_write(host); + + if (ret < 0) + return false; + + usdhi6_sg_advance(host); + + return !host->mrq->data->error && + (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); +} + +/* Interrupt & timeout handlers */ + +static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + bool io_wait = false; + + cancel_delayed_work_sync(&host->timeout_work); + + mrq = host->mrq; + if (!mrq) + return IRQ_HANDLED; + + cmd = mrq->cmd; + data = mrq->data; + + switch (host->wait) { + case USDHI6_WAIT_FOR_REQUEST: + /* We're too late, the timeout has already kicked in */ + return IRQ_HANDLED; + case USDHI6_WAIT_FOR_CMD: + /* Wait for data? */ + io_wait = usdhi6_end_cmd(host); + break; + case USDHI6_WAIT_FOR_MREAD: + /* Wait for more data? */ + io_wait = usdhi6_mread_block(host); + break; + case USDHI6_WAIT_FOR_READ: + /* Wait for data end? */ + io_wait = usdhi6_read_block(host); + break; + case USDHI6_WAIT_FOR_MWRITE: + /* Wait for more data to write? */ + io_wait = usdhi6_mwrite_block(host); + break; + case USDHI6_WAIT_FOR_WRITE: + /* Wait for data end?
*/ + io_wait = usdhi6_write_block(host); + break; + case USDHI6_WAIT_FOR_DMA: + usdhi6_dma_check_error(host); + break; + case USDHI6_WAIT_FOR_STOP: + usdhi6_write(host, USDHI6_SD_STOP, 0); + if (host->io_error) { + int ret = usdhi6_error_code(host); + if (mrq->stop) + mrq->stop->error = ret; + else + mrq->data->error = ret; + dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); + break; + } + usdhi6_resp_cmd12(host); + mrq->stop->error = 0; + break; + case USDHI6_WAIT_FOR_DATA_END: + if (host->io_error) { + mrq->data->error = usdhi6_error_code(host); + dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, + mrq->data->error); + } + break; + default: + cmd->error = -EFAULT; + dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); + usdhi6_request_done(host); + return IRQ_HANDLED; + } + + if (io_wait) { + schedule_delayed_work(&host->timeout_work, host->timeout); + /* Wait for more data or ACCESS_END */ + if (!host->dma_active) + usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); + return IRQ_HANDLED; + } + + if (!cmd->error) { + if (data) { + if (!data->error) { + if (host->wait != USDHI6_WAIT_FOR_STOP && + host->mrq->stop && + !host->mrq->stop->error && + !usdhi6_stop_cmd(host)) { + /* Sending STOP */ + usdhi6_wait_for_resp(host); + + schedule_delayed_work(&host->timeout_work, + host->timeout); + + return IRQ_HANDLED; + } + + data->bytes_xfered = data->blocks * data->blksz; + } else { + /* Data error: might need to unmap the last page */ + dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", + __func__, data->error); + usdhi6_sg_unmap(host, true); + } + } else if (cmd->opcode == MMC_APP_CMD) { + host->app_cmd = true; + } + } + + usdhi6_request_done(host); + + return IRQ_HANDLED; +} + +static irqreturn_t usdhi6_sd(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + u16 status, status2, error; + + status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & + ~USDHI6_SD_INFO1_CARD; + status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; + + usdhi6_only_cd(host); + + dev_dbg(mmc_dev(host->mmc), + "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2); + + if (!status && !status2) + return IRQ_NONE; + + error = status2 & USDHI6_SD_INFO2_ERR; + + /* Ack / clear interrupts */ + if (USDHI6_SD_INFO1_IRQ & status) + usdhi6_write(host, USDHI6_SD_INFO1, + 0xffff & ~(USDHI6_SD_INFO1_IRQ & status)); + + if (USDHI6_SD_INFO2_IRQ & status2) { + if (error) + /* In error cases BWE and BRE aren't cleared automatically */ + status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE; + + usdhi6_write(host, USDHI6_SD_INFO2, + 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2)); + } + + host->io_error = error; + host->irq_status = status; + + if (error) { + /* Don't pollute the log with unsupported command timeouts */ + if (host->wait != USDHI6_WAIT_FOR_CMD || + error != USDHI6_SD_INFO2_RSP_TOUT) + dev_warn(mmc_dev(host->mmc), + "%s(): INFO2 error bits 0x%08x\n", + __func__, error); + else + dev_dbg(mmc_dev(host->mmc), + "%s(): INFO2 error bits 0x%08x\n", + __func__, error); + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t usdhi6_sdio(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; + + dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); + + if (!status) + return IRQ_NONE; + + usdhi6_write(host, USDHI6_SDIO_INFO1, ~status); + + mmc_signal_sdio_irq(host->mmc); + + return IRQ_HANDLED; +} + +static irqreturn_t usdhi6_cd(int irq, void *dev_id) +{ + struct 
usdhi6_host *host = dev_id; + struct mmc_host *mmc = host->mmc; + u16 status; + + /* We're only interested in hotplug events here */ + status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & + USDHI6_SD_INFO1_CARD; + + if (!status) + return IRQ_NONE; + + /* Ack */ + usdhi6_write(host, USDHI6_SD_INFO1, ~status); + + if (!work_pending(&mmc->detect.work) && + (((status & USDHI6_SD_INFO1_CARD_INSERT) && + !mmc->card) || + ((status & USDHI6_SD_INFO1_CARD_EJECT) && + mmc->card))) + mmc_detect_change(mmc, msecs_to_jiffies(100)); + + return IRQ_HANDLED; +} + +/* + * Actually this should not be needed if the built-in timeout works reliably in + * both PIO cases and DMA never fails. But if DMA does fail, a timeout + * handler might be the only way to catch the error. + */ +static void usdhi6_timeout_work(struct work_struct *work) +{ + struct delayed_work *d = to_delayed_work(work); + struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq ? mrq->data : NULL; + struct scatterlist *sg; + + dev_warn(mmc_dev(host->mmc), + "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", + host->dma_active ? "DMA" : "PIO", + host->wait, mrq ? mrq->cmd->opcode : -1, + usdhi6_read(host, USDHI6_SD_INFO1), + usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); + + if (host->dma_active) { + usdhi6_dma_kill(host); + usdhi6_dma_stop_unmap(host); + } + + switch (host->wait) { + default: + dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); + fallthrough; /* mrq can be NULL here, though that should be impossible */ + case USDHI6_WAIT_FOR_CMD: + usdhi6_error_code(host); + if (mrq) + mrq->cmd->error = -ETIMEDOUT; + break; + case USDHI6_WAIT_FOR_STOP: + usdhi6_error_code(host); + mrq->stop->error = -ETIMEDOUT; + break; + case USDHI6_WAIT_FOR_DMA: + case USDHI6_WAIT_FOR_MREAD: + case USDHI6_WAIT_FOR_MWRITE: + case USDHI6_WAIT_FOR_READ: + case USDHI6_WAIT_FOR_WRITE: + sg = host->sg ?: data->sg; + dev_dbg(mmc_dev(host->mmc), + "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", + data->flags & MMC_DATA_READ ?
'R' : 'W', host->page_idx, + host->offset, data->blocks, data->blksz, data->sg_len, + sg_dma_len(sg), sg->offset); + usdhi6_sg_unmap(host, true); + fallthrough; /* page unmapped in USDHI6_WAIT_FOR_DATA_END */ + case USDHI6_WAIT_FOR_DATA_END: + usdhi6_error_code(host); + data->error = -ETIMEDOUT; + } + + if (mrq) + usdhi6_request_done(host); +} + +/* Probe / release */ + +static const struct of_device_id usdhi6_of_match[] = { + {.compatible = "renesas,usdhi6rol0"}, + {} +}; +MODULE_DEVICE_TABLE(of, usdhi6_of_match); + +static int usdhi6_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mmc_host *mmc; + struct usdhi6_host *host; + struct resource *res; + int irq_cd, irq_sd, irq_sdio; + u32 version; + int ret; + + if (!dev->of_node) + return -ENODEV; + + irq_cd = platform_get_irq_byname(pdev, "card detect"); + irq_sd = platform_get_irq_byname(pdev, "data"); + irq_sdio = platform_get_irq_byname(pdev, "SDIO"); + if (irq_sd < 0) + return irq_sd; + if (irq_sdio < 0) + return irq_sdio; + + mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); + if (!mmc) + return -ENOMEM; + + ret = mmc_regulator_get_supply(mmc); + if (ret) + goto e_free_mmc; + + ret = mmc_of_parse(mmc); + if (ret < 0) + goto e_free_mmc; + + host = mmc_priv(mmc); + host->mmc = mmc; + host->wait = USDHI6_WAIT_FOR_REQUEST; + host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS); + /* + * We use a fixed timeout of 4s, hence inform the core about it. A + * future improvement should instead respect the cmd->busy_timeout. + */ + mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS; + + host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(host->pinctrl)) { + ret = PTR_ERR(host->pinctrl); + goto e_free_mmc; + } + + host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto e_free_mmc; + } + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto e_free_mmc; + } + + host->imclk = clk_get_rate(host->clk); + + ret = clk_prepare_enable(host->clk); + if (ret < 0) + goto e_free_mmc; + + version = usdhi6_read(host, USDHI6_VERSION); + if ((version & 0xfff) != 0xa0d) { + ret = -EPERM; + dev_err(dev, "Version not recognized %x\n", version); + goto e_clk_off; + } + + dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n", + usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT); + + usdhi6_mask_all(host); + + if (irq_cd >= 0) { + ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0, + dev_name(dev), host); + if (ret < 0) + goto e_clk_off; + } else { + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + + ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0, + dev_name(dev), host); + if (ret < 0) + goto e_clk_off; + + ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0, + dev_name(dev), host); + if (ret < 0) + goto e_clk_off; + + INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); + + usdhi6_dma_request(host, res->start); + + mmc->ops = &usdhi6_ops; + mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SDIO_IRQ; + /* Set .max_segs to some random number. Feel free to adjust. 
*/ + mmc->max_segs = 32; + mmc->max_blk_size = 512; + mmc->max_req_size = PAGE_SIZE * mmc->max_segs; + mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; + /* + * Setting .max_seg_size to 1 page would simplify our page-mapping code, + * but OTOH, having large segments makes DMA more efficient. We could + * check whether we managed to get DMA and fall back to 1 page + * segments, but if we do manage to obtain DMA and then it fails at + * run-time and we fall back to PIO, we will continue getting large + * segments. So, we wouldn't be able to get rid of the code anyway. + */ + mmc->max_seg_size = mmc->max_req_size; + if (!mmc->f_max) + mmc->f_max = host->imclk; + mmc->f_min = host->imclk / 512; + + platform_set_drvdata(pdev, host); + + ret = mmc_add_host(mmc); + if (ret < 0) + goto e_release_dma; + + return 0; + +e_release_dma: + usdhi6_dma_release(host); +e_clk_off: + clk_disable_unprepare(host->clk); +e_free_mmc: + mmc_free_host(mmc); + + return ret; +} + +static int usdhi6_remove(struct platform_device *pdev) +{ + struct usdhi6_host *host = platform_get_drvdata(pdev); + + mmc_remove_host(host->mmc); + + usdhi6_mask_all(host); + cancel_delayed_work_sync(&host->timeout_work); + usdhi6_dma_release(host); + clk_disable_unprepare(host->clk); + mmc_free_host(host->mmc); + + return 0; +} + +static struct platform_driver usdhi6_driver = { + .probe = usdhi6_probe, + .remove = usdhi6_remove, + .driver = { + .name = "usdhi6rol0", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = usdhi6_of_match, + }, +}; + +module_platform_driver(usdhi6_driver); + +MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:usdhi6rol0"); +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c new file mode 100644 index 000000000..9a6358fd9 --- /dev/null +++ b/drivers/mmc/host/ushc.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * USB SD Host Controller (USHC) driver. + * + * Copyright (C) 2010 Cambridge Silicon Radio Ltd. + * + * Notes: + * - Only version 2 devices are supported. + * - Version 2 devices only support SDIO cards/devices (R2 response is + * unsupported).
+ * + * References: + * [USHC] USB SD Host Controller specification (CS-118793-SP) + */ +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/mmc/host.h> + +enum ushc_request { + USHC_GET_CAPS = 0x00, + USHC_HOST_CTRL = 0x01, + USHC_PWR_CTRL = 0x02, + USHC_CLK_FREQ = 0x03, + USHC_EXEC_CMD = 0x04, + USHC_READ_RESP = 0x05, + USHC_RESET = 0x06, +}; + +enum ushc_request_type { + USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +}; + +#define USHC_GET_CAPS_VERSION_MASK 0xff +#define USHC_GET_CAPS_3V3 (1 << 8) +#define USHC_GET_CAPS_3V0 (1 << 9) +#define USHC_GET_CAPS_1V8 (1 << 10) +#define USHC_GET_CAPS_HIGH_SPD (1 << 16) + +#define USHC_HOST_CTRL_4BIT (1 << 1) +#define USHC_HOST_CTRL_HIGH_SPD (1 << 0) + +#define USHC_PWR_CTRL_OFF 0x00 +#define USHC_PWR_CTRL_3V3 0x01 +#define USHC_PWR_CTRL_3V0 0x02 +#define USHC_PWR_CTRL_1V8 0x03 + +#define USHC_READ_RESP_BUSY (1 << 4) +#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3) +#define USHC_READ_RESP_ERR_CRC (1 << 2) +#define USHC_READ_RESP_ERR_DAT (1 << 1) +#define USHC_READ_RESP_ERR_CMD (1 << 0) +#define USHC_READ_RESP_ERR_MASK 0x0f + +struct ushc_cbw { + __u8 signature; + __u8 cmd_idx; + __le16 block_size; + __le32 arg; +} __attribute__((packed)); + +#define USHC_CBW_SIGNATURE 'C' + +struct ushc_csw { + __u8 signature; + __u8 status; + __le32 response; +} __attribute__((packed)); + +#define USHC_CSW_SIGNATURE 'S' + +struct ushc_int_data { + u8 status; + u8 reserved[3]; +}; + +#define USHC_INT_STATUS_SDIO_INT (1 << 1) +#define USHC_INT_STATUS_CARD_PRESENT (1 << 0) + + +struct ushc_data { + struct usb_device *usb_dev; + struct mmc_host *mmc; + + struct urb *int_urb; + struct ushc_int_data *int_data; + + struct urb *cbw_urb; + struct ushc_cbw *cbw; + + struct urb *data_urb; + + struct urb *csw_urb; + struct ushc_csw *csw; + + spinlock_t lock; + struct mmc_request *current_req; + u32 caps; + u16 host_ctrl; + unsigned long flags; + u8 last_status; + int clock_freq; +}; + +#define DISCONNECTED 0 +#define INT_EN 1 +#define IGNORE_NEXT_INT 2 + +static void data_callback(struct urb *urb); + +static int ushc_hw_reset(struct ushc_data *ushc) +{ + return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), + USHC_RESET, USHC_RESET_TYPE, + 0, 0, NULL, 0, 100); +} + +static int ushc_hw_get_caps(struct ushc_data *ushc) +{ + int ret; + int version; + + ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0), + USHC_GET_CAPS, USHC_GET_CAPS_TYPE, + 0, 0, &ushc->caps, sizeof(ushc->caps), 100); + if (ret < 0) + return ret; + + ushc->caps = le32_to_cpu(ushc->caps); + + version = ushc->caps & USHC_GET_CAPS_VERSION_MASK; + if (version != 0x02) { + dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version); + return -EINVAL; + } + + return 0; +} + +static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val) +{ + u16 host_ctrl; + int ret; + + host_ctrl = (ushc->host_ctrl & ~mask) | val; + ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), + USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE, + host_ctrl, 0, NULL, 0, 100); + if (ret < 0) + return ret; + 
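+ /* Cache the value the device accepted so the next read-modify-write of ushc->host_ctrl starts from it. */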
ushc->host_ctrl = host_ctrl; + return 0; +} + +static void int_callback(struct urb *urb) +{ + struct ushc_data *ushc = urb->context; + u8 status, last_status; + + if (urb->status < 0) + return; + + status = ushc->int_data->status; + last_status = ushc->last_status; + ushc->last_status = status; + + /* + * Ignore the card interrupt status on interrupt transfers that + * were submitted while card interrupts were disabled. + * + * This avoids occasional spurious interrupts when enabling + * interrupts immediately after clearing the source on the card. + */ + + if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags) + && test_bit(INT_EN, &ushc->flags) + && status & USHC_INT_STATUS_SDIO_INT) { + mmc_signal_sdio_irq(ushc->mmc); + } + + if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT) + mmc_detect_change(ushc->mmc, msecs_to_jiffies(100)); + + if (!test_bit(INT_EN, &ushc->flags)) + set_bit(IGNORE_NEXT_INT, &ushc->flags); + usb_submit_urb(ushc->int_urb, GFP_ATOMIC); +} + +static void cbw_callback(struct urb *urb) +{ + struct ushc_data *ushc = urb->context; + + if (urb->status != 0) { + usb_unlink_urb(ushc->data_urb); + usb_unlink_urb(ushc->csw_urb); + } +} + +static void data_callback(struct urb *urb) +{ + struct ushc_data *ushc = urb->context; + + if (urb->status != 0) + usb_unlink_urb(ushc->csw_urb); +} + +static void csw_callback(struct urb *urb) +{ + struct ushc_data *ushc = urb->context; + struct mmc_request *req = ushc->current_req; + int status; + + status = ushc->csw->status; + + if (urb->status != 0) { + req->cmd->error = urb->status; + } else if (status & USHC_READ_RESP_ERR_CMD) { + if (status & USHC_READ_RESP_ERR_CRC) + req->cmd->error = -EIO; + else + req->cmd->error = -ETIMEDOUT; + } + if (req->data) { + if (status & USHC_READ_RESP_ERR_DAT) { + if (status & USHC_READ_RESP_ERR_CRC) + req->data->error = -EIO; + else + req->data->error = -ETIMEDOUT; + req->data->bytes_xfered = 0; + } else { + req->data->bytes_xfered = req->data->blksz * req->data->blocks; + } + } + + req->cmd->resp[0] = le32_to_cpu(ushc->csw->response); + + mmc_request_done(ushc->mmc, req); +} + +static void ushc_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct ushc_data *ushc = mmc_priv(mmc); + int ret; + unsigned long flags; + + spin_lock_irqsave(&ushc->lock, flags); + + if (test_bit(DISCONNECTED, &ushc->flags)) { + ret = -ENODEV; + goto out; + } + + /* Version 2 firmware doesn't support the R2 response format. */ + if (req->cmd->flags & MMC_RSP_136) { + ret = -EINVAL; + goto out; + } + + /* The Astoria's data FIFOs don't work with clock speeds < 5MHz so + limit commands with data to 6MHz or more. */ + if (req->data && ushc->clock_freq < 6000000) { + ret = -EINVAL; + goto out; + } + + ushc->current_req = req; + + /* Start cmd with CBW. */ + ushc->cbw->cmd_idx = cpu_to_le16(req->cmd->opcode); + if (req->data) + ushc->cbw->block_size = cpu_to_le16(req->data->blksz); + else + ushc->cbw->block_size = 0; + ushc->cbw->arg = cpu_to_le32(req->cmd->arg); + + ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC); + if (ret < 0) + goto out; + + /* Submit data (if any).
*/ + if (req->data) { + struct mmc_data *data = req->data; + int pipe; + + if (data->flags & MMC_DATA_READ) + pipe = usb_rcvbulkpipe(ushc->usb_dev, 6); + else + pipe = usb_sndbulkpipe(ushc->usb_dev, 2); + + usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe, + NULL, data->sg->length, + data_callback, ushc); + ushc->data_urb->num_sgs = 1; + ushc->data_urb->sg = data->sg; + ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC); + if (ret < 0) + goto out; + } + + /* Submit CSW. */ + ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC); + +out: + spin_unlock_irqrestore(&ushc->lock, flags); + if (ret < 0) { + usb_unlink_urb(ushc->cbw_urb); + usb_unlink_urb(ushc->data_urb); + req->cmd->error = ret; + mmc_request_done(mmc, req); + } +} + +static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode) +{ + u16 voltage; + + switch (power_mode) { + case MMC_POWER_OFF: + voltage = USHC_PWR_CTRL_OFF; + break; + case MMC_POWER_UP: + case MMC_POWER_ON: + voltage = USHC_PWR_CTRL_3V3; + break; + default: + return -EINVAL; + } + + return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), + USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE, + voltage, 0, NULL, 0, 100); +} + +static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width) +{ + return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT, + bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0); +} + +static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs) +{ + int ret; + + /* Hardware can't detect interrupts while the clock is off. */ + if (clk == 0) + clk = 400000; + + ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD, + enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0); + if (ret < 0) + return ret; + + ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), + USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE, + clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100); + if (ret < 0) + return ret; + + ushc->clock_freq = clk; + return 0; +} + +static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct ushc_data *ushc = mmc_priv(mmc); + + ushc_set_power(ushc, ios->power_mode); + ushc_set_bus_width(ushc, 1 << ios->bus_width); + ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS); +} + +static int ushc_get_cd(struct mmc_host *mmc) +{ + struct ushc_data *ushc = mmc_priv(mmc); + + return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT); +} + +static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct ushc_data *ushc = mmc_priv(mmc); + + if (enable) + set_bit(INT_EN, &ushc->flags); + else + clear_bit(INT_EN, &ushc->flags); +} + +static void ushc_clean_up(struct ushc_data *ushc) +{ + usb_free_urb(ushc->int_urb); + usb_free_urb(ushc->csw_urb); + usb_free_urb(ushc->data_urb); + usb_free_urb(ushc->cbw_urb); + + kfree(ushc->int_data); + kfree(ushc->cbw); + kfree(ushc->csw); + + mmc_free_host(ushc->mmc); +} + +static const struct mmc_host_ops ushc_ops = { + .request = ushc_request, + .set_ios = ushc_set_ios, + .get_cd = ushc_get_cd, + .enable_sdio_irq = ushc_enable_sdio_irq, +}; + +static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(intf); + struct mmc_host *mmc; + struct ushc_data *ushc; + int ret; + + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + + mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); + if (mmc == NULL) + return -ENOMEM; + ushc = mmc_priv(mmc); + usb_set_intfdata(intf, ushc); + + ushc->usb_dev = usb_dev; + ushc->mmc = mmc; + + spin_lock_init(&ushc->lock); + 
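+ /* Bring the controller to a known state before querying its capabilities. */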
+ ret = ushc_hw_reset(ushc); + if (ret < 0) + goto err; + + /* Read capabilities. */ + ret = ushc_hw_get_caps(ushc); + if (ret < 0) + goto err; + + mmc->ops = &ushc_ops; + + mmc->f_min = 400000; + mmc->f_max = 50000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0; + + mmc->max_seg_size = 512*511; + mmc->max_segs = 1; + mmc->max_req_size = 512*511; + mmc->max_blk_size = 512; + mmc->max_blk_count = 511; + + ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); + if (ushc->int_urb == NULL) { + ret = -ENOMEM; + goto err; + } + ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); + if (ushc->int_data == NULL) { + ret = -ENOMEM; + goto err; + } + usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, + usb_rcvintpipe(usb_dev, + intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), + ushc->int_data, sizeof(struct ushc_int_data), + int_callback, ushc, + intf->cur_altsetting->endpoint[0].desc.bInterval); + + ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); + if (ushc->cbw_urb == NULL) { + ret = -ENOMEM; + goto err; + } + ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); + if (ushc->cbw == NULL) { + ret = -ENOMEM; + goto err; + } + ushc->cbw->signature = USHC_CBW_SIGNATURE; + + usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), + ushc->cbw, sizeof(struct ushc_cbw), + cbw_callback, ushc); + + ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); + if (ushc->data_urb == NULL) { + ret = -ENOMEM; + goto err; + } + + ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); + if (ushc->csw_urb == NULL) { + ret = -ENOMEM; + goto err; + } + ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL); + if (ushc->csw == NULL) { + ret = -ENOMEM; + goto err; + } + usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), + ushc->csw, sizeof(struct ushc_csw), + csw_callback, ushc); + + ret = mmc_add_host(ushc->mmc); + if (ret) + goto err; + + ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL); + if (ret < 0) { + mmc_remove_host(ushc->mmc); + goto err; + } + + return 0; + +err: + ushc_clean_up(ushc); + return ret; +} + +static void ushc_disconnect(struct usb_interface *intf) +{ + struct ushc_data *ushc = usb_get_intfdata(intf); + + spin_lock_irq(&ushc->lock); + set_bit(DISCONNECTED, &ushc->flags); + spin_unlock_irq(&ushc->lock); + + usb_kill_urb(ushc->int_urb); + usb_kill_urb(ushc->cbw_urb); + usb_kill_urb(ushc->data_urb); + usb_kill_urb(ushc->csw_urb); + + mmc_remove_host(ushc->mmc); + + ushc_clean_up(ushc); +} + +static struct usb_device_id ushc_id_table[] = { + /* CSR USB SD Host Controller */ + { USB_DEVICE(0x0a12, 0x5d10) }, + { }, +}; +MODULE_DEVICE_TABLE(usb, ushc_id_table); + +static struct usb_driver ushc_driver = { + .name = "ushc", + .id_table = ushc_id_table, + .probe = ushc_probe, + .disconnect = ushc_disconnect, +}; + +module_usb_driver(ushc_driver); + +MODULE_DESCRIPTION("USB SD Host Controller driver"); +MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c new file mode 100644 index 000000000..f6b525fb5 --- /dev/null +++ b/drivers/mmc/host/via-sdmmc.c @@ -0,0 +1,1320 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver + * Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
+ */ + +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/dma-mapping.h> +#include <linux/highmem.h> +#include <linux/delay.h> +#include <linux/interrupt.h> + +#include <linux/mmc/host.h> + +#define DRV_NAME "via_sdmmc" + +#define PCI_DEVICE_ID_VIA_9530 0x9530 + +#define VIA_CRDR_SDC_OFF 0x200 +#define VIA_CRDR_DDMA_OFF 0x400 +#define VIA_CRDR_PCICTRL_OFF 0x600 + +#define VIA_CRDR_MIN_CLOCK 375000 +#define VIA_CRDR_MAX_CLOCK 48000000 + +/* + * PCI registers + */ + +#define VIA_CRDR_PCI_WORK_MODE 0x40 +#define VIA_CRDR_PCI_DBG_MODE 0x41 + +/* + * SDC MMIO Registers + */ + +#define VIA_CRDR_SDCTRL 0x0 +#define VIA_CRDR_SDCTRL_START 0x01 +#define VIA_CRDR_SDCTRL_WRITE 0x04 +#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10 +#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20 +#define VIA_CRDR_SDCTRL_MULTI_WR 0x30 +#define VIA_CRDR_SDCTRL_MULTI_RD 0x40 +#define VIA_CRDR_SDCTRL_STOP 0x70 + +#define VIA_CRDR_SDCTRL_RSP_NONE 0x0 +#define VIA_CRDR_SDCTRL_RSP_R1 0x10000 +#define VIA_CRDR_SDCTRL_RSP_R2 0x20000 +#define VIA_CRDR_SDCTRL_RSP_R3 0x30000 +#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000 + +#define VIA_CRDR_SDCARG 0x4 + +#define VIA_CRDR_SDBUSMODE 0x8 +#define VIA_CRDR_SDMODE_4BIT 0x02 +#define VIA_CRDR_SDMODE_CLK_ON 0x40 + +#define VIA_CRDR_SDBLKLEN 0xc +/* + * Bit 0 - Bit 10 : Block length. So, the maximum block length should be 2048. + * Bit 11 - Bit 13 : Reserved. + * GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design. + * INTEN : Enable SD host interrupt. + * Bit 16 - Bit 31 : Block count. So, the maximum block count should be 65536. + */ +#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000 +#define VIA_CRDR_SDBLKLEN_INTEN 0x8000 +#define VIA_CRDR_MAX_BLOCK_COUNT 65536 +#define VIA_CRDR_MAX_BLOCK_LENGTH 2048 + +#define VIA_CRDR_SDRESP0 0x10 +#define VIA_CRDR_SDRESP1 0x14 +#define VIA_CRDR_SDRESP2 0x18 +#define VIA_CRDR_SDRESP3 0x1c + +#define VIA_CRDR_SDCURBLKCNT 0x20 + +#define VIA_CRDR_SDINTMASK 0x24 +/* + * MBDIE : Multiple Blocks transfer Done Interrupt Enable + * BDDIE : Block Data transfer Done Interrupt Enable + * CIRIE : Card Insertion or Removal Interrupt Enable + * CRDIE : Command-Response transfer Done Interrupt Enable + * CRTOIE : Command-Response response TimeOut Interrupt Enable + * ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable + * DTIE : Data access Timeout Interrupt Enable + * SCIE : reSponse CRC error Interrupt Enable + * RCIE : Read data CRC error Interrupt Enable + * WCIE : Write data CRC error Interrupt Enable + */ +#define VIA_CRDR_SDINTMASK_MBDIE 0x10 +#define VIA_CRDR_SDINTMASK_BDDIE 0x20 +#define VIA_CRDR_SDINTMASK_CIRIE 0x80 +#define VIA_CRDR_SDINTMASK_CRDIE 0x200 +#define VIA_CRDR_SDINTMASK_CRTOIE 0x400 +#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800 +#define VIA_CRDR_SDINTMASK_DTIE 0x1000 +#define VIA_CRDR_SDINTMASK_SCIE 0x2000 +#define VIA_CRDR_SDINTMASK_RCIE 0x4000 +#define VIA_CRDR_SDINTMASK_WCIE 0x8000 + +#define VIA_CRDR_SDACTIVE_INTMASK \ + (VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \ + | VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \ + | VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \ + | VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE) + +#define VIA_CRDR_SDSTATUS 0x28 +/* + * CECC : Reserved + * WP : SD card Write Protect status + * SLOTD : Reserved + * SLOTG : SD SLOT status(Gpi pin status) + * MBD : Multiple Blocks transfer Done interrupt status + * BDD : Block Data transfer Done interrupt status + * CD : Reserved + * CIR : Card Insertion or Removal interrupt detected on GPI pin + * IO : Reserved + * CRD : Command-Response transfer Done interrupt status + * CRTO : Command-Response response TimeOut interrupt status + * ASCRDIE : Auto Stop
Command-Response transfer Done interrupt status + * DT : Data access Timeout interrupt status + * SC : reSponse CRC error interrupt status + * RC : Read data CRC error interrupt status + * WC : Write data CRC error interrupt status + */ +#define VIA_CRDR_SDSTS_CECC 0x01 +#define VIA_CRDR_SDSTS_WP 0x02 +#define VIA_CRDR_SDSTS_SLOTD 0x04 +#define VIA_CRDR_SDSTS_SLOTG 0x08 +#define VIA_CRDR_SDSTS_MBD 0x10 +#define VIA_CRDR_SDSTS_BDD 0x20 +#define VIA_CRDR_SDSTS_CD 0x40 +#define VIA_CRDR_SDSTS_CIR 0x80 +#define VIA_CRDR_SDSTS_IO 0x100 +#define VIA_CRDR_SDSTS_CRD 0x200 +#define VIA_CRDR_SDSTS_CRTO 0x400 +#define VIA_CRDR_SDSTS_ASCRDIE 0x800 +#define VIA_CRDR_SDSTS_DT 0x1000 +#define VIA_CRDR_SDSTS_SC 0x2000 +#define VIA_CRDR_SDSTS_RC 0x4000 +#define VIA_CRDR_SDSTS_WC 0x8000 + +#define VIA_CRDR_SDSTS_IGN_MASK\ + (VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO) +#define VIA_CRDR_SDSTS_INT_MASK \ + (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \ + | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \ + | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \ + | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC) +#define VIA_CRDR_SDSTS_W1C_MASK \ + (VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \ + | VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \ + | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \ + | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC) +#define VIA_CRDR_SDSTS_CMD_MASK \ + (VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC) +#define VIA_CRDR_SDSTS_DATA_MASK\ + (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \ + | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC) + +#define VIA_CRDR_SDSTATUS2 0x2a +/* + * CFE : Enable SD host automatic Clock FReezing + */ +#define VIA_CRDR_SDSTS_CFE 0x80 + +#define VIA_CRDR_SDRSPTMO 0x2C + +#define VIA_CRDR_SDCLKSEL 0x30 + +#define VIA_CRDR_SDEXTCTRL 0x34 +#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01 +#define VIS_CRDR_SDEXTCTRL_SHIFT_9 0x02 +#define VIS_CRDR_SDEXTCTRL_MMC_8BIT 0x04 +#define VIS_CRDR_SDEXTCTRL_RELD_BLK 0x08 +#define VIS_CRDR_SDEXTCTRL_BAD_CMDA 0x10 +#define VIS_CRDR_SDEXTCTRL_BAD_DATA 0x20 +#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40 +#define VIA_CRDR_SDEXTCTRL_HISPD 0x80 +/* 0x38-0xFF reserved */ + +/* + * Data DMA Control Registers + */ + +#define VIA_CRDR_DMABASEADD 0x0 +#define VIA_CRDR_DMACOUNTER 0x4 + +#define VIA_CRDR_DMACTRL 0x8 +/* + * DIR : Transaction Direction + * 0 : From card to memory + * 1 : From memory to card + */ +#define VIA_CRDR_DMACTRL_DIR 0x100 +#define VIA_CRDR_DMACTRL_ENIRQ 0x10000 +#define VIA_CRDR_DMACTRL_SFTRST 0x1000000 + +#define VIA_CRDR_DMASTS 0xc + +#define VIA_CRDR_DMASTART 0x10 +/*0x14-0xFF reserved*/ + +/* + * PCI Control Registers + */ + +/*0x0 - 0x1 reserved*/ +#define VIA_CRDR_PCICLKGATT 0x2 +/* + * SFTRST : + * 0 : Soft reset the whole controller; it is de-asserted automatically + * 1 : Soft reset is de-asserted + */ +#define VIA_CRDR_PCICLKGATT_SFTRST 0x01 +/* + * 3V3 : Pad power select + * 0 : 1.8V + * 1 : 3.3V + * NOTE : No matter what the actual value should be, this bit always + * reads as 0. This is a hardware bug. + */ +#define VIA_CRDR_PCICLKGATT_3V3 0x10 +/* + * PAD_PWRON : Pad Power on/off select + * 0 : Power off + * 1 : Power on + * NOTE : No matter what the actual value should be, this bit always + * reads as 0. This is a hardware bug.
+ */ +#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20 + +#define VIA_CRDR_PCISDCCLK 0x5 + +#define VIA_CRDR_PCIDMACLK 0x7 +#define VIA_CRDR_PCIDMACLK_SDC 0x2 + +#define VIA_CRDR_PCIINTCTRL 0x8 +#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04 + +#define VIA_CRDR_PCIINTSTATUS 0x9 +#define VIA_CRDR_PCIINTSTATUS_SDC 0x04 + +#define VIA_CRDR_PCITMOCTRL 0xa +#define VIA_CRDR_PCITMOCTRL_NO 0x0 +#define VIA_CRDR_PCITMOCTRL_32US 0x1 +#define VIA_CRDR_PCITMOCTRL_256US 0x2 +#define VIA_CRDR_PCITMOCTRL_1024US 0x3 +#define VIA_CRDR_PCITMOCTRL_256MS 0x4 +#define VIA_CRDR_PCITMOCTRL_512MS 0x5 +#define VIA_CRDR_PCITMOCTRL_1024MS 0x6 + +/*0xB-0xFF reserved*/ + +enum PCI_HOST_CLK_CONTROL { + PCI_CLK_375K = 0x03, + PCI_CLK_8M = 0x04, + PCI_CLK_12M = 0x00, + PCI_CLK_16M = 0x05, + PCI_CLK_24M = 0x01, + PCI_CLK_33M = 0x06, + PCI_CLK_48M = 0x02 +}; + +struct sdhcreg { + u32 sdcontrol_reg; + u32 sdcmdarg_reg; + u32 sdbusmode_reg; + u32 sdblklen_reg; + u32 sdresp_reg[4]; + u32 sdcurblkcnt_reg; + u32 sdintmask_reg; + u32 sdstatus_reg; + u32 sdrsptmo_reg; + u32 sdclksel_reg; + u32 sdextctrl_reg; +}; + +struct pcictrlreg { + u8 reserve[2]; + u8 pciclkgat_reg; + u8 pcinfcclk_reg; + u8 pcimscclk_reg; + u8 pcisdclk_reg; + u8 pcicaclk_reg; + u8 pcidmaclk_reg; + u8 pciintctrl_reg; + u8 pciintstatus_reg; + u8 pcitmoctrl_reg; + u8 Resv; +}; + +struct via_crdr_mmc_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + void __iomem *mmiobase; + void __iomem *sdhc_mmiobase; + void __iomem *ddma_mmiobase; + void __iomem *pcictrl_mmiobase; + + struct pcictrlreg pm_pcictrl_reg; + struct sdhcreg pm_sdhc_reg; + + struct work_struct carddet_work; + struct tasklet_struct finish_tasklet; + + struct timer_list timer; + spinlock_t lock; + u8 power; + int reject; + unsigned int quirks; +}; + +/* some devices need a very long delay for power to stabilize */ +#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 + +#define VIA_CMD_TIMEOUT_MS 1000 + +static const struct pci_device_id via_ids[] = { + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, via_ids); + +static void via_print_sdchc(struct via_crdr_mmc_host *host) +{ + void __iomem *addrbase = host->sdhc_mmiobase; + + pr_debug("SDC MMIO Registers:\n"); + pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n", + readl(addrbase + VIA_CRDR_SDCTRL), + readl(addrbase + VIA_CRDR_SDCARG), + readl(addrbase + VIA_CRDR_SDBUSMODE)); + pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n", + readl(addrbase + VIA_CRDR_SDBLKLEN), + readl(addrbase + VIA_CRDR_SDCURBLKCNT), + readl(addrbase + VIA_CRDR_SDINTMASK)); + pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n", + readl(addrbase + VIA_CRDR_SDSTATUS), + readl(addrbase + VIA_CRDR_SDCLKSEL), + readl(addrbase + VIA_CRDR_SDEXTCTRL)); +} + +static void via_print_pcictrl(struct via_crdr_mmc_host *host) +{ + void __iomem *addrbase = host->pcictrl_mmiobase; + + pr_debug("PCI Control Registers:\n"); + pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n", + readb(addrbase + VIA_CRDR_PCICLKGATT), + readb(addrbase + VIA_CRDR_PCISDCCLK), + readb(addrbase + VIA_CRDR_PCIDMACLK)); + pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n", + readb(addrbase + VIA_CRDR_PCIINTCTRL), + readb(addrbase + VIA_CRDR_PCIINTSTATUS)); +} + +static void via_save_pcictrlreg(struct via_crdr_mmc_host *host) +{ + struct pcictrlreg *pm_pcictrl_reg; + void __iomem *addrbase; + + pm_pcictrl_reg = &(host->pm_pcictrl_reg); + addrbase = host->pcictrl_mmiobase; + 
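+ /* + * The 3V3 and PAD_PWRON bits always read back as 0 (hardware bug, see + * the register description above), so treat them as set in the saved copy. + */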
+ pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT); + pm_pcictrl_reg->pciclkgat_reg |= + VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON; + pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK); + pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK); + pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL); + pm_pcictrl_reg->pciintstatus_reg = + readb(addrbase + VIA_CRDR_PCIINTSTATUS); + pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL); +} + +static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host) +{ + struct pcictrlreg *pm_pcictrl_reg; + void __iomem *addrbase; + + pm_pcictrl_reg = &(host->pm_pcictrl_reg); + addrbase = host->pcictrl_mmiobase; + + writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT); + writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK); + writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK); + writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL); + writeb(pm_pcictrl_reg->pciintstatus_reg, + addrbase + VIA_CRDR_PCIINTSTATUS); + writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL); +} + +static void via_save_sdcreg(struct via_crdr_mmc_host *host) +{ + struct sdhcreg *pm_sdhc_reg; + void __iomem *addrbase; + + pm_sdhc_reg = &(host->pm_sdhc_reg); + addrbase = host->sdhc_mmiobase; + + pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL); + pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG); + pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE); + pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN); + pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT); + pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK); + pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS); + pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO); + pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL); + pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL); +} + +static void via_restore_sdcreg(struct via_crdr_mmc_host *host) +{ + struct sdhcreg *pm_sdhc_reg; + void __iomem *addrbase; + + pm_sdhc_reg = &(host->pm_sdhc_reg); + addrbase = host->sdhc_mmiobase; + + writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL); + writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG); + writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE); + writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN); + writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT); + writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK); + writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS); + writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO); + writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL); + writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL); +} + +static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost) +{ + if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY) + msleep(300); + else + msleep(3); +} + +static void via_set_ddma(struct via_crdr_mmc_host *host, + dma_addr_t dmaaddr, u32 count, int dir, int enirq) +{ + void __iomem *addrbase; + u32 ctrl_data = 0; + + if (enirq) + ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ; + + if (dir) + ctrl_data |= VIA_CRDR_DMACTRL_DIR; + + addrbase = host->ddma_mmiobase; + + writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD); + writel(count, addrbase + VIA_CRDR_DMACOUNTER); + 
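+ /* Program the direction and IRQ-enable bits, then kick off the transfer. */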
writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL); + writel(0x01, addrbase + VIA_CRDR_DMASTART); + + /* It seems that our DMA can not work normally with 375kHz clock */ + /* FIXME: don't brute-force 8MHz but use PIO at 375kHz !! */ + addrbase = host->pcictrl_mmiobase; + if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) { + dev_info(host->mmc->parent, "forcing card speed to 8MHz\n"); + writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK); + } +} + +static void via_sdc_preparedata(struct via_crdr_mmc_host *host, + struct mmc_data *data) +{ + void __iomem *addrbase; + u32 blk_reg; + int count; + + WARN_ON(host->data); + + /* Sanity checks */ + BUG_ON(data->blksz > host->mmc->max_blk_size); + BUG_ON(data->blocks > host->mmc->max_blk_count); + + host->data = data; + + count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + ((data->flags & MMC_DATA_READ) ? + PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); + BUG_ON(count != 1); + + via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg), + (data->flags & MMC_DATA_WRITE) ? 1 : 0, 1); + + addrbase = host->sdhc_mmiobase; + + blk_reg = data->blksz - 1; + blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN; + blk_reg |= (data->blocks) << 16; + + writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN); +} + +static void via_sdc_get_response(struct via_crdr_mmc_host *host, + struct mmc_command *cmd) +{ + void __iomem *addrbase = host->sdhc_mmiobase; + u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0); + u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1); + u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2); + u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3); + + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = ((u8) (dwdata1)) | + (((u8) (dwdata0 >> 24)) << 8) | + (((u8) (dwdata0 >> 16)) << 16) | + (((u8) (dwdata0 >> 8)) << 24); + + cmd->resp[1] = ((u8) (dwdata2)) | + (((u8) (dwdata1 >> 24)) << 8) | + (((u8) (dwdata1 >> 16)) << 16) | + (((u8) (dwdata1 >> 8)) << 24); + + cmd->resp[2] = ((u8) (dwdata3)) | + (((u8) (dwdata2 >> 24)) << 8) | + (((u8) (dwdata2 >> 16)) << 16) | + (((u8) (dwdata2 >> 8)) << 24); + + cmd->resp[3] = 0xff | + ((((u8) (dwdata3 >> 24))) << 8) | + (((u8) (dwdata3 >> 16)) << 16) | + (((u8) (dwdata3 >> 8)) << 24); + } else { + dwdata0 >>= 8; + cmd->resp[0] = ((dwdata0 & 0xff) << 24) | + (((dwdata0 >> 8) & 0xff) << 16) | + (((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff); + + dwdata1 >>= 8; + cmd->resp[1] = ((dwdata1 & 0xff) << 24) | + (((dwdata1 >> 8) & 0xff) << 16) | + (((dwdata1 >> 16) & 0xff) << 8); + } +} + +static void via_sdc_send_command(struct via_crdr_mmc_host *host, + struct mmc_command *cmd) +{ + void __iomem *addrbase; + struct mmc_data *data; + unsigned int timeout_ms; + u32 cmdctrl = 0; + + WARN_ON(host->cmd); + + data = cmd->data; + host->cmd = cmd; + + timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : VIA_CMD_TIMEOUT_MS; + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms)); + + /*Command index*/ + cmdctrl = cmd->opcode << 8; + + /*Response type*/ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE; + break; + case MMC_RSP_R1: + cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1; + break; + case MMC_RSP_R1B: + cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B; + break; + case MMC_RSP_R2: + cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2; + break; + case MMC_RSP_R3: + cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3; + break; + default: + pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc)); + break; + } + + if (!(cmd->data)) + goto nodata; + + via_sdc_preparedata(host, data); + + /*Command control*/ + if (data->blocks > 1) { + if (data->flags & MMC_DATA_WRITE) { + cmdctrl |= VIA_CRDR_SDCTRL_WRITE; + cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR; + } else { + cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD; + } + } else { + if (data->flags & MMC_DATA_WRITE) { + cmdctrl |= VIA_CRDR_SDCTRL_WRITE; + cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR; + } else { + cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD; + } + } + +nodata: + if (cmd == host->mrq->stop) + cmdctrl |= VIA_CRDR_SDCTRL_STOP; + + cmdctrl |= VIA_CRDR_SDCTRL_START; + + addrbase = host->sdhc_mmiobase; + writel(cmd->arg, addrbase + VIA_CRDR_SDCARG); + writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL); +} + +static void via_sdc_finish_data(struct via_crdr_mmc_host *host) +{ + struct mmc_data *data; + + BUG_ON(!host->data); + + data = host->data; + host->data = NULL; + + if (data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blocks * data->blksz; + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + ((data->flags & MMC_DATA_READ) ? + PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); + + if (data->stop) + via_sdc_send_command(host, data->stop); + else + tasklet_schedule(&host->finish_tasklet); +} + +static void via_sdc_finish_command(struct via_crdr_mmc_host *host) +{ + via_sdc_get_response(host, host->cmd); + + host->cmd->error = 0; + + if (!host->cmd->data) + tasklet_schedule(&host->finish_tasklet); + + host->cmd = NULL; +} + +static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + void __iomem *addrbase; + struct via_crdr_mmc_host *host; + unsigned long flags; + u16 status; + + host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + + addrbase = host->pcictrl_mmiobase; + writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK); + + status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); + status &= VIA_CRDR_SDSTS_W1C_MASK; + writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); + + WARN_ON(host->mrq != NULL); + host->mrq = mrq; + + status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); + if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) { + host->mrq->cmd->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } else { + via_sdc_send_command(host, mrq->cmd); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void via_sdc_set_power(struct via_crdr_mmc_host *host, + unsigned short power, unsigned int on) +{ + unsigned long flags; + u8 gatt; + + spin_lock_irqsave(&host->lock, flags); + + host->power = (1 << power); + + gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + if (host->power == MMC_VDD_165_195) + gatt &= ~VIA_CRDR_PCICLKGATT_3V3; + else + gatt |= VIA_CRDR_PCICLKGATT_3V3; + if (on) + gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON; + else + gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON; + writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + + 
spin_unlock_irqrestore(&host->lock, flags); + + via_pwron_sleep(host); +} + +static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct via_crdr_mmc_host *host; + unsigned long flags; + void __iomem *addrbase; + u32 org_data, sdextctrl; + u8 clock; + + host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + + addrbase = host->sdhc_mmiobase; + org_data = readl(addrbase + VIA_CRDR_SDBUSMODE); + sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL); + + if (ios->bus_width == MMC_BUS_WIDTH_1) + org_data &= ~VIA_CRDR_SDMODE_4BIT; + else + org_data |= VIA_CRDR_SDMODE_4BIT; + + if (ios->power_mode == MMC_POWER_OFF) + org_data &= ~VIA_CRDR_SDMODE_CLK_ON; + else + org_data |= VIA_CRDR_SDMODE_CLK_ON; + + if (ios->timing == MMC_TIMING_SD_HS) + sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD; + else + sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD; + + writel(org_data, addrbase + VIA_CRDR_SDBUSMODE); + writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL); + + if (ios->clock >= 48000000) + clock = PCI_CLK_48M; + else if (ios->clock >= 33000000) + clock = PCI_CLK_33M; + else if (ios->clock >= 24000000) + clock = PCI_CLK_24M; + else if (ios->clock >= 16000000) + clock = PCI_CLK_16M; + else if (ios->clock >= 12000000) + clock = PCI_CLK_12M; + else if (ios->clock >= 8000000) + clock = PCI_CLK_8M; + else + clock = PCI_CLK_375K; + + addrbase = host->pcictrl_mmiobase; + if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock) + writeb(clock, addrbase + VIA_CRDR_PCISDCCLK); + + spin_unlock_irqrestore(&host->lock, flags); + + if (ios->power_mode != MMC_POWER_OFF) + via_sdc_set_power(host, ios->vdd, 1); + else + via_sdc_set_power(host, ios->vdd, 0); +} + +static int via_sdc_get_ro(struct mmc_host *mmc) +{ + struct via_crdr_mmc_host *host; + unsigned long flags; + u16 status; + + host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + + status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS); + + spin_unlock_irqrestore(&host->lock, flags); + + return !(status & VIA_CRDR_SDSTS_WP); +} + +static const struct mmc_host_ops via_sdc_ops = { + .request = via_sdc_request, + .set_ios = via_sdc_set_ios, + .get_ro = via_sdc_get_ro, +}; + +static void via_reset_pcictrl(struct via_crdr_mmc_host *host) +{ + unsigned long flags; + u8 gatt; + + spin_lock_irqsave(&host->lock, flags); + + via_save_pcictrlreg(host); + via_save_sdcreg(host); + + spin_unlock_irqrestore(&host->lock, flags); + + gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON; + if (host->power == MMC_VDD_165_195) + gatt &= ~VIA_CRDR_PCICLKGATT_3V3; + else + gatt |= VIA_CRDR_PCICLKGATT_3V3; + writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + via_pwron_sleep(host); + gatt |= VIA_CRDR_PCICLKGATT_SFTRST; + writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + msleep(3); + + spin_lock_irqsave(&host->lock, flags); + + via_restore_pcictrlreg(host); + via_restore_sdcreg(host); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask) +{ + BUG_ON(intmask == 0); + + if (!host->cmd) { + pr_err("%s: Got command interrupt 0x%x even " + "though no command operation was in progress.\n", + mmc_hostname(host->mmc), intmask); + return; + } + + if (intmask & VIA_CRDR_SDSTS_CRTO) + host->cmd->error = -ETIMEDOUT; + else if (intmask & VIA_CRDR_SDSTS_SC) + host->cmd->error = -EILSEQ; + + if (host->cmd->error) + tasklet_schedule(&host->finish_tasklet); + else if (intmask & VIA_CRDR_SDSTS_CRD) + via_sdc_finish_command(host); +} + +static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{ + BUG_ON(intmask == 0); + + if (!host->data) + return; + + if (intmask & VIA_CRDR_SDSTS_DT) + host->data->error = -ETIMEDOUT; + else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)) + host->data->error = -EILSEQ; + + via_sdc_finish_data(host); +} + +static irqreturn_t via_sdc_isr(int irq, void *dev_id) +{ + struct via_crdr_mmc_host *sdhost = dev_id; + void __iomem *addrbase; + u8 pci_status; + u16 sd_status; + irqreturn_t result; + + if (!sdhost) + return IRQ_NONE; + + spin_lock(&sdhost->lock); + + addrbase = sdhost->pcictrl_mmiobase; + pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS); + if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) { + result = IRQ_NONE; + goto out; + } + + addrbase = sdhost->sdhc_mmiobase; + sd_status = readw(addrbase + VIA_CRDR_SDSTATUS); + sd_status &= VIA_CRDR_SDSTS_INT_MASK; + sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK; + if (!sd_status) { + result = IRQ_NONE; + goto out; + } + + if (sd_status & VIA_CRDR_SDSTS_CIR) { + writew(sd_status & VIA_CRDR_SDSTS_CIR, + addrbase + VIA_CRDR_SDSTATUS); + + schedule_work(&sdhost->carddet_work); + } + + sd_status &= ~VIA_CRDR_SDSTS_CIR; + if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) { + writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK, + addrbase + VIA_CRDR_SDSTATUS); + via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK); + } + if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) { + writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK, + addrbase + VIA_CRDR_SDSTATUS); + via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK); + } + + sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK); + if (sd_status) { + pr_err("%s: Unexpected interrupt 0x%x\n", + mmc_hostname(sdhost->mmc), sd_status); + writew(sd_status, addrbase + VIA_CRDR_SDSTATUS); + } + + result = IRQ_HANDLED; + +out: + spin_unlock(&sdhost->lock); + + return result; +} + +static void via_sdc_timeout(struct timer_list *t) +{ + struct via_crdr_mmc_host *sdhost; + unsigned long flags; + + sdhost = from_timer(sdhost, t, timer); + + spin_lock_irqsave(&sdhost->lock, flags); + + if (sdhost->mrq) { + pr_err("%s: Timeout waiting for hardware interrupt. "
+ "cmd:0x%x\n", mmc_hostname(sdhost->mmc), + sdhost->mrq->cmd->opcode); + + if (sdhost->data) { + writel(VIA_CRDR_DMACTRL_SFTRST, + sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL); + sdhost->data->error = -ETIMEDOUT; + via_sdc_finish_data(sdhost); + } else { + if (sdhost->cmd) + sdhost->cmd->error = -ETIMEDOUT; + else + sdhost->mrq->cmd->error = -ETIMEDOUT; + tasklet_schedule(&sdhost->finish_tasklet); + } + } + + spin_unlock_irqrestore(&sdhost->lock, flags); +} + +static void via_sdc_tasklet_finish(unsigned long param) +{ + struct via_crdr_mmc_host *host; + unsigned long flags; + struct mmc_request *mrq; + + host = (struct via_crdr_mmc_host *)param; + + spin_lock_irqsave(&host->lock, flags); + + del_timer(&host->timer); + mrq = host->mrq; + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + + spin_unlock_irqrestore(&host->lock, flags); + + mmc_request_done(host->mmc, mrq); +} + +static void via_sdc_card_detect(struct work_struct *work) +{ + struct via_crdr_mmc_host *host; + void __iomem *addrbase; + unsigned long flags; + u16 status; + + host = container_of(work, struct via_crdr_mmc_host, carddet_work); + + addrbase = host->ddma_mmiobase; + writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL); + + spin_lock_irqsave(&host->lock, flags); + + addrbase = host->pcictrl_mmiobase; + writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK); + + addrbase = host->sdhc_mmiobase; + status = readw(addrbase + VIA_CRDR_SDSTATUS); + if (!(status & VIA_CRDR_SDSTS_SLOTG)) { + if (host->mrq) { + pr_err("%s: Card removed during transfer!\n", + mmc_hostname(host->mmc)); + host->mrq->cmd->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } + + spin_unlock_irqrestore(&host->lock, flags); + + via_reset_pcictrl(host); + + spin_lock_irqsave(&host->lock, flags); + } + + spin_unlock_irqrestore(&host->lock, flags); + + via_print_pcictrl(host); + via_print_sdchc(host); + + mmc_detect_change(host->mmc, msecs_to_jiffies(500)); +} + +static void via_init_mmc_host(struct via_crdr_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + void __iomem *addrbase; + u32 lenreg; + u32 status; + + timer_setup(&host->timer, via_sdc_timeout, 0); + + spin_lock_init(&host->lock); + + mmc->f_min = VIA_CRDR_MIN_CLOCK; + mmc->f_max = VIA_CRDR_MAX_CLOCK; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED; + mmc->ops = &via_sdc_ops; + + /*Hardware cannot do scatter lists*/ + mmc->max_segs = 1; + + mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH; + mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT; + + mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_req_size = mmc->max_seg_size; + + INIT_WORK(&host->carddet_work, via_sdc_card_detect); + + tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish, + (unsigned long)host); + + addrbase = host->sdhc_mmiobase; + writel(0x0, addrbase + VIA_CRDR_SDINTMASK); + msleep(1); + + lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN; + writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN); + + status = readw(addrbase + VIA_CRDR_SDSTATUS); + status &= VIA_CRDR_SDSTS_W1C_MASK; + writew(status, addrbase + VIA_CRDR_SDSTATUS); + + status = readw(addrbase + VIA_CRDR_SDSTATUS2); + status |= VIA_CRDR_SDSTS_CFE; + writew(status, addrbase + VIA_CRDR_SDSTATUS2); + + writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL); + + writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK); + msleep(1); +} + +static int via_sd_probe(struct pci_dev *pcidev, + const struct pci_device_id *id) +{ + struct 
mmc_host *mmc; + struct via_crdr_mmc_host *sdhost; + u32 base, len; + u8 gatt; + int ret; + + pr_info(DRV_NAME + ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", + pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, + (int)pcidev->revision); + + ret = pci_enable_device(pcidev); + if (ret) + return ret; + + ret = pci_request_regions(pcidev, DRV_NAME); + if (ret) + goto disable; + + pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0); + pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0); + + mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev); + if (!mmc) { + ret = -ENOMEM; + goto release; + } + + sdhost = mmc_priv(mmc); + sdhost->mmc = mmc; + dev_set_drvdata(&pcidev->dev, sdhost); + + len = pci_resource_len(pcidev, 0); + base = pci_resource_start(pcidev, 0); + sdhost->mmiobase = ioremap(base, len); + if (!sdhost->mmiobase) { + ret = -ENOMEM; + goto free_mmc_host; + } + + sdhost->sdhc_mmiobase = + sdhost->mmiobase + VIA_CRDR_SDC_OFF; + sdhost->ddma_mmiobase = + sdhost->mmiobase + VIA_CRDR_DDMA_OFF; + sdhost->pcictrl_mmiobase = + sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF; + + sdhost->power = MMC_VDD_165_195; + + gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON; + writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + via_pwron_sleep(sdhost); + gatt |= VIA_CRDR_PCICLKGATT_SFTRST; + writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + msleep(3); + + via_init_mmc_host(sdhost); + + ret = + request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME, + sdhost); + if (ret) + goto unmap; + + writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN, + sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL); + writeb(VIA_CRDR_PCITMOCTRL_1024MS, + sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL); + + /* device-specific quirks */ + if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && + pcidev->subsystem_device == 0x3891) + sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY; + + ret = mmc_add_host(mmc); + if (ret) + goto unmap; + + return 0; + +unmap: + iounmap(sdhost->mmiobase); +free_mmc_host: + dev_set_drvdata(&pcidev->dev, NULL); + mmc_free_host(mmc); +release: + pci_release_regions(pcidev); +disable: + pci_disable_device(pcidev); + + return ret; +} + +static void via_sd_remove(struct pci_dev *pcidev) +{ + struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev); + unsigned long flags; + u8 gatt; + + spin_lock_irqsave(&sdhost->lock, flags); + + /* Ensure we don't accept more commands from mmc layer */ + sdhost->reject = 1; + + /* Disable generating further interrupts */ + writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL); + + if (sdhost->mrq) { + pr_err("%s: Controller removed during " + "transfer\n", mmc_hostname(sdhost->mmc)); + + /* make sure all DMA is stopped */ + writel(VIA_CRDR_DMACTRL_SFTRST, + sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL); + sdhost->mrq->cmd->error = -ENOMEDIUM; + if (sdhost->mrq->stop) + sdhost->mrq->stop->error = -ENOMEDIUM; + tasklet_schedule(&sdhost->finish_tasklet); + } + spin_unlock_irqrestore(&sdhost->lock, flags); + + mmc_remove_host(sdhost->mmc); + + free_irq(pcidev->irq, sdhost); + + del_timer_sync(&sdhost->timer); + + tasklet_kill(&sdhost->finish_tasklet); + + /* switch off power */ + gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON; + writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + + iounmap(sdhost->mmiobase); + dev_set_drvdata(&pcidev->dev, NULL); + mmc_free_host(sdhost->mmc); + pci_release_regions(pcidev); + pci_disable_device(pcidev); + 
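+/* summary of the teardown above: new requests are rejected and the interrupt source masked under the lock, any in-flight request is failed with -ENOMEDIUM, and only after mmc_remove_host(), free_irq(), del_timer_sync() and tasklet_kill() have run is pad power gated off and the MMIO mapping released */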
+ pr_info(DRV_NAME + ": VIA SDMMC controller at %s [%04x:%04x] has been removed\n", + pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device); +} + +static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host) +{ + struct sdhcreg *pm_sdhcreg; + void __iomem *addrbase; + u32 lenreg; + u16 status; + + pm_sdhcreg = &(host->pm_sdhc_reg); + addrbase = host->sdhc_mmiobase; + + writel(0x0, addrbase + VIA_CRDR_SDINTMASK); + + lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN; + writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN); + + status = readw(addrbase + VIA_CRDR_SDSTATUS); + status &= VIA_CRDR_SDSTS_W1C_MASK; + writew(status, addrbase + VIA_CRDR_SDSTATUS); + + status = readw(addrbase + VIA_CRDR_SDSTATUS2); + status |= VIA_CRDR_SDSTS_CFE; + writew(status, addrbase + VIA_CRDR_SDSTATUS2); + + writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL); + writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG); + writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK); + writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO); + writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL); + writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL); + + via_print_pcictrl(host); + via_print_sdchc(host); +} + +static int __maybe_unused via_sd_suspend(struct device *dev) +{ + struct via_crdr_mmc_host *host; + unsigned long flags; + + host = dev_get_drvdata(dev); + + spin_lock_irqsave(&host->lock, flags); + via_save_pcictrlreg(host); + via_save_sdcreg(host); + spin_unlock_irqrestore(&host->lock, flags); + + device_wakeup_enable(dev); + + return 0; +} + +static int __maybe_unused via_sd_resume(struct device *dev) +{ + struct via_crdr_mmc_host *sdhost; + int ret = 0; + u8 gatt; + + sdhost = dev_get_drvdata(dev); + + gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON; + if (sdhost->power == MMC_VDD_165_195) + gatt &= ~VIA_CRDR_PCICLKGATT_3V3; + else + gatt |= VIA_CRDR_PCICLKGATT_3V3; + writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + via_pwron_sleep(sdhost); + gatt |= VIA_CRDR_PCICLKGATT_SFTRST; + writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT); + msleep(3); + + msleep(100); + + via_restore_pcictrlreg(sdhost); + via_init_sdc_pm(sdhost); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(via_sd_pm_ops, via_sd_suspend, via_sd_resume); + +static struct pci_driver via_sd_driver = { + .name = DRV_NAME, + .id_table = via_ids, + .probe = via_sd_probe, + .remove = via_sd_remove, + .driver.pm = &via_sd_pm_ops, +}; + +module_pci_driver(via_sd_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("VIA Technologies Inc."); +MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver"); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c new file mode 100644 index 000000000..05ffd5bf5 --- /dev/null +++ b/drivers/mmc/host/vub300.c @@ -0,0 +1,2481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Remote VUB300 SDIO/SDmem Host Controller Driver + * + * Copyright (C) 2010 Elan Digital Systems Limited + * + * based on USB Skeleton driver - 2.2 + * + * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) + * + * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot + * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear, + * by virtue of this driver, to have been plugged into a local + * SDIO host controller, similar to, say, a PCI Ricoh controller + * This is because this kernel device driver is both a USB 2.0 + * client device driver AND an MMC host controller driver. 
Thus + * if there is an existing driver for the inserted SDIO/SDmem/MMC + * device then that driver will be used by the kernel to manage + * the device in exactly the same fashion as if it had been + * directly plugged into, say, a local pci bus Ricoh controller + * + * RANT: this driver was written using a display 128x48 - converting it + * to a line width of 80 makes it very difficult to support. In + * particular functions have been broken down into sub functions + * and the original meaningful names have been shortened into + * cryptic ones. + * The problem is that executing a fragment of code subject to + * two conditions means an indentation of 24, thus leaving only + * 56 characters for a C statement. And that is quite ridiculous! + * + * Data types: data passed to/from the VUB300 is fixed to a number of + * bits and driver data fields reflect that limit by using + * u8, u16, u32 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct host_controller_info { + u8 info_size; + u16 firmware_version; + u8 number_of_ports; +} __packed; + +#define FIRMWARE_BLOCK_BOUNDARY 1024 +struct sd_command_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; /* Bit7 - Rd/Wr */ + u8 command_index; + u8 transfer_size[4]; /* ReadSize + ReadSize */ + u8 response_type; + u8 arguments[4]; + u8 block_count[2]; + u8 block_size[2]; + u8 block_boundary[2]; + u8 reserved[44]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_irqpoll_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; /* Bit7 - Rd/Wr */ + u8 padding[16]; /* don't ask why !! */ + u8 poll_timeout_msb; + u8 poll_timeout_lsb; + u8 reserved[42]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_common_header { + u8 header_size; + u8 header_type; + u8 port_number; +} __packed; + +struct sd_response_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; + u8 command_index; + u8 command_response[]; +} __packed; + +struct sd_status_header { + u8 header_size; + u8 header_type; + u8 port_number; + u16 port_flags; + u32 sdio_clock; + u16 host_header_size; + u16 func_header_size; + u16 ctrl_header_size; +} __packed; + +struct sd_error_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 error_code; +} __packed; + +struct sd_interrupt_header { + u8 header_size; + u8 header_type; + u8 port_number; +} __packed; + +struct offload_registers_access { + u8 command_byte[4]; + u8 Respond_Byte[4]; +} __packed; + +#define INTERRUPT_REGISTER_ACCESSES 15 +struct sd_offloaded_interrupt { + u8 header_size; + u8 header_type; + u8 port_number; + struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES]; +} __packed; + +struct sd_register_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; + u8 command_index; + u8 command_response[6]; +} __packed; + +#define PIGGYBACK_REGISTER_ACCESSES 14 +struct sd_offloaded_piggyback { + struct sd_register_header sdio; + struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES]; +} __packed; + +union sd_response { + struct sd_common_header common; + struct sd_status_header status; + struct sd_error_header error; + struct sd_interrupt_header interrupt; + struct sd_response_header response; + struct sd_offloaded_interrupt irq; + struct sd_offloaded_piggyback pig; +} __packed; + +union sd_command { + struct sd_command_header head; + struct sd_irqpoll_header poll; 
+} __packed; + +enum SD_RESPONSE_TYPE { + SDRT_UNSPECIFIED = 0, + SDRT_NONE, + SDRT_1, + SDRT_1B, + SDRT_2, + SDRT_3, + SDRT_4, + SDRT_5, + SDRT_5B, + SDRT_6, + SDRT_7, +}; + +#define RESPONSE_INTERRUPT 0x01 +#define RESPONSE_ERROR 0x02 +#define RESPONSE_STATUS 0x03 +#define RESPONSE_IRQ_DISABLED 0x05 +#define RESPONSE_IRQ_ENABLED 0x06 +#define RESPONSE_PIGGYBACKED 0x07 +#define RESPONSE_NO_INTERRUPT 0x08 +#define RESPONSE_PIG_DISABLED 0x09 +#define RESPONSE_PIG_ENABLED 0x0A +#define SD_ERROR_1BIT_TIMEOUT 0x01 +#define SD_ERROR_4BIT_TIMEOUT 0x02 +#define SD_ERROR_1BIT_CRC_WRONG 0x03 +#define SD_ERROR_4BIT_CRC_WRONG 0x04 +#define SD_ERROR_1BIT_CRC_ERROR 0x05 +#define SD_ERROR_4BIT_CRC_ERROR 0x06 +#define SD_ERROR_NO_CMD_ENDBIT 0x07 +#define SD_ERROR_NO_1BIT_DATEND 0x08 +#define SD_ERROR_NO_4BIT_DATEND 0x09 +#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A +#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B +#define SD_ERROR_ILLEGAL_COMMAND 0x0C +#define SD_ERROR_NO_DEVICE 0x0D +#define SD_ERROR_TRANSFER_LENGTH 0x0E +#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F +#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10 +#define SD_ERROR_ILLEGAL_STATE 0x11 +#define SD_ERROR_UNKNOWN_ERROR 0x12 +#define SD_ERROR_RESERVED_ERROR 0x13 +#define SD_ERROR_INVALID_FUNCTION 0x14 +#define SD_ERROR_OUT_OF_RANGE 0x15 +#define SD_ERROR_STAT_CMD 0x16 +#define SD_ERROR_STAT_DATA 0x17 +#define SD_ERROR_STAT_CMD_TIMEOUT 0x18 +#define SD_ERROR_SDCRDY_STUCK 0x19 +#define SD_ERROR_UNHANDLED 0x1A +#define SD_ERROR_OVERRUN 0x1B +#define SD_ERROR_PIO_TIMEOUT 0x1C + +#define FUN(c) (0x000007 & (c->arg>>28)) +#define REG(c) (0x01FFFF & (c->arg>>9)) + +static bool limit_speed_to_24_MHz; +module_param(limit_speed_to_24_MHz, bool, 0644); +MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); + +static bool pad_input_to_usb_pkt; +module_param(pad_input_to_usb_pkt, bool, 0644); +MODULE_PARM_DESC(pad_input_to_usb_pkt, + "Pad USB data input transfers to whole USB Packet"); + +static bool disable_offload_processing; +module_param(disable_offload_processing, bool, 0644); +MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); + +static bool force_1_bit_data_xfers; +module_param(force_1_bit_data_xfers, bool, 0644); +MODULE_PARM_DESC(force_1_bit_data_xfers, + "Force SDIO Data Transfers to 1-bit Mode"); + +static bool force_polling_for_irqs; +module_param(force_polling_for_irqs, bool, 0644); +MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); + +static int firmware_irqpoll_timeout = 1024; +module_param(firmware_irqpoll_timeout, int, 0644); +MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout"); + +static int force_max_req_size = 128; +module_param(force_max_req_size, int, 0644); +MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes"); + +#ifdef SMSC_DEVELOPMENT_BOARD +static int firmware_rom_wait_states = 0x04; +#else +static int firmware_rom_wait_states = 0x1C; +#endif + +module_param(firmware_rom_wait_states, int, 0644); +MODULE_PARM_DESC(firmware_rom_wait_states, + "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); + +#define ELAN_VENDOR_ID 0x2201 +#define VUB300_VENDOR_ID 0x0424 +#define VUB300_PRODUCT_ID 0x012C +static const struct usb_device_id vub300_table[] = { + {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, + {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, + {} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(usb, vub300_table); + +static struct workqueue_struct *cmndworkqueue; +static struct workqueue_struct 
*pollworkqueue; +static struct workqueue_struct *deadworkqueue; + +static inline int interface_to_InterfaceNumber(struct usb_interface *interface) +{ + if (!interface) + return -1; + if (!interface->cur_altsetting) + return -1; + return interface->cur_altsetting->desc.bInterfaceNumber; +} + +struct sdio_register { + unsigned func_num:3; + unsigned sdio_reg:17; + unsigned activate:1; + unsigned prepared:1; + unsigned regvalue:8; + unsigned response:8; + unsigned sparebit:26; +}; + +struct vub300_mmc_host { + struct usb_device *udev; + struct usb_interface *interface; + struct kref kref; + struct mutex cmd_mutex; + struct mutex irq_mutex; + char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */ + u8 cmnd_out_ep; /* EndPoint for commands */ + u8 cmnd_res_ep; /* EndPoint for responses */ + u8 data_out_ep; /* EndPoint for out data */ + u8 data_inp_ep; /* EndPoint for inp data */ + bool card_powered; + bool card_present; + bool read_only; + bool large_usb_packets; + bool app_spec; /* ApplicationSpecific */ + bool irq_enabled; /* by the MMC CORE */ + bool irq_disabled; /* in the firmware */ + unsigned bus_width:4; + u8 total_offload_count; + u8 dynamic_register_count; + u8 resp_len; + u32 datasize; + int errors; + int usb_transport_fail; + int usb_timed_out; + int irqs_queued; + struct sdio_register sdio_register[16]; + struct offload_interrupt_function_register { +#define MAXREGBITS 4 +#define MAXREGS (1<<MAXREGBITS) +#define MAXREGMASK (MAXREGS-1) + u8 offload_count; + u8 offload_point; + struct offload_registers_access reg[MAXREGS]; + } fn[8]; + u16 fbs[8]; /* Function Block Size */ + struct mmc_command *cmd; + struct mmc_request *req; + struct mmc_data *data; + struct urb *urb; + struct urb *command_out_urb; + struct urb *command_res_urb; + struct completion command_complete; + struct completion irqpoll_complete; + union sd_command cmnd; + union sd_response resp; + struct mmc_host *mmc; + struct usb_sg_request sg_request; + struct timer_list sg_transfer_timer; + struct timer_list inactivity_timer; + struct work_struct deadwork; + struct work_struct cmndwork; + struct delayed_work pollwork; + struct host_controller_info hc_info; + struct sd_status_header system_port_status; + u8 padded_buffer[64]; +}; + +#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref) + +#define SET_TRANSFER_PSEUDOCODE 21 +#define SET_INTERRUPT_PSEUDOCODE 20 +#define SET_FAILURE_MODE 18 +#define SET_ROM_WAIT_STATES 16 +#define SET_IRQ_ENABLE 13 +#define SET_CLOCK_SPEED 11 +#define SET_FUNCTION_BLOCK_SIZE 9 +#define SET_SD_DATA_MODE 6 +#define SET_SD_POWER 4 +#define ENTER_DFU_MODE 3 +#define GET_HC_INF0 1 +#define GET_SYSTEM_PORT_STATUS 8 + +static void vub300_delete(struct kref *kref) +{ /* kref callback - softirq */ + struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref); + struct mmc_host *mmc = vub300->mmc; + usb_free_urb(vub300->command_out_urb); + vub300->command_out_urb = NULL; + usb_free_urb(vub300->command_res_urb); + vub300->command_res_urb = NULL; + usb_put_dev(vub300->udev); + mmc_free_host(mmc); + /* + * and hence also frees vub300 + * which is contained at the end of struct mmc + */ +} + +static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300) +{ + kref_get(&vub300->kref); + if (queue_work(cmndworkqueue, &vub300->cmndwork)) { + /* + * then the cmndworkqueue was not previously + * running and the above get ref is obviously + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the cmndworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay) +{ + kref_get(&vub300->kref); + if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) { + /* + * then the pollworkqueue was not previously + * running and the above get ref is obviously + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the pollworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void vub300_queue_dead_work(struct vub300_mmc_host *vub300) +{ + kref_get(&vub300->kref); + if (queue_work(deadworkqueue, &vub300->deadwork)) { + /* + * then the deadworkqueue was not previously + * running and the above get ref is obviously + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the deadworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void irqpoll_res_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; +
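+ /* hardirq completion context: only record any USB transport failure and wake the irqpoll waiter; nothing that can sleep is allowed here */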
if (urb->status) + vub300->usb_transport_fail = urb->status; + complete(&vub300->irqpoll_complete); +} + +static void irqpoll_out_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + vub300->usb_transport_fail = urb->status; + complete(&vub300->irqpoll_complete); + return; + } else { + int ret; + unsigned int pipe = + usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); + usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, + &vub300->resp, sizeof(vub300->resp), + irqpoll_res_completed, vub300); + vub300->command_res_urb->actual_length = 0; + ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); + if (ret) { + vub300->usb_transport_fail = ret; + complete(&vub300->irqpoll_complete); + } + return; + } +} + +static void send_irqpoll(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + int retval; + int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout); + vub300->cmnd.poll.header_size = 22; + vub300->cmnd.poll.header_type = 1; + vub300->cmnd.poll.port_number = 0; + vub300->cmnd.poll.command_type = 2; + vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout; + vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8); + usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, + usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep) + , &vub300->cmnd, sizeof(vub300->cmnd) + , irqpoll_out_completed, vub300); + retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); + if (0 > retval) { + vub300->usb_transport_fail = retval; + vub300_queue_poll_work(vub300, 1); + complete(&vub300->irqpoll_complete); + return; + } else { + return; + } +} + +static void new_system_port_status(struct vub300_mmc_host *vub300) +{ + int old_card_present = vub300->card_present; + int new_card_present = + (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; + vub300->read_only = + (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; + if (new_card_present && !old_card_present) { + dev_info(&vub300->udev->dev, "card just inserted\n"); + vub300->card_present = 1; + vub300->bus_width = 0; + if (disable_offload_processing) + strncpy(vub300->vub_name, "EMPTY Processing Disabled", + sizeof(vub300->vub_name)); + else + vub300->vub_name[0] = 0; + mmc_detect_change(vub300->mmc, 1); + } else if (!new_card_present && old_card_present) { + dev_info(&vub300->udev->dev, "card just ejected\n"); + vub300->card_present = 0; + mmc_detect_change(vub300->mmc, 0); + } else { + /* no change */ + } +} + +static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300, + struct offload_registers_access + *register_access, u8 func) +{ + u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count; + memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access, + sizeof(struct offload_registers_access)); + vub300->fn[func].offload_count += 1; + vub300->total_offload_count += 1; +} + +static void add_offloaded_reg(struct vub300_mmc_host *vub300, + struct offload_registers_access *register_access) +{ + u32 Register = ((0x03 & register_access->command_byte[0]) << 15) + | ((0xFF & register_access->command_byte[1]) << 7) + | ((0xFE & register_access->command_byte[2]) >> 1); + u8 func = ((0x70 & register_access->command_byte[0]) >> 4); + u8 regs = vub300->dynamic_register_count; + u8 i = 0; + while (0 < regs-- && 1 == vub300->sdio_register[i].activate) { + if (vub300->sdio_register[i].func_num == func && + vub300->sdio_register[i].sdio_reg == Register) { + if (vub300->sdio_register[i].prepared == 0) + vub300->sdio_register[i].prepared = 1; + vub300->sdio_register[i].response = + register_access->Respond_Byte[2]; + vub300->sdio_register[i].regvalue = + register_access->Respond_Byte[3]; + return; + } else { + i += 1; + continue; + } + } + __add_offloaded_reg_to_fifo(vub300, register_access, func); +} + +static void check_vub300_port_status(struct vub300_mmc_host *vub300) +{ + /* + * cmd_mutex is held by vub300_pollwork_thread, + * vub300_deadwork_thread or vub300_cmndwork_thread + */ + int retval; + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_SYSTEM_PORT_STATUS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->system_port_status, + sizeof(vub300->system_port_status), 1000); + if (sizeof(vub300->system_port_status) == retval) + new_system_port_status(vub300); +} + +static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + if (vub300->command_res_urb->actual_length == 0) + return; + + switch (vub300->resp.common.header_type) { + case RESPONSE_INTERRUPT: + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + break; + case RESPONSE_ERROR: + if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE) + check_vub300_port_status(vub300); + break; + case RESPONSE_STATUS: + vub300->system_port_status = vub300->resp.status; + new_system_port_status(vub300); + if (!vub300->card_present) + vub300_queue_poll_work(vub300, HZ / 5); + break; + case RESPONSE_IRQ_DISABLED: + { + int offloaded_data_length = vub300->resp.common.header_size - 3; + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + 
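+ /* hand the SDIO interrupt to the core immediately if it is listening, otherwise count it in irqs_queued for delivery once sdio irqs are re-enabled */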
mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + break; + } + case RESPONSE_IRQ_ENABLED: + { + int offloaded_data_length = vub300->resp.common.header_size - 3; + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 0; + mutex_unlock(&vub300->irq_mutex); + break; + } + case RESPONSE_NO_INTERRUPT: + vub300_queue_poll_work(vub300, 1); + break; + default: + break; + } +} + +static void __do_poll(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + unsigned long commretval; + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + init_completion(&vub300->irqpoll_complete); + send_irqpoll(vub300); + commretval = wait_for_completion_timeout(&vub300->irqpoll_complete, + msecs_to_jiffies(500)); + if (vub300->usb_transport_fail) { + /* no need to do anything */ + } else if (commretval == 0) { + vub300->usb_timed_out = 1; + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + } else { /* commretval > 0 */ + __vub300_irqpoll_response(vub300); + } +} + +/* this thread runs only when the driver + * is trying to poll the device for an IRQ + */ +static void vub300_pollwork_thread(struct work_struct *work) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = container_of(work, + struct vub300_mmc_host, pollwork.work); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } + mutex_lock(&vub300->cmd_mutex); + if (vub300->cmd) { + vub300_queue_poll_work(vub300, 1); + } else if (!vub300->card_present) { + /* no need to do anything */ + } else { /* vub300->card_present */ + mutex_lock(&vub300->irq_mutex); + if (!vub300->irq_enabled) { + mutex_unlock(&vub300->irq_mutex); + } else if (vub300->irqs_queued) { + vub300->irqs_queued -= 1; + mmc_signal_sdio_irq(vub300->mmc); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + mutex_unlock(&vub300->irq_mutex); + } else { /* NOT vub300->irqs_queued */ + mutex_unlock(&vub300->irq_mutex); + __do_poll(vub300); + } + } + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_deadwork_thread(struct work_struct *work) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = + container_of(work, struct vub300_mmc_host, deadwork); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } + mutex_lock(&vub300->cmd_mutex); + if (vub300->cmd) { + /* + * a command got in as the inactivity + * timer expired - so we just let the + * processing of the command show if + * the device is dead + */ + } else if (vub300->card_present) { + check_vub300_port_status(vub300); + } else if (vub300->mmc && vub300->mmc->card) { + /* + * the MMC core must not have responded + * to the previous indication - lets + * hope that it eventually does so we + * will just ignore this for now + */ + } else { + check_vub300_port_status(vub300); + } + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_inactivity_timer_expired(struct timer_list *t) +{ /* softirq */ + struct vub300_mmc_host *vub300 = from_timer(vub300, t, + inactivity_timer); + if (!vub300->interface) { + kref_put(&vub300->kref, 
vub300_delete); + } else if (vub300->cmd) { + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + } else { + vub300_queue_dead_work(vub300); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + } +} + +static int vub300_response_error(u8 error_code) +{ + switch (error_code) { + case SD_ERROR_PIO_TIMEOUT: + case SD_ERROR_1BIT_TIMEOUT: + case SD_ERROR_4BIT_TIMEOUT: + return -ETIMEDOUT; + case SD_ERROR_STAT_DATA: + case SD_ERROR_OVERRUN: + case SD_ERROR_STAT_CMD: + case SD_ERROR_STAT_CMD_TIMEOUT: + case SD_ERROR_SDCRDY_STUCK: + case SD_ERROR_UNHANDLED: + case SD_ERROR_1BIT_CRC_WRONG: + case SD_ERROR_4BIT_CRC_WRONG: + case SD_ERROR_1BIT_CRC_ERROR: + case SD_ERROR_4BIT_CRC_ERROR: + case SD_ERROR_NO_CMD_ENDBIT: + case SD_ERROR_NO_1BIT_DATEND: + case SD_ERROR_NO_4BIT_DATEND: + case SD_ERROR_1BIT_DATA_TIMEOUT: + case SD_ERROR_4BIT_DATA_TIMEOUT: + case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT: + case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT: + return -EILSEQ; + case 33: + return -EILSEQ; + case SD_ERROR_ILLEGAL_COMMAND: + return -EINVAL; + case SD_ERROR_NO_DEVICE: + return -ENOMEDIUM; + default: + return -ENODEV; + } +} + +static void command_res_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + /* we have to let the initiator handle the error */ + } else if (vub300->command_res_urb->actual_length == 0) { + /* + * we have seen this happen once or twice and + * we suspect a buggy USB host controller + */ + } else if (!vub300->data) { + /* this means that the command (typically CMD52) succeeded */ + } else if (vub300->resp.common.header_type != 0x02) { + /* + * this is an error response from the VUB300 chip + * and we let the initiator handle it + */ + } else if (vub300->urb) { + vub300->cmd->error = + vub300_response_error(vub300->resp.error.error_code); + usb_unlink_urb(vub300->urb); + } else { + vub300->cmd->error = + vub300_response_error(vub300->resp.error.error_code); + usb_sg_cancel(&vub300->sg_request); + } + complete(&vub300->command_complete); /* got_response_in */ +} + +static void command_out_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + complete(&vub300->command_complete); + } else { + int ret; + unsigned int pipe = + usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); + usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, + &vub300->resp, sizeof(vub300->resp), + command_res_completed, vub300); + vub300->command_res_urb->actual_length = 0; + ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); + if (ret == 0) { + /* + * the urb completion handler will call + * our completion handler + */ + } else { + /* + * and thus we only call it directly + * when it will not be called + */ + complete(&vub300->command_complete); + } + } +} + +/* + * the STUFF bits are masked out for the comparisons + */ +static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300, + u32 cmd_arg) +{ + if ((0xFBFFFE00 & cmd_arg) == 0x80022200) + vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80022000) + vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80042200) + vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80042000) + vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]); + else if ((0xFBFFFE00 & 
cmd_arg) == 0x80062200) + vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80062000) + vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80082200) + vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80082000) + vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200) + vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000) + vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200) + vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000) + vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200) + vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000) + vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]); + else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00) + vub300->bus_width = 1; + else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02) + vub300->bus_width = 4; +} + +static void send_command(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + struct mmc_command *cmd = vub300->cmd; + struct mmc_data *data = vub300->data; + int retval; + int i; + u8 response_type; + if (vub300->app_spec) { + switch (cmd->opcode) { + case 6: + response_type = SDRT_1; + vub300->resp_len = 6; + if (0x00000000 == (0x00000003 & cmd->arg)) + vub300->bus_width = 1; + else if (0x00000002 == (0x00000003 & cmd->arg)) + vub300->bus_width = 4; + else + dev_err(&vub300->udev->dev, + "unexpected ACMD6 bus_width=%d\n", + 0x00000003 & cmd->arg); + break; + case 13: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 22: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 23: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 41: + response_type = SDRT_3; + vub300->resp_len = 6; + break; + case 42: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 51: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 55: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + default: + vub300->resp_len = 0; + cmd->error = -EINVAL; + complete(&vub300->command_complete); + return; + } + vub300->app_spec = 0; + } else { + switch (cmd->opcode) { + case 0: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 1: + response_type = SDRT_3; + vub300->resp_len = 6; + break; + case 2: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 3: + response_type = SDRT_6; + vub300->resp_len = 6; + break; + case 4: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 5: + response_type = SDRT_4; + vub300->resp_len = 6; + break; + case 6: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 7: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 8: + response_type = SDRT_7; + vub300->resp_len = 6; + break; + case 9: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 10: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 12: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 13: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 15: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 16: + for (i = 0; i < 
ARRAY_SIZE(vub300->fbs); i++) + vub300->fbs[i] = 0xFFFF & cmd->arg; + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 17: + case 18: + case 24: + case 25: + case 27: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 28: + case 29: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 30: + case 32: + case 33: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 38: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 42: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 52: + response_type = SDRT_5; + vub300->resp_len = 6; + snoop_block_size_and_bus_width(vub300, cmd->arg); + break; + case 53: + response_type = SDRT_5; + vub300->resp_len = 6; + break; + case 55: + response_type = SDRT_1; + vub300->resp_len = 6; + vub300->app_spec = 1; + break; + case 56: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + default: + vub300->resp_len = 0; + cmd->error = -EINVAL; + complete(&vub300->command_complete); + return; + } + } + /* + * it is a shame that we can not use "sizeof(struct sd_command_header)" + * this is because the packet _must_ be padded to 64 bytes + */ + vub300->cmnd.head.header_size = 20; + vub300->cmnd.head.header_type = 0x00; + vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ + vub300->cmnd.head.command_type = 0x00; /* standard read command */ + vub300->cmnd.head.response_type = response_type; + vub300->cmnd.head.command_index = cmd->opcode; + vub300->cmnd.head.arguments[0] = cmd->arg >> 24; + vub300->cmnd.head.arguments[1] = cmd->arg >> 16; + vub300->cmnd.head.arguments[2] = cmd->arg >> 8; + vub300->cmnd.head.arguments[3] = cmd->arg >> 0; + if (cmd->opcode == 52) { + int fn = 0x7 & (cmd->arg >> 28); + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; + vub300->cmnd.head.command_type = 0x00; + vub300->cmnd.head.transfer_size[0] = 0; + vub300->cmnd.head.transfer_size[1] = 0; + vub300->cmnd.head.transfer_size[2] = 0; + vub300->cmnd.head.transfer_size[3] = 0; + } else if (!data) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; + vub300->cmnd.head.command_type = 0x00; + vub300->cmnd.head.transfer_size[0] = 0; + vub300->cmnd.head.transfer_size[1] = 0; + vub300->cmnd.head.transfer_size[2] = 0; + vub300->cmnd.head.transfer_size[3] = 0; + } else if (cmd->opcode == 53) { + int fn = 0x7 & (cmd->arg >> 28); + if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */ + vub300->cmnd.head.block_count[0] = + (data->blocks >> 8) & 0xFF; + vub300->cmnd.head.block_count[1] = + (data->blocks >> 0) & 0xFF; + vub300->cmnd.head.block_size[0] = + (data->blksz >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = + (data->blksz >> 0) & 0xFF; + } else { /* BYTE MODE */ + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = + (vub300->datasize >> 0) & 0xFF; + } + vub300->cmnd.head.command_type = + (MMC_DATA_READ & data->flags) ? 
0x00 : 0x80; + vub300->cmnd.head.transfer_size[0] = + (vub300->datasize >> 24) & 0xFF; + vub300->cmnd.head.transfer_size[1] = + (vub300->datasize >> 16) & 0xFF; + vub300->cmnd.head.transfer_size[2] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.transfer_size[3] = + (vub300->datasize >> 0) & 0xFF; + if (vub300->datasize < vub300->fbs[fn]) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + } + } else { + vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; + vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; + vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; + vub300->cmnd.head.command_type = + (MMC_DATA_READ & data->flags) ? 0x00 : 0x80; + vub300->cmnd.head.transfer_size[0] = + (vub300->datasize >> 24) & 0xFF; + vub300->cmnd.head.transfer_size[1] = + (vub300->datasize >> 16) & 0xFF; + vub300->cmnd.head.transfer_size[2] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.transfer_size[3] = + (vub300->datasize >> 0) & 0xFF; + if (vub300->datasize < vub300->fbs[0]) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + } + } + if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) { + u16 block_size = vub300->cmnd.head.block_size[1] | + (vub300->cmnd.head.block_size[0] << 8); + u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY - + (FIRMWARE_BLOCK_BOUNDARY % block_size); + vub300->cmnd.head.block_boundary[0] = + (block_boundary >> 8) & 0xFF; + vub300->cmnd.head.block_boundary[1] = + (block_boundary >> 0) & 0xFF; + } else { + vub300->cmnd.head.block_boundary[0] = 0; + vub300->cmnd.head.block_boundary[1] = 0; + } + usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, + usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep), + &vub300->cmnd, sizeof(vub300->cmnd), + command_out_completed, vub300); + retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); + if (retval < 0) { + cmd->error = retval; + complete(&vub300->command_complete); + return; + } else { + return; + } +} + +/* + * timer callback runs in atomic mode + * so it cannot call usb_kill_urb() + */ +static void vub300_sg_timed_out(struct timer_list *t) +{ + struct vub300_mmc_host *vub300 = from_timer(vub300, t, + sg_transfer_timer); + vub300->usb_timed_out = 1; + usb_sg_cancel(&vub300->sg_request); + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); +} + +static u16 roundup_to_multiple_of_64(u16 number) +{ + return 0xFFC0 & (0x3F + number); +} + +/* + * this is a separate function to solve the 80 column width restriction + */ +static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, + const struct firmware *fw) +{ + u8 register_count = 0; + u16 ts = 0; + u16 interrupt_size = 0; + const u8 *data = fw->data; + int size = fw->size; + u8 c; + dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n", + vub300->vub_name); + do { + c = *data++; + } while (size-- && c); /* skip comment */ + dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data, + vub300->vub_name); + if (size < 4) { + dev_err(&vub300->udev->dev, + "corrupt offload pseudocode in firmware %s\n", + vub300->vub_name); + strncpy(vub300->vub_name, "corrupt offload pseudocode", + sizeof(vub300->vub_name)); + return; + } + interrupt_size += *data++; + size -= 1; + interrupt_size <<= 8; + interrupt_size += *data++; + size -= 1; + if (interrupt_size < size) { + u16 xfer_length = roundup_to_multiple_of_64(interrupt_size); + 
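+ /* worked example (illustrative): transfers to the VUB300 are padded to whole 64-byte USB packets, so interrupt_size = 70 gives xfer_length = 0xFFC0 & (0x3F + 70) = 128 */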
u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); + if (xfer_buffer) { + int retval; + memcpy(xfer_buffer, data, interrupt_size); + memset(xfer_buffer + interrupt_size, 0, + xfer_length - interrupt_size); + size -= interrupt_size; + data += interrupt_size; + retval = + usb_control_msg(vub300->udev, + usb_sndctrlpipe(vub300->udev, 0), + SET_INTERRUPT_PSEUDOCODE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, 0x0000, 0x0000, + xfer_buffer, xfer_length, 1000); + kfree(xfer_buffer); + if (retval < 0) + goto copy_error_message; + } else { + dev_err(&vub300->udev->dev, + "not enough memory for xfer buffer to send" + " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data, + vub300->vub_name); + strncpy(vub300->vub_name, + "SDIO interrupt pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "corrupt interrupt pseudocode in firmware %s %s\n", + fw->data, vub300->vub_name); + strncpy(vub300->vub_name, "corrupt interrupt pseudocode", + sizeof(vub300->vub_name)); + return; + } + ts += *data++; + size -= 1; + ts <<= 8; + ts += *data++; + size -= 1; + if (ts < size) { + u16 xfer_length = roundup_to_multiple_of_64(ts); + u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); + if (xfer_buffer) { + int retval; + memcpy(xfer_buffer, data, ts); + memset(xfer_buffer + ts, 0, + xfer_length - ts); + size -= ts; + data += ts; + retval = + usb_control_msg(vub300->udev, + usb_sndctrlpipe(vub300->udev, 0), + SET_TRANSFER_PSEUDOCODE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, 0x0000, 0x0000, + xfer_buffer, xfer_length, 1000); + kfree(xfer_buffer); + if (retval < 0) + goto copy_error_message; + } else { + dev_err(&vub300->udev->dev, + "not enough memory for xfer buffer to send" + " TRANSFER_PSEUDOCODE for %s %s\n", fw->data, + vub300->vub_name); + strncpy(vub300->vub_name, + "SDIO transfer pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "corrupt transfer pseudocode in firmware %s %s\n", + fw->data, vub300->vub_name); + strncpy(vub300->vub_name, "corrupt transfer pseudocode", + sizeof(vub300->vub_name)); + return; + } + register_count += *data++; + size -= 1; + if (register_count * 4 == size) { + int I = vub300->dynamic_register_count = register_count; + int i = 0; + while (I--) { + unsigned int func_num = 0; + vub300->sdio_register[i].func_num = *data++; + size -= 1; + func_num += *data++; + size -= 1; + func_num <<= 8; + func_num += *data++; + size -= 1; + func_num <<= 8; + func_num += *data++; + size -= 1; + vub300->sdio_register[i].sdio_reg = func_num; + vub300->sdio_register[i].activate = 1; + vub300->sdio_register[i].prepared = 0; + i += 1; + } + dev_info(&vub300->udev->dev, + "initialized %d dynamic pseudocode registers\n", + vub300->dynamic_register_count); + return; + } else { + dev_err(&vub300->udev->dev, + "corrupt dynamic registers in firmware %s\n", + vub300->vub_name); + strncpy(vub300->vub_name, "corrupt dynamic registers", + sizeof(vub300->vub_name)); + return; + } + + return; + +copy_error_message: + strncpy(vub300->vub_name, "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); +} + +/* + * if the binary containing the EMPTY PseudoCode can not be found + * vub300->vub_name is set anyway in order to prevent an automatic retry + */ +static void download_offload_pseudocode(struct vub300_mmc_host *vub300) +{ + struct mmc_card *card = vub300->mmc->card; + int sdio_funcs = card->sdio_funcs; + const struct firmware *fw = NULL; + int l = 
snprintf(vub300->vub_name, sizeof(vub300->vub_name), + "vub_%04X%04X", card->cis.vendor, card->cis.device); + int n = 0; + int retval; + for (n = 0; n < sdio_funcs; n++) { + struct sdio_func *sf = card->sdio_func[n]; + l += scnprintf(vub300->vub_name + l, + sizeof(vub300->vub_name) - l, "_%04X%04X", + sf->vendor, sf->device); + } + snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); + dev_info(&vub300->udev->dev, "requesting offload firmware %s\n", + vub300->vub_name); + retval = request_firmware(&fw, vub300->vub_name, &card->dev); + if (retval < 0) { + strncpy(vub300->vub_name, "vub_default.bin", + sizeof(vub300->vub_name)); + retval = request_firmware(&fw, vub300->vub_name, &card->dev); + if (retval < 0) { + strncpy(vub300->vub_name, + "no SDIO offload firmware found", + sizeof(vub300->vub_name)); + } else { + __download_offload_pseudocode(vub300, fw); + release_firmware(fw); + } + } else { + __download_offload_pseudocode(vub300, fw); + release_firmware(fw); + } +} + +static void vub300_usb_bulk_msg_completion(struct urb *urb) +{ /* urb completion handler - hardirq */ + complete((struct completion *)urb->context); +} + +static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300, + unsigned int pipe, void *data, int len, + int *actual_length, int timeout_msecs) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + struct usb_device *usb_dev = vub300->udev; + struct completion done; + int retval; + vub300->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!vub300->urb) + return -ENOMEM; + usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len, + vub300_usb_bulk_msg_completion, NULL); + init_completion(&done); + vub300->urb->context = &done; + vub300->urb->actual_length = 0; + retval = usb_submit_urb(vub300->urb, GFP_KERNEL); + if (unlikely(retval)) + goto out; + if (!wait_for_completion_timeout + (&done, msecs_to_jiffies(timeout_msecs))) { + retval = -ETIMEDOUT; + usb_kill_urb(vub300->urb); + } else { + retval = vub300->urb->status; + } +out: + *actual_length = vub300->urb->actual_length; + usb_free_urb(vub300->urb); + vub300->urb = NULL; + return retval; +} + +static int __command_read_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, struct mmc_data *data) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + int linear_length = vub300->datasize; + int padded_length = vub300->large_usb_packets ? 
+ ((511 + linear_length) >> 9) << 9 : + ((63 + linear_length) >> 6) << 6; + if ((padded_length == linear_length) || !pad_input_to_usb_pkt) { + int result; + unsigned pipe; + pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep); + result = usb_sg_init(&vub300->sg_request, vub300->udev, + pipe, 0, data->sg, + data->sg_len, 0, GFP_KERNEL); + if (result < 0) { + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); + cmd->error = result; + data->bytes_xfered = 0; + return 0; + } else { + vub300->sg_transfer_timer.expires = + jiffies + msecs_to_jiffies(2000 + + (linear_length / 16384)); + add_timer(&vub300->sg_transfer_timer); + usb_sg_wait(&vub300->sg_request); + del_timer(&vub300->sg_transfer_timer); + if (vub300->sg_request.status < 0) { + cmd->error = vub300->sg_request.status; + data->bytes_xfered = 0; + return 0; + } else { + data->bytes_xfered = vub300->datasize; + return linear_length; + } + } + } else { + u8 *buf = kmalloc(padded_length, GFP_KERNEL); + if (buf) { + int result; + unsigned pipe = usb_rcvbulkpipe(vub300->udev, + vub300->data_inp_ep); + int actual_length = 0; + result = vub300_usb_bulk_msg(vub300, pipe, buf, + padded_length, &actual_length, + 2000 + (padded_length / 16384)); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + kfree(buf); + return 0; + } else if (actual_length < linear_length) { + cmd->error = -EREMOTEIO; + data->bytes_xfered = 0; + kfree(buf); + return 0; + } else { + sg_copy_from_buffer(data->sg, data->sg_len, buf, + linear_length); + kfree(buf); + data->bytes_xfered = vub300->datasize; + return linear_length; + } + } else { + cmd->error = -ENOMEM; + data->bytes_xfered = 0; + return 0; + } + } +} + +static int __command_write_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, struct mmc_data *data) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep); + int linear_length = vub300->datasize; + int modulo_64_length = linear_length & 0x003F; + int modulo_512_length = linear_length & 0x01FF; + if (linear_length < 64) { + int result; + int actual_length; + sg_copy_to_buffer(data->sg, data->sg_len, + vub300->padded_buffer, + sizeof(vub300->padded_buffer)); + memset(vub300->padded_buffer + linear_length, 0, + sizeof(vub300->padded_buffer) - linear_length); + result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer, + sizeof(vub300->padded_buffer), + &actual_length, 2000 + + (sizeof(vub300->padded_buffer) / + 16384)); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) || + (vub300->large_usb_packets && (64 > modulo_512_length)) + ) { /* don't you just love these work-rounds */ + int padded_length = ((63 + linear_length) >> 6) << 6; + u8 *buf = kmalloc(padded_length, GFP_KERNEL); + if (buf) { + int result; + int actual_length; + sg_copy_to_buffer(data->sg, data->sg_len, buf, + padded_length); + memset(buf + linear_length, 0, + padded_length - linear_length); + result = + vub300_usb_bulk_msg(vub300, pipe, buf, + padded_length, &actual_length, + 2000 + padded_length / 16384); + kfree(buf); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } else { + cmd->error = -ENOMEM; + data->bytes_xfered = 0; + } + } else { /* no data padding required */ + int result; + unsigned char buf[64 * 4]; + 
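+ /* this branch is taken when no padding is required: either the length is a whole number of 64-byte packets, or large USB packets are in use and the final partial 512-byte packet still carries at least 64 bytes, so the scatterlist can be sent as-is */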
sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf)); + result = usb_sg_init(&vub300->sg_request, vub300->udev, + pipe, 0, data->sg, + data->sg_len, 0, GFP_KERNEL); + if (result < 0) { + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); + cmd->error = result; + data->bytes_xfered = 0; + } else { + vub300->sg_transfer_timer.expires = + jiffies + msecs_to_jiffies(2000 + + linear_length / 16384); + add_timer(&vub300->sg_transfer_timer); + usb_sg_wait(&vub300->sg_request); + if (cmd->error) { + data->bytes_xfered = 0; + } else { + del_timer(&vub300->sg_transfer_timer); + if (vub300->sg_request.status < 0) { + cmd->error = vub300->sg_request.status; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } + } + } + return linear_length; +} + +static void __vub300_command_response(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, + struct mmc_data *data, int data_length) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + long respretval; + int msec_timeout = 1000 + data_length / 4; + respretval = + wait_for_completion_timeout(&vub300->command_complete, + msecs_to_jiffies(msec_timeout)); + if (respretval == 0) { /* TIMED OUT */ + /* we don't know which of "out" and "res" if any failed */ + int result; + vub300->usb_timed_out = 1; + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + cmd->error = -ETIMEDOUT; + result = usb_lock_device_for_reset(vub300->udev, + vub300->interface); + if (result == 0) { + result = usb_reset_device(vub300->udev); + usb_unlock_device(vub300->udev); + } + } else if (respretval < 0) { + /* we don't know which of "out" and "res" if any failed */ + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + cmd->error = respretval; + } else if (cmd->error) { + /* + * the error occurred sending the command + * or receiving the response + */ + } else if (vub300->command_out_urb->status) { + vub300->usb_transport_fail = vub300->command_out_urb->status; + cmd->error = -EPROTO == vub300->command_out_urb->status ? + -ESHUTDOWN : vub300->command_out_urb->status; + } else if (vub300->command_res_urb->status) { + vub300->usb_transport_fail = vub300->command_res_urb->status; + cmd->error = -EPROTO == vub300->command_res_urb->status ? 
+ -ESHUTDOWN : vub300->command_res_urb->status; + } else if (vub300->resp.common.header_type == 0x00) { + /* + * the command completed successfully + * and there was no piggybacked data + */ + } else if (vub300->resp.common.header_type == RESPONSE_ERROR) { + cmd->error = + vub300_response_error(vub300->resp.error.error_code); + if (vub300->data) + usb_sg_cancel(&vub300->sg_request); + } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued += 1; + } else if (vub300->irq_enabled) { + vub300->irqs_queued += 1; + vub300_queue_poll_work(vub300, 0); + } else { + vub300->irqs_queued += 1; + } + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued += 1; + } else if (vub300->irq_enabled) { + vub300->irqs_queued += 1; + vub300_queue_poll_work(vub300, 0); + } else { + vub300->irqs_queued += 1; + } + vub300->irq_disabled = 0; + mutex_unlock(&vub300->irq_mutex); + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else { + cmd->error = -EINVAL; + } +} + +static void construct_request_response(struct vub300_mmc_host *vub300, + struct mmc_command *cmd) +{ + int resp_len = vub300->resp_len; + int less_cmd = (17 == resp_len) ? 
resp_len : resp_len - 1; + int bytes = 3 & less_cmd; + int words = less_cmd >> 2; + u8 *r = vub300->resp.response.command_response; + + if (!resp_len) + return; + if (bytes == 3) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16) + | (r[3 + (words << 2)] << 8); + } else if (bytes == 2) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16); + } else if (bytes == 1) { + cmd->resp[words] = (r[1 + (words << 2)] << 24); + } + while (words-- > 0) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16) + | (r[3 + (words << 2)] << 8) + | (r[4 + (words << 2)] << 0); + } + if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0])) + cmd->resp[0] &= 0xFFFFFF00; +} + +/* this thread runs only when there is an upper level command req outstanding */ +static void vub300_cmndwork_thread(struct work_struct *work) +{ + struct vub300_mmc_host *vub300 = + container_of(work, struct vub300_mmc_host, cmndwork); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } else { + struct mmc_request *req = vub300->req; + struct mmc_command *cmd = vub300->cmd; + struct mmc_data *data = vub300->data; + int data_length; + mutex_lock(&vub300->cmd_mutex); + init_completion(&vub300->command_complete); + if (likely(vub300->vub_name[0]) || !vub300->mmc->card) { + /* + * the name of the EMPTY Pseudo firmware file + * is used as a flag to indicate that the file + * has been already downloaded to the VUB300 chip + */ + } else if (0 == vub300->mmc->card->sdio_funcs) { + strncpy(vub300->vub_name, "SD memory device", + sizeof(vub300->vub_name)); + } else { + download_offload_pseudocode(vub300); + } + send_command(vub300); + if (!data) + data_length = 0; + else if (MMC_DATA_READ & data->flags) + data_length = __command_read_data(vub300, cmd, data); + else + data_length = __command_write_data(vub300, cmd, data); + __vub300_command_response(vub300, cmd, data, data_length); + vub300->req = NULL; + vub300->cmd = NULL; + vub300->data = NULL; + if (cmd->error) { + if (cmd->error == -ENOMEDIUM) + check_vub300_port_status(vub300); + mutex_unlock(&vub300->cmd_mutex); + mmc_request_done(vub300->mmc, req); + kref_put(&vub300->kref, vub300_delete); + return; + } else { + construct_request_response(vub300, cmd); + vub300->resp_len = 0; + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + mmc_request_done(vub300->mmc, req); + return; + } + } +} + +static int examine_cyclic_buffer(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, u8 Function) +{ + /* cmd_mutex is held by vub300_mmc_request */ + u8 cmd0 = 0xFF & (cmd->arg >> 24); + u8 cmd1 = 0xFF & (cmd->arg >> 16); + u8 cmd2 = 0xFF & (cmd->arg >> 8); + u8 cmd3 = 0xFF & (cmd->arg >> 0); + int first = MAXREGMASK & vub300->fn[Function].offload_point; + struct offload_registers_access *rf = &vub300->fn[Function].reg[first]; + if (cmd0 == rf->command_byte[0] && + cmd1 == rf->command_byte[1] && + cmd2 == rf->command_byte[2] && + cmd3 == rf->command_byte[3]) { + u8 checksum = 0x00; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (rf->Respond_Byte[0] << 24) + | (rf->Respond_Byte[1] << 16) + | (rf->Respond_Byte[2] << 8) + | (rf->Respond_Byte[3] << 0); + vub300->fn[Function].offload_point += 1; + vub300->fn[Function].offload_count -= 1; + vub300->total_offload_count -= 1; + return 1; + } else { + int delta = 1; /* because it does not match the first one */ + u8 register_count = vub300->fn[Function].offload_count - 1; + u32 register_point = 
vub300->fn[Function].offload_point + 1;
+		while (0 < register_count) {
+			int point = MAXREGMASK & register_point;
+			struct offload_registers_access *r =
+				&vub300->fn[Function].reg[point];
+			if (cmd0 == r->command_byte[0] &&
+			    cmd1 == r->command_byte[1] &&
+			    cmd2 == r->command_byte[2] &&
+			    cmd3 == r->command_byte[3]) {
+				u8 checksum = 0x00;
+				cmd->resp[1] = checksum << 24;
+				cmd->resp[0] = (r->Respond_Byte[0] << 24)
+					| (r->Respond_Byte[1] << 16)
+					| (r->Respond_Byte[2] << 8)
+					| (r->Respond_Byte[3] << 0);
+				vub300->fn[Function].offload_point += delta;
+				vub300->fn[Function].offload_count -= delta;
+				vub300->total_offload_count -= delta;
+				return 1;
+			} else {
+				register_point += 1;
+				register_count -= 1;
+				delta += 1;
+				continue;
+			}
+		}
+		return 0;
+	}
+}
+
+static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
+				struct mmc_command *cmd)
+{
+	/* cmd_mutex is held by vub300_mmc_request */
+	u8 regs = vub300->dynamic_register_count;
+	u8 i = 0;
+	u8 func = FUN(cmd);
+	u32 reg = REG(cmd);
+	while (0 < regs--) {
+		if ((vub300->sdio_register[i].func_num == func) &&
+		    (vub300->sdio_register[i].sdio_reg == reg)) {
+			if (!vub300->sdio_register[i].prepared) {
+				return 0;
+			} else if ((0x80000000 & cmd->arg) == 0x80000000) {
+				/*
+				 * a write to a dynamic register
+				 * nullifies our offloaded value
+				 */
+				vub300->sdio_register[i].prepared = 0;
+				return 0;
+			} else {
+				u8 checksum = 0x00;
+				u8 rsp0 = 0x00;
+				u8 rsp1 = 0x00;
+				u8 rsp2 = vub300->sdio_register[i].response;
+				u8 rsp3 = vub300->sdio_register[i].regvalue;
+				vub300->sdio_register[i].prepared = 0;
+				cmd->resp[1] = checksum << 24;
+				cmd->resp[0] = (rsp0 << 24)
+					| (rsp1 << 16)
+					| (rsp2 << 8)
+					| (rsp3 << 0);
+				return 1;
+			}
+		} else {
+			i += 1;
+			continue;
+		}
+	}
+	if (vub300->total_offload_count == 0)
+		return 0;
+	else if (vub300->fn[func].offload_count == 0)
+		return 0;
+	else
+		return examine_cyclic_buffer(vub300, cmd, func);
+}
+
+static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{ /* NOT irq */
+	struct mmc_command *cmd = req->cmd;
+	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+	if (!vub300->interface) {
+		cmd->error = -ESHUTDOWN;
+		mmc_request_done(mmc, req);
+		return;
+	} else {
+		struct mmc_data *data = req->data;
+		if (!vub300->card_powered) {
+			cmd->error = -ENOMEDIUM;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		if (!vub300->card_present) {
+			cmd->error = -ENOMEDIUM;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		if (vub300->usb_transport_fail) {
+			cmd->error = vub300->usb_transport_fail;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		if (!vub300->interface) {
+			cmd->error = -ENODEV;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		kref_get(&vub300->kref);
+		mutex_lock(&vub300->cmd_mutex);
+		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+		/*
+		 * for performance we have to return immediately
+		 * if the requested data has been offloaded
+		 */
+		if (cmd->opcode == 52 &&
+		    satisfy_request_from_offloaded_data(vub300, cmd)) {
+			cmd->error = 0;
+			mutex_unlock(&vub300->cmd_mutex);
+			kref_put(&vub300->kref, vub300_delete);
+			mmc_request_done(mmc, req);
+			return;
+		} else {
+			vub300->cmd = cmd;
+			vub300->req = req;
+			vub300->data = data;
+			if (data)
+				vub300->datasize = data->blksz * data->blocks;
+			else
+				vub300->datasize = 0;
+			vub300_queue_cmnd_work(vub300);
+			mutex_unlock(&vub300->cmd_mutex);
+			kref_put(&vub300->kref, vub300_delete);
+			/*
+			 * the kernel lock diagnostics complain
+			 * if the cmd_mutex is "passed on"
+			 * to the cmndwork thread,
+			 * so we must release it now
+			 * 
and re-acquire it in the cmndwork thread + */ + } + } +} + +static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8], + struct mmc_ios *ios) +{ + int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! */ + int retval; + u32 kHzClock; + if (ios->clock >= 48000000) + kHzClock = 48000; + else if (ios->clock >= 24000000) + kHzClock = 24000; + else if (ios->clock >= 20000000) + kHzClock = 20000; + else if (ios->clock >= 15000000) + kHzClock = 15000; + else if (ios->clock >= 200000) + kHzClock = 200; + else + kHzClock = 0; + { + int i; + u64 c = kHzClock; + for (i = 0; i < buf_array_size; i++) { + buf[i] = c; + c >>= 8; + } + } + retval = + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_CLOCK_SPEED, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x00, 0x00, buf, buf_array_size, 1000); + if (retval != 8) { + dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" + " %dkHz failed with retval=%d\n", kHzClock, retval); + } else { + dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED" + " %dkHz\n", kHzClock); + } +} + +static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) + return; + kref_get(&vub300->kref); + mutex_lock(&vub300->cmd_mutex); + if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) { + vub300->card_powered = 0; + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_SD_POWER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, NULL, 0, 1000); + /* must wait for the VUB300 u-proc to boot up */ + msleep(600); + } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_SD_POWER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0001, 0x0000, NULL, 0, 1000); + msleep(600); + vub300->card_powered = 1; + } else if (ios->power_mode == MMC_POWER_ON) { + u8 *buf = kmalloc(8, GFP_KERNEL); + if (buf) { + __set_clock_speed(vub300, buf, ios); + kfree(buf); + } + } else { + /* this should mean no change of state */ + } + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static int vub300_mmc_get_ro(struct mmc_host *mmc) +{ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + return vub300->read_only; +} + +static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) + return; + kref_get(&vub300->kref); + if (enable) { + set_current_state(TASK_RUNNING); + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued -= 1; + mmc_signal_sdio_irq(vub300->mmc); + } else if (vub300->irq_disabled) { + vub300->irq_disabled = 0; + vub300->irq_enabled = 1; + vub300_queue_poll_work(vub300, 0); + } else if (vub300->irq_enabled) { + /* this should not happen, so we will just ignore it */ + } else { + vub300->irq_enabled = 1; + vub300_queue_poll_work(vub300, 0); + } + mutex_unlock(&vub300->irq_mutex); + set_current_state(TASK_INTERRUPTIBLE); + } else { + vub300->irq_enabled = 0; + } + kref_put(&vub300->kref, vub300_delete); +} + +static const struct mmc_host_ops vub300_mmc_ops = { + .request = vub300_mmc_request, + .set_ios = vub300_mmc_set_ios, + .get_ro = vub300_mmc_get_ro, + .enable_sdio_irq = vub300_enable_sdio_irq, +}; + +static int vub300_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ /* NOT irq */ + struct vub300_mmc_host *vub300; + struct 
usb_host_interface *iface_desc; + struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); + int i; + int retval = -ENOMEM; + struct urb *command_out_urb; + struct urb *command_res_urb; + struct mmc_host *mmc; + char manufacturer[48]; + char product[32]; + char serial_number[32]; + usb_string(udev, udev->descriptor.iManufacturer, manufacturer, + sizeof(manufacturer)); + usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); + usb_string(udev, udev->descriptor.iSerialNumber, serial_number, + sizeof(serial_number)); + dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct), + manufacturer, product, serial_number); + command_out_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!command_out_urb) { + retval = -ENOMEM; + goto error0; + } + command_res_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!command_res_urb) { + retval = -ENOMEM; + goto error1; + } + /* this also allocates memory for our VUB300 mmc host device */ + mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); + if (!mmc) { + retval = -ENOMEM; + dev_err(&udev->dev, "not enough memory for the mmc_host\n"); + goto error4; + } + /* MMC core transfer sizes tunable parameters */ + mmc->caps = 0; + if (!force_1_bit_data_xfers) + mmc->caps |= MMC_CAP_4_BIT_DATA; + if (!force_polling_for_irqs) + mmc->caps |= MMC_CAP_SDIO_IRQ; + mmc->caps &= ~MMC_CAP_NEEDS_POLL; + /* + * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll + * for devices which results in spurious CMD7's being + * issued which stops some SDIO cards from working + */ + if (limit_speed_to_24_MHz) { + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->f_max = 24000000; + dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n"); + } else { + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->f_max = 48000000; + } + mmc->f_min = 200000; + mmc->max_blk_count = 511; + mmc->max_blk_size = 512; + mmc->max_segs = 128; + if (force_max_req_size) + mmc->max_req_size = force_max_req_size * 1024; + else + mmc->max_req_size = 64 * 1024; + mmc->max_seg_size = mmc->max_req_size; + mmc->ocr_avail = 0; + mmc->ocr_avail |= MMC_VDD_165_195; + mmc->ocr_avail |= MMC_VDD_20_21; + mmc->ocr_avail |= MMC_VDD_21_22; + mmc->ocr_avail |= MMC_VDD_22_23; + mmc->ocr_avail |= MMC_VDD_23_24; + mmc->ocr_avail |= MMC_VDD_24_25; + mmc->ocr_avail |= MMC_VDD_25_26; + mmc->ocr_avail |= MMC_VDD_26_27; + mmc->ocr_avail |= MMC_VDD_27_28; + mmc->ocr_avail |= MMC_VDD_28_29; + mmc->ocr_avail |= MMC_VDD_29_30; + mmc->ocr_avail |= MMC_VDD_30_31; + mmc->ocr_avail |= MMC_VDD_31_32; + mmc->ocr_avail |= MMC_VDD_32_33; + mmc->ocr_avail |= MMC_VDD_33_34; + mmc->ocr_avail |= MMC_VDD_34_35; + mmc->ocr_avail |= MMC_VDD_35_36; + mmc->ops = &vub300_mmc_ops; + vub300 = mmc_priv(mmc); + vub300->mmc = mmc; + vub300->card_powered = 0; + vub300->bus_width = 0; + vub300->cmnd.head.block_size[0] = 0x00; + vub300->cmnd.head.block_size[1] = 0x00; + vub300->app_spec = 0; + mutex_init(&vub300->cmd_mutex); + mutex_init(&vub300->irq_mutex); + vub300->command_out_urb = command_out_urb; + vub300->command_res_urb = command_res_urb; + vub300->usb_timed_out = 0; + vub300->dynamic_register_count = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { + vub300->fn[i].offload_point = 0; + vub300->fn[i].offload_count = 0; + } + + vub300->total_offload_count = 0; + vub300->irq_enabled = 0; + vub300->irq_disabled = 0; + vub300->irqs_queued = 0; + + for (i = 0; i < 
ARRAY_SIZE(vub300->sdio_register); i++)
+		vub300->sdio_register[i].activate = 0;
+
+	vub300->udev = udev;
+	vub300->interface = interface;
+	vub300->cmnd_res_ep = 0;
+	vub300->cmnd_out_ep = 0;
+	vub300->data_inp_ep = 0;
+	vub300->data_out_ep = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
+		vub300->fbs[i] = 512;
+
+	/*
+	 * set up the endpoint information
+	 *
+	 * use the first pair of bulk-in and bulk-out
+	 * endpoints for Command/Response+Interrupt
+	 *
+	 * use the second pair of bulk-in and bulk-out
+	 * endpoints for Data In/Out
+	 */
+	vub300->large_usb_packets = 0;
+	iface_desc = interface->cur_altsetting;
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		struct usb_endpoint_descriptor *endpoint =
+			&iface_desc->endpoint[i].desc;
+		dev_info(&vub300->udev->dev,
+			 "vub300 testing %s EndPoint(%d) %02X\n",
+			 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
+			 usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
+			 "UNKNOWN", i, endpoint->bEndpointAddress);
+		if (endpoint->wMaxPacketSize > 64)
+			vub300->large_usb_packets = 1;
+		if (usb_endpoint_is_bulk_in(endpoint)) {
+			if (!vub300->cmnd_res_ep) {
+				vub300->cmnd_res_ep =
+					endpoint->bEndpointAddress;
+			} else if (!vub300->data_inp_ep) {
+				vub300->data_inp_ep =
+					endpoint->bEndpointAddress;
+			} else {
+				dev_warn(&vub300->udev->dev,
+					 "ignoring"
+					 " unexpected bulk_in endpoint");
+			}
+		} else if (usb_endpoint_is_bulk_out(endpoint)) {
+			if (!vub300->cmnd_out_ep) {
+				vub300->cmnd_out_ep =
+					endpoint->bEndpointAddress;
+			} else if (!vub300->data_out_ep) {
+				vub300->data_out_ep =
+					endpoint->bEndpointAddress;
+			} else {
+				dev_warn(&vub300->udev->dev,
+					 "ignoring"
+					 " unexpected bulk_out endpoint");
+			}
+		} else {
+			dev_warn(&vub300->udev->dev,
+				 "vub300 ignoring EndPoint(%d) %02X", i,
+				 endpoint->bEndpointAddress);
+		}
+	}
+	if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
+	    vub300->data_inp_ep && vub300->data_out_ep) {
+		dev_info(&vub300->udev->dev,
+			 "vub300 %s packets"
+			 " using EndPoints %02X %02X %02X %02X\n",
+			 vub300->large_usb_packets ? "LARGE" : "SMALL",
+			 vub300->cmnd_out_ep, vub300->cmnd_res_ep,
+			 vub300->data_out_ep, vub300->data_inp_ep);
+		/* we have the expected EndPoints */
+	} else {
+		dev_err(&vub300->udev->dev,
+			"Could not find two sets of bulk-in/out endpoint pairs\n");
+		retval = -EINVAL;
+		goto error5;
+	}
+	retval =
+		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+				GET_HC_INF0,
+				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, &vub300->hc_info,
+				sizeof(vub300->hc_info), 1000);
+	if (retval < 0)
+		goto error5;
+	retval =
+		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+				SET_ROM_WAIT_STATES,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
+	if (retval < 0)
+		goto error5;
+	dev_info(&vub300->udev->dev,
+		 "operating_mode = %s %s %d MHz %s %d byte USB packets\n",
+		 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
+		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
+		 mmc->f_max / 1000000,
+		 pad_input_to_usb_pkt ? "padding input data to" : "with",
+		 vub300->large_usb_packets ? 512 : 64);
+	retval =
+		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+				GET_SYSTEM_PORT_STATUS,
+				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, &vub300->system_port_status,
+				sizeof(vub300->system_port_status), 1000);
+	if (retval < 0) {
+		goto error5;
+	} else if (sizeof(vub300->system_port_status) == retval) {
+		vub300->card_present =
+			(0x0001 & vub300->system_port_status.port_flags) ? 
1 : 0;
+		vub300->read_only =
+			(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+	} else {
+		retval = -EINVAL;
+		goto error5;
+	}
+	usb_set_intfdata(interface, vub300);
+	INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
+	INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
+	INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
+	kref_init(&vub300->kref);
+	timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
+	kref_get(&vub300->kref);
+	timer_setup(&vub300->inactivity_timer,
+		    vub300_inactivity_timer_expired, 0);
+	vub300->inactivity_timer.expires = jiffies + HZ;
+	add_timer(&vub300->inactivity_timer);
+	if (vub300->card_present)
+		dev_info(&vub300->udev->dev,
+			 "USB vub300 remote SDIO host controller[%d]"
+			 " connected with SD/SDIO card inserted\n",
+			 interface_to_InterfaceNumber(interface));
+	else
+		dev_info(&vub300->udev->dev,
+			 "USB vub300 remote SDIO host controller[%d]"
+			 " connected with no SD/SDIO card inserted\n",
+			 interface_to_InterfaceNumber(interface));
+	retval = mmc_add_host(mmc);
+	if (retval)
+		goto error6;
+
+	return 0;
+error6:
+	del_timer_sync(&vub300->inactivity_timer);
+error5:
+	mmc_free_host(mmc);
+	/*
+	 * and hence also frees vub300
+	 * which is contained at the end of struct mmc
+	 */
+error4:
+	usb_free_urb(command_res_urb);
+error1:
+	usb_free_urb(command_out_urb);
+error0:
+	usb_put_dev(udev);
+	return retval;
+}
+
+static void vub300_disconnect(struct usb_interface *interface)
+{ /* NOT irq */
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
+	if (!vub300 || !vub300->mmc) {
+		return;
+	} else {
+		struct mmc_host *mmc = vub300->mmc;
+		if (!vub300->mmc) {
+			return;
+		} else {
+			int ifnum = interface_to_InterfaceNumber(interface);
+			usb_set_intfdata(interface, NULL);
+			/* prevent more I/O from starting */
+			vub300->interface = NULL;
+			kref_put(&vub300->kref, vub300_delete);
+			mmc_remove_host(mmc);
+			pr_info("USB vub300 remote SDIO host controller[%d]"
+				" now disconnected", ifnum);
+			return;
+		}
+	}
+}
+
+#ifdef CONFIG_PM
+static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	return 0;
+}
+
+static int vub300_resume(struct usb_interface *intf)
+{
+	return 0;
+}
+#else
+#define vub300_suspend NULL
+#define vub300_resume NULL
+#endif
+static int vub300_pre_reset(struct usb_interface *intf)
+{ /* NOT irq */
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+	mutex_lock(&vub300->cmd_mutex);
+	return 0;
+}
+
+static int vub300_post_reset(struct usb_interface *intf)
+{ /* NOT irq */
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+	/* we are sure no URBs are active - no locking needed */
+	vub300->errors = -EPIPE;
+	mutex_unlock(&vub300->cmd_mutex);
+	return 0;
+}
+
+static struct usb_driver vub300_driver = {
+	.name = "vub300",
+	.probe = vub300_probe,
+	.disconnect = vub300_disconnect,
+	.suspend = vub300_suspend,
+	.resume = vub300_resume,
+	.pre_reset = vub300_pre_reset,
+	.post_reset = vub300_post_reset,
+	.id_table = vub300_table,
+	.supports_autosuspend = 1,
+};
+
+static int __init vub300_init(void)
+{ /* NOT irq */
+	int result;
+
+	pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
+		firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
+	cmndworkqueue = create_singlethread_workqueue("kvub300c");
+	if (!cmndworkqueue) {
+		pr_err("not enough memory for the REQUEST workqueue");
+		result = -ENOMEM;
+		goto out1;
+	}
+	pollworkqueue = create_singlethread_workqueue("kvub300p");
+	if (!pollworkqueue) {
+		pr_err("not enough memory for the 
IRQPOLL workqueue");
+		result = -ENOMEM;
+		goto out2;
+	}
+	deadworkqueue = create_singlethread_workqueue("kvub300d");
+	if (!deadworkqueue) {
+		pr_err("not enough memory for the EXPIRED workqueue");
+		result = -ENOMEM;
+		goto out3;
+	}
+	result = usb_register(&vub300_driver);
+	if (result) {
+		pr_err("usb_register failed. Error number %d", result);
+		goto out4;
+	}
+	return 0;
+out4:
+	destroy_workqueue(deadworkqueue);
+out3:
+	destroy_workqueue(pollworkqueue);
+out2:
+	destroy_workqueue(cmndworkqueue);
+out1:
+	return result;
+}
+
+static void __exit vub300_exit(void)
+{
+	usb_deregister(&vub300_driver);
+	flush_workqueue(cmndworkqueue);
+	flush_workqueue(pollworkqueue);
+	flush_workqueue(deadworkqueue);
+	destroy_workqueue(cmndworkqueue);
+	destroy_workqueue(pollworkqueue);
+	destroy_workqueue(deadworkqueue);
+}
+
+module_init(vub300_init);
+module_exit(vub300_exit);
+
+MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
+MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
new file mode 100644
index 000000000..6db08070b
--- /dev/null
+++ b/drivers/mmc/host/wbsd.c
@@ -0,0 +1,2013 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver
+ *
+ * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
+ *
+ * Warning!
+ *
+ * Changes to the FIFO system should be done with extreme care since
+ * the hardware is full of bugs related to the FIFO. Known issues are:
+ *
+ * - FIFO size field in FSR is always zero.
+ *
+ * - FIFO interrupts tend not to work as they should. Interrupts are
+ *   triggered only for full/empty events, not for threshold values.
+ *
+ * - On APIC systems the FIFO empty interrupt is sometimes lost.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/pnp.h>
+#include <linux/highmem.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "wbsd.h"
+
+#define DRIVER_NAME "wbsd"
+
+#define DBG(x...) \
+	pr_debug(DRIVER_NAME ": " x)
+#define DBGF(f, x...) 
\ + pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x) + +/* + * Device resources + */ + +#ifdef CONFIG_PNP + +static const struct pnp_device_id pnp_dev_table[] = { + { "WEC0517", 0 }, + { "WEC0518", 0 }, + { "", 0 }, +}; + +MODULE_DEVICE_TABLE(pnp, pnp_dev_table); + +#endif /* CONFIG_PNP */ + +static const int config_ports[] = { 0x2E, 0x4E }; +static const int unlock_codes[] = { 0x83, 0x87 }; + +static const int valid_ids[] = { + 0x7112, +}; + +#ifdef CONFIG_PNP +static unsigned int param_nopnp = 0; +#else +static const unsigned int param_nopnp = 1; +#endif +static unsigned int param_io = 0x248; +static unsigned int param_irq = 6; +static int param_dma = 2; + +/* + * Basic functions + */ + +static inline void wbsd_unlock_config(struct wbsd_host *host) +{ + BUG_ON(host->config == 0); + + outb(host->unlock_code, host->config); + outb(host->unlock_code, host->config); +} + +static inline void wbsd_lock_config(struct wbsd_host *host) +{ + BUG_ON(host->config == 0); + + outb(LOCK_CODE, host->config); +} + +static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value) +{ + BUG_ON(host->config == 0); + + outb(reg, host->config); + outb(value, host->config + 1); +} + +static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg) +{ + BUG_ON(host->config == 0); + + outb(reg, host->config); + return inb(host->config + 1); +} + +static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value) +{ + outb(index, host->base + WBSD_IDXR); + outb(value, host->base + WBSD_DATAR); +} + +static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index) +{ + outb(index, host->base + WBSD_IDXR); + return inb(host->base + WBSD_DATAR); +} + +/* + * Common routines + */ + +static void wbsd_init_device(struct wbsd_host *host) +{ + u8 setup, ier; + + /* + * Reset chip (SD/MMC part) and fifo. + */ + setup = wbsd_read_index(host, WBSD_IDX_SETUP); + setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET; + wbsd_write_index(host, WBSD_IDX_SETUP, setup); + + /* + * Set DAT3 to input + */ + setup &= ~WBSD_DAT3_H; + wbsd_write_index(host, WBSD_IDX_SETUP, setup); + host->flags &= ~WBSD_FIGNORE_DETECT; + + /* + * Read back default clock. + */ + host->clk = wbsd_read_index(host, WBSD_IDX_CLK); + + /* + * Power down port. + */ + outb(WBSD_POWER_N, host->base + WBSD_CSR); + + /* + * Set maximum timeout. + */ + wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F); + + /* + * Test for card presence + */ + if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT) + host->flags |= WBSD_FCARD_PRESENT; + else + host->flags &= ~WBSD_FCARD_PRESENT; + + /* + * Enable interesting interrupts. + */ + ier = 0; + ier |= WBSD_EINT_CARD; + ier |= WBSD_EINT_FIFO_THRE; + ier |= WBSD_EINT_CRC; + ier |= WBSD_EINT_TIMEOUT; + ier |= WBSD_EINT_TC; + + outb(ier, host->base + WBSD_EIR); + + /* + * Clear interrupts. + */ + inb(host->base + WBSD_ISR); +} + +static void wbsd_reset(struct wbsd_host *host) +{ + u8 setup; + + pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc)); + + /* + * Soft reset of chip (SD/MMC part). + */ + setup = wbsd_read_index(host, WBSD_IDX_SETUP); + setup |= WBSD_SOFT_RESET; + wbsd_write_index(host, WBSD_IDX_SETUP, setup); +} + +static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq) +{ + unsigned long dmaflags; + + if (host->dma >= 0) { + /* + * Release ISA DMA controller. + */ + dmaflags = claim_dma_lock(); + disable_dma(host->dma); + clear_dma_ff(host->dma); + release_dma_lock(dmaflags); + + /* + * Disable DMA on host. 
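+		 *
+		 * All WBSD_IDX_* registers are reached through the chip's
+		 * indirect index/data register pair, so this call expands
+		 * to roughly (see wbsd_write_index() above):
+		 *
+		 *	outb(WBSD_IDX_DMA, host->base + WBSD_IDXR);
+		 *	outb(0, host->base + WBSD_DATAR);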
+ */ + wbsd_write_index(host, WBSD_IDX_DMA, 0); + } + + host->mrq = NULL; + + /* + * MMC layer might call back into the driver so first unlock. + */ + spin_unlock(&host->lock); + mmc_request_done(host->mmc, mrq); + spin_lock(&host->lock); +} + +/* + * Scatter/gather functions + */ + +static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data) +{ + /* + * Get info. about SG list from data structure. + */ + host->cur_sg = data->sg; + host->num_sg = data->sg_len; + + host->offset = 0; + host->remain = host->cur_sg->length; +} + +static inline int wbsd_next_sg(struct wbsd_host *host) +{ + /* + * Skip to next SG entry. + */ + host->cur_sg++; + host->num_sg--; + + /* + * Any entries left? + */ + if (host->num_sg > 0) { + host->offset = 0; + host->remain = host->cur_sg->length; + } + + return host->num_sg; +} + +static inline char *wbsd_map_sg(struct wbsd_host *host) +{ + return kmap_atomic(sg_page(host->cur_sg)) + host->cur_sg->offset; +} + +static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) +{ + size_t len = 0; + int i; + + for (i = 0; i < data->sg_len; i++) + len += data->sg[i].length; + sg_copy_to_buffer(data->sg, data->sg_len, host->dma_buffer, len); +} + +static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data) +{ + size_t len = 0; + int i; + + for (i = 0; i < data->sg_len; i++) + len += data->sg[i].length; + sg_copy_from_buffer(data->sg, data->sg_len, host->dma_buffer, len); +} + +/* + * Command handling + */ + +static inline void wbsd_get_short_reply(struct wbsd_host *host, + struct mmc_command *cmd) +{ + /* + * Correct response type? + */ + if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) { + cmd->error = -EILSEQ; + return; + } + + cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24; + cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16; + cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8; + cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0; + cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24; +} + +static inline void wbsd_get_long_reply(struct wbsd_host *host, + struct mmc_command *cmd) +{ + int i; + + /* + * Correct response type? + */ + if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) { + cmd->error = -EILSEQ; + return; + } + + for (i = 0; i < 4; i++) { + cmd->resp[i] = + wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24; + cmd->resp[i] |= + wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16; + cmd->resp[i] |= + wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8; + cmd->resp[i] |= + wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0; + } +} + +static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd) +{ + int i; + u8 status, isr; + + /* + * Clear accumulated ISR. The interrupt routine + * will fill this one with events that occur during + * transfer. + */ + host->isr = 0; + + /* + * Send the command (CRC calculated by host). + */ + outb(cmd->opcode, host->base + WBSD_CMDR); + for (i = 3; i >= 0; i--) + outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR); + + cmd->error = 0; + + /* + * Wait for the request to complete. + */ + do { + status = wbsd_read_index(host, WBSD_IDX_STATUS); + } while (status & WBSD_CARDTRAFFIC); + + /* + * Do we expect a reply? + */ + if (cmd->flags & MMC_RSP_PRESENT) { + /* + * Read back status. + */ + isr = host->isr; + + /* Card removed? */ + if (isr & WBSD_INT_CARD) + cmd->error = -ENOMEDIUM; + /* Timeout? */ + else if (isr & WBSD_INT_TIMEOUT) + cmd->error = -ETIMEDOUT; + /* CRC? 
*/ + else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC)) + cmd->error = -EILSEQ; + /* All ok */ + else { + if (cmd->flags & MMC_RSP_136) + wbsd_get_long_reply(host, cmd); + else + wbsd_get_short_reply(host, cmd); + } + } +} + +/* + * Data functions + */ + +static void wbsd_empty_fifo(struct wbsd_host *host) +{ + struct mmc_data *data = host->mrq->cmd->data; + char *buffer; + int i, idx, fsr, fifo; + + /* + * Handle excessive data. + */ + if (host->num_sg == 0) + return; + + buffer = wbsd_map_sg(host) + host->offset; + idx = 0; + + /* + * Drain the fifo. This has a tendency to loop longer + * than the FIFO length (usually one block). + */ + while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) { + /* + * The size field in the FSR is broken so we have to + * do some guessing. + */ + if (fsr & WBSD_FIFO_FULL) + fifo = 16; + else if (fsr & WBSD_FIFO_FUTHRE) + fifo = 8; + else + fifo = 1; + + for (i = 0; i < fifo; i++) { + buffer[idx++] = inb(host->base + WBSD_DFR); + host->offset++; + host->remain--; + + data->bytes_xfered++; + + /* + * End of scatter list entry? + */ + if (host->remain == 0) { + kunmap_atomic(buffer); + /* + * Get next entry. Check if last. + */ + if (!wbsd_next_sg(host)) + return; + + buffer = wbsd_map_sg(host); + idx = 0; + } + } + } + kunmap_atomic(buffer); + + /* + * This is a very dirty hack to solve a + * hardware problem. The chip doesn't trigger + * FIFO threshold interrupts properly. + */ + if ((data->blocks * data->blksz - data->bytes_xfered) < 16) + tasklet_schedule(&host->fifo_tasklet); +} + +static void wbsd_fill_fifo(struct wbsd_host *host) +{ + struct mmc_data *data = host->mrq->cmd->data; + char *buffer; + int i, idx, fsr, fifo; + + /* + * Check that we aren't being called after the + * entire buffer has been transferred. + */ + if (host->num_sg == 0) + return; + + buffer = wbsd_map_sg(host) + host->offset; + idx = 0; + + /* + * Fill the fifo. This has a tendency to loop longer + * than the FIFO length (usually one block). + */ + while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) { + /* + * The size field in the FSR is broken so we have to + * do some guessing. + */ + if (fsr & WBSD_FIFO_EMPTY) + fifo = 0; + else if (fsr & WBSD_FIFO_EMTHRE) + fifo = 8; + else + fifo = 15; + + for (i = 16; i > fifo; i--) { + outb(buffer[idx], host->base + WBSD_DFR); + host->offset++; + host->remain--; + + data->bytes_xfered++; + + /* + * End of scatter list entry? + */ + if (host->remain == 0) { + kunmap_atomic(buffer); + /* + * Get next entry. Check if last. + */ + if (!wbsd_next_sg(host)) + return; + + buffer = wbsd_map_sg(host); + idx = 0; + } + } + } + kunmap_atomic(buffer); + + /* + * The controller stops sending interrupts for + * 'FIFO empty' under certain conditions. So we + * need to be a bit more pro-active. + */ + tasklet_schedule(&host->fifo_tasklet); +} + +static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) +{ + u16 blksize; + u8 setup; + unsigned long dmaflags; + unsigned int size; + + /* + * Calculate size. + */ + size = data->blocks * data->blksz; + + /* + * Check timeout values for overflow. + * (Yes, some cards cause this value to overflow). 
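+	 *
+	 * Worked example: the TAAC index register below takes the
+	 * timeout in milliseconds and appears to be 7 bits wide
+	 * (0x7F is written at init), so a card asking for
+	 * 80000000 ns gets 80 written, while anything above
+	 * 127000000 ns is clamped to 127.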
+	 */
+	if (data->timeout_ns > 127000000)
+		wbsd_write_index(host, WBSD_IDX_TAAC, 127);
+	else {
+		wbsd_write_index(host, WBSD_IDX_TAAC,
+			data->timeout_ns / 1000000);
+	}
+
+	if (data->timeout_clks > 255)
+		wbsd_write_index(host, WBSD_IDX_NSAC, 255);
+	else
+		wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
+
+	/*
+	 * Inform the chip of how large blocks will be
+	 * sent. It needs this to determine when to
+	 * calculate CRC.
+	 *
+	 * Space for CRC must be included in the size.
+	 * Two bytes are needed for each data line.
+	 */
+	if (host->bus_width == MMC_BUS_WIDTH_1) {
+		blksize = data->blksz + 2;
+
+		wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
+		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
+	} else if (host->bus_width == MMC_BUS_WIDTH_4) {
+		blksize = data->blksz + 2 * 4;
+
+		wbsd_write_index(host, WBSD_IDX_PBSMSB,
+			((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
+		wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
+	} else {
+		data->error = -EINVAL;
+		return;
+	}
+
+	/*
+	 * Clear the FIFO. This is needed even for DMA
+	 * transfers since the chip still uses the FIFO
+	 * internally.
+	 */
+	setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+	setup |= WBSD_FIFO_RESET;
+	wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+	/*
+	 * DMA transfer?
+	 */
+	if (host->dma >= 0) {
+		/*
+		 * The buffer for DMA is only 64 kB.
+		 */
+		BUG_ON(size > 0x10000);
+		if (size > 0x10000) {
+			data->error = -EINVAL;
+			return;
+		}
+
+		/*
+		 * Transfer data from the SG list to
+		 * the DMA buffer.
+		 */
+		if (data->flags & MMC_DATA_WRITE)
+			wbsd_sg_to_dma(host, data);
+
+		/*
+		 * Initialise the ISA DMA controller.
+		 */
+		dmaflags = claim_dma_lock();
+		disable_dma(host->dma);
+		clear_dma_ff(host->dma);
+		if (data->flags & MMC_DATA_READ)
+			set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
+		else
+			set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
+		set_dma_addr(host->dma, host->dma_addr);
+		set_dma_count(host->dma, size);
+
+		enable_dma(host->dma);
+		release_dma_lock(dmaflags);
+
+		/*
+		 * Enable DMA on the host.
+		 */
+		wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
+	} else {
+		/*
+		 * This flag is used to keep printk
+		 * output to a minimum.
+		 */
+		host->firsterr = 1;
+
+		/*
+		 * Initialise the SG list.
+		 */
+		wbsd_init_sg(host, data);
+
+		/*
+		 * Turn off DMA.
+		 */
+		wbsd_write_index(host, WBSD_IDX_DMA, 0);
+
+		/*
+		 * Set up FIFO threshold levels (and fill
+		 * buffer if doing a write).
+		 */
+		if (data->flags & MMC_DATA_READ) {
+			wbsd_write_index(host, WBSD_IDX_FIFOEN,
+				WBSD_FIFOEN_FULL | 8);
+		} else {
+			wbsd_write_index(host, WBSD_IDX_FIFOEN,
+				WBSD_FIFOEN_EMPTY | 8);
+			wbsd_fill_fifo(host);
+		}
+	}
+
+	data->error = 0;
+}
+
+static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
+{
+	unsigned long dmaflags;
+	int count;
+	u8 status;
+
+	WARN_ON(host->mrq == NULL);
+
+	/*
+	 * Send a stop command if needed.
+	 */
+	if (data->stop)
+		wbsd_send_command(host, data->stop);
+
+	/*
+	 * Wait for the controller to leave data
+	 * transfer state.
+	 */
+	do {
+		status = wbsd_read_index(host, WBSD_IDX_STATUS);
+	} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
+
+	/*
+	 * DMA transfer?
+	 */
+	if (host->dma >= 0) {
+		/*
+		 * Disable DMA on the host.
+		 */
+		wbsd_write_index(host, WBSD_IDX_DMA, 0);
+
+		/*
+		 * Turn off ISA DMA controller.
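+		 *
+		 * get_dma_residue() below reports how much the 8237
+		 * never transferred.  E.g. an 8 block, 512 bytes/block
+		 * request (4096 bytes) with a residue of 512 yields
+		 * bytes_xfered = 4096 - 512 = 3584, which is then
+		 * rounded down to a whole number of blocks.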
+ */ + dmaflags = claim_dma_lock(); + disable_dma(host->dma); + clear_dma_ff(host->dma); + count = get_dma_residue(host->dma); + release_dma_lock(dmaflags); + + data->bytes_xfered = host->mrq->data->blocks * + host->mrq->data->blksz - count; + data->bytes_xfered -= data->bytes_xfered % data->blksz; + + /* + * Any leftover data? + */ + if (count) { + pr_err("%s: Incomplete DMA transfer. " + "%d bytes left.\n", + mmc_hostname(host->mmc), count); + + if (!data->error) + data->error = -EIO; + } else { + /* + * Transfer data from DMA buffer to + * SG list. + */ + if (data->flags & MMC_DATA_READ) + wbsd_dma_to_sg(host, data); + } + + if (data->error) { + if (data->bytes_xfered) + data->bytes_xfered -= data->blksz; + } + } + + wbsd_request_end(host, host->mrq); +} + +/*****************************************************************************\ + * * + * MMC layer callbacks * + * * +\*****************************************************************************/ + +static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct wbsd_host *host = mmc_priv(mmc); + struct mmc_command *cmd; + + /* + * Disable tasklets to avoid a deadlock. + */ + spin_lock_bh(&host->lock); + + BUG_ON(host->mrq != NULL); + + cmd = mrq->cmd; + + host->mrq = mrq; + + /* + * Check that there is actually a card in the slot. + */ + if (!(host->flags & WBSD_FCARD_PRESENT)) { + cmd->error = -ENOMEDIUM; + goto done; + } + + if (cmd->data) { + /* + * The hardware is so delightfully stupid that it has a list + * of "data" commands. If a command isn't on this list, it'll + * just go back to the idle state and won't send any data + * interrupts. + */ + switch (cmd->opcode) { + case SD_SWITCH_VOLTAGE: + case MMC_READ_SINGLE_BLOCK: + case MMC_READ_MULTIPLE_BLOCK: + case MMC_WRITE_DAT_UNTIL_STOP: + case MMC_WRITE_BLOCK: + case MMC_WRITE_MULTIPLE_BLOCK: + case MMC_PROGRAM_CID: + case MMC_PROGRAM_CSD: + case MMC_SEND_WRITE_PROT: + case MMC_LOCK_UNLOCK: + case MMC_GEN_CMD: + break; + + /* ACMDs. We don't keep track of state, so we just treat them + * like any other command. */ + case SD_APP_SEND_SCR: + break; + + default: + pr_warn("%s: Data command %d is not supported by this controller\n", + mmc_hostname(host->mmc), cmd->opcode); + cmd->error = -EINVAL; + + goto done; + } + } + + /* + * Does the request include data? + */ + if (cmd->data) { + wbsd_prepare_data(host, cmd->data); + + if (cmd->data->error) + goto done; + } + + wbsd_send_command(host, cmd); + + /* + * If this is a data transfer the request + * will be finished after the data has + * transferred. + */ + if (cmd->data && !cmd->error) { + /* + * Dirty fix for hardware bug. + */ + if (host->dma == -1) + tasklet_schedule(&host->fifo_tasklet); + + spin_unlock_bh(&host->lock); + + return; + } + +done: + wbsd_request_end(host, mrq); + + spin_unlock_bh(&host->lock); +} + +static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct wbsd_host *host = mmc_priv(mmc); + u8 clk, setup, pwr; + + spin_lock_bh(&host->lock); + + /* + * Reset the chip on each power off. + * Should clear out any weird states. + */ + if (ios->power_mode == MMC_POWER_OFF) + wbsd_init_device(host); + + if (ios->clock >= 24000000) + clk = WBSD_CLK_24M; + else if (ios->clock >= 16000000) + clk = WBSD_CLK_16M; + else if (ios->clock >= 12000000) + clk = WBSD_CLK_12M; + else + clk = WBSD_CLK_375K; + + /* + * Only write to the clock register when + * there is an actual change. 
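+	 *
+	 * host->clk caches the last value written (seeded from the
+	 * chip in wbsd_init_device()), so e.g. consecutive requests
+	 * for 25 MHz and then 30 MHz both map to WBSD_CLK_24M and
+	 * only the first one touches the register.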
+ */ + if (clk != host->clk) { + wbsd_write_index(host, WBSD_IDX_CLK, clk); + host->clk = clk; + } + + /* + * Power up card. + */ + if (ios->power_mode != MMC_POWER_OFF) { + pwr = inb(host->base + WBSD_CSR); + pwr &= ~WBSD_POWER_N; + outb(pwr, host->base + WBSD_CSR); + } + + /* + * MMC cards need to have pin 1 high during init. + * It wreaks havoc with the card detection though so + * that needs to be disabled. + */ + setup = wbsd_read_index(host, WBSD_IDX_SETUP); + if (ios->chip_select == MMC_CS_HIGH) { + BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1); + setup |= WBSD_DAT3_H; + host->flags |= WBSD_FIGNORE_DETECT; + } else { + if (setup & WBSD_DAT3_H) { + setup &= ~WBSD_DAT3_H; + + /* + * We cannot resume card detection immediately + * because of capacitance and delays in the chip. + */ + mod_timer(&host->ignore_timer, jiffies + HZ / 100); + } + } + wbsd_write_index(host, WBSD_IDX_SETUP, setup); + + /* + * Store bus width for later. Will be used when + * setting up the data transfer. + */ + host->bus_width = ios->bus_width; + + spin_unlock_bh(&host->lock); +} + +static int wbsd_get_ro(struct mmc_host *mmc) +{ + struct wbsd_host *host = mmc_priv(mmc); + u8 csr; + + spin_lock_bh(&host->lock); + + csr = inb(host->base + WBSD_CSR); + csr |= WBSD_MSLED; + outb(csr, host->base + WBSD_CSR); + + mdelay(1); + + csr = inb(host->base + WBSD_CSR); + csr &= ~WBSD_MSLED; + outb(csr, host->base + WBSD_CSR); + + spin_unlock_bh(&host->lock); + + return !!(csr & WBSD_WRPT); +} + +static const struct mmc_host_ops wbsd_ops = { + .request = wbsd_request, + .set_ios = wbsd_set_ios, + .get_ro = wbsd_get_ro, +}; + +/*****************************************************************************\ + * * + * Interrupt handling * + * * +\*****************************************************************************/ + +/* + * Helper function to reset detection ignore + */ + +static void wbsd_reset_ignore(struct timer_list *t) +{ + struct wbsd_host *host = from_timer(host, t, ignore_timer); + + BUG_ON(host == NULL); + + DBG("Resetting card detection ignore\n"); + + spin_lock_bh(&host->lock); + + host->flags &= ~WBSD_FIGNORE_DETECT; + + /* + * Card status might have changed during the + * blackout. + */ + tasklet_schedule(&host->card_tasklet); + + spin_unlock_bh(&host->lock); +} + +/* + * Tasklets + */ + +static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host) +{ + WARN_ON(!host->mrq); + if (!host->mrq) + return NULL; + + WARN_ON(!host->mrq->cmd); + if (!host->mrq->cmd) + return NULL; + + WARN_ON(!host->mrq->cmd->data); + if (!host->mrq->cmd->data) + return NULL; + + return host->mrq->cmd->data; +} + +static void wbsd_tasklet_card(unsigned long param) +{ + struct wbsd_host *host = (struct wbsd_host *)param; + u8 csr; + int delay = -1; + + spin_lock(&host->lock); + + if (host->flags & WBSD_FIGNORE_DETECT) { + spin_unlock(&host->lock); + return; + } + + csr = inb(host->base + WBSD_CSR); + WARN_ON(csr == 0xff); + + if (csr & WBSD_CARDPRESENT) { + if (!(host->flags & WBSD_FCARD_PRESENT)) { + DBG("Card inserted\n"); + host->flags |= WBSD_FCARD_PRESENT; + + delay = 500; + } + } else if (host->flags & WBSD_FCARD_PRESENT) { + DBG("Card removed\n"); + host->flags &= ~WBSD_FCARD_PRESENT; + + if (host->mrq) { + pr_err("%s: Card removed during transfer!\n", + mmc_hostname(host->mmc)); + wbsd_reset(host); + + host->mrq->cmd->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } + + delay = 0; + } + + /* + * Unlock first since we might get a call back. 
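+	 *
+	 * (Presumably because mmc_detect_change() can lead back
+	 * into wbsd_request(), which takes host->lock; dropping
+	 * the lock first avoids any chance of recursion on it.)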
+ */ + + spin_unlock(&host->lock); + + if (delay != -1) + mmc_detect_change(host->mmc, msecs_to_jiffies(delay)); +} + +static void wbsd_tasklet_fifo(unsigned long param) +{ + struct wbsd_host *host = (struct wbsd_host *)param; + struct mmc_data *data; + + spin_lock(&host->lock); + + if (!host->mrq) + goto end; + + data = wbsd_get_data(host); + if (!data) + goto end; + + if (data->flags & MMC_DATA_WRITE) + wbsd_fill_fifo(host); + else + wbsd_empty_fifo(host); + + /* + * Done? + */ + if (host->num_sg == 0) { + wbsd_write_index(host, WBSD_IDX_FIFOEN, 0); + tasklet_schedule(&host->finish_tasklet); + } + +end: + spin_unlock(&host->lock); +} + +static void wbsd_tasklet_crc(unsigned long param) +{ + struct wbsd_host *host = (struct wbsd_host *)param; + struct mmc_data *data; + + spin_lock(&host->lock); + + if (!host->mrq) + goto end; + + data = wbsd_get_data(host); + if (!data) + goto end; + + DBGF("CRC error\n"); + + data->error = -EILSEQ; + + tasklet_schedule(&host->finish_tasklet); + +end: + spin_unlock(&host->lock); +} + +static void wbsd_tasklet_timeout(unsigned long param) +{ + struct wbsd_host *host = (struct wbsd_host *)param; + struct mmc_data *data; + + spin_lock(&host->lock); + + if (!host->mrq) + goto end; + + data = wbsd_get_data(host); + if (!data) + goto end; + + DBGF("Timeout\n"); + + data->error = -ETIMEDOUT; + + tasklet_schedule(&host->finish_tasklet); + +end: + spin_unlock(&host->lock); +} + +static void wbsd_tasklet_finish(unsigned long param) +{ + struct wbsd_host *host = (struct wbsd_host *)param; + struct mmc_data *data; + + spin_lock(&host->lock); + + WARN_ON(!host->mrq); + if (!host->mrq) + goto end; + + data = wbsd_get_data(host); + if (!data) + goto end; + + wbsd_finish_data(host, data); + +end: + spin_unlock(&host->lock); +} + +/* + * Interrupt handling + */ + +static irqreturn_t wbsd_irq(int irq, void *dev_id) +{ + struct wbsd_host *host = dev_id; + int isr; + + isr = inb(host->base + WBSD_ISR); + + /* + * Was it actually our hardware that caused the interrupt? + */ + if (isr == 0xff || isr == 0x00) + return IRQ_NONE; + + host->isr |= isr; + + /* + * Schedule tasklets as needed. + */ + if (isr & WBSD_INT_CARD) + tasklet_schedule(&host->card_tasklet); + if (isr & WBSD_INT_FIFO_THRE) + tasklet_schedule(&host->fifo_tasklet); + if (isr & WBSD_INT_CRC) + tasklet_hi_schedule(&host->crc_tasklet); + if (isr & WBSD_INT_TIMEOUT) + tasklet_hi_schedule(&host->timeout_tasklet); + if (isr & WBSD_INT_TC) + tasklet_schedule(&host->finish_tasklet); + + return IRQ_HANDLED; +} + +/*****************************************************************************\ + * * + * Device initialisation and shutdown * + * * +\*****************************************************************************/ + +/* + * Allocate/free MMC structure. + */ + +static int wbsd_alloc_mmc(struct device *dev) +{ + struct mmc_host *mmc; + struct wbsd_host *host; + + /* + * Allocate MMC structure. + */ + mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->mmc = mmc; + + host->dma = -1; + + /* + * Set host parameters. + */ + mmc->ops = &wbsd_ops; + mmc->f_min = 375000; + mmc->f_max = 24000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + + spin_lock_init(&host->lock); + + /* + * Set up timers + */ + timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0); + + /* + * Maximum number of segments. Worst case is one sector per segment + * so this will be 64kB/512. 
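+	 * That is 65536 / 512 = 128 entries.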
+ */ + mmc->max_segs = 128; + + /* + * Maximum request size. Also limited by 64KiB buffer. + */ + mmc->max_req_size = 65536; + + /* + * Maximum segment size. Could be one segment with the maximum number + * of bytes. + */ + mmc->max_seg_size = mmc->max_req_size; + + /* + * Maximum block size. We have 12 bits (= 4095) but have to subtract + * space for CRC. So the maximum is 4095 - 4*2 = 4087. + */ + mmc->max_blk_size = 4087; + + /* + * Maximum block count. There is no real limit so the maximum + * request size will be the only restriction. + */ + mmc->max_blk_count = mmc->max_req_size; + + dev_set_drvdata(dev, mmc); + + return 0; +} + +static void wbsd_free_mmc(struct device *dev) +{ + struct mmc_host *mmc; + struct wbsd_host *host; + + mmc = dev_get_drvdata(dev); + if (!mmc) + return; + + host = mmc_priv(mmc); + BUG_ON(host == NULL); + + del_timer_sync(&host->ignore_timer); + + mmc_free_host(mmc); + + dev_set_drvdata(dev, NULL); +} + +/* + * Scan for known chip id:s + */ + +static int wbsd_scan(struct wbsd_host *host) +{ + int i, j, k; + int id; + + /* + * Iterate through all ports, all codes to + * find hardware that is in our known list. + */ + for (i = 0; i < ARRAY_SIZE(config_ports); i++) { + if (!request_region(config_ports[i], 2, DRIVER_NAME)) + continue; + + for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) { + id = 0xFFFF; + + host->config = config_ports[i]; + host->unlock_code = unlock_codes[j]; + + wbsd_unlock_config(host); + + outb(WBSD_CONF_ID_HI, config_ports[i]); + id = inb(config_ports[i] + 1) << 8; + + outb(WBSD_CONF_ID_LO, config_ports[i]); + id |= inb(config_ports[i] + 1); + + wbsd_lock_config(host); + + for (k = 0; k < ARRAY_SIZE(valid_ids); k++) { + if (id == valid_ids[k]) { + host->chip_id = id; + + return 0; + } + } + + if (id != 0xFFFF) { + DBG("Unknown hardware (id %x) found at %x\n", + id, config_ports[i]); + } + } + + release_region(config_ports[i], 2); + } + + host->config = 0; + host->unlock_code = 0; + + return -ENODEV; +} + +/* + * Allocate/free io port ranges + */ + +static int wbsd_request_region(struct wbsd_host *host, int base) +{ + if (base & 0x7) + return -EINVAL; + + if (!request_region(base, 8, DRIVER_NAME)) + return -EIO; + + host->base = base; + + return 0; +} + +static void wbsd_release_regions(struct wbsd_host *host) +{ + if (host->base) + release_region(host->base, 8); + + host->base = 0; + + if (host->config) + release_region(host->config, 2); + + host->config = 0; +} + +/* + * Allocate/free DMA port and buffer + */ + +static void wbsd_request_dma(struct wbsd_host *host, int dma) +{ + if (dma < 0) + return; + + if (request_dma(dma, DRIVER_NAME)) + goto err; + + /* + * We need to allocate a special buffer in + * order for ISA to be able to DMA to it. + */ + host->dma_buffer = kmalloc(WBSD_DMA_SIZE, + GFP_NOIO | GFP_DMA | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (!host->dma_buffer) + goto free; + + /* + * Translate the address to a physical address. + */ + host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer, + WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mmc_dev(host->mmc), host->dma_addr)) + goto kfree; + + /* + * ISA DMA must be aligned on a 64k basis. + */ + if ((host->dma_addr & 0xffff) != 0) + goto unmap; + /* + * ISA cannot access memory above 16 MB. 
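+	 *
+	 * The classic ISA DMA controller only drives 24 address
+	 * lines, so 0x1000000 (16 MiB) is the first physical
+	 * address it cannot reach.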
+ */ + else if (host->dma_addr >= 0x1000000) + goto unmap; + + host->dma = dma; + + return; + +unmap: + /* + * If we've gotten here then there is some kind of alignment bug + */ + BUG_ON(1); + + dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, + WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); + host->dma_addr = 0; + +kfree: + kfree(host->dma_buffer); + host->dma_buffer = NULL; + +free: + free_dma(dma); + +err: + pr_warn(DRIVER_NAME ": Unable to allocate DMA %d - falling back on FIFO\n", + dma); +} + +static void wbsd_release_dma(struct wbsd_host *host) +{ + /* + * host->dma_addr is valid here iff host->dma_buffer is not NULL. + */ + if (host->dma_buffer) { + dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, + WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); + kfree(host->dma_buffer); + } + if (host->dma >= 0) + free_dma(host->dma); + + host->dma = -1; + host->dma_buffer = NULL; + host->dma_addr = 0; +} + +/* + * Allocate/free IRQ. + */ + +static int wbsd_request_irq(struct wbsd_host *host, int irq) +{ + int ret; + + /* + * Set up tasklets. Must be done before requesting interrupt. + */ + tasklet_init(&host->card_tasklet, wbsd_tasklet_card, + (unsigned long)host); + tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, + (unsigned long)host); + tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, + (unsigned long)host); + tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, + (unsigned long)host); + tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, + (unsigned long)host); + + /* + * Allocate interrupt. + */ + ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host); + if (ret) + return ret; + + host->irq = irq; + + return 0; +} + +static void wbsd_release_irq(struct wbsd_host *host) +{ + if (!host->irq) + return; + + free_irq(host->irq, host); + + host->irq = 0; + + tasklet_kill(&host->card_tasklet); + tasklet_kill(&host->fifo_tasklet); + tasklet_kill(&host->crc_tasklet); + tasklet_kill(&host->timeout_tasklet); + tasklet_kill(&host->finish_tasklet); +} + +/* + * Allocate all resources for the host. + */ + +static int wbsd_request_resources(struct wbsd_host *host, + int base, int irq, int dma) +{ + int ret; + + /* + * Allocate I/O ports. + */ + ret = wbsd_request_region(host, base); + if (ret) + return ret; + + /* + * Allocate interrupt. + */ + ret = wbsd_request_irq(host, irq); + if (ret) + return ret; + + /* + * Allocate DMA. + */ + wbsd_request_dma(host, dma); + + return 0; +} + +/* + * Release all resources for the host. + */ + +static void wbsd_release_resources(struct wbsd_host *host) +{ + wbsd_release_dma(host); + wbsd_release_irq(host); + wbsd_release_regions(host); +} + +/* + * Configure the resources the chip should use. + */ + +static void wbsd_chip_config(struct wbsd_host *host) +{ + wbsd_unlock_config(host); + + /* + * Reset the chip. + */ + wbsd_write_config(host, WBSD_CONF_SWRST, 1); + wbsd_write_config(host, WBSD_CONF_SWRST, 0); + + /* + * Select SD/MMC function. + */ + wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); + + /* + * Set up card detection. + */ + wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11); + + /* + * Configure chip + */ + wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8); + wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff); + + wbsd_write_config(host, WBSD_CONF_IRQ, host->irq); + + if (host->dma >= 0) + wbsd_write_config(host, WBSD_CONF_DRQ, host->dma); + + /* + * Enable and power up chip. 
+ */ + wbsd_write_config(host, WBSD_CONF_ENABLE, 1); + wbsd_write_config(host, WBSD_CONF_POWER, 0x20); + + wbsd_lock_config(host); +} + +/* + * Check that configured resources are correct. + */ + +static int wbsd_chip_validate(struct wbsd_host *host) +{ + int base, irq, dma; + + wbsd_unlock_config(host); + + /* + * Select SD/MMC function. + */ + wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); + + /* + * Read configuration. + */ + base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8; + base |= wbsd_read_config(host, WBSD_CONF_PORT_LO); + + irq = wbsd_read_config(host, WBSD_CONF_IRQ); + + dma = wbsd_read_config(host, WBSD_CONF_DRQ); + + wbsd_lock_config(host); + + /* + * Validate against given configuration. + */ + if (base != host->base) + return 0; + if (irq != host->irq) + return 0; + if ((dma != host->dma) && (host->dma != -1)) + return 0; + + return 1; +} + +/* + * Powers down the SD function + */ + +static void wbsd_chip_poweroff(struct wbsd_host *host) +{ + wbsd_unlock_config(host); + + wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD); + wbsd_write_config(host, WBSD_CONF_ENABLE, 0); + + wbsd_lock_config(host); +} + +/*****************************************************************************\ + * * + * Devices setup and shutdown * + * * +\*****************************************************************************/ + +static int wbsd_init(struct device *dev, int base, int irq, int dma, + int pnp) +{ + struct wbsd_host *host = NULL; + struct mmc_host *mmc = NULL; + int ret; + + ret = wbsd_alloc_mmc(dev); + if (ret) + return ret; + + mmc = dev_get_drvdata(dev); + host = mmc_priv(mmc); + + /* + * Scan for hardware. + */ + ret = wbsd_scan(host); + if (ret) { + if (pnp && (ret == -ENODEV)) { + pr_warn(DRIVER_NAME ": Unable to confirm device presence - you may experience lock-ups\n"); + } else { + wbsd_free_mmc(dev); + return ret; + } + } + + /* + * Request resources. + */ + ret = wbsd_request_resources(host, base, irq, dma); + if (ret) { + wbsd_release_resources(host); + wbsd_free_mmc(dev); + return ret; + } + + /* + * See if chip needs to be configured. + */ + if (pnp) { + if ((host->config != 0) && !wbsd_chip_validate(host)) { + pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n"); + wbsd_chip_config(host); + } + } else + wbsd_chip_config(host); + + /* + * Power Management stuff. No idea how this works. + * Not tested. + */ +#ifdef CONFIG_PM + if (host->config) { + wbsd_unlock_config(host); + wbsd_write_config(host, WBSD_CONF_PME, 0xA0); + wbsd_lock_config(host); + } +#endif + /* + * Allow device to initialise itself properly. + */ + mdelay(5); + + /* + * Reset the chip into a known state. + */ + wbsd_init_device(host); + + ret = mmc_add_host(mmc); + if (ret) { + if (!pnp) + wbsd_chip_poweroff(host); + + wbsd_release_resources(host); + wbsd_free_mmc(dev); + return ret; + } + + pr_info("%s: W83L51xD", mmc_hostname(mmc)); + if (host->chip_id != 0) + printk(" id %x", (int)host->chip_id); + printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); + if (host->dma >= 0) + printk(" dma %d", (int)host->dma); + else + printk(" FIFO"); + if (pnp) + printk(" PnP"); + printk("\n"); + + return 0; +} + +static void wbsd_shutdown(struct device *dev, int pnp) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct wbsd_host *host; + + if (!mmc) + return; + + host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + /* + * Power down the SD/MMC function. 
+ */ + if (!pnp) + wbsd_chip_poweroff(host); + + wbsd_release_resources(host); + + wbsd_free_mmc(dev); +} + +/* + * Non-PnP + */ + +static int wbsd_probe(struct platform_device *dev) +{ + /* Use the module parameters for resources */ + return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); +} + +static int wbsd_remove(struct platform_device *dev) +{ + wbsd_shutdown(&dev->dev, 0); + + return 0; +} + +/* + * PnP + */ + +#ifdef CONFIG_PNP + +static int +wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) +{ + int io, irq, dma; + + /* + * Get resources from PnP layer. + */ + io = pnp_port_start(pnpdev, 0); + irq = pnp_irq(pnpdev, 0); + if (pnp_dma_valid(pnpdev, 0)) + dma = pnp_dma(pnpdev, 0); + else + dma = -1; + + DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma); + + return wbsd_init(&pnpdev->dev, io, irq, dma, 1); +} + +static void wbsd_pnp_remove(struct pnp_dev *dev) +{ + wbsd_shutdown(&dev->dev, 1); +} + +#endif /* CONFIG_PNP */ + +/* + * Power management + */ + +#ifdef CONFIG_PM + +static int wbsd_platform_suspend(struct platform_device *dev, + pm_message_t state) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + struct wbsd_host *host; + + if (mmc == NULL) + return 0; + + DBGF("Suspending...\n"); + + host = mmc_priv(mmc); + + wbsd_chip_poweroff(host); + return 0; +} + +static int wbsd_platform_resume(struct platform_device *dev) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + struct wbsd_host *host; + + if (mmc == NULL) + return 0; + + DBGF("Resuming...\n"); + + host = mmc_priv(mmc); + + wbsd_chip_config(host); + + /* + * Allow device to initialise itself properly. + */ + mdelay(5); + + wbsd_init_device(host); + return 0; +} + +#ifdef CONFIG_PNP + +static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) +{ + struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); + + if (mmc == NULL) + return 0; + + DBGF("Suspending...\n"); + return 0; +} + +static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) +{ + struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); + struct wbsd_host *host; + + if (mmc == NULL) + return 0; + + DBGF("Resuming...\n"); + + host = mmc_priv(mmc); + + /* + * See if chip needs to be configured. + */ + if (host->config != 0) { + if (!wbsd_chip_validate(host)) { + pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n"); + wbsd_chip_config(host); + } + } + + /* + * Allow device to initialise itself properly. 
+ */ + mdelay(5); + + wbsd_init_device(host); + return 0; +} + +#endif /* CONFIG_PNP */ + +#else /* CONFIG_PM */ + +#define wbsd_platform_suspend NULL +#define wbsd_platform_resume NULL + +#define wbsd_pnp_suspend NULL +#define wbsd_pnp_resume NULL + +#endif /* CONFIG_PM */ + +static struct platform_device *wbsd_device; + +static struct platform_driver wbsd_driver = { + .probe = wbsd_probe, + .remove = wbsd_remove, + + .suspend = wbsd_platform_suspend, + .resume = wbsd_platform_resume, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +#ifdef CONFIG_PNP + +static struct pnp_driver wbsd_pnp_driver = { + .name = DRIVER_NAME, + .id_table = pnp_dev_table, + .probe = wbsd_pnp_probe, + .remove = wbsd_pnp_remove, + + .suspend = wbsd_pnp_suspend, + .resume = wbsd_pnp_resume, +}; + +#endif /* CONFIG_PNP */ + +/* + * Module loading/unloading + */ + +static int __init wbsd_drv_init(void) +{ + int result; + + pr_info(DRIVER_NAME + ": Winbond W83L51xD SD/MMC card interface driver\n"); + pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); + +#ifdef CONFIG_PNP + + if (!param_nopnp) { + result = pnp_register_driver(&wbsd_pnp_driver); + if (result < 0) + return result; + } +#endif /* CONFIG_PNP */ + + if (param_nopnp) { + result = platform_driver_register(&wbsd_driver); + if (result < 0) + return result; + + wbsd_device = platform_device_alloc(DRIVER_NAME, -1); + if (!wbsd_device) { + platform_driver_unregister(&wbsd_driver); + return -ENOMEM; + } + + result = platform_device_add(wbsd_device); + if (result) { + platform_device_put(wbsd_device); + platform_driver_unregister(&wbsd_driver); + return result; + } + } + + return 0; +} + +static void __exit wbsd_drv_exit(void) +{ +#ifdef CONFIG_PNP + + if (!param_nopnp) + pnp_unregister_driver(&wbsd_pnp_driver); + +#endif /* CONFIG_PNP */ + + if (param_nopnp) { + platform_device_unregister(wbsd_device); + + platform_driver_unregister(&wbsd_driver); + } + + DBG("unloaded\n"); +} + +module_init(wbsd_drv_init); +module_exit(wbsd_drv_exit); +#ifdef CONFIG_PNP +module_param_hw_named(nopnp, param_nopnp, uint, other, 0444); +#endif +module_param_hw_named(io, param_io, uint, ioport, 0444); +module_param_hw_named(irq, param_irq, uint, irq, 0444); +module_param_hw_named(dma, param_dma, int, dma, 0444); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pierre Ossman "); +MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); + +#ifdef CONFIG_PNP +MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)"); +#endif +MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)"); +MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)"); +MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)"); diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h new file mode 100644 index 000000000..be30b4d8c --- /dev/null +++ b/drivers/mmc/host/wbsd.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * linux/drivers/mmc/host/wbsd.h - Winbond W83L51xD SD/MMC driver + * + * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved. 
+ */ + +#define LOCK_CODE 0xAA + +#define WBSD_CONF_SWRST 0x02 +#define WBSD_CONF_DEVICE 0x07 +#define WBSD_CONF_ID_HI 0x20 +#define WBSD_CONF_ID_LO 0x21 +#define WBSD_CONF_POWER 0x22 +#define WBSD_CONF_PME 0x23 +#define WBSD_CONF_PMES 0x24 + +#define WBSD_CONF_ENABLE 0x30 +#define WBSD_CONF_PORT_HI 0x60 +#define WBSD_CONF_PORT_LO 0x61 +#define WBSD_CONF_IRQ 0x70 +#define WBSD_CONF_DRQ 0x74 + +#define WBSD_CONF_PINS 0xF0 + +#define DEVICE_SD 0x03 + +#define WBSD_PINS_DAT3_HI 0x20 +#define WBSD_PINS_DAT3_OUT 0x10 +#define WBSD_PINS_GP11_HI 0x04 +#define WBSD_PINS_DETECT_GP11 0x02 +#define WBSD_PINS_DETECT_DAT3 0x01 + +#define WBSD_CMDR 0x00 +#define WBSD_DFR 0x01 +#define WBSD_EIR 0x02 +#define WBSD_ISR 0x03 +#define WBSD_FSR 0x04 +#define WBSD_IDXR 0x05 +#define WBSD_DATAR 0x06 +#define WBSD_CSR 0x07 + +#define WBSD_EINT_CARD 0x40 +#define WBSD_EINT_FIFO_THRE 0x20 +#define WBSD_EINT_CRC 0x10 +#define WBSD_EINT_TIMEOUT 0x08 +#define WBSD_EINT_PROGEND 0x04 +#define WBSD_EINT_BUSYEND 0x02 +#define WBSD_EINT_TC 0x01 + +#define WBSD_INT_PENDING 0x80 +#define WBSD_INT_CARD 0x40 +#define WBSD_INT_FIFO_THRE 0x20 +#define WBSD_INT_CRC 0x10 +#define WBSD_INT_TIMEOUT 0x08 +#define WBSD_INT_PROGEND 0x04 +#define WBSD_INT_BUSYEND 0x02 +#define WBSD_INT_TC 0x01 + +#define WBSD_FIFO_EMPTY 0x80 +#define WBSD_FIFO_FULL 0x40 +#define WBSD_FIFO_EMTHRE 0x20 +#define WBSD_FIFO_FUTHRE 0x10 +#define WBSD_FIFO_SZMASK 0x0F + +#define WBSD_MSLED 0x20 +#define WBSD_POWER_N 0x10 +#define WBSD_WRPT 0x04 +#define WBSD_CARDPRESENT 0x01 + +#define WBSD_IDX_CLK 0x01 +#define WBSD_IDX_PBSMSB 0x02 +#define WBSD_IDX_TAAC 0x03 +#define WBSD_IDX_NSAC 0x04 +#define WBSD_IDX_PBSLSB 0x05 +#define WBSD_IDX_SETUP 0x06 +#define WBSD_IDX_DMA 0x07 +#define WBSD_IDX_FIFOEN 0x08 +#define WBSD_IDX_STATUS 0x10 +#define WBSD_IDX_RSPLEN 0x1E +#define WBSD_IDX_RESP0 0x1F +#define WBSD_IDX_RESP1 0x20 +#define WBSD_IDX_RESP2 0x21 +#define WBSD_IDX_RESP3 0x22 +#define WBSD_IDX_RESP4 0x23 +#define WBSD_IDX_RESP5 0x24 +#define WBSD_IDX_RESP6 0x25 +#define WBSD_IDX_RESP7 0x26 +#define WBSD_IDX_RESP8 0x27 +#define WBSD_IDX_RESP9 0x28 +#define WBSD_IDX_RESP10 0x29 +#define WBSD_IDX_RESP11 0x2A +#define WBSD_IDX_RESP12 0x2B +#define WBSD_IDX_RESP13 0x2C +#define WBSD_IDX_RESP14 0x2D +#define WBSD_IDX_RESP15 0x2E +#define WBSD_IDX_RESP16 0x2F +#define WBSD_IDX_CRCSTATUS 0x30 +#define WBSD_IDX_ISR 0x3F + +#define WBSD_CLK_375K 0x00 +#define WBSD_CLK_12M 0x01 +#define WBSD_CLK_16M 0x02 +#define WBSD_CLK_24M 0x03 + +#define WBSD_DATA_WIDTH 0x01 + +#define WBSD_DAT3_H 0x08 +#define WBSD_FIFO_RESET 0x04 +#define WBSD_SOFT_RESET 0x02 +#define WBSD_INC_INDEX 0x01 + +#define WBSD_DMA_SINGLE 0x02 +#define WBSD_DMA_ENABLE 0x01 + +#define WBSD_FIFOEN_EMPTY 0x20 +#define WBSD_FIFOEN_FULL 0x10 +#define WBSD_FIFO_THREMASK 0x0F + +#define WBSD_BLOCK_READ 0x80 +#define WBSD_BLOCK_WRITE 0x40 +#define WBSD_BUSY 0x20 +#define WBSD_CARDTRAFFIC 0x04 +#define WBSD_SENDCMD 0x02 +#define WBSD_RECVRES 0x01 + +#define WBSD_RSP_SHORT 0x00 +#define WBSD_RSP_LONG 0x01 + +#define WBSD_CRC_MASK 0x1F +#define WBSD_CRC_OK 0x05 /* S010E (00101) */ +#define WBSD_CRC_FAIL 0x0B /* S101E (01011) */ + +#define WBSD_DMA_SIZE 65536 + +struct wbsd_host +{ + struct mmc_host* mmc; /* MMC structure */ + + spinlock_t lock; /* Mutex */ + + int flags; /* Driver states */ + +#define WBSD_FCARD_PRESENT (1<<0) /* Card is present */ +#define WBSD_FIGNORE_DETECT (1<<1) /* Ignore card detection */ + + struct mmc_request* mrq; /* Current request */ + + u8 isr; /* Accumulated ISR */ + + struct scatterlist* 
cur_sg; /* Current SG entry */ + unsigned int num_sg; /* Number of entries left */ + + unsigned int offset; /* Offset into current entry */ + unsigned int remain; /* Data left in current entry */ + + char* dma_buffer; /* ISA DMA buffer */ + dma_addr_t dma_addr; /* Physical address of the DMA buffer */ + + int firsterr; /* See fifo functions */ + + u8 clk; /* Current clock speed */ + unsigned char bus_width; /* Current bus width */ + + int config; /* Config port */ + u8 unlock_code; /* Code to unlock config */ + + int chip_id; /* ID of controller */ + + int base; /* I/O port base */ + int irq; /* Interrupt */ + int dma; /* DMA channel */ + + struct tasklet_struct card_tasklet; /* Tasklet structures */ + struct tasklet_struct fifo_tasklet; + struct tasklet_struct crc_tasklet; + struct tasklet_struct timeout_tasklet; + struct tasklet_struct finish_tasklet; + + struct timer_list ignore_timer; /* Ignore detection timer */ +}; diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c new file mode 100644 index 000000000..393319548 --- /dev/null +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -0,0 +1,1011 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * WM8505/WM8650 SD/MMC Host Controller + * + * Copyright (C) 2010 Tony Prisk + * Copyright (C) 2008 WonderMedia Technologies, Inc. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/interrupt.h> + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_device.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> + +#include <asm/byteorder.h> + + +#define DRIVER_NAME "wmt-sdhc" + + +/* MMC/SD controller registers */ +#define SDMMC_CTLR 0x00 +#define SDMMC_CMD 0x01 +#define SDMMC_RSPTYPE 0x02 +#define SDMMC_ARG 0x04 +#define SDMMC_BUSMODE 0x08 +#define SDMMC_BLKLEN 0x0C +#define SDMMC_BLKCNT 0x0E +#define SDMMC_RSP 0x10 +#define SDMMC_CBCR 0x20 +#define SDMMC_INTMASK0 0x24 +#define SDMMC_INTMASK1 0x25 +#define SDMMC_STS0 0x28 +#define SDMMC_STS1 0x29 +#define SDMMC_STS2 0x2A +#define SDMMC_STS3 0x2B +#define SDMMC_RSPTIMEOUT 0x2C +#define SDMMC_CLK 0x30 /* VT8500 only */ +#define SDMMC_EXTCTRL 0x34 +#define SDMMC_SBLKLEN 0x38 +#define SDMMC_DMATIMEOUT 0x3C + + +/* SDMMC_CTLR bit fields */ +#define CTLR_CMD_START 0x01 +#define CTLR_CMD_WRITE 0x04 +#define CTLR_FIFO_RESET 0x08 + +/* SDMMC_BUSMODE bit fields */ +#define BM_SPI_MODE 0x01 +#define BM_FOURBIT_MODE 0x02 +#define BM_EIGHTBIT_MODE 0x04 +#define BM_SD_OFF 0x10 +#define BM_SPI_CS 0x20 +#define BM_SD_POWER 0x40 +#define BM_SOFT_RESET 0x80 + +/* SDMMC_BLKLEN bit fields */ +#define BLKL_CRCERR_ABORT 0x0800 +#define BLKL_CD_POL_HIGH 0x1000 +#define BLKL_GPI_CD 0x2000 +#define BLKL_DATA3_CD 0x4000 +#define BLKL_INT_ENABLE 0x8000 + +/* SDMMC_INTMASK0 bit fields */ +#define INT0_MBLK_TRAN_DONE_INT_EN 0x10 +#define INT0_BLK_TRAN_DONE_INT_EN 0x20 +#define INT0_CD_INT_EN 0x40 +#define INT0_DI_INT_EN 0x80 + +/* SDMMC_INTMASK1 bit fields */ +#define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02 +#define INT1_CMD_RES_TOUT_INT_EN 0x04 +#define INT1_MBLK_AUTO_STOP_INT_EN 0x08 +#define INT1_DATA_TOUT_INT_EN 0x10 +#define INT1_RESCRC_ERR_INT_EN 0x20 +#define INT1_RCRC_ERR_INT_EN 0x40 +#define INT1_WCRC_ERR_INT_EN 0x80 + +/* SDMMC_STS0 bit fields */ +#define STS0_WRITE_PROTECT 0x02 +#define STS0_CD_DATA3 0x04 +#define STS0_CD_GPI 0x08 +#define STS0_MBLK_DONE 0x10 +#define STS0_BLK_DONE 0x20 +#define STS0_CARD_DETECT 0x40 +#define STS0_DEVICE_INS 0x80 + +/* SDMMC_STS1 bit fields */ +#define STS1_SDIO_INT 0x01 +#define STS1_CMDRSP_DONE 0x02 +#define STS1_RSP_TIMEOUT 0x04 +#define STS1_AUTOSTOP_DONE 0x08 +#define STS1_DATA_TIMEOUT 0x10
+#define STS1_RSP_CRC_ERR 0x20 +#define STS1_RCRC_ERR 0x40 +#define STS1_WCRC_ERR 0x80 + +/* SDMMC_STS2 bit fields */ +#define STS2_CMD_RES_BUSY 0x10 +#define STS2_DATARSP_BUSY 0x20 +#define STS2_DIS_FORCECLK 0x80 + +/* SDMMC_EXTCTRL bit fields */ +#define EXT_EIGHTBIT 0x04 + +/* MMC/SD DMA Controller Registers */ +#define SDDMA_GCR 0x100 +#define SDDMA_IER 0x104 +#define SDDMA_ISR 0x108 +#define SDDMA_DESPR 0x10C +#define SDDMA_RBR 0x110 +#define SDDMA_DAR 0x114 +#define SDDMA_BAR 0x118 +#define SDDMA_CPR 0x11C +#define SDDMA_CCR 0x120 + + +/* SDDMA_GCR bit fields */ +#define DMA_GCR_DMA_EN 0x00000001 +#define DMA_GCR_SOFT_RESET 0x00000100 + +/* SDDMA_IER bit fields */ +#define DMA_IER_INT_EN 0x00000001 + +/* SDDMA_ISR bit fields */ +#define DMA_ISR_INT_STS 0x00000001 + +/* SDDMA_RBR bit fields */ +#define DMA_RBR_FORMAT 0x40000000 +#define DMA_RBR_END 0x80000000 + +/* SDDMA_CCR bit fields */ +#define DMA_CCR_RUN 0x00000080 +#define DMA_CCR_IF_TO_PERIPHERAL 0x00000000 +#define DMA_CCR_PERIPHERAL_TO_IF 0x00400000 + +/* SDDMA_CCR event status */ +#define DMA_CCR_EVT_NO_STATUS 0x00000000 +#define DMA_CCR_EVT_UNDERRUN 0x00000001 +#define DMA_CCR_EVT_OVERRUN 0x00000002 +#define DMA_CCR_EVT_DESP_READ 0x00000003 +#define DMA_CCR_EVT_DATA_RW 0x00000004 +#define DMA_CCR_EVT_EARLY_END 0x00000005 +#define DMA_CCR_EVT_SUCCESS 0x0000000F + +#define PDMA_READ 0x00 +#define PDMA_WRITE 0x01 + +#define WMT_SD_POWER_OFF 0 +#define WMT_SD_POWER_ON 1 + +struct wmt_dma_descriptor { + u32 flags; + u32 data_buffer_addr; + u32 branch_addr; + u32 reserved1; +}; + +struct wmt_mci_caps { + unsigned int f_min; + unsigned int f_max; + u32 ocr_avail; + u32 caps; + u32 max_seg_size; + u32 max_segs; + u32 max_blk_size; +}; + +struct wmt_mci_priv { + struct mmc_host *mmc; + void __iomem *sdmmc_base; + + int irq_regular; + int irq_dma; + + void *dma_desc_buffer; + dma_addr_t dma_desc_device_addr; + + struct completion cmdcomp; + struct completion datacomp; + + struct completion *comp_cmd; + struct completion *comp_dma; + + struct mmc_request *req; + struct mmc_command *cmd; + + struct clk *clk_sdmmc; + struct device *dev; + + u8 power_inverted; + u8 cd_inverted; +}; + +static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable) +{ + u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + + if (enable ^ priv->power_inverted) + reg_tmp &= ~BM_SD_OFF; + else + reg_tmp |= BM_SD_OFF; + + writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE); +} + +static void wmt_mci_read_response(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv; + int idx1, idx2; + u8 tmp_resp; + u32 response; + + priv = mmc_priv(mmc); + + for (idx1 = 0; idx1 < 4; idx1++) { + response = 0; + for (idx2 = 0; idx2 < 4; idx2++) { + if ((idx1 == 3) && (idx2 == 3)) + tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP); + else + tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP + + (idx1*4) + idx2 + 1); + response |= (tmp_resp << (idx2 * 8)); + } + priv->cmd->resp[idx1] = cpu_to_be32(response); + } +} + +static void wmt_mci_start_command(struct wmt_mci_priv *priv) +{ + u32 reg_tmp; + + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR); +} + +static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype, + u32 arg, u8 rsptype) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + /* write command, arg, resptype registers */ + writeb(command, priv->sdmmc_base + SDMMC_CMD); + writel(arg, priv->sdmmc_base + SDMMC_ARG); + writeb(rsptype, priv->sdmmc_base + 
SDMMC_RSPTYPE); + + /* reset response FIFO */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); + + /* ensure clock enabled - VT3465 */ + wmt_set_sd_power(priv, WMT_SD_POWER_ON); + + /* clear status bits */ + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS2); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS3); + + /* set command type */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb((reg_tmp & 0x0F) | (cmdtype << 4), + priv->sdmmc_base + SDMMC_CTLR); + + return 0; +} + +static void wmt_mci_disable_dma(struct wmt_mci_priv *priv) +{ + writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR); + writel(0, priv->sdmmc_base + SDDMA_IER); +} + +static void wmt_complete_data_request(struct wmt_mci_priv *priv) +{ + struct mmc_request *req; + req = priv->req; + + req->data->bytes_xfered = req->data->blksz * req->data->blocks; + + /* unmap the DMA pages used for write data */ + if (req->data->flags & MMC_DATA_WRITE) + dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, + req->data->sg_len, DMA_TO_DEVICE); + else + dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, + req->data->sg_len, DMA_FROM_DEVICE); + + /* Check if the DMA ISR returned a data error */ + if ((req->cmd->error) || (req->data->error)) + mmc_request_done(priv->mmc, req); + else { + wmt_mci_read_response(priv->mmc); + if (!req->data->stop) { + /* single-block read/write requests end here */ + mmc_request_done(priv->mmc, req); + } else { + /* + * We change the priv->cmd variable so the response is + * stored in the stop struct rather than in the original + * calling command struct. + */ + priv->comp_cmd = &priv->cmdcomp; + init_completion(priv->comp_cmd); + priv->cmd = req->data->stop; + wmt_mci_send_command(priv->mmc, req->data->stop->opcode, + 7, req->data->stop->arg, 9); + wmt_mci_start_command(priv); + } + } +} + +static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data) +{ + struct wmt_mci_priv *priv; + + int status; + + priv = (struct wmt_mci_priv *)data; + + status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F; + + if (status != DMA_CCR_EVT_SUCCESS) { + dev_err(priv->dev, "DMA Error: Status = %d\n", status); + priv->req->data->error = -ETIMEDOUT; + complete(priv->comp_dma); + return IRQ_HANDLED; + } + + priv->req->data->error = 0; + + wmt_mci_disable_dma(priv); + + complete(priv->comp_dma); + + if (priv->comp_cmd) { + if (completion_done(priv->comp_cmd)) { + /* + * If the command (regular) interrupt has already + * completed, finish off the request; otherwise we wait + * for the command interrupt and finish from there.
+ */ + wmt_complete_data_request(priv); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data) +{ + struct wmt_mci_priv *priv; + u32 status0; + u32 status1; + u32 status2; + u32 reg_tmp; + int cmd_done; + + priv = (struct wmt_mci_priv *)data; + cmd_done = 0; + status0 = readb(priv->sdmmc_base + SDMMC_STS0); + status1 = readb(priv->sdmmc_base + SDMMC_STS1); + status2 = readb(priv->sdmmc_base + SDMMC_STS2); + + /* Check for card insertion */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0); + if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) { + mmc_detect_change(priv->mmc, 0); + if (priv->cmd) + priv->cmd->error = -ETIMEDOUT; + if (priv->comp_cmd) + complete(priv->comp_cmd); + if (priv->comp_dma) { + wmt_mci_disable_dma(priv); + complete(priv->comp_dma); + } + writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0); + return IRQ_HANDLED; + } + + if ((!priv->req->data) || + ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) { + /* handle non-data & stop_transmission requests */ + if (status1 & STS1_CMDRSP_DONE) { + priv->cmd->error = 0; + cmd_done = 1; + } else if ((status1 & STS1_RSP_TIMEOUT) || + (status1 & STS1_DATA_TIMEOUT)) { + priv->cmd->error = -ETIMEDOUT; + cmd_done = 1; + } + + if (cmd_done) { + priv->comp_cmd = NULL; + + if (!priv->cmd->error) + wmt_mci_read_response(priv->mmc); + + priv->cmd = NULL; + + mmc_request_done(priv->mmc, priv->req); + } + } else { + /* handle data requests */ + if (status1 & STS1_CMDRSP_DONE) { + if (priv->cmd) + priv->cmd->error = 0; + if (priv->comp_cmd) + complete(priv->comp_cmd); + } + + if ((status1 & STS1_RSP_TIMEOUT) || + (status1 & STS1_DATA_TIMEOUT)) { + if (priv->cmd) + priv->cmd->error = -ETIMEDOUT; + if (priv->comp_cmd) + complete(priv->comp_cmd); + if (priv->comp_dma) { + wmt_mci_disable_dma(priv); + complete(priv->comp_dma); + } + } + + if (priv->comp_dma) { + /* + * If the dma interrupt has already completed, finish + * off the request; otherwise we wait for the DMA + * interrupt and finish from there. 
+ */ + if (completion_done(priv->comp_dma)) + wmt_complete_data_request(priv); + } + } + + writeb(status0, priv->sdmmc_base + SDMMC_STS0); + writeb(status1, priv->sdmmc_base + SDMMC_STS1); + writeb(status2, priv->sdmmc_base + SDMMC_STS2); + + return IRQ_HANDLED; +} + +static void wmt_reset_hardware(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + /* reset controller */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); + + /* reset response FIFO */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); + + /* enable GPI pin to detect card */ + writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN); + + /* clear interrupt status */ + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + + /* setup interrupts */ + writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base + + SDMMC_INTMASK0); + writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN | + INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1); + + /* set the DMA timeout */ + writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT); + + /* enable automatic clock freezing */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2); + writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2); + + /* set a default clock speed of 400 kHz */ + clk_set_rate(priv->clk_sdmmc, 400000); +} + +static int wmt_dma_init(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv; + + priv = mmc_priv(mmc); + + writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR); + writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR); + if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0) + return 0; + else + return 1; +} + +static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc, + u16 req_count, u32 buffer_addr, u32 branch_addr, int end) +{ + desc->flags = 0x40000000 | req_count; /* DMA_RBR_FORMAT */ + if (end) + desc->flags |= 0x80000000; /* DMA_RBR_END */ + desc->data_buffer_addr = buffer_addr; + desc->branch_addr = branch_addr; +} + +static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + /* Enable DMA Interrupts */ + writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER); + + /* Write DMA Descriptor Pointer Register */ + writel(descaddr, priv->sdmmc_base + SDDMA_DESPR); + + writel(0x00, priv->sdmmc_base + SDDMA_CCR); + + if (dir == PDMA_WRITE) { + reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); + writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base + + SDDMA_CCR); + } else { + reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); + writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base + + SDDMA_CCR); + } +} + +static void wmt_dma_start(struct wmt_mci_priv *priv) +{ + u32 reg_tmp; + + reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); + writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR); +} + +static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct wmt_mci_priv *priv; + struct wmt_dma_descriptor *desc; + u8 command; + u8 cmdtype; + u32 arg; + u8 rsptype; + u32 reg_tmp; + + struct scatterlist *sg; + int i; + int sg_cnt; + int offset; + u32 dma_address; + int desc_cnt; + + priv = mmc_priv(mmc); + priv->req = req; + + /* + * Use the cmd variable to pass a pointer to the resp[] structure. + * This is required on multi-block requests to pass the pointer to the + * stop command. + */ + priv->cmd = req->cmd; + + command
= req->cmd->opcode; + arg = req->cmd->arg; + rsptype = mmc_resp_type(req->cmd); + cmdtype = 0; + + /* rsptype=7 only valid for SPI commands - should be =2 for SD */ + if (rsptype == 7) + rsptype = 2; + /* rsptype=21 is R1B, convert for controller */ + if (rsptype == 21) + rsptype = 9; + + if (!req->data) { + wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype); + wmt_mci_start_command(priv); + /* completion is now handled in the regular_isr() */ + } + if (req->data) { + priv->comp_cmd = &priv->cmdcomp; + init_completion(priv->comp_cmd); + + wmt_dma_init(mmc); + + /* set controller data length */ + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew((reg_tmp & 0xF800) | (req->data->blksz - 1), + priv->sdmmc_base + SDMMC_BLKLEN); + + /* set controller block count */ + writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT); + + desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer; + + if (req->data->flags & MMC_DATA_WRITE) { + sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, + req->data->sg_len, DMA_TO_DEVICE); + cmdtype = 1; + if (req->data->blocks > 1) + cmdtype = 3; + } else { + sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, + req->data->sg_len, DMA_FROM_DEVICE); + cmdtype = 2; + if (req->data->blocks > 1) + cmdtype = 4; + } + + dma_address = priv->dma_desc_device_addr + 16; + desc_cnt = 0; + + for_each_sg(req->data->sg, sg, sg_cnt, i) { + offset = 0; + while (offset < sg_dma_len(sg)) { + wmt_dma_init_descriptor(desc, req->data->blksz, + sg_dma_address(sg)+offset, + dma_address, 0); + desc++; + desc_cnt++; + offset += req->data->blksz; + dma_address += 16; + if (desc_cnt == req->data->blocks) + break; + } + } + desc--; + desc->flags |= 0x80000000; + + if (req->data->flags & MMC_DATA_WRITE) + wmt_dma_config(mmc, priv->dma_desc_device_addr, + PDMA_WRITE); + else + wmt_dma_config(mmc, priv->dma_desc_device_addr, + PDMA_READ); + + wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype); + + priv->comp_dma = &priv->datacomp; + init_completion(priv->comp_dma); + + wmt_dma_start(priv); + wmt_mci_start_command(priv); + } +} + +static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct wmt_mci_priv *priv; + u32 busmode, extctrl; + + priv = mmc_priv(mmc); + + if (ios->power_mode == MMC_POWER_UP) { + wmt_reset_hardware(mmc); + + wmt_set_sd_power(priv, WMT_SD_POWER_ON); + } + if (ios->power_mode == MMC_POWER_OFF) + wmt_set_sd_power(priv, WMT_SD_POWER_OFF); + + if (ios->clock != 0) + clk_set_rate(priv->clk_sdmmc, ios->clock); + + busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE); + extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL); + + busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE); + extctrl &= ~EXT_EIGHTBIT; + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + busmode |= BM_EIGHTBIT_MODE; + extctrl |= EXT_EIGHTBIT; + break; + case MMC_BUS_WIDTH_4: + busmode |= BM_FOURBIT_MODE; + break; + case MMC_BUS_WIDTH_1: + break; + } + + writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE); + writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL); +} + +static int wmt_mci_get_ro(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv = mmc_priv(mmc); + + return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT); +} + +static int wmt_mci_get_cd(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv = mmc_priv(mmc); + u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3; + + return !(cd ^ priv->cd_inverted); +} + +static const struct mmc_host_ops wmt_mci_ops = { + .request = wmt_mci_request, + .set_ios = wmt_mci_set_ios, + .get_ro = wmt_mci_get_ro, + 
.get_cd = wmt_mci_get_cd, +}; + +/* Controller capabilities */ +static struct wmt_mci_caps wm8505_caps = { + .f_min = 390425, + .f_max = 50000000, + .ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34, + .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SD_HIGHSPEED, + .max_seg_size = 65024, + .max_segs = 128, + .max_blk_size = 2048, +}; + +static const struct of_device_id wmt_mci_dt_ids[] = { + { .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps }, + { /* Sentinel */ }, +}; + +static int wmt_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct wmt_mci_priv *priv; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id = + of_match_device(wmt_mci_dt_ids, &pdev->dev); + const struct wmt_mci_caps *wmt_caps; + int ret; + int regular_irq, dma_irq; + + if (!of_id || !of_id->data) { + dev_err(&pdev->dev, "Controller capabilities data missing\n"); + return -EFAULT; + } + + wmt_caps = of_id->data; + + if (!np) { + dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n"); + return -EFAULT; + } + + regular_irq = irq_of_parse_and_map(np, 0); + dma_irq = irq_of_parse_and_map(np, 1); + + if (!regular_irq || !dma_irq) { + dev_err(&pdev->dev, "Getting IRQs failed!\n"); + ret = -ENXIO; + goto fail1; + } + + mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "Failed to allocate mmc_host\n"); + ret = -ENOMEM; + goto fail1; + } + + mmc->ops = &wmt_mci_ops; + mmc->f_min = wmt_caps->f_min; + mmc->f_max = wmt_caps->f_max; + mmc->ocr_avail = wmt_caps->ocr_avail; + mmc->caps = wmt_caps->caps; + + mmc->max_seg_size = wmt_caps->max_seg_size; + mmc->max_segs = wmt_caps->max_segs; + mmc->max_blk_size = wmt_caps->max_blk_size; + + mmc->max_req_size = (16*512*mmc->max_segs); + mmc->max_blk_count = mmc->max_req_size / 512; + + priv = mmc_priv(mmc); + priv->mmc = mmc; + priv->dev = &pdev->dev; + + priv->power_inverted = 0; + priv->cd_inverted = 0; + + if (of_get_property(np, "sdon-inverted", NULL)) + priv->power_inverted = 1; + if (of_get_property(np, "cd-inverted", NULL)) + priv->cd_inverted = 1; + + priv->sdmmc_base = of_iomap(np, 0); + if (!priv->sdmmc_base) { + dev_err(&pdev->dev, "Failed to map IO space\n"); + ret = -ENOMEM; + goto fail2; + } + + priv->irq_regular = regular_irq; + priv->irq_dma = dma_irq; + + ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv); + if (ret) { + dev_err(&pdev->dev, "Register regular IRQ fail\n"); + goto fail3; + } + + ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv); + if (ret) { + dev_err(&pdev->dev, "Register DMA IRQ fail\n"); + goto fail4; + } + + /* alloc some DMA buffers for descriptors/transfers */ + priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev, + mmc->max_blk_count * 16, + &priv->dma_desc_device_addr, + GFP_KERNEL); + if (!priv->dma_desc_buffer) { + dev_err(&pdev->dev, "DMA alloc fail\n"); + ret = -EPERM; + goto fail5; + } + + platform_set_drvdata(pdev, mmc); + + priv->clk_sdmmc = of_clk_get(np, 0); + if (IS_ERR(priv->clk_sdmmc)) { + dev_err(&pdev->dev, "Error getting clock\n"); + ret = PTR_ERR(priv->clk_sdmmc); + goto fail5_and_a_half; + } + + ret = clk_prepare_enable(priv->clk_sdmmc); + if (ret) + goto fail6; + + /* configure the controller to a known 'ready' state */ + wmt_reset_hardware(mmc); + + ret = mmc_add_host(mmc); + if (ret) + goto fail7; + + dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); + + return 0; +fail7: + clk_disable_unprepare(priv->clk_sdmmc); +fail6: + clk_put(priv->clk_sdmmc); +fail5_and_a_half: + 
dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16, + priv->dma_desc_buffer, priv->dma_desc_device_addr); +fail5: + free_irq(dma_irq, priv); +fail4: + free_irq(regular_irq, priv); +fail3: + iounmap(priv->sdmmc_base); +fail2: + mmc_free_host(mmc); +fail1: + return ret; +} + +static int wmt_mci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct wmt_mci_priv *priv; + struct resource *res; + u32 reg_tmp; + + mmc = platform_get_drvdata(pdev); + priv = mmc_priv(mmc); + + /* reset SD controller */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + + /* release the dma buffers */ + dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16, + priv->dma_desc_buffer, priv->dma_desc_device_addr); + + mmc_remove_host(mmc); + + free_irq(priv->irq_regular, priv); + free_irq(priv->irq_dma, priv); + + iounmap(priv->sdmmc_base); + + clk_disable_unprepare(priv->clk_sdmmc); + clk_put(priv->clk_sdmmc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + mmc_free_host(mmc); + + dev_info(&pdev->dev, "WMT MCI device removed\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int wmt_mci_suspend(struct device *dev) +{ + u32 reg_tmp; + struct mmc_host *mmc = dev_get_drvdata(dev); + struct wmt_mci_priv *priv; + + if (!mmc) + return 0; + + priv = mmc_priv(mmc); + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + + SDMMC_BUSMODE); + + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); + + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + + clk_disable(priv->clk_sdmmc); + return 0; +} + +static int wmt_mci_resume(struct device *dev) +{ + u32 reg_tmp; + struct mmc_host *mmc = dev_get_drvdata(dev); + struct wmt_mci_priv *priv; + + if (mmc) { + priv = mmc_priv(mmc); + clk_enable(priv->clk_sdmmc); + + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + + SDMMC_BUSMODE); + + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE), + priv->sdmmc_base + SDMMC_BLKLEN); + + reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0); + writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base + + SDMMC_INTMASK0); + + } + + return 0; +} + +static const struct dev_pm_ops wmt_mci_pm = { + .suspend = wmt_mci_suspend, + .resume = wmt_mci_resume, +}; + +#define wmt_mci_pm_ops (&wmt_mci_pm) + +#else /* !CONFIG_PM */ + +#define wmt_mci_pm_ops NULL + +#endif + +static struct platform_driver wmt_mci_driver = { + .probe = wmt_mci_probe, + .remove = wmt_mci_remove, + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = wmt_mci_pm_ops, + .of_match_table = wmt_mci_dt_ids, + }, +}; + +module_platform_driver(wmt_mci_driver); + +MODULE_DESCRIPTION("Wondermedia MMC/SD Driver"); +MODULE_AUTHOR("Tony Prisk"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids); -- cgit v1.2.3