Diffstat (limited to 'drivers/mmc/host')
-rw-r--r-- | drivers/mmc/host/mmc_spi.c | 2
-rw-r--r-- | drivers/mmc/host/mmci.c | 69
-rw-r--r-- | drivers/mmc/host/mmci.h | 2
-rw-r--r-- | drivers/mmc/host/mtk-sd.c | 166
-rw-r--r-- | drivers/mmc/host/omap.c | 48
-rw-r--r-- | drivers/mmc/host/rtsx_pci_sdmmc.c | 17
-rw-r--r-- | drivers/mmc/host/sdhci-brcmstb.c | 69
-rw-r--r-- | drivers/mmc/host/sdhci-msm.c | 16
-rw-r--r-- | drivers/mmc/host/sdhci-of-dwcmshc.c | 350
-rw-r--r-- | drivers/mmc/host/sdhci-omap.c | 2
-rw-r--r-- | drivers/mmc/host/sdhci-xenon.c | 31
-rw-r--r-- | drivers/mmc/host/sdhci-xenon.h | 3
-rw-r--r-- | drivers/mmc/host/sdhci.c | 3
-rw-r--r-- | drivers/mmc/host/sdhci.h | 1
-rw-r--r-- | drivers/mmc/host/sdhci_am654.c | 37
15 files changed, 704 insertions, 112 deletions
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 2a99ffb61f..b8dda8160c 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1157,7 +1157,7 @@ static int mmc_spi_probe(struct spi_device *spi) /* We rely on full duplex transfers, mostly to reduce * per-transfer overheads (by making fewer transfers). */ - if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) + if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) return -EINVAL; /* MMC and SD specs only seem to care that sampling is on the diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index e967cca7a1..b790c3c3c8 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -273,6 +273,7 @@ static struct variant_data variant_stm32_sdmmc = { .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .stm32_idmabsize_mask = GENMASK(12, 5), .stm32_idmabsize_align = BIT(5), + .supports_sdio_irq = true, .busy_timeout = true, .busy_detect = true, .busy_detect_flag = MCI_STM32_BUSYD0, @@ -300,6 +301,7 @@ static struct variant_data variant_stm32_sdmmcv2 = { .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .stm32_idmabsize_mask = GENMASK(16, 5), .stm32_idmabsize_align = BIT(5), + .supports_sdio_irq = true, .dma_lli = true, .busy_timeout = true, .busy_detect = true, @@ -328,6 +330,7 @@ static struct variant_data variant_stm32_sdmmcv3 = { .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .stm32_idmabsize_mask = GENMASK(16, 6), .stm32_idmabsize_align = BIT(6), + .supports_sdio_irq = true, .dma_lli = true, .busy_timeout = true, .busy_detect = true, @@ -421,8 +424,9 @@ void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) */ static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) { - /* Keep busy mode in DPSM if enabled */ - datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag; + /* Keep busy mode in DPSM and SDIO mask if enabled */ + datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag | + host->variant->datactrl_mask_sdio); if (host->datactrl_reg != datactrl) { host->datactrl_reg = datactrl; @@ -1762,6 +1766,25 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable) +{ + void __iomem *base = host->base; + u32 mask = readl_relaxed(base + MMCIMASK0); + + if (enable) + writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0); + else + writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0); +} + +static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status) +{ + if (status & MCI_ST_SDIOIT) { + mmci_write_sdio_irq_bit(host, 0); + sdio_signal_irq(host->mmc); + } +} + /* * Handle completion of command and data transfers. */ @@ -1806,6 +1829,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) mmci_data_irq(host, host->data, status); } + if (host->variant->supports_sdio_irq) + mmci_signal_sdio_irq(host, status); + /* * Busy detection has been handled by mmci_cmd_irq() above. * Clear the status bit to prevent polling in IRQ context. 
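The mmci hunks above wire SDIO card interrupts into the existing IRQ handler: when MCI_ST_SDIOIT is raised, the handler masks MCI_ST_SDIOITMASK and hands the event to the core with sdio_signal_irq(), which is only valid when the host advertises MMC_CAP2_SDIO_IRQ_NOTHREAD. A minimal sketch of that signalling path follows; struct example_host and the EXAMPLE_* register and bit names are placeholders, not mmci definitions.

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mmc/host.h>

#define EXAMPLE_STATUS		0x34	/* hypothetical status register offset */
#define EXAMPLE_MASK		0x3c	/* hypothetical interrupt mask register offset */
#define EXAMPLE_SDIO_IRQ	BIT(22)	/* hypothetical SDIO interrupt bit */

struct example_host {
	void __iomem *base;
	struct mmc_host *mmc;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_host *host = dev_id;
	u32 status = readl_relaxed(host->base + EXAMPLE_STATUS);

	if (status & EXAMPLE_SDIO_IRQ) {
		/* Mask the card interrupt so it cannot refire immediately ... */
		writel_relaxed(readl_relaxed(host->base + EXAMPLE_MASK) & ~EXAMPLE_SDIO_IRQ,
			       host->base + EXAMPLE_MASK);
		/* ... then let the core run the registered SDIO handlers; the
		 * core re-arms the interrupt later via ->ack_sdio_irq(). */
		sdio_signal_irq(host->mmc);
	}

	return IRQ_HANDLED;
}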
@@ -2042,6 +2068,35 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } +static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + + if (enable) + /* Keep the SDIO mode bit if SDIO irqs are enabled */ + pm_runtime_get_sync(mmc_dev(mmc)); + + spin_lock_irqsave(&host->lock, flags); + mmci_write_sdio_irq_bit(host, enable); + spin_unlock_irqrestore(&host->lock, flags); + + if (!enable) { + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); + } +} + +static void mmci_ack_sdio_irq(struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + mmci_write_sdio_irq_bit(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + static struct mmc_host_ops mmci_ops = { .request = mmci_request, .pre_req = mmci_pre_request, @@ -2317,6 +2372,16 @@ static int mmci_probe(struct amba_device *dev, mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; } + if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq; + mmci_ops.ack_sdio_irq = mmci_ack_sdio_irq; + + mmci_write_datactrlreg(host, + host->variant->datactrl_mask_sdio); + } + /* Variants with mandatory busy timeout in HW needs R1B responses. */ if (variant->busy_timeout) mmc->caps |= MMC_CAP_NEED_RSP_BUSY; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 34d9897c28..a5eb4ced4d 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -331,6 +331,7 @@ enum mmci_busy_state { * register. * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register * @dma_lli: true if variant has dma link list feature. + * @supports_sdio_irq: allow SD I/O card to interrupt the host * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size. * @dma_flow_controller: use peripheral as flow controller for DMA. 
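The probe hunk above installs enable_sdio_irq()/ack_sdio_irq() only when the variant supports it, and the enable path holds a runtime-PM reference for as long as SDIO IRQs stay enabled. A rough, hypothetical sketch of how such callbacks are advertised to the core is shown below; the example_* names are placeholders and the actual mask/unmask register accesses are elided.

#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>

/* Placeholder callbacks standing in for the driver's real mask helpers. */
static void example_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	if (enable)
		pm_runtime_get_sync(mmc_dev(mmc));	/* keep the host powered while armed */

	/* ... set or clear the controller's SDIO interrupt mask bit here ... */

	if (!enable) {
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void example_ack_sdio_irq(struct mmc_host *mmc)
{
	/* ... re-enable the mask bit so the card can raise the next IRQ ... */
}

static const struct mmc_host_ops example_ops = {
	.enable_sdio_irq = example_enable_sdio_irq,
	.ack_sdio_irq	 = example_ack_sdio_irq,
};

static void example_advertise_sdio_irq(struct mmc_host *mmc)
{
	mmc->caps  |= MMC_CAP_SDIO_IRQ;			/* card interrupts supported */
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;	/* handled from the host IRQ, no polling thread */
	mmc->ops    = &example_ops;
}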
*/ @@ -377,6 +378,7 @@ struct variant_data { u32 start_err; u32 opendrain; u8 dma_lli:1; + bool supports_sdio_irq; u32 stm32_idmabsize_mask; u32 stm32_idmabsize_align; bool dma_flow_controller; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 97f7c3d4be..1634b1f5d2 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -252,12 +252,16 @@ #define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */ #define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */ +#define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */ #define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */ +#define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */ #define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */ #define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */ #define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */ #define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */ #define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */ +#define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */ +#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */ #define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */ #define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */ @@ -325,7 +329,9 @@ #define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */ -#define PAD_DELAY_MAX 32 /* PAD delay cells */ +#define TUNING_REG2_FIXED_OFFEST 4 +#define PAD_DELAY_HALF 32 /* PAD delay cells */ +#define PAD_DELAY_FULL 64 /*--------------------------------------------------------------------------*/ /* Descriptor Structure */ /*--------------------------------------------------------------------------*/ @@ -461,6 +467,7 @@ struct msdc_host { u32 hs400_ds_dly3; u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ + u32 tuning_step; bool hs400_cmd_resp_sel_rising; /* cmd response sample selection for HS400 */ bool hs400_mode; /* current eMMC will run at hs400 mode */ @@ -1149,9 +1156,11 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host) static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd) { - if (host->error) - dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", - __func__, cmd->opcode, cmd->arg, host->error); + if (host->error && + ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) || + cmd->error == -ETIMEDOUT)) + dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); } static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) @@ -1615,7 +1624,7 @@ static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts) } if (cmd_err || dat_err) { - dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x", + dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x", cmd_err, dat_err, intsts); } @@ -1780,10 +1789,20 @@ static void msdc_init_hw(struct msdc_host *host) DATA_K_VALUE_SEL); sdr_set_bits(host->top_base + EMMC_TOP_CMD, PAD_CMD_RD_RXDLY_SEL); + if (host->tuning_step > PAD_DELAY_HALF) { + sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY2_SEL); + sdr_set_bits(host->top_base + EMMC_TOP_CMD, + PAD_CMD_RD_RXDLY2_SEL); + } } else { sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL); + if (host->tuning_step > PAD_DELAY_HALF) + sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_RD2_SEL | + MSDC_PAD_TUNE_CMD2_SEL); } } else { /* choose clock tune */ @@ -1925,24 +1944,24 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) msdc_set_mclk(host, ios->timing, ios->clock); } -static u32 
test_delay_bit(u32 delay, u32 bit) +static u64 test_delay_bit(u64 delay, u32 bit) { - bit %= PAD_DELAY_MAX; - return delay & BIT(bit); + bit %= PAD_DELAY_FULL; + return delay & BIT_ULL(bit); } -static int get_delay_len(u32 delay, u32 start_bit) +static int get_delay_len(u64 delay, u32 start_bit) { int i; - for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) { + for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) { if (test_delay_bit(delay, start_bit + i) == 0) return i; } - return PAD_DELAY_MAX - start_bit; + return PAD_DELAY_FULL - start_bit; } -static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) +static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay) { int start = 0, len = 0; int start_final = 0, len_final = 0; @@ -1950,28 +1969,28 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) struct msdc_delay_phase delay_phase = { 0, }; if (delay == 0) { - dev_err(host->dev, "phase error: [map:%x]\n", delay); + dev_err(host->dev, "phase error: [map:%016llx]\n", delay); delay_phase.final_phase = final_phase; return delay_phase; } - while (start < PAD_DELAY_MAX) { + while (start < PAD_DELAY_FULL) { len = get_delay_len(delay, start); if (len_final < len) { start_final = start; len_final = len; } start += len ? len : 1; - if (len >= 12 && start_final < 4) + if (!upper_32_bits(delay) && len >= 12 && start_final < 4) break; } /* The rule is that to find the smallest delay cell */ if (start_final == 0) - final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX; + final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL; else - final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX; - dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n", + final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL; + dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n", delay, len_final, final_phase); delay_phase.maxlen = len_final; @@ -1984,30 +2003,64 @@ static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value) { u32 tune_reg = host->dev_comp->pad_tune_reg; - if (host->top_base) - sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, - value); - else - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, - value); + if (host->top_base) { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value); + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0); + } else { + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, + PAD_DELAY_HALF - 1); + sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, + value - PAD_DELAY_HALF); + } + } else { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_CMDRDLY2, 0); + } else { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, + PAD_DELAY_HALF - 1); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF); + } + } } static inline void msdc_set_data_delay(struct msdc_host *host, u32 value) { u32 tune_reg = host->dev_comp->pad_tune_reg; - if (host->top_base) - sdr_set_field(host->top_base + EMMC_TOP_CONTROL, - PAD_DAT_RD_RXDLY, value); - else - sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, - value); + if (host->top_base) { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY, value); + sdr_set_field(host->top_base 
+ EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY2, 0); + } else { + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1); + sdr_set_field(host->top_base + EMMC_TOP_CONTROL, + PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF); + } + } else { + if (value < PAD_DELAY_HALF) { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_DATRRDLY2, 0); + } else { + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, + PAD_DELAY_HALF - 1); + sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST, + MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF); + } + } } static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); - u32 rise_delay = 0, fall_delay = 0; + u64 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; struct msdc_delay_phase internal_delay_phase; u8 final_delay, final_maxlen; @@ -2023,7 +2076,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) host->hs200_cmd_int_delay); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); /* * Using the same parameters, it may sometimes pass the test, @@ -2033,9 +2086,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) for (j = 0; j < 3; j++) { mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) { - rise_delay |= BIT(i); + rise_delay |= BIT_ULL(i); } else { - rise_delay &= ~BIT(i); + rise_delay &= ~BIT_ULL(i); break; } } @@ -2047,7 +2100,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) goto skip_fall; sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); /* * Using the same parameters, it may sometimes pass the test, @@ -2057,9 +2110,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) for (j = 0; j < 3; j++) { mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) { - fall_delay |= BIT(i); + fall_delay |= BIT_ULL(i); } else { - fall_delay &= ~BIT(i); + fall_delay &= ~BIT_ULL(i); break; } } @@ -2082,12 +2135,12 @@ skip_fall: if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) goto skip_internal; - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, i); mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) - internal_delay |= BIT(i); + internal_delay |= BIT_ULL(i); } dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); internal_delay_phase = get_best_delay(host, internal_delay); @@ -2121,7 +2174,8 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); else sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + + for (i = 0; i < PAD_DELAY_HALF; i++) { sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3, i); /* @@ -2151,7 +2205,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); - u32 rise_delay = 0, fall_delay = 0; + u64 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; int i, ret; @@ -2160,11 +2214,11 @@ 
static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) host->latch_ck); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - rise_delay |= BIT(i); + rise_delay |= BIT_ULL(i); } final_rise_delay = get_best_delay(host, rise_delay); /* if rising edge has enough margin, then do not scan falling edge */ @@ -2174,11 +2228,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - fall_delay |= BIT(i); + fall_delay |= BIT_ULL(i); } final_fall_delay = get_best_delay(host, fall_delay); @@ -2206,7 +2260,7 @@ skip_fall: static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); - u32 rise_delay = 0, fall_delay = 0; + u64 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; int i, ret; @@ -2217,12 +2271,12 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); - for (i = 0 ; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - rise_delay |= BIT(i); + rise_delay |= BIT_ULL(i); } final_rise_delay = get_best_delay(host, rise_delay); /* if rising edge has enough margin, then do not scan falling edge */ @@ -2233,12 +2287,12 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < host->tuning_step; i++) { msdc_set_cmd_delay(host, i); msdc_set_data_delay(host, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) - fall_delay |= BIT(i); + fall_delay |= BIT_ULL(i); } final_fall_delay = get_best_delay(host, fall_delay); @@ -2346,7 +2400,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card } host->hs400_tuning = true; - for (i = 0; i < PAD_DELAY_MAX; i++) { + for (i = 0; i < PAD_DELAY_HALF; i++) { if (host->top_base) sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, PAD_DS_DLY1, i); @@ -2580,6 +2634,8 @@ static const struct cqhci_host_ops msdc_cmdq_ops = { static void msdc_of_property_parse(struct platform_device *pdev, struct msdc_host *host) { + struct mmc_host *mmc = mmc_from_priv(host); + of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", &host->latch_ck); @@ -2601,6 +2657,14 @@ static void msdc_of_property_parse(struct platform_device *pdev, else host->hs400_cmd_resp_sel_rising = false; + if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step", + &host->tuning_step)) { + if (mmc->caps2 & MMC_CAP2_NO_MMC) + host->tuning_step = PAD_DELAY_FULL; + else + host->tuning_step = PAD_DELAY_HALF; + } + if (of_property_read_bool(pdev->dev.of_node, "supports-cqe")) host->cqhci = true; diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 
9fb8995b43..13fa8588e3 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1119,10 +1119,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on, host = slot->host; - if (slot->vsd) - gpiod_set_value(slot->vsd, power_on); - if (slot->vio) - gpiod_set_value(slot->vio, power_on); + if (power_on) { + if (slot->vsd) { + gpiod_set_value(slot->vsd, power_on); + msleep(1); + } + if (slot->vio) { + gpiod_set_value(slot->vio, power_on); + msleep(1); + } + } else { + if (slot->vio) { + gpiod_set_value(slot->vio, power_on); + msleep(50); + } + if (slot->vsd) { + gpiod_set_value(slot->vsd, power_on); + msleep(50); + } + } if (slot->pdata->set_power != NULL) slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on, @@ -1259,18 +1274,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) slot->pdata = &host->pdata->slots[id]; /* Check for some optional GPIO controls */ - slot->vsd = gpiod_get_index_optional(host->dev, "vsd", - id, GPIOD_OUT_LOW); + slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd", + id, GPIOD_OUT_LOW); if (IS_ERR(slot->vsd)) return dev_err_probe(host->dev, PTR_ERR(slot->vsd), "error looking up VSD GPIO\n"); - slot->vio = gpiod_get_index_optional(host->dev, "vio", - id, GPIOD_OUT_LOW); + slot->vio = devm_gpiod_get_index_optional(host->dev, "vio", + id, GPIOD_OUT_LOW); if (IS_ERR(slot->vio)) return dev_err_probe(host->dev, PTR_ERR(slot->vio), "error looking up VIO GPIO\n"); - slot->cover = gpiod_get_index_optional(host->dev, "cover", - id, GPIOD_IN); + slot->cover = devm_gpiod_get_index_optional(host->dev, "cover", + id, GPIOD_IN); if (IS_ERR(slot->cover)) return dev_err_probe(host->dev, PTR_ERR(slot->cover), "error looking up cover switch GPIO\n"); @@ -1384,13 +1399,6 @@ static int mmc_omap_probe(struct platform_device *pdev) if (IS_ERR(host->virt_base)) return PTR_ERR(host->virt_base); - host->slot_switch = gpiod_get_optional(host->dev, "switch", - GPIOD_OUT_LOW); - if (IS_ERR(host->slot_switch)) - return dev_err_probe(host->dev, PTR_ERR(host->slot_switch), - "error looking up slot switch GPIO\n"); - - INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work); INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); @@ -1409,6 +1417,12 @@ static int mmc_omap_probe(struct platform_device *pdev) host->dev = &pdev->dev; platform_set_drvdata(pdev, host); + host->slot_switch = devm_gpiod_get_optional(host->dev, "switch", + GPIOD_OUT_LOW); + if (IS_ERR(host->slot_switch)) + return dev_err_probe(host->dev, PTR_ERR(host->slot_switch), + "error looking up slot switch GPIO\n"); + host->id = pdev->id; host->irq = irq; host->phys_base = res->start; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 87d78432a1..7dfe7c4e00 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -7,6 +7,7 @@ * Wei WANG <wei_wang@realsil.com.cn> */ +#include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/highmem.h> @@ -947,7 +948,7 @@ static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode) /* send at least 74 clocks */ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN); - if (PCI_PID(pcr) == PID_5261) { + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) { /* * If test mode is set switch to SD Express mandatorily, * this is only for factory testing. 
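For the omap.c power-control hunk above, the change is purely about ordering and settle time: VSD is raised before VIO with 1 ms pauses, and the rails are dropped in reverse order with 50 ms discharge delays. A hypothetical helper condensing that sequencing is sketched below; gpiod_set_value() tolerates the NULL descriptors the _optional lookups may return.

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/types.h>

/* Hypothetical helper condensing the sequencing added above. */
static void example_slot_set_power(struct gpio_desc *vsd,
				   struct gpio_desc *vio, bool power_on)
{
	if (power_on) {
		gpiod_set_value(vsd, 1);
		msleep(1);		/* let VSD settle before raising VIO */
		gpiod_set_value(vio, 1);
		msleep(1);
	} else {
		gpiod_set_value(vio, 0);
		msleep(50);		/* give the rails time to discharge */
		gpiod_set_value(vsd, 0);
		msleep(50);
	}
}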
@@ -1364,6 +1365,14 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios) struct realtek_pci_sdmmc *host = mmc_priv(mmc); struct rtsx_pcr *pcr = host->pcr; + if (PCI_PID(pcr) == PID_5264) { + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_2_5GT); + pci_write_config_byte(pcr->pci, 0x80e, 0x02); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_5_0GT); + } + /* Set relink_time for changing to PCIe card */ relink_time = 0x8FFF; @@ -1379,6 +1388,12 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios) if (pcr->ops->disable_auto_blink) pcr->ops->disable_auto_blink(pcr); + if (PCI_PID(pcr) == PID_5264) { + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2, + RTS5264_CHIP_RST_N_SEL, RTS5264_CHIP_RST_N_SEL); + rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00); + } + /* For PCIe/NVMe mode can't enter delink issue */ pcr->hw_param.interrupt_en &= ~(SD_INT_EN); rtsx_pci_writel(pcr, RTSX_BIER, pcr->hw_param.interrupt_en); diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index c23251bb95..9053526fa2 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -6,6 +6,7 @@ */ #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> @@ -44,8 +45,13 @@ struct brcmstb_match_priv { static inline void enable_clock_gating(struct sdhci_host *host) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); u32 reg; + if (!(priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)) + return; + reg = sdhci_readl(host, SDHCI_VENDOR); reg |= SDHCI_VENDOR_GATE_SDCLK_EN; sdhci_writel(host, reg, SDHCI_VENDOR); @@ -53,14 +59,53 @@ static inline void enable_clock_gating(struct sdhci_host *host) static void brcmstb_reset(struct sdhci_host *host, u8 mask) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); - sdhci_and_cqhci_reset(host, mask); /* Reset will clear this, so re-enable it */ - if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) - enable_clock_gating(host); + enable_clock_gating(host); +} + +static void brcmstb_sdhci_reset_cmd_data(struct sdhci_host *host, u8 mask) +{ + u32 new_mask = (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) << 24; + int ret; + u32 reg; + + /* + * SDHCI_CLOCK_CONTROL register CARD_EN and CLOCK_INT_EN bits shall + * be set along with SOFTWARE_RESET register RESET_CMD or RESET_DATA + * bits, hence access SDHCI_CLOCK_CONTROL register as 32-bit register + */ + new_mask |= SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN; + reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); + sdhci_writel(host, reg | new_mask, SDHCI_CLOCK_CONTROL); + + reg = sdhci_readb(host, SDHCI_SOFTWARE_RESET); + + ret = read_poll_timeout_atomic(sdhci_readb, reg, !(reg & mask), + 10, 10000, false, + host, SDHCI_SOFTWARE_RESET); + + if (ret) { + pr_err("%s: Reset 0x%x never completed.\n", + mmc_hostname(host->mmc), (int)mask); + sdhci_err_stats_inc(host, CTRL_TIMEOUT); + sdhci_dumpregs(host); + } +} + +static void brcmstb_reset_74165b0(struct sdhci_host *host, u8 mask) +{ + /* take care of RESET_ALL as usual */ + if (mask & SDHCI_RESET_ALL) + sdhci_and_cqhci_reset(host, SDHCI_RESET_ALL); + + /* cmd and/or data treated differently on this core */ + if (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) + 
brcmstb_sdhci_reset_cmd_data(host, mask); + + /* Reset will clear this, so re-enable it */ + enable_clock_gating(host); } static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) @@ -162,6 +207,13 @@ static struct sdhci_ops sdhci_brcmstb_ops_7216 = { .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, }; +static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = brcmstb_reset_74165b0, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + static struct brcmstb_match_priv match_priv_7425 = { .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, @@ -179,10 +231,17 @@ static const struct brcmstb_match_priv match_priv_7216 = { .ops = &sdhci_brcmstb_ops_7216, }; +static struct brcmstb_match_priv match_priv_74165b0 = { + .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE, + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_74165b0, +}; + static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = { { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + { .compatible = "brcm,bcm74165b0-sdhci", .data = &match_priv_74165b0 }, {}, }; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 668e0aceee..e113b99a3e 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = true; + spin_unlock_irqrestore(&host->lock, flags); /* Drop the performance vote */ dev_pm_opp_set_rate(dev, 0); @@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + unsigned long flags; int ret; ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), @@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev) dev_pm_opp_set_rate(dev, msm_host->clk_rate); - return sdhci_msm_ice_resume(msm_host); + ret = sdhci_msm_ice_resume(msm_host); + if (ret) + return ret; + + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = false; + spin_unlock_irqrestore(&host->lock, flags); + + return ret; } static const struct dev_pm_ops sdhci_msm_pm_ops = { diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index a0524127ca..60b42126a4 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -8,6 +8,7 @@ */ #include <linux/acpi.h> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/iopoll.h> @@ -35,6 +36,21 @@ #define DWCMSHC_CARD_IS_EMMC BIT(0) #define DWCMSHC_ENHANCED_STROBE BIT(8) #define DWCMSHC_EMMC_ATCTRL 0x40 +/* Tuning and auto-tuning fields in AT_CTRL_R control register */ +#define AT_CTRL_AT_EN BIT(0) /* autotuning is enabled */ +#define AT_CTRL_CI_SEL BIT(1) /* interval to drive center phase select */ +#define AT_CTRL_SWIN_TH_EN BIT(2) /* sampling window threshold enable */ +#define 
AT_CTRL_RPT_TUNE_ERR BIT(3) /* enable reporting framing errors */ +#define AT_CTRL_SW_TUNE_EN BIT(4) /* enable software managed tuning */ +#define AT_CTRL_WIN_EDGE_SEL_MASK GENMASK(11, 8) /* bits [11:8] */ +#define AT_CTRL_WIN_EDGE_SEL 0xf /* sampling window edge select */ +#define AT_CTRL_TUNE_CLK_STOP_EN BIT(16) /* clocks stopped during phase code change */ +#define AT_CTRL_PRE_CHANGE_DLY_MASK GENMASK(18, 17) /* bits [18:17] */ +#define AT_CTRL_PRE_CHANGE_DLY 0x1 /* 2-cycle latency */ +#define AT_CTRL_POST_CHANGE_DLY_MASK GENMASK(20, 19) /* bits [20:19] */ +#define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */ +#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */ +#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */ /* Rockchip specific Registers */ #define DWCMSHC_EMMC_DLL_CTRL 0x800 @@ -72,6 +88,82 @@ (((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0)) #define RK35xx_MAX_CLKS 3 +/* PHY register area pointer */ +#define DWC_MSHC_PTR_PHY_R 0x300 + +/* PHY general configuration */ +#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00) +#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */ +#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */ +#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */ +#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */ +#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */ + +/* PHY command/response pad settings */ +#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04) + +/* PHY data pad settings */ +#define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06) + +/* PHY clock pad settings */ +#define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08) + +/* PHY strobe pad settings */ +#define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a) + +/* PHY reset pad settings */ +#define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c) + +/* Bitfields are common for all pad settings */ +#define PHY_PAD_RXSEL_1V8 0x1 /* Receiver type select for 1.8V */ +#define PHY_PAD_RXSEL_3V3 0x2 /* Receiver type select for 3.3V */ + +#define PHY_PAD_WEAKPULL_MASK GENMASK(4, 3) /* bits [4:3] */ +#define PHY_PAD_WEAKPULL_PULLUP 0x1 /* Weak pull up enabled */ +#define PHY_PAD_WEAKPULL_PULLDOWN 0x2 /* Weak pull down enabled */ + +#define PHY_PAD_TXSLEW_CTRL_P_MASK GENMASK(8, 5) /* bits [8:5] */ +#define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */ +#define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */ +#define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */ + +/* PHY CLK delay line settings */ +#define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d) +#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */ + +/* PHY CLK delay line delay code */ +#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e) +#define PHY_SDCLKDL_DC_INITIAL 0x40 /* initial delay code */ +#define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */ +#define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */ + +/* PHY drift_cclk_rx delay line configuration setting */ +#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21) +#define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */ +#define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */ + +/* PHY DLL control settings */ +#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24) +#define PHY_DLL_CTRL_DISABLE 0x0 /* PHY DLL is enabled */ +#define PHY_DLL_CTRL_ENABLE 0x1 /* PHY DLL is disabled */ + +/* PHY DLL configuration register 1 */ +#define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25) +#define PHY_DLL_CNFG1_SLVDLY_MASK GENMASK(5, 4) /* bits [5:4] */ 
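The PHY pad-configuration fields above are packed with the usual GENMASK()/FIELD_PREP() helpers; the TH1520 PHY init functions later in this diff build one such value and write it to the CMD, DAT and RSTN pad registers. A standalone sketch with local copies of those fields follows; the EX_* names are illustrative.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Local, illustrative copies of the pad-config fields defined above. */
#define EX_PAD_RXSEL_1V8		0x1
#define EX_PAD_WEAKPULL_MASK		GENMASK(4, 3)
#define EX_PAD_WEAKPULL_PULLUP		0x1
#define EX_PAD_TXSLEW_CTRL_P_MASK	GENMASK(8, 5)
#define EX_PAD_TXSLEW_CTRL_P		0x3
#define EX_PAD_TXSLEW_CTRL_N_MASK	GENMASK(12, 9)
#define EX_PAD_TXSLEW_CTRL_N		0x3

/* Build the 16-bit value the 1.8V PHY init writes to the CMD/DAT/RSTN pad
 * config registers: receiver select in the low bits, weak pull-up and TX
 * slew control packed on top with FIELD_PREP(). */
static u16 example_pad_cnfg_1v8(void)
{
	u16 val = EX_PAD_RXSEL_1V8;

	val |= FIELD_PREP(EX_PAD_WEAKPULL_MASK, EX_PAD_WEAKPULL_PULLUP);
	val |= FIELD_PREP(EX_PAD_TXSLEW_CTRL_P_MASK, EX_PAD_TXSLEW_CTRL_P);
	val |= FIELD_PREP(EX_PAD_TXSLEW_CTRL_N_MASK, EX_PAD_TXSLEW_CTRL_N);
	return val;
}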
+#define PHY_DLL_CNFG1_SLVDLY 0x2 /* DLL slave update delay input */ +#define PHY_DLL_CNFG1_WAITCYCLE 0x5 /* DLL wait cycle input */ + +/* PHY DLL configuration register 2 */ +#define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26) +#define PHY_DLL_CNFG2_JUMPSTEP 0xa /* DLL jump step input */ + +/* PHY DLL master and slave delay line configuration settings */ +#define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28) +#define PHY_DLLDL_CNFG_SLV_INPSEL_MASK GENMASK(6, 5) /* bits [6:5] */ +#define PHY_DLLDL_CNFG_SLV_INPSEL 0x3 /* clock source select for slave DL */ + +#define FLAG_IO_FIXED_1V8 BIT(0) + #define BOUNDARY_OK(addr, len) \ ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1))) @@ -92,6 +184,8 @@ struct dwcmshc_priv { struct clk *bus_clk; int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */ void *priv; /* pointer to SoC private stuff */ + u16 delay_line; + u16 flags; }; /* @@ -157,6 +251,127 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_request(mmc, mrq); } +static void dwcmshc_phy_1_8v_init(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 val; + + /* deassert phy reset & set tx drive strength */ + val = PHY_CNFG_RSTN_DEASSERT; + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP); + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN); + sdhci_writel(host, val, PHY_CNFG_R); + + /* disable delay line */ + sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R); + + /* set delay line */ + sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R); + sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R); + + /* enable delay lane */ + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); + + /* configure phy pads */ + val = PHY_PAD_RXSEL_1V8; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); + + val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); + + val = PHY_PAD_RXSEL_1V8; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); + + /* enable data strobe mode */ + sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL), + PHY_DLLDL_CNFG_R); + + /* enable phy dll */ + sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R); +} + +static void dwcmshc_phy_3_3v_init(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 val; + + /* deassert phy reset & set tx drive strength */ + val = PHY_CNFG_RSTN_DEASSERT; + val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP); + val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN); + sdhci_writel(host, val, PHY_CNFG_R); + + /* disable delay line */ + sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R); + + /* set delay line */ + 
sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R); + sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R); + + /* enable delay lane */ + val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R); + val &= ~(PHY_SDCLKDL_CNFG_UPDATE); + sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R); + + /* configure phy pads */ + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CMDPAD_CNFG_R); + sdhci_writew(host, val, PHY_DATAPAD_CNFG_R); + sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R); + + val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_CLKPAD_CNFG_R); + + val = PHY_PAD_RXSEL_3V3; + val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P); + val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N); + sdhci_writew(host, val, PHY_STBPAD_CNFG_R); + + /* enable phy dll */ + sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R); +} + +static void th1520_sdhci_set_phy(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; + u16 emmc_ctrl; + + /* Before power on, set PHY configs */ + if (priv->flags & FLAG_IO_FIXED_1V8) + dwcmshc_phy_1_8v_init(host); + else + dwcmshc_phy_3_3v_init(host); + + if ((host->mmc->caps2 & emmc_caps) == emmc_caps) { + emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL); + emmc_ctrl |= DWCMSHC_CARD_IS_EMMC; + sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL); + } + + sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) | + PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R); +} + static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) { @@ -189,9 +404,25 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, ctrl_2 |= DWCMSHC_CTRL_HS400; } + if (priv->flags & FLAG_IO_FIXED_1V8) + ctrl_2 |= SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); } +static void th1520_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + + dwcmshc_set_uhs_signaling(host, timing); + if (timing == MMC_TIMING_MMC_HS400) + priv->delay_line = PHY_SDCLKDL_DC_HS400; + else + sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R); + th1520_sdhci_set_phy(host); +} + static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -338,6 +569,80 @@ static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); } +static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u32 val = 0; + + if (host->flags & SDHCI_HS400_TUNING) + return 0; + + sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL), + PHY_ATDL_CNFG_R); + val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + + /* + * configure tuning settings: + * - center phase select code driven in block gap interval + * - 
disable reporting of framing errors + * - disable software managed tuning + * - disable user selection of sampling window edges, + * instead tuning calculated edges are used + */ + val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN | + FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL)); + + /* + * configure tuning settings: + * - enable auto-tuning + * - enable sampling window threshold + * - stop clocks during phase code change + * - set max latency in cycles between tx and rx clocks + * - set max latency in cycles to switch output phase + * - set max sampling window threshold value + */ + val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN; + val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY); + val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY); + val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL); + + sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + + /* perform tuning */ + sdhci_start_tuning(host); + host->tuning_loop_count = 128; + host->tuning_err = __sdhci_execute_tuning(host, opcode); + if (host->tuning_err) { + /* disable auto-tuning upon tuning error */ + val &= ~AT_CTRL_AT_EN; + sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL); + dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err); + return -EIO; + } + sdhci_end_tuning(host); + + return 0; +} + +static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host); + u16 ctrl_2; + + sdhci_reset(host, mask); + + if (priv->flags & FLAG_IO_FIXED_1V8) { + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) { + ctrl_2 |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + } + } +} + static const struct sdhci_ops sdhci_dwcmshc_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -356,6 +661,17 @@ static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = { .adma_write_desc = dwcmshc_adma_write_desc, }; +static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = th1520_set_uhs_signaling, + .get_max_clock = dwcmshc_get_max_clock, + .reset = th1520_sdhci_reset, + .adma_write_desc = dwcmshc_adma_write_desc, + .voltage_switch = dwcmshc_phy_1_8v_init, + .platform_execute_tuning = &th1520_execute_tuning, +}; + static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { .ops = &sdhci_dwcmshc_ops, .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, @@ -379,6 +695,12 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, }; +static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = { + .ops = &sdhci_dwcmshc_th1520_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, +}; + static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) { int err; @@ -447,6 +769,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { .compatible = "snps,dwcmshc-sdhci", .data = &sdhci_dwcmshc_pdata, }, + { + .compatible = "thead,th1520-dwcmshc", + .data = &sdhci_dwcmshc_th1520_pdata, + }, {}, }; MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids); @@ -542,6 +868,30 @@ static int dwcmshc_probe(struct 
platform_device *pdev) goto err_clk; } + if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) { + priv->delay_line = PHY_SDCLKDL_DC_DEFAULT; + + if (device_property_read_bool(dev, "mmc-ddr-1_8v") || + device_property_read_bool(dev, "mmc-hs200-1_8v") || + device_property_read_bool(dev, "mmc-hs400-1_8v")) + priv->flags |= FLAG_IO_FIXED_1V8; + else + priv->flags &= ~FLAG_IO_FIXED_1V8; + + /* + * start_signal_voltage_switch() will try 3.3V first + * then 1.8V. Use SDHCI_SIGNALING_180 rather than + * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V + * in sdhci_start_signal_voltage_switch(). + */ + if (priv->flags & FLAG_IO_FIXED_1V8) { + host->flags &= ~SDHCI_SIGNALING_330; + host->flags |= SDHCI_SIGNALING_180; + } + + sdhci_enable_v4_mode(host); + } + #ifdef CONFIG_ACPI if (pltfm_data == &sdhci_dwcmshc_bf3_pdata) sdhci_enable_v4_mode(host); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 0a26831b3b..94076b0955 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * SDHCI Controller driver for TI's OMAP SoCs * * Copyright (C) 2017 Texas Instruments diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 25ba7aecc3..0e52867f6e 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -18,6 +18,8 @@ #include <linux/of.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> #include "sdhci-pltfm.h" #include "sdhci-xenon.h" @@ -422,6 +424,7 @@ static int xenon_probe_params(struct platform_device *pdev) struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); u32 sdhc_id, nr_sdhc; u32 tuning_count; + struct sysinfo si; /* Disable HS200 on Armada AP806 */ if (priv->hw_version == XENON_AP806) @@ -450,6 +453,23 @@ static int xenon_probe_params(struct platform_device *pdev) } priv->tuning_count = tuning_count; + /* + * AC5/X/IM HW has only 31-bits passed in the crossbar switch. + * If we have more than 2GB of memory, this means we might pass + * memory pointers which are above 2GB and which cannot be properly + * represented. In this case, disable ADMA, 64-bit DMA and allow only SDMA. + * This effectively will enable bounce buffer quirk in the + * generic SDHCI driver, which will make sure DMA is only done + * from supported memory regions: + */ + if (priv->hw_version == XENON_AC5) { + si_meminfo(&si); + if (si.totalram * si.mem_unit > SZ_2G) { + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; + } + } + return xenon_phy_parse_params(dev, host); } @@ -562,6 +582,16 @@ static int xenon_probe(struct platform_device *pdev) goto remove_sdhc; pm_runtime_put_autosuspend(&pdev->dev); + /* + * If we previously detected AC5 with over 2GB of memory, + * then we disable ADMA and 64-bit DMA. + * This means generic SDHCI driver has set the DMA mask to + * 32-bit. 
Since DDR starts at 0x2_0000_0000, we must use + * 34-bit DMA mask to access this DDR memory: + */ + if (priv->hw_version == XENON_AC5 && + host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34)); return 0; @@ -680,6 +710,7 @@ static const struct of_device_id sdhci_xenon_dt_ids[] = { { .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807}, { .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110}, { .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700}, + { .compatible = "marvell,ac5-sdhci", .data = (void *)XENON_AC5}, {} }; MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids); diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 3e9c6c908a..0460d97aad 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h @@ -57,7 +57,8 @@ enum xenon_variant { XENON_A3700, XENON_AP806, XENON_AP807, - XENON_CP110 + XENON_CP110, + XENON_AC5 }; struct xenon_priv { diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ff41aa5656..c79f734599 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2841,7 +2841,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) } EXPORT_SYMBOL_GPL(sdhci_send_tuning); -static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) +int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) { int i; @@ -2879,6 +2879,7 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_reset_tuning(host); return -EAGAIN; } +EXPORT_SYMBOL_GPL(__sdhci_execute_tuning); int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f219bdea8f..a20864fc06 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -793,6 +793,7 @@ void sdhci_set_bus_width(struct sdhci_host *host, int width); void sdhci_reset(struct sdhci_host *host, u8 mask); void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); +int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode); void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios); diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 967bd2dfcd..d659c59422 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -141,7 +141,6 @@ static const struct timing_data td[] = { struct sdhci_am654_data { struct regmap *base; - bool legacy_otapdly; int otap_del_sel[ARRAY_SIZE(td)]; int itap_del_sel[ARRAY_SIZE(td)]; int clkbuf_sel; @@ -272,11 +271,7 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_set_clock(host, clock); /* Setup DLL Output TAP delay */ - if (sdhci_am654->legacy_otapdly) - otap_del_sel = sdhci_am654->otap_del_sel[0]; - else - otap_del_sel = sdhci_am654->otap_del_sel[timing]; - + otap_del_sel = sdhci_am654->otap_del_sel[timing]; otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 
1 : 0; mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; @@ -314,10 +309,7 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, u32 mask, val; /* Setup DLL Output TAP delay */ - if (sdhci_am654->legacy_otapdly) - otap_del_sel = sdhci_am654->otap_del_sel[0]; - else - otap_del_sel = sdhci_am654->otap_del_sel[timing]; + otap_del_sel = sdhci_am654->otap_del_sel[timing]; mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; val = (0x1 << OTAPDLYENA_SHIFT) | @@ -577,32 +569,15 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host, int i; int ret; - ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding, - &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]); - if (ret) { - /* - * ti,otap-del-sel-legacy is mandatory, look for old binding - * if not found. - */ - ret = device_property_read_u32(dev, "ti,otap-del-sel", - &sdhci_am654->otap_del_sel[0]); - if (ret) { - dev_err(dev, "Couldn't find otap-del-sel\n"); - - return ret; - } - - dev_info(dev, "Using legacy binding ti,otap-del-sel\n"); - sdhci_am654->legacy_otapdly = true; - - return 0; - } - for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) { ret = device_property_read_u32(dev, td[i].otap_binding, &sdhci_am654->otap_del_sel[i]); if (ret) { + if (i == MMC_TIMING_LEGACY) { + dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n"); + return ret; + } dev_dbg(dev, "Couldn't find %s\n", td[i].otap_binding); /* |
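With the legacy "ti,otap-del-sel" fallback removed above, only the legacy-timing OTAP property remains mandatory; the other per-timing properties are optional and simply leave the table entry untouched when absent. A condensed, hypothetical sketch of that lookup is given below; the property table is an illustrative subset of the driver's td[] array.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/property.h>

/* Illustrative subset of the driver's td[] timing-to-property table. */
static const char * const ex_otap_prop[] = {
	[MMC_TIMING_LEGACY]	= "ti,otap-del-sel-legacy",
	[MMC_TIMING_MMC_HS]	= "ti,otap-del-sel-mmc-hs",
	[MMC_TIMING_MMC_HS200]	= "ti,otap-del-sel-hs200",
	[MMC_TIMING_MMC_HS400]	= "ti,otap-del-sel-hs400",
};

static int example_read_otap_delays(struct device *dev, u32 *otap_del_sel)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(ex_otap_prop); i++) {
		if (!ex_otap_prop[i])
			continue;
		ret = device_property_read_u32(dev, ex_otap_prop[i],
					       &otap_del_sel[i]);
		/* Only the legacy-timing delay is mandatory. */
		if (ret && i == MMC_TIMING_LEGACY)
			return dev_err_probe(dev, ret, "missing %s\n",
					     ex_otap_prop[i]);
	}

	return 0;
}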