Diffstat (limited to 'drivers/mmc/host')
-rw-r--r--  drivers/mmc/host/mmc_spi.c           186
-rw-r--r--  drivers/mmc/host/mmci_stm32_sdmmc.c  112
-rw-r--r--  drivers/mmc/host/sdhci-xenon-phy.c    48
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c       2
-rw-r--r--  drivers/mmc/host/wmt-sdmmc.c           4
5 files changed, 141 insertions(+), 211 deletions(-)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1d814919e..a1fb5d0e9 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direction.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>
@@ -119,19 +119,14 @@ struct mmc_spi_host {
struct spi_transfer status;
struct spi_message readback;
- /* underlying DMA-aware controller, or null */
- struct device *dma_dev;
-
/* buffer used for commands and for message "overhead" */
struct scratch *data;
- dma_addr_t data_dma;
/* Specs say to write ones most of the time, even when the card
* has no need to read its input data; and many cards won't care.
* This is our source of those ones.
*/
void *ones;
- dma_addr_t ones_dma;
};
@@ -147,11 +142,8 @@ static inline int mmc_cs_off(struct mmc_spi_host *host)
return spi_setup(host->spi);
}
-static int
-mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
+static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
{
- int status;
-
if (len > sizeof(*host->data)) {
WARN_ON(1);
return -EIO;
@@ -159,19 +151,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
host->status.len = len;
- if (host->dma_dev)
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_FROM_DEVICE);
-
- status = spi_sync_locked(host->spi, &host->readback);
-
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_FROM_DEVICE);
-
- return status;
+ return spi_sync_locked(host->spi, &host->readback);
}
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
@@ -513,23 +493,11 @@ mmc_spi_command_send(struct mmc_spi_host *host,
t = &host->t;
memset(t, 0, sizeof(*t));
t->tx_buf = t->rx_buf = data->status;
- t->tx_dma = t->rx_dma = host->data_dma;
t->len = cp - data->status;
t->cs_change = 1;
spi_message_add_tail(t, &host->m);
- if (host->dma_dev) {
- host->m.is_dma_mapped = 1;
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
- }
status = spi_sync_locked(host->spi, &host->m);
-
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
if (status < 0) {
dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
cmd->error = status;
@@ -547,9 +515,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
* We always provide TX data for data and CRC. The MMC/SD protocol
* requires us to write ones; but Linux defaults to writing zeroes;
* so we explicitly initialize it to all ones on RX paths.
- *
- * We also handle DMA mapping, so the underlying SPI controller does
- * not need to (re)do it for each message.
*/
static void
mmc_spi_setup_data_message(
@@ -559,11 +524,8 @@ mmc_spi_setup_data_message(
{
struct spi_transfer *t;
struct scratch *scratch = host->data;
- dma_addr_t dma = host->data_dma;
spi_message_init(&host->m);
- if (dma)
- host->m.is_dma_mapped = 1;
/* for reads, readblock() skips 0xff bytes before finding
* the token; for writes, this transfer issues that token.
@@ -577,8 +539,6 @@ mmc_spi_setup_data_message(
else
scratch->data_token = SPI_TOKEN_SINGLE;
t->tx_buf = &scratch->data_token;
- if (dma)
- t->tx_dma = dma + offsetof(struct scratch, data_token);
spi_message_add_tail(t, &host->m);
}
@@ -588,7 +548,6 @@ mmc_spi_setup_data_message(
t = &host->t;
memset(t, 0, sizeof(*t));
t->tx_buf = host->ones;
- t->tx_dma = host->ones_dma;
/* length and actual buffer info are written later */
spi_message_add_tail(t, &host->m);
@@ -598,14 +557,9 @@ mmc_spi_setup_data_message(
if (direction == DMA_TO_DEVICE) {
/* the actual CRC may get written later */
t->tx_buf = &scratch->crc_val;
- if (dma)
- t->tx_dma = dma + offsetof(struct scratch, crc_val);
} else {
t->tx_buf = host->ones;
- t->tx_dma = host->ones_dma;
t->rx_buf = &scratch->crc_val;
- if (dma)
- t->rx_dma = dma + offsetof(struct scratch, crc_val);
}
spi_message_add_tail(t, &host->m);
@@ -628,10 +582,7 @@ mmc_spi_setup_data_message(
memset(t, 0, sizeof(*t));
t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
t->tx_buf = host->ones;
- t->tx_dma = host->ones_dma;
t->rx_buf = scratch->status;
- if (dma)
- t->rx_dma = dma + offsetof(struct scratch, status);
t->cs_change = 1;
spi_message_add_tail(t, &host->m);
}
@@ -660,23 +611,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
if (host->mmc->use_spi_crc)
scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
- if (host->dma_dev)
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
status = spi_sync_locked(spi, &host->m);
-
if (status != 0) {
dev_dbg(&spi->dev, "write error (%d)\n", status);
return status;
}
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
-
/*
* Get the transmission data-response reply. It must follow
* immediately after the data block we transferred. This reply
@@ -725,8 +666,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
t->tx_buf += t->len;
- if (host->dma_dev)
- t->tx_dma += t->len;
/* Return when not busy. If we didn't collect that status yet,
* we'll need some more I/O.
@@ -790,30 +729,12 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
leftover = status << 1;
- if (host->dma_dev) {
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
- dma_sync_single_for_device(host->dma_dev,
- t->rx_dma, t->len,
- DMA_FROM_DEVICE);
- }
-
status = spi_sync_locked(spi, &host->m);
if (status < 0) {
dev_dbg(&spi->dev, "read error %d\n", status);
return status;
}
- if (host->dma_dev) {
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
- dma_sync_single_for_cpu(host->dma_dev,
- t->rx_dma, t->len,
- DMA_FROM_DEVICE);
- }
-
if (bitshift) {
/* Walk through the data and the crc and do
* all the magic to get byte-aligned data.
@@ -848,8 +769,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
t->rx_buf += t->len;
- if (host->dma_dev)
- t->rx_dma += t->len;
return 0;
}
@@ -864,7 +783,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
struct mmc_data *data, u32 blk_size)
{
struct spi_device *spi = host->spi;
- struct device *dma_dev = host->dma_dev;
struct spi_transfer *t;
enum dma_data_direction direction;
struct scatterlist *sg;
@@ -891,31 +809,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
*/
for_each_sg(data->sg, sg, data->sg_len, n_sg) {
int status = 0;
- dma_addr_t dma_addr = 0;
void *kmap_addr;
unsigned length = sg->length;
- enum dma_data_direction dir = direction;
-
- /* set up dma mapping for controller drivers that might
- * use DMA ... though they may fall back to PIO
- */
- if (dma_dev) {
- /* never invalidate whole *shared* pages ... */
- if ((sg->offset != 0 || length != PAGE_SIZE)
- && dir == DMA_FROM_DEVICE)
- dir = DMA_BIDIRECTIONAL;
-
- dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
- PAGE_SIZE, dir);
- if (dma_mapping_error(dma_dev, dma_addr)) {
- data->error = -EFAULT;
- break;
- }
- if (direction == DMA_TO_DEVICE)
- t->tx_dma = dma_addr + sg->offset;
- else
- t->rx_dma = dma_addr + sg->offset;
- }
/* allow pio too; we don't allow highmem */
kmap_addr = kmap(sg_page(sg));
@@ -951,8 +846,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
if (direction == DMA_FROM_DEVICE)
flush_kernel_dcache_page(sg_page(sg));
kunmap(sg_page(sg));
- if (dma_dev)
- dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
if (status < 0) {
data->error = status;
@@ -989,21 +882,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
scratch->status[0] = SPI_TOKEN_STOP_TRAN;
host->early_status.tx_buf = host->early_status.rx_buf;
- host->early_status.tx_dma = host->early_status.rx_dma;
host->early_status.len = statlen;
- if (host->dma_dev)
- dma_sync_single_for_device(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
-
tmp = spi_sync_locked(spi, &host->m);
-
- if (host->dma_dev)
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*scratch),
- DMA_BIDIRECTIONAL);
-
if (tmp < 0) {
if (!data->error)
data->error = tmp;
@@ -1278,52 +1159,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
return IRQ_HANDLED;
}
-#ifdef CONFIG_HAS_DMA
-static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
-{
- struct spi_device *spi = host->spi;
- struct device *dev;
-
- if (!spi->master->dev.parent->dma_mask)
- return 0;
-
- dev = spi->master->dev.parent;
-
- host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, host->ones_dma))
- return -ENOMEM;
-
- host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, host->data_dma)) {
- dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
-
- dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
-
- host->dma_dev = dev;
- return 0;
-}
-
-static void mmc_spi_dma_free(struct mmc_spi_host *host)
-{
- if (!host->dma_dev)
- return;
-
- dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
- DMA_TO_DEVICE);
- dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
-}
-#else
-static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
-static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
-#endif
-
static int mmc_spi_probe(struct spi_device *spi)
{
void *ones;
@@ -1415,24 +1250,17 @@ static int mmc_spi_probe(struct spi_device *spi)
dev_set_drvdata(&spi->dev, mmc);
- /* preallocate dma buffers */
+ /* Preallocate buffers */
host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
if (!host->data)
goto fail_nobuf1;
- status = mmc_spi_dma_alloc(host);
- if (status)
- goto fail_dma;
-
/* setup message for status/busy readback */
spi_message_init(&host->readback);
- host->readback.is_dma_mapped = (host->dma_dev != NULL);
spi_message_add_tail(&host->status, &host->readback);
host->status.tx_buf = host->ones;
- host->status.tx_dma = host->ones_dma;
host->status.rx_buf = &host->data->status;
- host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
host->status.cs_change = 1;
/* register card detect irq */
@@ -1477,9 +1305,8 @@ static int mmc_spi_probe(struct spi_device *spi)
if (!status)
has_ro = true;
- dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
+ dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
dev_name(&mmc->class_dev),
- host->dma_dev ? "" : ", no DMA",
has_ro ? "" : ", no WP",
(host->pdata && host->pdata->setpower)
? "" : ", no poweroff",
@@ -1490,8 +1317,6 @@ static int mmc_spi_probe(struct spi_device *spi)
fail_gpiod_request:
mmc_remove_host(mmc);
fail_glue_init:
- mmc_spi_dma_free(host);
-fail_dma:
kfree(host->data);
fail_nobuf1:
mmc_free_host(mmc);
@@ -1513,7 +1338,6 @@ static int mmc_spi_remove(struct spi_device *spi)
mmc_remove_host(mmc);
- mmc_spi_dma_free(host);
kfree(host->data);
kfree(host->ones);
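
Taken together, the mmc_spi.c hunks delete the driver's private DMA plumbing: the dma_dev pointer, the data_dma/ones_dma handles, the is_dma_mapped flag, and the dma_sync_single_for_{cpu,device} bracketing around every spi_sync_locked() call. The SPI core maps buffers itself, so each helper collapses to a plain synchronous transfer. The resulting shape, condensed from the readbytes hunk above (no new names, just the post-patch code):

static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
{
	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	/* tx_buf/rx_buf were wired up once at probe time; any DMA
	 * mapping these buffers need is now done by the SPI core. */
	return spi_sync_locked(host->spi, &host->readback);
}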
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 4cceb9bab..e3201a621 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -43,6 +43,9 @@ struct sdmmc_lli_desc {
struct sdmmc_idma {
dma_addr_t sg_dma;
void *sg_cpu;
+ dma_addr_t bounce_dma_addr;
+ void *bounce_buf;
+ bool use_bounce_buffer;
};
struct sdmmc_dlyb {
@@ -54,6 +57,8 @@ struct sdmmc_dlyb {
static int sdmmc_idma_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct sdmmc_idma *idma = host->dma_priv;
+ struct device *dev = mmc_dev(host->mmc);
struct scatterlist *sg;
int i;
@@ -61,41 +66,69 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
* idma has constraints on idmabase & idmasize for each element
* excepted the last element which has no constraint on idmasize
*/
+ idma->use_bounce_buffer = false;
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
!IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
- dev_err(mmc_dev(host->mmc),
+ dev_dbg(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
- return -EINVAL;
+ goto use_bounce_buffer;
}
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- dev_err(mmc_dev(host->mmc),
+ dev_dbg(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
- return -EINVAL;
+ goto use_bounce_buffer;
}
return 0;
+
+use_bounce_buffer:
+ if (!idma->bounce_buf) {
+ idma->bounce_buf = dmam_alloc_coherent(dev,
+ host->mmc->max_req_size,
+ &idma->bounce_dma_addr,
+ GFP_KERNEL);
+ if (!idma->bounce_buf) {
+ dev_err(dev, "Unable to allocate DMA bounce buffer.\n");
+ return -ENOMEM;
+ }
+ }
+
+ idma->use_bounce_buffer = true;
+
+ return 0;
}
static int _sdmmc_idma_prep_data(struct mmci_host *host,
struct mmc_data *data)
{
- int n_elem;
+ struct sdmmc_idma *idma = host->dma_priv;
- n_elem = dma_map_sg(mmc_dev(host->mmc),
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ if (idma->use_bounce_buffer) {
+ if (data->flags & MMC_DATA_WRITE) {
+ unsigned int xfer_bytes = data->blksz * data->blocks;
- if (!n_elem) {
- dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
- return -EINVAL;
- }
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ idma->bounce_buf, xfer_bytes);
+ dma_wmb();
+ }
+ } else {
+ int n_elem;
+ n_elem = dma_map_sg(mmc_dev(host->mmc),
+ data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+
+ if (!n_elem) {
+ dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -112,8 +145,19 @@ static int sdmmc_idma_prep_data(struct mmci_host *host,
static void sdmmc_idma_unprep_data(struct mmci_host *host,
struct mmc_data *data, int err)
{
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (idma->use_bounce_buffer) {
+ if (data->flags & MMC_DATA_READ) {
+ unsigned int xfer_bytes = data->blksz * data->blocks;
+
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ idma->bounce_buf, xfer_bytes);
+ }
+ } else {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
}
static int sdmmc_idma_setup(struct mmci_host *host)
@@ -137,6 +181,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
sizeof(struct sdmmc_lli_desc);
host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
+
+ host->mmc->max_req_size = SZ_1M;
} else {
host->mmc->max_segs = 1;
host->mmc->max_seg_size = host->mmc->max_req_size;
@@ -154,8 +200,18 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
- if (!host->variant->dma_lli || data->sg_len == 1) {
- writel_relaxed(sg_dma_address(data->sg),
+ host->dma_in_progress = true;
+
+ if (!host->variant->dma_lli || data->sg_len == 1 ||
+ idma->use_bounce_buffer) {
+ u32 dma_addr;
+
+ if (idma->use_bounce_buffer)
+ dma_addr = idma->bounce_dma_addr;
+ else
+ dma_addr = sg_dma_address(data->sg);
+
+ writel_relaxed(dma_addr,
host->base + MMCI_STM32_IDMABASE0R);
writel_relaxed(MMCI_STM32_IDMAEN,
host->base + MMCI_STM32_IDMACTRLR);
@@ -184,9 +240,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
+ data->host_cookie = 0;
+
+ if (!idma->use_bounce_buffer)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ if (!dma_inprogress(host))
+ return;
+
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -512,6 +589,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
+ .dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
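
The stm32_sdmmc change converts "scatterlist violates IDMA alignment" from a hard -EINVAL into a fallback onto a single coherent bounce buffer, allocated lazily with dmam_alloc_coherent() and sized to max_req_size (capped at SZ_1M on LLI-capable variants), so well-aligned requests never pay for it. The staging pattern, condensed from the prep/unprep hunks above (a sketch with error handling trimmed, not a drop-in):

	unsigned int xfer_bytes = data->blksz * data->blocks;

	/* prep, write path: gather the unaligned sglist into the
	 * aligned bounce buffer before the controller reads it */
	if (data->flags & MMC_DATA_WRITE) {
		sg_copy_to_buffer(data->sg, data->sg_len,
				  idma->bounce_buf, xfer_bytes);
		dma_wmb();	/* copy must land before IDMA starts */
	}

	/* unprep, read path: scatter the bounce buffer back out
	 * once the transfer has completed */
	if (data->flags & MMC_DATA_READ)
		sg_copy_from_buffer(data->sg, data->sg_len,
				    idma->bounce_buf, xfer_bytes);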
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 03ce57ef4..53bf9870c 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
+
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ int err;
+
+ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+ if (err)
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+ return err;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ int ret = xenon_check_stability_internal_clk(host);
+
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
- /* wait for host eMMC PHY init completes */
- udelay(wait);
- reg = sdhci_readl(host, phy_regs->timing_adj);
- reg &= XENON_PHY_INITIALIZAION;
- if (reg) {
+ /*
+ * The AC5X spec says this bit must be polled until it reads zero,
+ * and we have seen it take longer than the standard calculation
+ * above, which is expected per that spec. So keep waiting as long
+ * as the bit needs to toggle, but cap it at 100 delay loops so we
+ * cannot get stuck here forever:
+ */
+
+ ret = read_poll_timeout(sdhci_readl, reg,
+ !(reg & XENON_PHY_INITIALIZAION),
+ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+ false, host, phy_regs->timing_adj);
+ if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
- wait);
- return -ETIMEDOUT;
- }
+ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
- return 0;
+ return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1
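
Both new waits in the Xenon PHY code use read_poll_timeout() from <linux/iopoll.h>, whose argument order is (op, val, cond, delay_us, timeout_us, delay_before_read, op args...). In the PHY-init poll, the spec-derived per-iteration delay becomes the poll interval and the total budget is capped at 100 iterations of it, as the hunk above shows:

	ret = read_poll_timeout(sdhci_readl, reg,
				!(reg & XENON_PHY_INITIALIZAION),
				wait,	/* us between polls */
				XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
				false, host, phy_regs->timing_adj);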
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index abf36acb2..0482920a7 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -216,6 +216,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
else
mrq->cmd->error = -ETIMEDOUT;
+ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
+ host->mrq = ERR_PTR(-EBUSY);
host->cmd = NULL;
host->data = NULL;
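
The tmio one-liner parks host->mrq on a poisoned pointer for the duration of the reset handler: ERR_PTR(-EBUSY) is non-NULL, so the slot cannot be claimed by a new request, yet it is not a live request either, so a concurrent tmio_mmc_done_work() can detect it and back off. A sketch of the convention it relies on (hypothetical helper name; the real checks sit in the request/done paths):

static bool tmio_mrq_is_live(struct tmio_mmc_host *host)
{
	/* NULL: idle. ERR_PTR(-EBUSY): the reset work owns the
	 * slot. In both cases there is nothing to complete. */
	return !IS_ERR_OR_NULL(READ_ONCE(host->mrq));
}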
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 393319548..3fcc81e48 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -889,7 +889,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
- struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
@@ -917,9 +916,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
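
The wmt-sdmmc hunk reads as a probe/remove balance fix: the remove path released a memory region that, as far as this driver shows, was never claimed with request_mem_region() in probe, so the release was bogus (and res became unused, hence its removal too). For contrast, a balanced devm-style pattern (a sketch, not this driver's code) keeps request, map, and release together:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	/* region and mapping are released automatically on detach */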