author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/mmc/core
parent     Initial commit. (diff)
download   linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz
           linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip

Adding upstream version 5.10.209.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mmc/core')
-rw-r--r--  drivers/mmc/core/Kconfig            83
-rw-r--r--  drivers/mmc/core/Makefile           20
-rw-r--r--  drivers/mmc/core/block.c          3106
-rw-r--r--  drivers/mmc/core/block.h            20
-rw-r--r--  drivers/mmc/core/bus.c             420
-rw-r--r--  drivers/mmc/core/bus.h              42
-rw-r--r--  drivers/mmc/core/card.h            231
-rw-r--r--  drivers/mmc/core/core.c           2417
-rw-r--r--  drivers/mmc/core/core.h            176
-rw-r--r--  drivers/mmc/core/debugfs.c         270
-rw-r--r--  drivers/mmc/core/host.c            620
-rw-r--r--  drivers/mmc/core/host.h             81
-rw-r--r--  drivers/mmc/core/mmc.c            2306
-rw-r--r--  drivers/mmc/core/mmc_ops.c        1065
-rw-r--r--  drivers/mmc/core/mmc_ops.h          53
-rw-r--r--  drivers/mmc/core/mmc_test.c       3303
-rw-r--r--  drivers/mmc/core/pwrseq.c          117
-rw-r--r--  drivers/mmc/core/pwrseq.h           58
-rw-r--r--  drivers/mmc/core/pwrseq_emmc.c     120
-rw-r--r--  drivers/mmc/core/pwrseq_sd8787.c   107
-rw-r--r--  drivers/mmc/core/pwrseq_simple.c   164
-rw-r--r--  drivers/mmc/core/queue.c           539
-rw-r--r--  drivers/mmc/core/queue.h           124
-rw-r--r--  drivers/mmc/core/quirks.h          193
-rw-r--r--  drivers/mmc/core/regulator.c       314
-rw-r--r--  drivers/mmc/core/sd.c             1400
-rw-r--r--  drivers/mmc/core/sd.h               20
-rw-r--r--  drivers/mmc/core/sd_ops.c          345
-rw-r--r--  drivers/mmc/core/sd_ops.h           27
-rw-r--r--  drivers/mmc/core/sdio.c           1316
-rw-r--r--  drivers/mmc/core/sdio_bus.c        399
-rw-r--r--  drivers/mmc/core/sdio_bus.h         21
-rw-r--r--  drivers/mmc/core/sdio_cis.c        420
-rw-r--r--  drivers/mmc/core/sdio_cis.h         22
-rw-r--r--  drivers/mmc/core/sdio_io.c         814
-rw-r--r--  drivers/mmc/core/sdio_irq.c        375
-rw-r--r--  drivers/mmc/core/sdio_ops.c        217
-rw-r--r--  drivers/mmc/core/sdio_ops.h         38
-rw-r--r--  drivers/mmc/core/sdio_uart.c      1183
-rw-r--r--  drivers/mmc/core/slot-gpio.c       250
-rw-r--r--  drivers/mmc/core/slot-gpio.h        14
41 files changed, 22810 insertions, 0 deletions
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
new file mode 100644
index 000000000..c12fe13e4
--- /dev/null
+++ b/drivers/mmc/core/Kconfig
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MMC core configuration
+#
+config PWRSEQ_EMMC
+ tristate "HW reset support for eMMC"
+ default y
+ depends on OF
+ help
+ This selects hardware reset support (aka pwrseq-emmc) for eMMC
+ devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_emmc.
+
+config PWRSEQ_SD8787
+ tristate "HW reset support for SD8787 BT + Wifi module"
+ depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO)
+ help
+ This selects hardware reset support for the SD8787 BT + Wifi
+ module. By default this option is set to n.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_sd8787.
+
+config PWRSEQ_SIMPLE
+ tristate "Simple HW reset support for MMC"
+ default y
+ depends on OF
+ help
+ This selects simple hardware reset support (aka pwrseq-simple) for MMC
+ devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_simple.
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on BLOCK
+ default y
+ help
+ Say Y here to enable MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount filesystems. Almost everyone who wants MMC support
+ should say Y or M here.
+
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ depends on MMC_BLOCK
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ The total number of MMC minors available is 256, so the number
+ of supported block devices is limited to 256 divided
+ by this number.
+
+ Default is 8 to be backwards compatible with previous
+ hardwired device numbering.
+
+ If unsure, say 8 here.
+
+config SDIO_UART
+ tristate "SDIO UART/GPS class support"
+ depends on TTY
+ help
+ SDIO function driver for SDIO cards that implement the UART
+ class, as well as the GPS class, which appears as a UART.
+
+config MMC_TEST
+ tristate "MMC host test driver"
+ help
+ Development driver that performs a series of reads and writes
+ to a memory card in order to expose certain well known bugs
+ in host controllers. The tests are executed by writing to the
+ "test" file in debugfs under each card. Note that whatever is
+ on your card will be overwritten by these tests.
+
+ This driver is only of interest to those developing or
+ testing a host driver. Most people should say N here.
+
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
new file mode 100644
index 000000000..95ffe008e
--- /dev/null
+++ b/drivers/mmc/core/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel mmc core.
+#
+
+obj-$(CONFIG_MMC) += mmc_core.o
+mmc_core-y := core.o bus.o host.o \
+ mmc.o mmc_ops.o sd.o sd_ops.o \
+ sdio.o sdio_ops.o sdio_bus.o \
+ sdio_cis.o sdio_io.o sdio_irq.o \
+ slot-gpio.o regulator.o
+mmc_core-$(CONFIG_OF) += pwrseq.o
+obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o
+obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
+mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
new file mode 100644
index 000000000..8d842ff24
--- /dev/null
+++ b/drivers/mmc/core/block.c
@@ -0,0 +1,3106 @@
+/*
+ * Block driver for media (i.e., flash cards)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ * Copyright 2005-2008 Pierre Ossman
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Author: Andrew Christian
+ * 28 May 2002
+ */
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+#include <linux/debugfs.h>
+
+#include <linux/mmc/ioctl.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include <linux/uaccess.h>
+
+#include "queue.h"
+#include "block.h"
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "quirks.h"
+#include "sd_ops.h"
+
+MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+
+/*
+ * Set a 10 second timeout for polling write request busy state. Note, mmc core
+ * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
+ * second software timer to timeout the whole request, so 10 seconds should be
+ * ample.
+ */
+#define MMC_BLK_TIMEOUT_MS (10 * 1000)
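+/*
+ * The two macros below decode fields of a CMD6 (MMC_SWITCH) argument,
+ * which the eMMC spec lays out as: bits [25:24] access mode, [23:16]
+ * EXT_CSD byte index, [15:8] value, [7:0] command set. Illustrative
+ * example: 0x03B30100 writes value 0x01 to EXT_CSD byte 0xB3
+ * (PARTITION_CONFIG).
+ */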
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
+
+#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
+ (rq_data_dir(req) == WRITE))
+static DEFINE_MUTEX(block_mutex);
+
+/*
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
+ */
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
+
+/*
+ * We've only got one major, so number of mmcblk devices is
+ * limited to (1 << 20) / number of minors per device. It is also
+ * limited by the MAX_DEVICES below.
+ */
+static int max_devices;
+
+#define MAX_DEVICES 256
+
+static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_IDA(mmc_rpmb_ida);
+
+/*
+ * There is one mmc_blk_data per slot.
+ */
+struct mmc_blk_data {
+ struct device *parent;
+ struct gendisk *disk;
+ struct mmc_queue queue;
+ struct list_head part;
+ struct list_head rpmbs;
+
+ unsigned int flags;
+#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
+
+ unsigned int usage;
+ unsigned int read_only;
+ unsigned int part_type;
+ unsigned int reset_done;
+#define MMC_BLK_READ BIT(0)
+#define MMC_BLK_WRITE BIT(1)
+#define MMC_BLK_DISCARD BIT(2)
+#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
+
+ /*
+ * Only set in main mmc_blk_data associated
+ * with mmc_card with dev_set_drvdata, and keeps
+ * track of the current selected device partition.
+ */
+ unsigned int part_curr;
+ struct device_attribute force_ro;
+ struct device_attribute power_ro_lock;
+ int area_type;
+
+ /* debugfs files (only in main mmc_blk_data) */
+ struct dentry *status_dentry;
+ struct dentry *ext_csd_dentry;
+};
+
+/* Device type for RPMB character devices */
+static dev_t mmc_rpmb_devt;
+
+/* Bus type for RPMB character devices */
+static struct bus_type mmc_rpmb_bus_type = {
+ .name = "mmc_rpmb",
+};
+
+/**
+ * struct mmc_rpmb_data - special RPMB device type for these areas
+ * @dev: the device for the RPMB area
+ * @chrdev: character device for the RPMB area
+ * @id: unique device ID number
+ * @part_index: partition index (0 on first)
+ * @md: parent MMC block device
+ * @node: list item, so we can put this device on a list
+ */
+struct mmc_rpmb_data {
+ struct device dev;
+ struct cdev chrdev;
+ int id;
+ unsigned int part_index;
+ struct mmc_blk_data *md;
+ struct list_head node;
+};
+
+static DEFINE_MUTEX(open_lock);
+
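+/*
+ * With MODULE_PARAM_PREFIX set to "mmcblk." above, the default below can
+ * be overridden on the kernel command line, e.g. "mmcblk.perdev_minors=16"
+ * (an illustrative value).
+ */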
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ unsigned int part_type);
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int recovery_mode,
+ struct mmc_queue *mq);
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
+
+static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+{
+ struct mmc_blk_data *md;
+
+ mutex_lock(&open_lock);
+ md = disk->private_data;
+ if (md && md->usage == 0)
+ md = NULL;
+ if (md)
+ md->usage++;
+ mutex_unlock(&open_lock);
+
+ return md;
+}
+
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+ int devidx = disk->first_minor / perdev_minors;
+ return devidx;
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+ mutex_lock(&open_lock);
+ md->usage--;
+ if (md->usage == 0) {
+ int devidx = mmc_get_devidx(md->disk);
+ blk_put_queue(md->queue.queue);
+ ida_simple_remove(&mmc_blk_ida, devidx);
+ put_disk(md->disk);
+ kfree(md);
+ }
+ mutex_unlock(&open_lock);
+}
+
+static ssize_t power_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ int locked = 0;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ locked = 2;
+ else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ locked = 1;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+ mmc_blk_put(md);
+
+ return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_queue *mq;
+ struct request *req;
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set))
+ return -EINVAL;
+
+ if (set != 1)
+ return count;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ mq = &md->queue;
+
+ /* Dispatch locking to the block layer */
+ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ count = PTR_ERR(req);
+ goto out_put;
+ }
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ret = req_to_mmc_queue_req(req)->drv_op_result;
+ blk_put_request(req);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition ro until next power on\n",
+ md->disk->disk_name);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+out_put:
+ mmc_blk_put(md);
+ return count;
+}
+
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ char *end;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ unsigned long set = simple_strtoul(buf, &end, 0);
+ if (end == buf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_disk_ro(dev_to_disk(dev), set || md->read_only);
+ ret = count;
+out:
+ mmc_blk_put(md);
+ return ret;
+}
+
+static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ mutex_lock(&block_mutex);
+ if (md) {
+ ret = 0;
+ if ((mode & FMODE_WRITE) && md->read_only) {
+ mmc_blk_put(md);
+ ret = -EROFS;
+ }
+ }
+ mutex_unlock(&block_mutex);
+
+ return ret;
+}
+
+static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct mmc_blk_data *md = disk->private_data;
+
+ mutex_lock(&block_mutex);
+ mmc_blk_put(md);
+ mutex_unlock(&block_mutex);
+}
+
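+/*
+ * Flash cards have no physical CHS geometry; fake one (4 heads, 16
+ * sectors) so that cylinders * heads * sectors multiplies back to the
+ * capacity reported by get_capacity().
+ */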
+static int
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->heads = 4;
+ geo->sectors = 16;
+ return 0;
+}
+
+struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
+ struct mmc_rpmb_data *rpmb;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ struct mmc_ioc_cmd __user *user)
+{
+ struct mmc_blk_ioc_data *idata;
+ int err;
+
+ idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+ err = -EFAULT;
+ goto idata_err;
+ }
+
+ idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+ if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+ err = -EOVERFLOW;
+ goto idata_err;
+ }
+
+ if (!idata->buf_bytes) {
+ idata->buf = NULL;
+ return idata;
+ }
+
+ idata->buf = memdup_user((void __user *)(unsigned long)
+ idata->ic.data_ptr, idata->buf_bytes);
+ if (IS_ERR(idata->buf)) {
+ err = PTR_ERR(idata->buf);
+ goto idata_err;
+ }
+
+ return idata;
+
+idata_err:
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_blk_ioc_data *idata)
+{
+ struct mmc_ioc_cmd *ic = &idata->ic;
+
+ if (copy_to_user(&(ic_ptr->response), ic->response,
+ sizeof(ic->response)))
+ return -EFAULT;
+
+ if (!idata->ic.write_flag) {
+ if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
+ idata->buf, idata->buf_bytes))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ u32 *resp_errs)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ err = __mmc_send_status(card, &status, 5);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "error %d requesting status\n", err);
+ return err;
+ }
+
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (done) {
+ dev_err(mmc_dev(card->host),
+ "Card stuck in wrong state! %s status: %#x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+ }
+ } while (!mmc_ready_for_data(status));
+
+ return err;
+}
+
+static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ struct mmc_blk_ioc_data *idata)
+{
+ struct mmc_command cmd = {}, sbc = {};
+ struct mmc_data data = {};
+ struct mmc_request mrq = {};
+ struct scatterlist sg;
+ int err;
+ unsigned int target_part;
+
+ if (!card || !md || !idata)
+ return -EINVAL;
+
+ /*
+ * RPMB accesses come in from the character device, so we
+ * need to target them explicitly. Otherwise we just target the
+ * partition type of the block device the ioctl() was issued
+ * on.
+ */
+ if (idata->rpmb) {
+ /* Support multiple RPMB partitions */
+ target_part = idata->rpmb->part_index;
+ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
+ } else {
+ target_part = md->part_type;
+ }
+
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ if (idata->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the
+ * host driver to compute timeout. When all host
+ * drivers support cmd.cmd_timeout for R1B, this
+ * can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
+ err = mmc_blk_part_switch(card, target_part);
+ if (err)
+ return err;
+
+ if (idata->ic.is_acmd) {
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ return err;
+ }
+
+ if (idata->rpmb) {
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ /*
+ * We don't do any block count validation because the max size
+ * may be increased by a future standard. We just copy the
+ * 'Reliable Write' bit here.
+ */
+ sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ mrq.sbc = &sbc;
+ }
+
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
+ (cmd.opcode == MMC_SWITCH))
+ return mmc_sanitize(card);
+
+ mmc_wait_for_req(card->host, &mrq);
+ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ return cmd.error;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ return data.error;
+ }
+
+ /*
+ * Make sure the cache of the PARTITION_CONFIG register and
+ * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
+ * changed it successfully.
+ */
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
+ (cmd.opcode == MMC_SWITCH)) {
+ struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
+ u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
+
+ /*
+ * Update cache so the next mmc_blk_part_switch call operates
+ * on up-to-date data.
+ */
+ card->ext_csd.part_config = value;
+ main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
+ }
+
+ /*
+ * Make sure to update CACHE_CTRL in case it was changed. The cache
+ * will get turned back on if the card is re-initialized, e.g.
+ * suspend/resume or hw reset in recovery.
+ */
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
+ (cmd.opcode == MMC_SWITCH)) {
+ u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
+
+ card->ext_csd.cache_ctrl = value;
+ }
+
+ /*
+ * According to the SD specs, some commands require a delay after
+ * issuing the command.
+ */
+ if (idata->ic.postsleep_min_us)
+ usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Ensure RPMB/R1B command has completed by polling CMD13
+ * "Send Status".
+ */
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
+ }
+
+ return err;
+}
+
+static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
+ struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_rpmb_data *rpmb)
+{
+ struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_ioc_data *idatas[1];
+ struct mmc_queue *mq;
+ struct mmc_card *card;
+ int err = 0, ioc_err = 0;
+ struct request *req;
+
+ idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata->rpmb = rpmb;
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ /*
+ * Dispatch the ioctl() into the block request queue.
+ */
+ mq = &md->queue;
+ req = blk_get_request(mq->queue,
+ idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto cmd_done;
+ }
+ idatas[0] = idata;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ req_to_mmc_queue_req(req)->drv_op_data = idatas;
+ req_to_mmc_queue_req(req)->ioc_count = 1;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
+ err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+ blk_put_request(req);
+
+cmd_done:
+ kfree(idata->buf);
+ kfree(idata);
+ return ioc_err ? ioc_err : err;
+}
+
+static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
+ struct mmc_ioc_multi_cmd __user *user,
+ struct mmc_rpmb_data *rpmb)
+{
+ struct mmc_blk_ioc_data **idata = NULL;
+ struct mmc_ioc_cmd __user *cmds = user->cmds;
+ struct mmc_card *card;
+ struct mmc_queue *mq;
+ int i, err = 0, ioc_err = 0;
+ __u64 num_of_cmds;
+ struct request *req;
+
+ if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
+ sizeof(num_of_cmds)))
+ return -EFAULT;
+
+ if (!num_of_cmds)
+ return 0;
+
+ if (num_of_cmds > MMC_IOC_MAX_CMDS)
+ return -EINVAL;
+
+ idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return -ENOMEM;
+
+ for (i = 0; i < num_of_cmds; i++) {
+ idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
+ if (IS_ERR(idata[i])) {
+ err = PTR_ERR(idata[i]);
+ num_of_cmds = i;
+ goto cmd_err;
+ }
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata[i]->rpmb = rpmb;
+ }
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_err;
+ }
+
+ /*
+ * Dispatch the ioctl()s into the block request queue.
+ */
+ mq = &md->queue;
+ req = blk_get_request(mq->queue,
+ idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto cmd_err;
+ }
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ req_to_mmc_queue_req(req)->drv_op_data = idata;
+ req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
+
+ /* copy to user if data and response */
+ for (i = 0; i < num_of_cmds && !err; i++)
+ err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
+
+ blk_put_request(req);
+
+cmd_err:
+ for (i = 0; i < num_of_cmds; i++) {
+ kfree(idata[i]->buf);
+ kfree(idata[i]);
+ }
+ kfree(idata);
+ return ioc_err ? ioc_err : err;
+}
+
+static int mmc_blk_check_blkdev(struct block_device *bdev)
+{
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
+ return -EPERM;
+ return 0;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mmc_blk_data *md;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_check_blkdev(bdev);
+ if (ret)
+ return ret;
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md)
+ return -EINVAL;
+ ret = mmc_blk_ioctl_cmd(md,
+ (struct mmc_ioc_cmd __user *)arg,
+ NULL);
+ mmc_blk_put(md);
+ return ret;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_check_blkdev(bdev);
+ if (ret)
+ return ret;
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md)
+ return -EINVAL;
+ ret = mmc_blk_ioctl_multi_cmd(md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ NULL);
+ mmc_blk_put(md);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
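+
+/*
+ * Userspace reaches the ioctl path above through the whole-disk block
+ * device node. A minimal sketch (illustrative, error handling omitted),
+ * mirroring what tools such as mmc-utils do to read the EXT_CSD:
+ *
+ *	struct mmc_ioc_cmd ic = {};
+ *	__u8 ext_csd[512];
+ *
+ *	ic.opcode = MMC_SEND_EXT_CSD;		// CMD8 for eMMC
+ *	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ *	ic.blksz = 512;
+ *	ic.blocks = 1;
+ *	mmc_ioc_cmd_set_data(ic, ext_csd);
+ *	ioctl(fd, MMC_IOC_CMD, &ic);		// fd: open("/dev/mmcblk0", O_RDWR)
+ */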
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static const struct block_device_operations mmc_bdops = {
+ .open = mmc_blk_open,
+ .release = mmc_blk_release,
+ .getgeo = mmc_blk_getgeo,
+ .owner = THIS_MODULE,
+ .ioctl = mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_blk_compat_ioctl,
+#endif
+};
+
+static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ unsigned int part_type)
+{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ int ret = 0;
+
+ if ((part_type & mask) == mask) {
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+ return ret;
+ }
+ mmc_retune_pause(card->host);
+ }
+
+ return ret;
+}
+
+static int mmc_blk_part_switch_post(struct mmc_card *card,
+ unsigned int part_type)
+{
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ int ret = 0;
+
+ if ((part_type & mask) == mask) {
+ mmc_retune_unpause(card->host);
+ if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ ret = mmc_cmdq_enable(card);
+ }
+
+ return ret;
+}
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ unsigned int part_type)
+{
+ int ret = 0;
+ struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
+
+ if (main_md->part_curr == part_type)
+ return 0;
+
+ if (mmc_card_mmc(card)) {
+ u8 part_config = card->ext_csd.part_config;
+
+ ret = mmc_blk_part_switch_pre(card, part_type);
+ if (ret)
+ return ret;
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= part_type;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG, part_config,
+ card->ext_csd.part_time);
+ if (ret) {
+ mmc_blk_part_switch_post(card, part_type);
+ return ret;
+ }
+
+ card->ext_csd.part_config = part_config;
+
+ ret = mmc_blk_part_switch_post(card, main_md->part_curr);
+ }
+
+ main_md->part_curr = part_type;
+ return ret;
+}
+
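+/*
+ * Ask an SD card how many blocks of the preceding write actually landed:
+ * CMD55 (APP_CMD) followed by ACMD22 (SEND_NUM_WR_BLOCKS), which returns
+ * a single 4-byte big-endian block count (hence the ntohl() below).
+ */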
+static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
+{
+ int err;
+ u32 result;
+ __be32 *blocks;
+
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+
+ struct scatterlist sg;
+
+ cmd.opcode = MMC_APP_CMD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err)
+ return err;
+ if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+ return -EIO;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 4;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ blocks = kmalloc(4, GFP_KERNEL);
+ if (!blocks)
+ return -ENOMEM;
+
+ sg_init_one(&sg, blocks, 4);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ result = ntohl(*blocks);
+ kfree(blocks);
+
+ if (cmd.error || data.error)
+ return -EIO;
+
+ *written_blocks = result;
+
+ return 0;
+}
+
+static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
+{
+ if (host->actual_clock)
+ return host->actual_clock / 1000;
+
+ /* Clock may be subject to a divisor, fudge it by a factor of 2. */
+ if (host->ios.clock)
+ return host->ios.clock / 2000;
+
+ /* How can there be no clock? */
+ WARN_ON_ONCE(1);
+ return 100; /* 100 kHz is minimum possible value */
+}
+
+static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ struct mmc_data *data)
+{
+ unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
+ unsigned int khz;
+
+ if (data->timeout_clks) {
+ khz = mmc_blk_clock_khz(host);
+ ms += DIV_ROUND_UP(data->timeout_clks, khz);
+ }
+
+ return ms;
+}
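+
+/*
+ * Worked example for the conversion above (illustrative numbers): with
+ * data->timeout_ns = 100000000 (100 ms) and data->timeout_clks = 50000 at
+ * a 25000 kHz clock, the result is 100 + DIV_ROUND_UP(50000, 25000) = 102 ms.
+ */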
+
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ int type)
+{
+ int err;
+
+ if (md->reset_done & type)
+ return -EEXIST;
+
+ md->reset_done |= type;
+ err = mmc_hw_reset(host);
+ /* Ensure we switch back to the correct partition */
+ if (err != -EOPNOTSUPP) {
+ struct mmc_blk_data *main_md =
+ dev_get_drvdata(&host->card->dev);
+ int part_err;
+
+ main_md->part_curr = main_md->part_type;
+ part_err = mmc_blk_part_switch(host->card, md->part_type);
+ if (part_err) {
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ }
+ }
+ return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+ md->reset_done &= ~type;
+}
+
+/*
+ * Non-block commands are queued by the block layer, processed
+ * together with all other requests, and then issued in this
+ * function.
+ */
+static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mq_rq;
+ struct mmc_card *card = mq->card;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_blk_ioc_data **idata;
+ bool rpmb_ioctl;
+ u8 **ext_csd;
+ u32 status;
+ int ret;
+ int i;
+
+ mq_rq = req_to_mmc_queue_req(req);
+ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
+
+ switch (mq_rq->drv_op) {
+ case MMC_DRV_OP_IOCTL:
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+ break;
+ }
+ fallthrough;
+ case MMC_DRV_OP_IOCTL_RPMB:
+ idata = mq_rq->drv_op_data;
+ for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+ ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ if (ret)
+ break;
+ }
+ /* Always switch back to main area after RPMB access */
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
+ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ mmc_cmdq_enable(card);
+ break;
+ case MMC_DRV_OP_BOOT_WP:
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+ card->ext_csd.boot_ro_lock |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
+ md->disk->disk_name, ret);
+ else
+ card->ext_csd.boot_ro_lock |=
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+ break;
+ case MMC_DRV_OP_GET_CARD_STATUS:
+ ret = mmc_send_status(card, &status);
+ if (!ret)
+ ret = status;
+ break;
+ case MMC_DRV_OP_GET_EXT_CSD:
+ ext_csd = mq_rq->drv_op_data;
+ ret = mmc_get_ext_csd(card, ext_csd);
+ break;
+ default:
+ pr_err("%s: unknown driver specific operation\n",
+ md->disk->disk_name);
+ ret = -EINVAL;
+ break;
+ }
+ mq_rq->drv_op_result = ret;
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+}
+
+static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr;
+ int err = 0, type = MMC_BLK_DISCARD;
+ blk_status_t status = BLK_STS_OK;
+
+ if (!mmc_can_erase(card)) {
+ status = BLK_STS_NOTSUPP;
+ goto fail;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ do {
+ unsigned int erase_arg = card->erase_arg;
+
+ if (mmc_card_broken_sd_discard(card))
+ erase_arg = SD_ERASE_ARG;
+
+ err = 0;
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ card->erase_arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ card->ext_csd.generic_cmd6_time);
+ }
+ if (!err)
+ err = mmc_erase(card, from, nr, erase_arg);
+ } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
+ if (err)
+ status = BLK_STS_IOERR;
+ else
+ mmc_blk_reset_success(md, type);
+fail:
+ blk_mq_end_request(req, status);
+}
+
+static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr, arg;
+ int err = 0, type = MMC_BLK_SECDISCARD;
+ blk_status_t status = BLK_STS_OK;
+
+ if (!(mmc_can_secure_erase_trim(card))) {
+ status = BLK_STS_NOTSUPP;
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
+
+retry:
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ goto out_retry;
+ }
+
+ err = mmc_erase(card, from, nr, arg);
+ if (err == -EIO)
+ goto out_retry;
+ if (err) {
+ status = BLK_STS_IOERR;
+ goto out;
+ }
+
+ if (arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ goto out_retry;
+ }
+
+ err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+ if (err == -EIO)
+ goto out_retry;
+ if (err) {
+ status = BLK_STS_IOERR;
+ goto out;
+ }
+ }
+
+out_retry:
+ if (err && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
+out:
+ blk_mq_end_request(req, status);
+}
+
+static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ int ret = 0;
+
+ ret = mmc_flush_cache(card);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+ struct mmc_card *card,
+ struct request *req)
+{
+ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+ /* Legacy mode imposes restrictions on transfers. */
+ if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > card->ext_csd.rel_sectors)
+ brq->data.blocks = card->ext_csd.rel_sectors;
+ else if (brq->data.blocks < card->ext_csd.rel_sectors)
+ brq->data.blocks = 1;
+ }
+}
+
+#define CMD_ERRORS_EXCL_OOR \
+ (R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CARD_ECC_FAILED | /* Card ECC failed */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+#define CMD_ERRORS \
+ (CMD_ERRORS_EXCL_OOR | \
+ R1_OUT_OF_RANGE) /* Command argument out of range */
+
+static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
+{
+ u32 val;
+
+ /*
+ * Per the SD specification (physical layer version 4.10)[1],
+ * section 4.3.3, it explicitly states that "When the last
+ * block of user area is read using CMD18, the host should
+ * ignore OUT_OF_RANGE error that may occur even the sequence
+ * is correct". And JESD84-B51 for eMMC also has a similar
+ * statement on section 6.8.3.
+ *
+ * Multiple block read/write could be done by either predefined
+ * method, namely CMD23, or open-ending mode. For open-ending mode,
+ * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
+ *
+ * However the spec[1] doesn't tell us whether we should also
+ * ignore that for the predefined method. But per the spec[1], section
+ * 4.15 Set Block Count Command, it says "If illegal block count
+ * is set, out of range error will be indicated during read/write
+ * operation (For example, data transfer is stopped at user area
+ * boundary)." In other words, we can expect an out-of-range error
+ * in the response to the following CMD18/25. And if the argument of
+ * CMD23 + the argument of CMD18/25 exceeds the max number of blocks,
+ * we can also expect a -ETIMEDOUT or another error code from the
+ * host drivers due to a missing data response (for write) / data (for
+ * read), as the card will stop the data transfer by itself per the
+ * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
+ */
+
+ if (!brq->stop.error) {
+ bool oor_with_open_end;
+ /* If there is no error yet, check R1 response */
+
+ val = brq->stop.resp[0] & CMD_ERRORS;
+ oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
+
+ if (val && !oor_with_open_end)
+ brq->stop.error = -EIO;
+ }
+}
+
+static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
+ int recovery_mode, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ bool do_rel_wr, do_data_tag;
+
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * are supported only on MMCs.
+ */
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+
+ brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+
+ if (rq_data_dir(req) == READ) {
+ brq->data.flags = MMC_DATA_READ;
+ brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ brq->data.flags = MMC_DATA_WRITE;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ brq->data.blksz = 512;
+ brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we always set "simple" priority by not setting
+ * MMC_DATA_PRIO.
+ */
+
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
+
+ if (brq->data.blocks > 1) {
+ /*
+ * Some SD cards in SPI mode return a CRC error or even lock up
+ * completely when trying to read the last block using a
+ * multiblock read command.
+ */
+ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
+ (blk_rq_pos(req) + blk_rq_sectors(req) ==
+ get_capacity(md->disk)))
+ brq->data.blocks--;
+
+ /*
+ * After a read error, we redo the request one (native) sector
+ * at a time in order to accurately determine which
+ * sectors can be read successfully.
+ */
+ if (recovery_mode)
+ brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
+
+ /*
+ * Some controllers have HW issues while operating
+ * in multiple I/O mode
+ */
+ if (card->host->ops->multi_io_quirk)
+ brq->data.blocks = card->host->ops->multi_io_quirk(card,
+ (rq_data_dir(req) == READ) ?
+ MMC_DATA_READ : MMC_DATA_WRITE,
+ brq->data.blocks);
+ }
+
+ if (do_rel_wr) {
+ mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
+
+ /*
+ * Data tag is used only during writing meta data to speed
+ * up write and any subsequent read of this meta data
+ */
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
+ }
+ brq->data.sg_len = i;
+ }
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
+}
+
+#define MMC_CQE_RETRIES 2
+
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_mq_requeue_request(req, true);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (mq->in_recovery) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+
+ spin_lock_irqsave(&mq->lock, flags);
+
+ mq->in_flight[issue_type] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ mmc_cqe_check_busy(mq);
+
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ if (!mq->cqe_busy)
+ blk_mq_run_hw_queues(q, true);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
+
+static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ int err;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+ mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (host->hsq_enabled)
+ return mmc_blk_hsq_issue_rw_rq(mq, req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int recovery_mode,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct mmc_blk_data *md = mq->blkdata;
+ bool do_rel_wr, do_data_tag;
+
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
+
+ brq->mrq.cmd = &brq->cmd;
+
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
+ */
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
+
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open ended-ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B.: Some MMC cards experience performance degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
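+ /*
+ * CMD23 (SET_BLOCK_COUNT) argument layout as filled in below:
+ * bits [15:0] block count, bit 31 reliable write request,
+ * bit 29 data tag request.
+ */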
+ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
+ do_data_tag)) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0) |
+ (do_data_tag ? (1 << 29) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
+}
+
+#define MMC_MAX_RETRIES 5
+#define MMC_DATA_RETRIES 2
+#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
+
+static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
+{
+ struct mmc_command cmd = {
+ .opcode = MMC_STOP_TRANSMISSION,
+ .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
+ /* Some hosts wait for busy anyway, so provide a busy timeout */
+ .busy_timeout = timeout,
+ };
+
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+
+static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
+ int err;
+
+ mmc_retune_hold_now(card->host);
+
+ mmc_blk_send_stop(card, timeout);
+
+ err = card_busy_detect(card, timeout, NULL);
+
+ mmc_retune_release(card->host);
+
+ return err;
+}
+
+#define MMC_READ_SINGLE_RETRIES 2
+
+/* Single (native) sector read during recovery */
+static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+ size_t bytes_per_read = queue_physical_block_size(mq->queue);
+
+ do {
+ u32 status;
+ int err;
+ int retries = 0;
+
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+ mmc_wait_for_req(host, mrq);
+
+ err = mmc_send_status(card, &status);
+ if (err)
+ goto error_exit;
+
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
+
+ if (!mrq->cmd->error)
+ break;
+ }
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+ (!mmc_host_is_spi(host) &&
+ (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
+ error = BLK_STS_IOERR;
+ else
+ error = BLK_STS_OK;
+
+ } while (blk_update_request(req, error, bytes_per_read));
+
+ return;
+
+error_exit:
+ mrq->data->bytes_xfered = 0;
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
+ /* Let it try the remaining request again */
+ if (mqrq->retries > MMC_MAX_RETRIES - 1)
+ mqrq->retries = MMC_MAX_RETRIES - 1;
+}
+
+static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
+{
+ return !!brq->mrq.sbc;
+}
+
+static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
+{
+ return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
+}
+
+/*
+ * Check for errors the host controller driver might not have seen such as
+ * response mode errors or invalid card state.
+ */
+static bool mmc_blk_status_error(struct request *req, u32 status)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_queue *mq = req->q->queuedata;
+ u32 stop_err_bits;
+
+ if (mmc_host_is_spi(mq->card->host))
+ return false;
+
+ stop_err_bits = mmc_blk_stop_err_bits(brq);
+
+ return brq->cmd.resp[0] & CMD_ERRORS ||
+ brq->stop.resp[0] & stop_err_bits ||
+ status & stop_err_bits ||
+ (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
+}
+
+static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
+{
+ return !brq->sbc.error && !brq->cmd.error &&
+ !(brq->cmd.resp[0] & CMD_ERRORS);
+}
+
+/*
+ * Requests are completed by mmc_blk_mq_complete_rq() which sets simple
+ * policy:
+ * 1. A request that has transferred at least some data is considered
+ * successful and will be requeued if there is remaining data to
+ * transfer.
+ * 2. Otherwise the number of retries is incremented and the request
+ * will be requeued if there are remaining retries.
+ * 3. Otherwise the request will be errored out.
+ * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
+ * mqrq->retries. So there are only 4 possible actions here:
+ * 1. do not accept the bytes_xfered value i.e. set it to zero
+ * 2. change mqrq->retries to determine the number of retries
+ * 3. try to reset the card
+ * 4. read one sector at a time
+ */
+static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = mq->card;
+ u32 status;
+ u32 blocks;
+ int err;
+
+ /*
+ * Some errors the host driver might not have seen. Set the number of
+ * bytes transferred to zero in that case.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err || mmc_blk_status_error(req, status))
+ brq->data.bytes_xfered = 0;
+
+ mmc_retune_release(card->host);
+
+ /*
+ * Try again to get the status. This also provides an opportunity for
+ * re-tuning.
+ */
+ if (err)
+ err = __mmc_send_status(card, &status, 0);
+
+ /*
+ * Nothing more to do after the number of bytes transferred has been
+ * updated and there is no card.
+ */
+ if (err && mmc_detect_card_removed(card->host))
+ return;
+
+ /* Try to get back to "tran" state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ (err || !mmc_ready_for_data(status)))
+ err = mmc_blk_fix_state(mq->card, req);
+
+ /*
+ * Special case for SD cards where the card might record the number of
+ * blocks written.
+ */
+ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
+ rq_data_dir(req) == WRITE) {
+ if (mmc_sd_num_wr_blocks(card, &blocks))
+ brq->data.bytes_xfered = 0;
+ else
+ brq->data.bytes_xfered = blocks << 9;
+ }
+
+ /* Reset if the card is in a bad state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ err && mmc_blk_reset(md, card->host, type)) {
+ pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ mqrq->retries = MMC_NO_RETRIES;
+ return;
+ }
+
+ /*
+ * If anything was done, just return and if there is anything remaining
+ * on the request it will get requeued.
+ */
+ if (brq->data.bytes_xfered)
+ return;
+
+ /* Reset before last retry */
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+ mmc_blk_reset(md, card->host, type);
+
+ /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+ if (brq->sbc.error || brq->cmd.error)
+ return;
+
+ /* Reduce the remaining retries for data errors */
+ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
+ mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
+ return;
+ }
+
+ if (rq_data_dir(req) == READ && brq->data.blocks >
+ queue_physical_block_size(mq->queue) >> 9) {
+ /* Read one (native) sector at a time */
+ mmc_blk_read_single(mq, req);
+ return;
+ }
+}
+
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
+{
+ mmc_blk_eval_resp_error(brq);
+
+ return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ u32 status = 0;
+ int err;
+
+ if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ return 0;
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
+
+ /*
+ * Do not assume data transferred correctly if there are any error bits
+ * set.
+ */
+ if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ mqrq->brq.data.bytes_xfered = 0;
+ err = err ? err : -EIO;
+ }
+
+ /* Copy the exception bit so it will be seen later on */
+ if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
+
+ return err;
+}
+
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+ struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+ mmc_blk_reset_success(mq->blkdata, type);
+}
+
+static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+
+ if (nr_bytes) {
+ if (blk_update_request(req, BLK_STS_OK, nr_bytes))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+ req->rq_flags |= RQF_QUIET;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
+ (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
+ mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
+}
+
+static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ if (mmc_blk_urgent_bkops_needed(mq, mqrq))
+ mmc_run_bkops(mq->card);
+}
+
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq =
+ container_of(mrq, struct mmc_queue_req, brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
+}
+
+void mmc_blk_mq_complete(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else if (likely(!blk_should_fake_timeout(req->q)))
+ mmc_blk_mq_complete_rq(mq, req);
+}
+
+static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_card_busy(mq->card, req)) {
+ mmc_blk_mq_rw_recovery(mq, req);
+ } else {
+ mmc_blk_rw_reset_success(mq, req);
+ mmc_retune_release(host);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+}
+
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
+{
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(&mq->lock, flags);
+
+ mq->in_flight[issue_type] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+{
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_post_req(host, mrq, 0);
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_mq_complete_rq(mq, req);
+ else if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
+
+ mmc_blk_mq_dec_in_flight(mq, issue_type);
+}
+
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+ struct request *req = mq->recovery_req;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mq->recovery_req = NULL;
+ mq->rw_wait = false;
+
+ if (mmc_blk_rq_error(&mqrq->brq)) {
+ mmc_retune_hold_now(host);
+ mmc_blk_mq_rw_recovery(mq, req);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
+ struct request **prev_req)
+{
+ if (mmc_host_done_complete(mq->card->host))
+ return;
+
+ mutex_lock(&mq->complete_lock);
+
+ if (!mq->complete_req)
+ goto out_unlock;
+
+ mmc_blk_mq_poll_completion(mq, mq->complete_req);
+
+ if (prev_req)
+ *prev_req = mq->complete_req;
+ else
+ mmc_blk_mq_post_req(mq, mq->complete_req);
+
+ mq->complete_req = NULL;
+
+out_unlock:
+ mutex_unlock(&mq->complete_lock);
+}
+
+void mmc_blk_mq_complete_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ complete_work);
+
+ mmc_blk_mq_complete_prev_req(mq, NULL);
+}
+
+static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (!mmc_host_done_complete(host)) {
+ bool waiting;
+
+ /*
+ * We cannot complete the request in this context, so record
+ * that there is a request to complete, and that a following
+ * request does not need to wait (although it does need to
+ * complete complete_req first).
+ */
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->complete_req = req;
+ mq->rw_wait = false;
+ waiting = mq->waiting;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ /*
+ * If 'waiting' then the waiting task will complete this
+ * request, otherwise queue work to do it. Note that
+ * complete_work may still race with the dispatch of a following
+ * request.
+ */
+ if (waiting)
+ wake_up(&mq->wait);
+ else
+ queue_work(mq->card->complete_wq, &mq->complete_work);
+
+ return;
+ }
+
+ /* Take the recovery path for errors or urgent background operations */
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(&mq->lock, flags);
+ wake_up(&mq->wait);
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
+
+ mq->rw_wait = false;
+ wake_up(&mq->wait);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
+{
+ unsigned long flags;
+ bool done;
+
+ /*
+ * Wait while there is another request in progress, but not if recovery
+ * is needed. Also indicate whether there is a request waiting to start.
+ */
+ spin_lock_irqsave(&mq->lock, flags);
+ if (mq->recovery_needed) {
+ *err = -EBUSY;
+ done = true;
+ } else {
+ done = !mq->rw_wait;
+ }
+ mq->waiting = !done;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return done;
+}
+
+static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
+{
+ int err = 0;
+
+ wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
+
+ /* Always complete the previous request if there is one */
+ mmc_blk_mq_complete_prev_req(mq, prev_req);
+
+ return err;
+}
+
+static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ struct request *prev_req = NULL;
+ int err = 0;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+
+ mqrq->brq.mrq.done = mmc_blk_mq_req_done;
+
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_blk_rw_wait(mq, &prev_req);
+ if (err)
+ goto out_post_req;
+
+ mq->rw_wait = true;
+
+ err = mmc_start_request(host, &mqrq->brq.mrq);
+
+ if (prev_req)
+ mmc_blk_mq_post_req(mq, prev_req);
+
+ if (err)
+ mq->rw_wait = false;
+
+ /* Release re-tuning here where there is no synchronization required */
+ if (err || mmc_host_done_complete(host))
+ mmc_retune_release(host);
+
+out_post_req:
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
+
+static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
+{
+ if (mq->use_cqe)
+ return host->cqe_ops->cqe_wait_for_idle(host);
+
+ return mmc_blk_rw_wait(mq, NULL);
+}
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = mmc_blk_part_switch(card, md->part_type);
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
+
+ switch (mmc_issue_type(mq, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = mmc_blk_wait_for_idle(mq, host);
+ if (ret)
+ return MMC_REQ_BUSY;
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ mmc_blk_issue_drv_op(mq, req);
+ break;
+ case REQ_OP_DISCARD:
+ mmc_blk_issue_discard_rq(mq, req);
+ break;
+ case REQ_OP_SECURE_ERASE:
+ mmc_blk_issue_secdiscard_rq(mq, req);
+ break;
+ case REQ_OP_FLUSH:
+ mmc_blk_issue_flush(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ if (!mmc_cache_enabled(host)) {
+ blk_mq_end_request(req, BLK_STS_OK);
+ return MMC_REQ_FINISHED;
+ }
+ ret = mmc_blk_cqe_issue_flush(mq, req);
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (mq->use_cqe)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+ ret = mmc_blk_mq_issue_rw_rq(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+}
+
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+ return mmc_card_readonly(card) ||
+ !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ struct device *parent,
+ sector_t size,
+ bool default_ro,
+ const char *subname,
+ int area_type)
+{
+ struct mmc_blk_data *md;
+ int devidx, ret;
+
+ devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0) {
+ /*
+ * We get -ENOSPC because there are no more available devidx
+ * values. The reason may be that either userspace has not yet
+ * unmounted the partitions, which postpones mmc_blk_release()
+ * from being called, or the device has more partitions than
+ * we support.
+ */
+ if (devidx == -ENOSPC)
+ dev_err(mmc_dev(card->host),
+ "no more device IDs available\n");
+
+ return ERR_PTR(devidx);
+ }
+
+ md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ if (!md) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ md->area_type = area_type;
+
+ /*
+ * Set the read-only status based on the supported commands
+ * and the write protect switch.
+ */
+ md->read_only = mmc_blk_readonly(card);
+
+ md->disk = alloc_disk(perdev_minors);
+ if (md->disk == NULL) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ INIT_LIST_HEAD(&md->part);
+ INIT_LIST_HEAD(&md->rpmbs);
+ md->usage = 1;
+
+ ret = mmc_init_queue(&md->queue, card);
+ if (ret)
+ goto err_putdisk;
+
+ md->queue.blkdata = md;
+
+ /*
+ * Keep an extra reference to the queue so that we can shut down the
+ * queue (i.e. call blk_cleanup_queue()) while there are still
+ * references to the 'md'. The corresponding blk_put_queue() is in
+ * mmc_blk_put().
+ */
+ if (!blk_get_queue(md->queue.queue)) {
+ mmc_cleanup_queue(&md->queue);
+ ret = -ENODEV;
+ goto err_putdisk;
+ }
+
+ md->disk->major = MMC_BLOCK_MAJOR;
+ md->disk->first_minor = devidx * perdev_minors;
+ md->disk->fops = &mmc_bdops;
+ md->disk->private_data = md;
+ md->disk->queue = md->queue.queue;
+ md->parent = parent;
+ set_disk_ro(md->disk, md->read_only || default_ro);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
+ if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
+ md->disk->flags |= GENHD_FL_NO_PART_SCAN
+ | GENHD_FL_SUPPRESS_PARTITION_INFO;
+
+ /*
+ * As discussed on lkml, GENHD_FL_REMOVABLE should:
+ *
+ * - be set for removable media with permanent block devices
+ * - be unset for removable block devices with permanent media
+ *
+ * Since MMC block devices clearly fall under the second
+ * case, we do not set GENHD_FL_REMOVABLE. Userspace
+ * should use the block device creation/destruction hotplug
+ * messages to tell when the card is present.
+ */
+
+ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ set_capacity(md->disk, size);
+
+ if (mmc_host_cmd23(card->host)) {
+ if ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+ }
+
+ if (mmc_card_mmc(card) &&
+ md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
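+ /*
+ * Reliable writes are used to implement Forced Unit Access,
+ * so advertise a write cache with FUA support and let the
+ * block layer send REQ_PREFLUSH/REQ_FUA.
+ */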
+ blk_queue_write_cache(md->queue.queue, true, true);
+ }
+
+ return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ ida_simple_remove(&mmc_blk_ida, devidx);
+ return ERR_PTR(ret);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+ sector_t size;
+
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
+ /*
+ * The EXT_CSD sector count is in units of 512 byte
+ * sectors.
+ */
+ size = card->ext_csd.sectors;
+ } else {
+ /*
+ * The CSD capacity field is in units of read_blkbits.
+ * set_capacity takes units of 512 bytes.
+ */
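+ /*
+ * For example, read_blkbits of 10 (1024-byte blocks) makes
+ * this a left shift by one: two 512-byte sectors per CSD
+ * capacity unit.
+ */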
+ size = (typeof(sector_t))card->csd.capacity
+ << (card->csd.read_blkbits - 9);
+ }
+
+ return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+ MMC_BLK_DATA_AREA_MAIN);
+}
+
+static int mmc_blk_alloc_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_type,
+ sector_t size,
+ bool default_ro,
+ const char *subname,
+ int area_type)
+{
+ char cap_str[10];
+ struct mmc_blk_data *part_md;
+
+ part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+ subname, area_type);
+ if (IS_ERR(part_md))
+ return PTR_ERR(part_md);
+ part_md->part_type = part_type;
+ list_add(&part_md->part, &md->part);
+
+ string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ pr_info("%s: %s %s partition %u %s\n",
+ part_md->disk->disk_name, mmc_card_id(card),
+ mmc_card_name(card), part_md->part_type, cap_str);
+ return 0;
+}
+
+/**
+ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
+ * @filp: the character device file
+ * @cmd: the ioctl() command
+ * @arg: the argument from userspace
+ *
+ * This essentially just redirects the incoming ioctl()s to the main
+ * block device that spawned the RPMB character device.
+ */
+static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mmc_rpmb_data *rpmb = filp->private_data;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_ioctl_cmd(rpmb->md,
+ (struct mmc_ioc_cmd __user *)arg,
+ rpmb);
+ break;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ rpmb);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ get_device(&rpmb->dev);
+ filp->private_data = rpmb;
+ mmc_blk_get(rpmb->md->disk);
+
+ return nonseekable_open(inode, filp);
+}
+
+static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ mmc_blk_put(rpmb->md);
+ put_device(&rpmb->dev);
+
+ return 0;
+}
+
+static const struct file_operations mmc_rpmb_fileops = {
+ .release = mmc_rpmb_chrdev_release,
+ .open = mmc_rpmb_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = mmc_rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_rpmb_ioctl_compat,
+#endif
+};
+
+static void mmc_blk_rpmb_device_release(struct device *dev)
+{
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+
+ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ kfree(rpmb);
+}
+
+static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_index,
+ sector_t size,
+ const char *subname)
+{
+ int devidx, ret;
+ char rpmb_name[DISK_NAME_LEN];
+ char cap_str[10];
+ struct mmc_rpmb_data *rpmb;
+
+ /* This creates the minor number for the RPMB char device */
+ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return devidx;
+
+ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
+ if (!rpmb) {
+ ida_simple_remove(&mmc_rpmb_ida, devidx);
+ return -ENOMEM;
+ }
+
+ snprintf(rpmb_name, sizeof(rpmb_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ rpmb->id = devidx;
+ rpmb->part_index = part_index;
+ rpmb->dev.init_name = rpmb_name;
+ rpmb->dev.bus = &mmc_rpmb_bus_type;
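+ /* The devt minor comes from the rpmb chrdev region set up in mmc_blk_init() */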
+ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
+ rpmb->dev.parent = &card->dev;
+ rpmb->dev.release = mmc_blk_rpmb_device_release;
+ device_initialize(&rpmb->dev);
+ dev_set_drvdata(&rpmb->dev, rpmb);
+ rpmb->md = md;
+
+ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
+ rpmb->chrdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
+ if (ret) {
+ pr_err("%s: could not add character device\n", rpmb_name);
+ goto out_put_device;
+ }
+
+ list_add(&rpmb->node, &md->rpmbs);
+
+ string_get_size((u64)size, 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+
+ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
+ rpmb_name, mmc_card_id(card),
+ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
+ MAJOR(mmc_rpmb_devt), rpmb->id);
+
+ return 0;
+
+out_put_device:
+ put_device(&rpmb->dev);
+ return ret;
+}
+
+static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
+{
+ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
+ put_device(&rpmb->dev);
+}
+
+/*
+ * MMC physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocated
+ * to provide access to the partition.
+ */
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+ int idx, ret;
+
+ if (!mmc_card_mmc(card))
+ return 0;
+
+ for (idx = 0; idx < card->nr_parts; idx++) {
+ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
+ /*
+ * RPMB partitions do not provide block access; they
+ * are only accessed using ioctl()s. Thus create
+ * special RPMB block devices that do not have a
+ * backing block queue for these.
+ */
+ ret = mmc_blk_alloc_rpmb_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ } else if (card->part[idx].size) {
+ ret = mmc_blk_alloc_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].force_ro,
+ card->part[idx].name,
+ card->part[idx].area_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+ struct mmc_card *card;
+
+ if (md) {
+ /*
+ * Flush remaining requests and free queues. It
+ * is freeing the queue that stops new requests
+ * from being accepted.
+ */
+ card = md->queue.card;
+ if (md->disk->flags & GENHD_FL_UP) {
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable)
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+
+ del_gendisk(md->disk);
+ }
+ mmc_cleanup_queue(&md->queue);
+ mmc_blk_put(md);
+ }
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ struct list_head *pos, *q;
+ struct mmc_blk_data *part_md;
+ struct mmc_rpmb_data *rpmb;
+
+ /* Remove RPMB partitions */
+ list_for_each_safe(pos, q, &md->rpmbs) {
+ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
+ list_del(pos);
+ mmc_blk_remove_rpmb_part(rpmb);
+ }
+ /* Remove block partitions */
+ list_for_each_safe(pos, q, &md->part) {
+ part_md = list_entry(pos, struct mmc_blk_data, part);
+ list_del(pos);
+ mmc_blk_remove_req(part_md);
+ }
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+ int ret;
+ struct mmc_card *card = md->queue.card;
+
+ device_add_disk(md->parent, md->disk, NULL);
+ md->force_ro.show = force_ro_show;
+ md->force_ro.store = force_ro_store;
+ sysfs_attr_init(&md->force_ro.attr);
+ md->force_ro.attr.name = "force_ro";
+ md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+ if (ret)
+ goto force_ro_fail;
+
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable) {
+ umode_t mode;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+ mode = S_IRUGO;
+ else
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock.show = power_ro_lock_show;
+ md->power_ro_lock.store = power_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock.attr);
+ md->power_ro_lock.attr.mode = mode;
+ md->power_ro_lock.attr.name =
+ "ro_lock_until_next_power_on";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+ if (ret)
+ goto power_ro_lock_fail;
+ }
+ return ret;
+
+power_ro_lock_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+ del_gendisk(md->disk);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mmc_dbg_card_status_get(void *data, u64 *val)
+{
+ struct mmc_card *card = data;
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ struct mmc_queue *mq = &md->queue;
+ struct request *req;
+ int ret;
+
+ /* Ask the block layer about the card status */
+ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
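+ /* mmc_blk_issue_drv_op() fills in drv_op_result when the request runs */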
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ret = req_to_mmc_queue_req(req)->drv_op_result;
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+ blk_put_request(req);
+
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+ NULL, "%08llx\n");
+
+/* Two hex digits per EXT_CSD byte (512 of them) plus one for the newline */
+#define EXT_CSD_STR_LEN 1025
+
+static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_card *card = inode->i_private;
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ struct mmc_queue *mq = &md->queue;
+ struct request *req;
+ char *buf;
+ ssize_t n = 0;
+ u8 *ext_csd;
+ int err, i;
+
+ buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Ask the block layer for the EXT CSD */
+ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_free;
+ }
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ err = req_to_mmc_queue_req(req)->drv_op_result;
+ blk_put_request(req);
+ if (err) {
+ pr_err("FAILED %d\n", err);
+ goto out_free;
+ }
+
+ for (i = 0; i < 512; i++)
+ n += sprintf(buf + n, "%02x", ext_csd[i]);
+ n += sprintf(buf + n, "\n");
+
+ if (n != EXT_CSD_STR_LEN) {
+ err = -EINVAL;
+ kfree(ext_csd);
+ goto out_free;
+ }
+
+ filp->private_data = buf;
+ kfree(ext_csd);
+ return 0;
+
+out_free:
+ kfree(buf);
+ return err;
+}
+
+static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char *buf = filp->private_data;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos,
+ buf, EXT_CSD_STR_LEN);
+}
+
+static int mmc_ext_csd_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations mmc_dbg_ext_csd_fops = {
+ .open = mmc_ext_csd_open,
+ .read = mmc_ext_csd_read,
+ .release = mmc_ext_csd_release,
+ .llseek = default_llseek,
+};
+
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+{
+ struct dentry *root;
+
+ if (!card->debugfs_root)
+ return 0;
+
+ root = card->debugfs_root;
+
+ if (mmc_card_mmc(card) || mmc_card_sd(card)) {
+ md->status_dentry =
+ debugfs_create_file_unsafe("status", 0400, root,
+ card,
+ &mmc_dbg_card_status_fops);
+ if (!md->status_dentry)
+ return -EIO;
+ }
+
+ if (mmc_card_mmc(card)) {
+ md->ext_csd_dentry =
+ debugfs_create_file("ext_csd", S_IRUSR, root, card,
+ &mmc_dbg_ext_csd_fops);
+ if (!md->ext_csd_dentry)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ if (!card->debugfs_root)
+ return;
+
+ if (!IS_ERR_OR_NULL(md->status_dentry)) {
+ debugfs_remove(md->status_dentry);
+ md->status_dentry = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+ debugfs_remove(md->ext_csd_dentry);
+ md->ext_csd_dentry = NULL;
+ }
+}
+
+#else
+
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
+{
+ return 0;
+}
+
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int mmc_blk_probe(struct mmc_card *card)
+{
+ struct mmc_blk_data *md, *part_md;
+ char cap_str[10];
+
+ /*
+ * Check that the card supports the command class(es) we need.
+ */
+ if (!(card->csd.cmdclass & CCC_BLOCK_READ))
+ return -ENODEV;
+
+ mmc_fixup_device(card, mmc_blk_fixups);
+
+ card->complete_wq = alloc_workqueue("mmc_complete",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (unlikely(!card->complete_wq)) {
+ pr_err("Failed to create mmc completion workqueue");
+ return -ENOMEM;
+ }
+
+ md = mmc_blk_alloc(card);
+ if (IS_ERR(md))
+ return PTR_ERR(md);
+
+ string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ pr_info("%s: %s %s %s %s\n",
+ md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
+ cap_str, md->read_only ? "(ro)" : "");
+
+ if (mmc_blk_alloc_parts(card, md))
+ goto out;
+
+ dev_set_drvdata(&card->dev, md);
+
+ if (mmc_add_disk(md))
+ goto out;
+
+ list_for_each_entry(part_md, &md->part, part) {
+ if (mmc_add_disk(part_md))
+ goto out;
+ }
+
+ /* Add two debugfs entries */
+ mmc_blk_add_debugfs(card, md);
+
+ pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+ pm_runtime_use_autosuspend(&card->dev);
+
+ /*
+ * Don't enable runtime PM for SD-combo cards here. Leave that
+ * decision to be taken during the SDIO init sequence instead.
+ */
+ if (card->type != MMC_TYPE_SD_COMBO) {
+ pm_runtime_set_active(&card->dev);
+ pm_runtime_enable(&card->dev);
+ }
+
+ return 0;
+
+ out:
+ mmc_blk_remove_parts(card, md);
+ mmc_blk_remove_req(md);
+ return 0;
+}
+
+static void mmc_blk_remove(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+
+ mmc_blk_remove_debugfs(card, md);
+ mmc_blk_remove_parts(card, md);
+ pm_runtime_get_sync(&card->dev);
+ if (md->part_curr != md->part_type) {
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md->part_type);
+ mmc_release_host(card->host);
+ }
+ if (card->type != MMC_TYPE_SD_COMBO)
+ pm_runtime_disable(&card->dev);
+ pm_runtime_put_noidle(&card->dev);
+ mmc_blk_remove_req(md);
+ dev_set_drvdata(&card->dev, NULL);
+ destroy_workqueue(card->complete_wq);
+}
+
+static int _mmc_blk_suspend(struct mmc_card *card)
+{
+ struct mmc_blk_data *part_md;
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+
+ if (md) {
+ mmc_queue_suspend(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_suspend(&part_md->queue);
+ }
+ }
+ return 0;
+}
+
+static void mmc_blk_shutdown(struct mmc_card *card)
+{
+ _mmc_blk_suspend(card);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mmc_blk_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return _mmc_blk_suspend(card);
+}
+
+static int mmc_blk_resume(struct device *dev)
+{
+ struct mmc_blk_data *part_md;
+ struct mmc_blk_data *md = dev_get_drvdata(dev);
+
+ if (md) {
+ /*
+ * Resume involves the card going into idle state,
+ * so current partition is always the main one.
+ */
+ md->part_curr = md->part_type;
+ mmc_queue_resume(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_resume(&part_md->queue);
+ }
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
+
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmcblk",
+ .pm = &mmc_blk_pm_ops,
+ },
+ .probe = mmc_blk_probe,
+ .remove = mmc_blk_remove,
+ .shutdown = mmc_blk_shutdown,
+};
+
+static int __init mmc_blk_init(void)
+{
+ int res;
+
+ res = bus_register(&mmc_rpmb_bus_type);
+ if (res < 0) {
+ pr_err("mmcblk: could not register RPMB bus type\n");
+ return res;
+ }
+ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
+ if (res < 0) {
+ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
+ goto out_bus_unreg;
+ }
+
+ if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+ pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+ max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
+
+ res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ if (res)
+ goto out_chrdev_unreg;
+
+ res = mmc_register_driver(&mmc_driver);
+ if (res)
+ goto out_blkdev_unreg;
+
+ return 0;
+
+out_blkdev_unreg:
+ unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+out_chrdev_unreg:
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+out_bus_unreg:
+ bus_unregister(&mmc_rpmb_bus_type);
+ return res;
+}
+
+static void __exit mmc_blk_exit(void)
+{
+ mmc_unregister_driver(&mmc_driver);
+ unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+ bus_unregister(&mmc_rpmb_bus_type);
+}
+
+module_init(mmc_blk_init);
+module_exit(mmc_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
new file mode 100644
index 000000000..31153f656
--- /dev/null
+++ b/drivers/mmc/core/block.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MMC_CORE_BLOCK_H
+#define _MMC_CORE_BLOCK_H
+
+struct mmc_queue;
+struct request;
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
new file mode 100644
index 000000000..4383c262b
--- /dev/null
+++ b/drivers/mmc/core/bus.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mmc/core/bus.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2007 Pierre Ossman
+ *
+ * MMC card bus driver model
+ */
+
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "sdio_cis.h"
+#include "bus.h"
+
+#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ return sprintf(buf, "MMC\n");
+ case MMC_TYPE_SD:
+ return sprintf(buf, "SD\n");
+ case MMC_TYPE_SDIO:
+ return sprintf(buf, "SDIO\n");
+ case MMC_TYPE_SD_COMBO:
+ return sprintf(buf, "SDcombo\n");
+ default:
+ return -EFAULT;
+ }
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *mmc_dev_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mmc_dev);
+
+/*
+ * This currently matches any MMC driver to any MMC card - drivers
+ * themselves make the decision whether to drive this card in their
+ * probe method.
+ */
+static int mmc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static int
+mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ const char *type;
+ unsigned int i;
+ int retval = 0;
+
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ type = "MMC";
+ break;
+ case MMC_TYPE_SD:
+ type = "SD";
+ break;
+ case MMC_TYPE_SDIO:
+ type = "SDIO";
+ break;
+ case MMC_TYPE_SD_COMBO:
+ type = "SDcombo";
+ break;
+ default:
+ type = NULL;
+ }
+
+ if (type) {
+ retval = add_uevent_var(env, "MMC_TYPE=%s", type);
+ if (retval)
+ return retval;
+ }
+
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
+ card->cis.vendor, card->cis.device);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "SDIO_REVISION=%u.%u",
+ card->major_rev, card->minor_rev);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < card->num_info; i++) {
+ retval = add_uevent_var(env, "SDIO_INFO%u=%s", i+1, card->info[i]);
+ if (retval)
+ return retval;
+ }
+ }
+
+ /*
+ * SDIO (non-combo) cards are not handled by the mmc_block driver and do
+ * not have an accessible CID register, which is used by mmc_card_name().
+ */
+ if (card->type == MMC_TYPE_SDIO)
+ return 0;
+
+ retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
+ if (retval)
+ return retval;
+
+ /*
+ * Request the mmc_block module. Note that this is a direct request
+ * for the module; it carries no information as to what is inserted.
+ */
+ retval = add_uevent_var(env, "MODALIAS=mmc:block");
+
+ return retval;
+}
+
+static int mmc_bus_probe(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return drv->probe(card);
+}
+
+static int mmc_bus_remove(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ drv->remove(card);
+
+ return 0;
+}
+
+static void mmc_bus_shutdown(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+ int ret;
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(card);
+
+ if (host->bus_ops->shutdown) {
+ ret = host->bus_ops->shutdown(host);
+ if (ret)
+ pr_warn("%s: error %d during shutdown\n",
+ mmc_hostname(host), ret);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mmc_bus_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = pm_generic_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = host->bus_ops->suspend(host);
+ if (ret)
+ pm_generic_resume(dev);
+
+ return ret;
+}
+
+static int mmc_bus_resume(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = host->bus_ops->resume(host);
+ if (ret)
+ pr_warn("%s: error %d during resume (card was removed?)\n",
+ mmc_hostname(host), ret);
+
+ ret = pm_generic_resume(dev);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int mmc_runtime_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ return host->bus_ops->runtime_suspend(host);
+}
+
+static int mmc_runtime_resume(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ return host->bus_ops->runtime_resume(host);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mmc_bus_pm_ops = {
+ SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
+};
+
+static struct bus_type mmc_bus_type = {
+ .name = "mmc",
+ .dev_groups = mmc_dev_groups,
+ .match = mmc_bus_match,
+ .uevent = mmc_bus_uevent,
+ .probe = mmc_bus_probe,
+ .remove = mmc_bus_remove,
+ .shutdown = mmc_bus_shutdown,
+ .pm = &mmc_bus_pm_ops,
+};
+
+int mmc_register_bus(void)
+{
+ return bus_register(&mmc_bus_type);
+}
+
+void mmc_unregister_bus(void)
+{
+ bus_unregister(&mmc_bus_type);
+}
+
+/**
+ * mmc_register_driver - register a media driver
+ * @drv: MMC media driver
+ */
+int mmc_register_driver(struct mmc_driver *drv)
+{
+ drv->drv.bus = &mmc_bus_type;
+ return driver_register(&drv->drv);
+}
+
+EXPORT_SYMBOL(mmc_register_driver);
+
+/**
+ * mmc_unregister_driver - unregister a media driver
+ * @drv: MMC media driver
+ */
+void mmc_unregister_driver(struct mmc_driver *drv)
+{
+ drv->drv.bus = &mmc_bus_type;
+ driver_unregister(&drv->drv);
+}
+
+EXPORT_SYMBOL(mmc_unregister_driver);
+
+static void mmc_release_card(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ sdio_free_common_cis(card);
+
+ kfree(card->info);
+
+ kfree(card);
+}
+
+/*
+ * Allocate and initialise a new MMC card structure.
+ */
+struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
+{
+ struct mmc_card *card;
+
+ card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ card->host = host;
+
+ device_initialize(&card->dev);
+
+ card->dev.parent = mmc_classdev(host);
+ card->dev.bus = &mmc_bus_type;
+ card->dev.release = mmc_release_card;
+ card->dev.type = type;
+
+ return card;
+}
+
+/*
+ * Register a new MMC card with the driver model.
+ */
+int mmc_add_card(struct mmc_card *card)
+{
+ int ret;
+ const char *type;
+ const char *uhs_bus_speed_mode = "";
+ static const char *const uhs_speeds[] = {
+ [UHS_SDR12_BUS_SPEED] = "SDR12 ",
+ [UHS_SDR25_BUS_SPEED] = "SDR25 ",
+ [UHS_SDR50_BUS_SPEED] = "SDR50 ",
+ [UHS_SDR104_BUS_SPEED] = "SDR104 ",
+ [UHS_DDR50_BUS_SPEED] = "DDR50 ",
+ };
+
+
+ dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
+
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ type = "MMC";
+ break;
+ case MMC_TYPE_SD:
+ type = "SD";
+ if (mmc_card_blockaddr(card)) {
+ if (mmc_card_ext_capacity(card))
+ type = "SDXC";
+ else
+ type = "SDHC";
+ }
+ break;
+ case MMC_TYPE_SDIO:
+ type = "SDIO";
+ break;
+ case MMC_TYPE_SD_COMBO:
+ type = "SD-combo";
+ if (mmc_card_blockaddr(card))
+ type = "SDHC-combo";
+ break;
+ default:
+ type = "?";
+ break;
+ }
+
+ if (mmc_card_uhs(card) &&
+ (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
+ uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
+
+ if (mmc_host_is_spi(card->host)) {
+ pr_info("%s: new %s%s%s card on SPI\n",
+ mmc_hostname(card->host),
+ mmc_card_hs(card) ? "high speed " : "",
+ mmc_card_ddr52(card) ? "DDR " : "",
+ type);
+ } else {
+ pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
+ mmc_hostname(card->host),
+ mmc_card_uhs(card) ? "ultra high speed " :
+ (mmc_card_hs(card) ? "high speed " : ""),
+ mmc_card_hs400(card) ? "HS400 " :
+ (mmc_card_hs200(card) ? "HS200 " : ""),
+ mmc_card_hs400es(card) ? "Enhanced strobe " : "",
+ mmc_card_ddr52(card) ? "DDR " : "",
+ uhs_bus_speed_mode, type, card->rca);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_add_card_debugfs(card);
+#endif
+ card->dev.of_node = mmc_of_find_child_device(card->host, 0);
+
+ device_enable_async_suspend(&card->dev);
+
+ ret = device_add(&card->dev);
+ if (ret)
+ return ret;
+
+ mmc_card_set_present(card);
+
+ return 0;
+}
+
+/*
+ * Unregister an MMC card from the driver model, and
+ * (eventually) free it.
+ */
+void mmc_remove_card(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_remove_card_debugfs(card);
+#endif
+
+ if (mmc_card_present(card)) {
+ if (mmc_host_is_spi(card->host)) {
+ pr_info("%s: SPI card removed\n",
+ mmc_hostname(card->host));
+ } else {
+ pr_info("%s: card %04x removed\n",
+ mmc_hostname(card->host), card->rca);
+ }
+ device_del(&card->dev);
+ of_node_put(card->dev.of_node);
+ }
+
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
+ put_device(&card->dev);
+}
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
new file mode 100644
index 000000000..8105852c4
--- /dev/null
+++ b/drivers/mmc/core/bus.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/mmc/core/bus.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2007 Pierre Ossman
+ */
+#ifndef _MMC_CORE_BUS_H
+#define _MMC_CORE_BUS_H
+
+#include <linux/device.h>
+
+struct mmc_host;
+struct mmc_card;
+
+#define MMC_DEV_ATTR(name, fmt, args...) \
+static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
+ return sprintf(buf, fmt, args); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
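+
+/*
+ * For example, MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev) would expand
+ * to mmc_fwrev_show() and a read-only dev_attr_fwrev sysfs attribute; the
+ * card-type files such as mmc.c and sd.c instantiate this macro for their
+ * attributes.
+ */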
+
+struct mmc_card *mmc_alloc_card(struct mmc_host *host,
+ struct device_type *type);
+int mmc_add_card(struct mmc_card *card);
+void mmc_remove_card(struct mmc_card *card);
+
+int mmc_register_bus(void);
+void mmc_unregister_bus(void);
+
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *card);
+ void (*remove)(struct mmc_card *card);
+ void (*shutdown)(struct mmc_card *card);
+};
+
+int mmc_register_driver(struct mmc_driver *drv);
+void mmc_unregister_driver(struct mmc_driver *drv);
+
+#endif
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
new file mode 100644
index 000000000..5c6986131
--- /dev/null
+++ b/drivers/mmc/core/card.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Private header for the mmc subsystem
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ */
+
+#ifndef _MMC_CORE_CARD_H
+#define _MMC_CORE_CARD_H
+
+#include <linux/mmc/card.h>
+
+#define mmc_card_name(c) ((c)->cid.prod_name)
+#define mmc_card_id(c) (dev_name(&(c)->dev))
+#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
+
+/* Card states */
+#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
+#define MMC_STATE_READONLY (1<<1) /* card is read-only */
+#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
+#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
+#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
+#define MMC_STATE_SUSPENDED (1<<5) /* card is suspended */
+
+#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
+#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
+#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
+#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
+#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
+#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+
+#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
+#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+struct mmc_fixup {
+ /* CID-specific fields. */
+ const char *name;
+
+ /* Valid revision range */
+ u64 rev_start, rev_end;
+
+ unsigned int manfid;
+ unsigned short oemid;
+
+ /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+ u16 cis_vendor, cis_device;
+
+ /* for MMC cards */
+ unsigned int ext_csd_rev;
+
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+#define CID_MANFID_ANY (-1u)
+#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_NAME_ANY (NULL)
+
+#define EXT_CSD_REV_ANY (-1u)
+
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_SANDISK_SD 0x3
+#define CID_MANFID_ATP 0x9
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_APACER 0x27
+#define CID_MANFID_KINGSTON 0x70
+#define CID_MANFID_HYNIX 0x90
+#define CID_MANFID_NUMONYX 0xFE
+
+#define END_FIXUP { NULL }
+
+#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _cis_vendor, _cis_device, \
+ _fixup, _data, _ext_csd_rev) \
+ { \
+ .name = (_name), \
+ .manfid = (_manfid), \
+ .oemid = (_oemid), \
+ .rev_start = (_rev_start), \
+ .rev_end = (_rev_end), \
+ .cis_vendor = (_cis_vendor), \
+ .cis_device = (_cis_device), \
+ .vendor_fixup = (_fixup), \
+ .data = (_data), \
+ .ext_csd_rev = (_ext_csd_rev), \
+ }
+
+#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _fixup, _data, _ext_csd_rev) \
+ _FIXUP_EXT(_name, _manfid, \
+ _oemid, _rev_start, _rev_end, \
+ SDIO_ANY_ID, SDIO_ANY_ID, \
+ _fixup, _data, _ext_csd_rev) \
+
+#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ EXT_CSD_REV_ANY)
+
+#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
+ _ext_csd_rev) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ _ext_csd_rev)
+
+#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
+ CID_OEMID_ANY, 0, -1ull, \
+ _vendor, _device, \
+ _fixup, _data, EXT_CSD_REV_ANY) \
+
+#define cid_rev(hwrev, fwrev, year, month) \
+ (((u64) hwrev) << 40 | \
+ ((u64) fwrev) << 32 | \
+ ((u64) year) << 16 | \
+ ((u64) month))
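+
+/*
+ * For example, cid_rev(0x1, 0x2, 2014, 7) yields 0x010207DE0007:
+ * hwrev in bits 47:40, fwrev in bits 39:32, year in bits 31:16 and
+ * month in bits 15:0.
+ */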
+
+#define cid_rev_card(card) \
+ cid_rev(card->cid.hwrev, \
+ card->cid.fwrev, \
+ card->cid.year, \
+ card->cid.month)
+
+/*
+ * Unconditionally quirk add/remove.
+ */
+static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+static inline void __maybe_unused add_limit_rate_quirk(struct mmc_card *card,
+ int data)
+{
+ card->quirk_max_rate = data;
+}
+
+/*
+ * Quirk add/remove for MMC products.
+ */
+static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for SD products.
+ */
+static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks &= ~data;
+}
+
+static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LENIENT_FN0;
+}
+
+static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+}
+
+static inline int mmc_card_disable_cd(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_DISABLE_CD;
+}
+
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
+static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
+}
+
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
+static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
+}
+
+static inline int mmc_card_broken_hpi(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_HPI;
+}
+
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
+#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
new file mode 100644
index 000000000..d5ca59bd1
--- /dev/null
+++ b/drivers/mmc/core/core.c
@@ -0,0 +1,2417 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mmc/core/core.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
+ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+#include <linux/scatterlist.h>
+#include <linux/log2.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+#include <linux/suspend.h>
+#include <linux/fault-inject.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/slot-gpio.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
+#include "core.h"
+#include "card.h"
+#include "bus.h"
+#include "host.h"
+#include "sdio_bus.h"
+#include "pwrseq.h"
+
+#include "mmc_ops.h"
+#include "sd_ops.h"
+#include "sdio_ops.h"
+
+/* The max erase timeout, used when host->max_busy_timeout isn't specified */
+#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
+#define SD_DISCARD_TIMEOUT_MS (250)
+
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
+
+/*
+ * Enabling software CRCs on the data blocks can be a significant (30%)
+ * performance cost, and for other reasons may not always be desired.
+ * So we allow it to be disabled.
+ */
+bool use_spi_crc = 1;
+module_param(use_spi_crc, bool, 0);
+
+static int mmc_schedule_delayed_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ /*
+ * We use the system_freezable_wq for two reasons.
+ * First, it allows several distinct work items (not the same one) to be
+ * executed simultaneously. Second, the queue becomes frozen when
+ * userspace is frozen during system PM.
+ */
+ return queue_delayed_work(system_freezable_wq, work, delay);
+}
+
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+/*
+ * Internal function. Inject random data errors.
+ * If mrq->data is NULL, no errors are injected.
+ */
+static void mmc_should_fail_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ static const int data_errors[] = {
+ -ETIMEDOUT,
+ -EILSEQ,
+ -EIO,
+ };
+
+ if (!data)
+ return;
+
+ if ((cmd && cmd->error) || data->error ||
+ !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
+ return;
+
+ data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
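+ /* Report a random whole number of 512-byte sectors as transferred */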
+ data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+}
+
+#else /* CONFIG_FAIL_MMC_REQUEST */
+
+static inline void mmc_should_fail_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+}
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
+static inline void mmc_complete_cmd(struct mmc_request *mrq)
+{
+ if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
+ complete_all(&mrq->cmd_completion);
+}
+
+void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (!mrq->cap_cmd_during_tfr)
+ return;
+
+ mmc_complete_cmd(mrq);
+
+ pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
+ mmc_hostname(host), mrq->cmd->opcode);
+}
+EXPORT_SYMBOL(mmc_command_done);
+
+/**
+ * mmc_request_done - finish processing an MMC request
+ * @host: MMC host which completed the request
+ * @mrq: MMC request which completed
+ *
+ * MMC drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ int err = cmd->error;
+
+ /* Flag re-tuning needed on CRC errors */
+ if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+ !host->retune_crc_disable &&
+ (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ) ||
+ (mrq->stop && mrq->stop->error == -EILSEQ)))
+ mmc_retune_needed(host);
+
+ if (err && cmd->retries && mmc_host_is_spi(host)) {
+ if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
+ cmd->retries = 0;
+ }
+
+ if (host->ongoing_mrq == mrq)
+ host->ongoing_mrq = NULL;
+
+ mmc_complete_cmd(mrq);
+
+ trace_mmc_request_done(host, mrq);
+
+ /*
+ * We list various conditions for the command to be considered
+ * properly done:
+ *
+ * - There was no error, OK fine then
+ * - We are not doing some kind of retry
+ * - The card was removed (...so just complete everything no matter
+ * if there are errors or retries)
+ */
+ if (!err || !cmd->retries || mmc_card_removed(host->card)) {
+ mmc_should_fail_request(host, mrq);
+
+ if (!host->ongoing_mrq)
+ led_trigger_event(host->led, LED_OFF);
+
+ if (mrq->sbc) {
+ pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), mrq->sbc->opcode,
+ mrq->sbc->error,
+ mrq->sbc->resp[0], mrq->sbc->resp[1],
+ mrq->sbc->resp[2], mrq->sbc->resp[3]);
+ }
+
+ pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), cmd->opcode, err,
+ cmd->resp[0], cmd->resp[1],
+ cmd->resp[2], cmd->resp[3]);
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ if (mrq->stop) {
+ pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), mrq->stop->opcode,
+ mrq->stop->error,
+ mrq->stop->resp[0], mrq->stop->resp[1],
+ mrq->stop->resp[2], mrq->stop->resp[3]);
+ }
+ }
+ /*
+ * Request starter must handle retries - see
+ * mmc_wait_for_req_done().
+ */
+ if (mrq->done)
+ mrq->done(mrq);
+}
+
+EXPORT_SYMBOL(mmc_request_done);
+
+static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /* Assumes host controller has been runtime resumed by mmc_claim_host */
+ err = mmc_retune(host);
+ if (err) {
+ mrq->cmd->error = err;
+ mmc_request_done(host, mrq);
+ return;
+ }
+
+ /*
+ * For SDIO read/write commands we must wait while the card is busy,
+ * otherwise some SDIO devices won't work properly. I/O abort, reset
+ * and bus suspend operations bypass this wait.
+ */
+ if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
+ host->ops->card_busy) {
+ int tries = 500; /* Wait approx. 500 ms at maximum */
+
+ while (host->ops->card_busy(host) && --tries)
+ mmc_delay(1);
+
+ if (tries == 0) {
+ mrq->cmd->error = -EBUSY;
+ mmc_request_done(host, mrq);
+ return;
+ }
+ }
+
+ if (mrq->cap_cmd_during_tfr) {
+ host->ongoing_mrq = mrq;
+ /*
+ * The retry path could come through here without having waited on
+ * cmd_completion, so ensure it is reinitialised.
+ */
+ reinit_completion(&mrq->cmd_completion);
+ }
+
+ trace_mmc_request_start(host, mrq);
+
+ if (host->cqe_on)
+ host->cqe_ops->cqe_off(host);
+
+ host->ops->request(host, mrq);
+}
+
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
+{
+ if (mrq->sbc) {
+ pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
+ mmc_hostname(host), mrq->sbc->opcode,
+ mrq->sbc->arg, mrq->sbc->flags);
+ }
+
+ if (mrq->cmd) {
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: blksz %d blocks %d flags %08x "
+ "tsac %d ms nsac %d\n",
+ mmc_hostname(host), mrq->data->blksz,
+ mrq->data->blocks, mrq->data->flags,
+ mrq->data->timeout_ns / 1000000,
+ mrq->data->timeout_clks);
+ }
+
+ if (mrq->stop) {
+ pr_debug("%s: CMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), mrq->stop->opcode,
+ mrq->stop->arg, mrq->stop->flags);
+ }
+}
+
+static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
+{
+ unsigned int i, sz = 0;
+ struct scatterlist *sg;
+
+ if (mrq->cmd) {
+ mrq->cmd->error = 0;
+ mrq->cmd->mrq = mrq;
+ mrq->cmd->data = mrq->data;
+ }
+ if (mrq->sbc) {
+ mrq->sbc->error = 0;
+ mrq->sbc->mrq = mrq;
+ }
+ if (mrq->data) {
+ if (mrq->data->blksz > host->max_blk_size ||
+ mrq->data->blocks > host->max_blk_count ||
+ mrq->data->blocks * mrq->data->blksz > host->max_req_size)
+ return -EINVAL;
+
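+ /* The scatterlist must cover the advertised transfer length exactly */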
+ for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
+ sz += sg->length;
+ if (sz != mrq->data->blocks * mrq->data->blksz)
+ return -EINVAL;
+
+ mrq->data->error = 0;
+ mrq->data->mrq = mrq;
+ if (mrq->stop) {
+ mrq->data->stop = mrq->stop;
+ mrq->stop->error = 0;
+ mrq->stop->mrq = mrq;
+ }
+ }
+
+ return 0;
+}
+
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ init_completion(&mrq->cmd_completion);
+
+ mmc_retune_hold(host);
+
+ if (mmc_card_removed(host->card))
+ return -ENOMEDIUM;
+
+ mmc_mrq_pr_debug(host, mrq, false);
+
+ WARN_ON(!host->claimed);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ return err;
+
+ led_trigger_event(host->led, LED_FULL);
+ __mmc_start_request(host, mrq);
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_start_request);
+
+static void mmc_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
+{
+ struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
+
+ /*
+ * If there is an ongoing transfer, wait for the command line to become
+ * available.
+ */
+ if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
+ wait_for_completion(&ongoing_mrq->cmd_completion);
+}
+
+static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ mmc_wait_ongoing_tfr_cmd(host);
+
+ init_completion(&mrq->completion);
+ mrq->done = mmc_wait_done;
+
+ err = mmc_start_request(host, mrq);
+ if (err) {
+ mrq->cmd->error = err;
+ mmc_complete_cmd(mrq);
+ complete(&mrq->completion);
+ }
+
+ return err;
+}
+
+void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_command *cmd;
+
+ while (1) {
+ wait_for_completion(&mrq->completion);
+
+ cmd = mrq->cmd;
+
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card))
+ break;
+
+ mmc_retune_recheck(host);
+
+ pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host), cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ __mmc_start_request(host, mrq);
+ }
+
+ mmc_retune_release(host);
+}
+EXPORT_SYMBOL(mmc_wait_for_req_done);
+
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning first if needed and possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected seldom, if at all, but it reduces performance,
+ * so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, MMC_BUSY_IO);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ if (err)
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
+/**
+ * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
+ * @host: MMC host
+ * @mrq: MMC request
+ *
+ * mmc_is_req_done() is used with requests that have
+ * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
+ * starting a request and before waiting for it to complete. That is,
+ * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
+ * and before mmc_wait_for_req_done(). If it is called at other times the
+ * result is not meaningful.
+ */
+bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ return completion_done(&mrq->completion);
+}
+EXPORT_SYMBOL(mmc_is_req_done);
+
+/**
+ * mmc_wait_for_req - start a request and wait for completion
+ * @host: MMC host to start command
+ * @mrq: MMC request to start
+ *
+ * Start a new MMC custom command request for a host, and wait
+ * for the command to complete. In the case of 'cap_cmd_during_tfr'
+ * requests, the transfer is ongoing and the caller can issue further
+ * commands that do not use the data lines, and then wait by calling
+ * mmc_wait_for_req_done().
+ * Does not attempt to parse the response.
+ */
+void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ __mmc_start_req(host, mrq);
+
+ if (!mrq->cap_cmd_during_tfr)
+ mmc_wait_for_req_done(host, mrq);
+}
+EXPORT_SYMBOL(mmc_wait_for_req);
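+
+/*
+ * Usage sketch (illustrative, hypothetical caller): with
+ * mrq->cap_cmd_during_tfr set, mmc_wait_for_req() returns once the command
+ * phase is done, so other non-data commands can be interleaved before
+ * waiting for the transfer itself:
+ *
+ *	mrq.cap_cmd_during_tfr = true;
+ *	mmc_wait_for_req(host, &mrq);
+ *	(issue commands that do not use the data lines)
+ *	mmc_wait_for_req_done(host, &mrq);
+ */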
+
+/**
+ * mmc_wait_for_cmd - start a command and wait for completion
+ * @host: MMC host to start command
+ * @cmd: MMC command to start
+ * @retries: maximum number of retries
+ *
+ * Start a new MMC command for a host, and wait for the command
+ * to complete. Return any error that occurred while the command
+ * was executing. Do not attempt to parse the response.
+ */
+int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
+{
+ struct mmc_request mrq = {};
+
+ WARN_ON(!host->claimed);
+
+ memset(cmd->resp, 0, sizeof(cmd->resp));
+ cmd->retries = retries;
+
+ mrq.cmd = cmd;
+ cmd->data = NULL;
+
+ mmc_wait_for_req(host, &mrq);
+
+ return cmd->error;
+}
+EXPORT_SYMBOL(mmc_wait_for_cmd);
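+
+/*
+ * Usage sketch (illustrative, hypothetical caller): sending CMD0 with no
+ * retries while the host is claimed; the opcode and flags match what
+ * mmc_go_idle() uses.
+ *
+ *	struct mmc_command cmd = {};
+ *
+ *	cmd.opcode = MMC_GO_IDLE_STATE;
+ *	cmd.arg = 0;
+ *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+ *	err = mmc_wait_for_cmd(host, &cmd, 0);
+ */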
+
+/**
+ * mmc_set_data_timeout - set the timeout for a data command
+ * @data: data phase for command
+ * @card: the MMC card associated with the data transfer
+ *
+ * Computes the data timeout parameters according to the
+ * correct algorithm given the card type.
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
+{
+ unsigned int mult;
+
+ /*
+ * SDIO cards only define an upper 1 s limit on access.
+ */
+ if (mmc_card_sdio(card)) {
+ data->timeout_ns = 1000000000;
+ data->timeout_clks = 0;
+ return;
+ }
+
+ /*
+ * SD cards use a 100 multiplier rather than 10
+ */
+ mult = mmc_card_sd(card) ? 100 : 10;
+
+ /*
+ * Scale up the multiplier (and therefore the timeout) by
+ * the r2w factor for writes.
+ */
+ if (data->flags & MMC_DATA_WRITE)
+ mult <<= card->csd.r2w_factor;
+
+ data->timeout_ns = card->csd.taac_ns * mult;
+ data->timeout_clks = card->csd.taac_clks * mult;
+
+ /*
+ * SD cards also have an upper limit on the timeout.
+ */
+ if (mmc_card_sd(card)) {
+ unsigned int timeout_us, limit_us;
+
+ timeout_us = data->timeout_ns / 1000;
+ if (card->host->ios.clock)
+ timeout_us += data->timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (data->flags & MMC_DATA_WRITE)
+ /*
+			/*
+			 * The MMC spec says "It is strongly recommended
+ * for hosts to implement more than 500ms
+ * timeout value even if the card indicates
+ * the 250ms maximum busy length." Even the
+ * previous value of 300ms is known to be
+ * insufficient for some cards.
+ */
+ limit_us = 3000000;
+ else
+ limit_us = 100000;
+
+ /*
+ * SDHC cards always use these fixed values.
+ */
+ if (timeout_us > limit_us) {
+ data->timeout_ns = limit_us * 1000;
+ data->timeout_clks = 0;
+ }
+
+ /* assign limit value if invalid */
+ if (timeout_us == 0)
+ data->timeout_ns = limit_us * 1000;
+ }
+
+ /*
+ * Some cards require longer data read timeout than indicated in CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 600ms has proven enough. If necessary,
+ * this value can be increased if other problematic cards require this.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 600000000;
+ data->timeout_clks = 0;
+ }
+
+ /*
+ * Some cards need very high timeouts if driven in SPI mode.
+ * The worst observed timeout was 900ms after writing a
+ * continuous stream of data until the internal logic
+ * overflowed.
+ */
+ if (mmc_host_is_spi(card->host)) {
+ if (data->flags & MMC_DATA_WRITE) {
+ if (data->timeout_ns < 1000000000)
+ data->timeout_ns = 1000000000; /* 1s */
+ } else {
+ if (data->timeout_ns < 100000000)
+ data->timeout_ns = 100000000; /* 100ms */
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_set_data_timeout);
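+
+/*
+ * Worked example (illustrative numbers, ignoring the timeout_clks term):
+ * for an SD card write with csd.taac_ns = 1000000 (1 ms) and
+ * csd.r2w_factor = 2, the multiplier is 100 << 2 = 400, giving
+ * timeout_ns = 400 ms. That is below the 3 s write limit above, so the
+ * value is kept.
+ */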
+
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ return host->claimer == ctx ||
+ (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+ struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ if (!host->claimer) {
+ if (ctx)
+ host->claimer = ctx;
+ else
+ host->claimer = &host->default_ctx;
+ }
+ if (task)
+ host->claimer->task = task;
+}
+
+/**
+ * __mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
+ * @abort: whether or not the operation should be aborted
+ *
+ * Claim a host for a set of operations. If @abort is non-NULL and
+ * dereferences to a non-zero value, this will return prematurely with
+ * that non-zero value without acquiring the lock. Returns zero
+ * with the lock held otherwise.
+ */
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort)
+{
+ struct task_struct *task = ctx ? NULL : current;
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ int stop;
+ bool pm = false;
+
+ might_sleep();
+
+ add_wait_queue(&host->wq, &wait);
+ spin_lock_irqsave(&host->lock, flags);
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ stop = abort ? atomic_read(abort) : 0;
+ if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
+ break;
+ spin_unlock_irqrestore(&host->lock, flags);
+ schedule();
+ spin_lock_irqsave(&host->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ if (!stop) {
+ host->claimed = 1;
+ mmc_ctx_set_claimer(host, ctx, task);
+ host->claim_cnt += 1;
+ if (host->claim_cnt == 1)
+ pm = true;
+ } else
+ wake_up(&host->wq);
+ spin_unlock_irqrestore(&host->lock, flags);
+ remove_wait_queue(&host->wq, &wait);
+
+ if (pm)
+ pm_runtime_get_sync(mmc_dev(host));
+
+ return stop;
+}
+EXPORT_SYMBOL(__mmc_claim_host);
+
+/**
+ * mmc_release_host - release a host
+ * @host: mmc host to release
+ *
+ * Release an MMC host, allowing others to claim the host
+ * for their operations.
+ */
+void mmc_release_host(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ WARN_ON(!host->claimed);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (--host->claim_cnt) {
+ /* Release for nested claim */
+ spin_unlock_irqrestore(&host->lock, flags);
+ } else {
+ host->claimed = 0;
+ host->claimer->task = NULL;
+ host->claimer = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ wake_up(&host->wq);
+ pm_runtime_mark_last_busy(mmc_dev(host));
+ if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
+ pm_runtime_put_sync_suspend(mmc_dev(host));
+ else
+ pm_runtime_put_autosuspend(mmc_dev(host));
+ }
+}
+EXPORT_SYMBOL(mmc_release_host);
+
+/*
+ * This is a helper function, which fetches a runtime pm reference for the
+ * card device and also claims the host.
+ */
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
+{
+ pm_runtime_get_sync(&card->dev);
+ __mmc_claim_host(card->host, ctx, NULL);
+}
+EXPORT_SYMBOL(mmc_get_card);
+
+/*
+ * This is a helper function, which releases the host and drops the runtime
+ * pm reference for the card device.
+ */
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
+{
+ struct mmc_host *host = card->host;
+
+ WARN_ON(ctx && host->claimer != ctx);
+
+ mmc_release_host(host);
+ pm_runtime_mark_last_busy(&card->dev);
+ pm_runtime_put_autosuspend(&card->dev);
+}
+EXPORT_SYMBOL(mmc_put_card);
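+
+/*
+ * Usage sketch (illustrative, hypothetical caller): the usual pairing
+ * around an operation on the card:
+ *
+ *	mmc_get_card(card, NULL);
+ *	(issue requests against card->host)
+ *	mmc_put_card(card, NULL);
+ */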
+
+/*
+ * Internal function that does the actual ios call to the host driver,
+ * optionally printing some debug output.
+ */
+static inline void mmc_set_ios(struct mmc_host *host)
+{
+ struct mmc_ios *ios = &host->ios;
+
+ pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
+ "width %u timing %u\n",
+ mmc_hostname(host), ios->clock, ios->bus_mode,
+ ios->power_mode, ios->chip_select, ios->vdd,
+ 1 << ios->bus_width, ios->timing);
+
+ host->ops->set_ios(host, ios);
+}
+
+/*
+ * Control chip select pin on a host.
+ */
+void mmc_set_chip_select(struct mmc_host *host, int mode)
+{
+ host->ios.chip_select = mode;
+ mmc_set_ios(host);
+}
+
+/*
+ * Sets the host clock to the highest possible frequency that
+ * is below "hz".
+ */
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+ WARN_ON(hz && hz < host->f_min);
+
+ if (hz > host->f_max)
+ hz = host->f_max;
+
+ host->ios.clock = hz;
+ mmc_set_ios(host);
+}
+
+int mmc_execute_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 opcode;
+ int err;
+
+ if (!host->ops->execute_tuning)
+ return 0;
+
+ if (host->cqe_on)
+ host->cqe_ops->cqe_off(host);
+
+ if (mmc_card_mmc(card))
+ opcode = MMC_SEND_TUNING_BLOCK_HS200;
+ else
+ opcode = MMC_SEND_TUNING_BLOCK;
+
+ err = host->ops->execute_tuning(host, opcode);
+
+ if (err) {
+ pr_err("%s: tuning execution failed: %d\n",
+ mmc_hostname(host), err);
+ } else {
+ host->retune_now = 0;
+ host->need_retune = 0;
+ mmc_retune_enable(host);
+ }
+
+ return err;
+}
+
+/*
+ * Change the bus mode (open drain/push-pull) of a host.
+ */
+void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
+{
+ host->ios.bus_mode = mode;
+ mmc_set_ios(host);
+}
+
+/*
+ * Change data bus width of a host.
+ */
+void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
+{
+ host->ios.bus_width = width;
+ mmc_set_ios(host);
+}
+
+/*
+ * Set initial state after a power cycle or a hw_reset.
+ */
+void mmc_set_initial_state(struct mmc_host *host)
+{
+ if (host->cqe_on)
+ host->cqe_ops->cqe_off(host);
+
+ mmc_retune_disable(host);
+
+ if (mmc_host_is_spi(host))
+ host->ios.chip_select = MMC_CS_HIGH;
+ else
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ host->ios.drv_type = 0;
+ host->ios.enhanced_strobe = false;
+
+ /*
+ * Make sure we are in non-enhanced strobe mode before we
+ * actually enable it in ext_csd.
+ */
+ if ((host->caps2 & MMC_CAP2_HS400_ES) &&
+ host->ops->hs400_enhanced_strobe)
+ host->ops->hs400_enhanced_strobe(host, &host->ios);
+
+ mmc_set_ios(host);
+}
+
+/**
+ * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
+ * @vdd: voltage (mV)
+ * @low_bits: prefer low bits in boundary cases
+ *
+ * This function returns the OCR bit number according to the provided @vdd
+ * value. If conversion is not possible, a negative errno value is returned.
+ *
+ * Depending on the @low_bits flag the function prefers low or high OCR bits
+ * on boundary voltages. For example,
+ * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
+ * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
+ *
+ * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
+ */
+static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
+{
+ const int max_bit = ilog2(MMC_VDD_35_36);
+ int bit;
+
+ if (vdd < 1650 || vdd > 3600)
+ return -EINVAL;
+
+ if (vdd >= 1650 && vdd <= 1950)
+ return ilog2(MMC_VDD_165_195);
+
+ if (low_bits)
+ vdd -= 1;
+
+	/* Base 2000 mV, step 100 mV, bit numbering starts at 8. */
+ bit = (vdd - 2000) / 100 + 8;
+ if (bit > max_bit)
+ return max_bit;
+ return bit;
+}
+
+/**
+ * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
+ * @vdd_min: minimum voltage value (mV)
+ * @vdd_max: maximum voltage value (mV)
+ *
+ * This function returns the OCR mask bits according to the provided @vdd_min
+ * and @vdd_max values. If conversion is not possible the function returns 0.
+ *
+ * Notes wrt boundary cases:
+ * This function sets the OCR bits for all boundary voltages, for example
+ * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
+ * MMC_VDD_34_35 mask.
+ */
+u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
+{
+ u32 mask = 0;
+
+ if (vdd_max < vdd_min)
+ return 0;
+
+ /* Prefer high bits for the boundary vdd_max values. */
+ vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
+ if (vdd_max < 0)
+ return 0;
+
+ /* Prefer low bits for the boundary vdd_min values. */
+ vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
+ if (vdd_min < 0)
+ return 0;
+
+ /* Fill the mask, from max bit to min bit. */
+ while (vdd_max >= vdd_min)
+ mask |= 1 << vdd_max--;
+
+ return mask;
+}
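+
+/*
+ * Example (follows from the kernel-doc above):
+ * mmc_vddrange_to_ocrmask(3300, 3400) returns
+ * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, since boundary voltages
+ * set the OCR bits on both sides.
+ */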
+
+static int mmc_of_get_func_num(struct device_node *node)
+{
+ u32 reg;
+ int ret;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret < 0)
+ return ret;
+
+ return reg;
+}
+
+struct device_node *mmc_of_find_child_device(struct mmc_host *host,
+ unsigned func_num)
+{
+ struct device_node *node;
+
+ if (!host->parent || !host->parent->of_node)
+ return NULL;
+
+ for_each_child_of_node(host->parent->of_node, node) {
+ if (mmc_of_get_func_num(node) == func_num)
+ return node;
+ }
+
+ return NULL;
+}
+
+/*
+ * Mask off any voltages we don't support and select
+ * the lowest voltage
+ */
+u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
+{
+ int bit;
+
+ /*
+ * Sanity check the voltages that the card claims to
+ * support.
+ */
+ if (ocr & 0x7F) {
+ dev_warn(mmc_dev(host),
+ "card claims to support voltages below defined range\n");
+ ocr &= ~0x7F;
+ }
+
+ ocr &= host->ocr_avail;
+ if (!ocr) {
+ dev_warn(mmc_dev(host), "no support for card's volts\n");
+ return 0;
+ }
+
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
+ bit = ffs(ocr) - 1;
+ ocr &= 3 << bit;
+ mmc_power_cycle(host, ocr);
+ } else {
+ bit = fls(ocr) - 1;
+ /*
+ * The bit variable represents the highest voltage bit set in
+ * the OCR register.
+ * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+ * we must shift the mask '3' with (bit - 1).
+ */
+ ocr &= 3 << (bit - 1);
+ if (bit != host->ios.vdd)
+ dev_warn(mmc_dev(host), "exceeding card's volts\n");
+ }
+
+ return ocr;
+}
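+
+/*
+ * Worked example (illustrative): a card reporting ocr = 0x00ff8000
+ * (2.7-3.6 V) on a host with ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34
+ * intersects to bits 20-21; fls() picks bit 21 and the '3 << (bit - 1)'
+ * mask keeps the 3.2-3.4 V window.
+ */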
+
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+{
+ int err = 0;
+ int old_signal_voltage = host->ios.signal_voltage;
+
+ host->ios.signal_voltage = signal_voltage;
+ if (host->ops->start_signal_voltage_switch)
+ err = host->ops->start_signal_voltage_switch(host, &host->ios);
+
+ if (err)
+ host->ios.signal_voltage = old_signal_voltage;
+
+ return err;
+}
+
+void mmc_set_initial_signal_voltage(struct mmc_host *host)
+{
+	/* Try to set signal voltage to 3.3 V but fall back to 1.8 V or 1.2 V */
+ if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
+ else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
+ else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
+}
+
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+ u32 clock;
+
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ return -EAGAIN;
+
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ return 0;
+}
+
+int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
+{
+ struct mmc_command cmd = {};
+ int err = 0;
+
+ /*
+ * If we cannot switch voltages, return failure so the caller
+ * can continue without UHS mode
+ */
+ if (!host->ops->start_signal_voltage_switch)
+ return -EPERM;
+ if (!host->ops->card_busy)
+ pr_warn("%s: cannot verify signal voltage switch\n",
+ mmc_hostname(host));
+
+ cmd.opcode = SD_SWITCH_VOLTAGE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ goto power_cycle;
+
+ if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+ return -EIO;
+
+ /*
+ * The card should drive cmd and dat[0:3] low immediately
+ * after the response of cmd11, but wait 1 ms to be sure
+ */
+ mmc_delay(1);
+ if (host->ops->card_busy && !host->ops->card_busy(host)) {
+ err = -EAGAIN;
+ goto power_cycle;
+ }
+
+ if (mmc_host_set_uhs_voltage(host)) {
+ /*
+ * Voltages may not have been switched, but we've already
+ * sent CMD11, so a power cycle is required anyway
+ */
+ err = -EAGAIN;
+ goto power_cycle;
+ }
+
+ /* Wait for at least 1 ms according to spec */
+ mmc_delay(1);
+
+ /*
+ * Failure to switch is indicated by the card holding
+ * dat[0:3] low
+ */
+ if (host->ops->card_busy && host->ops->card_busy(host))
+ err = -EAGAIN;
+
+power_cycle:
+ if (err) {
+ pr_debug("%s: Signal voltage switch failed, "
+ "power cycling card\n", mmc_hostname(host));
+ mmc_power_cycle(host, ocr);
+ }
+
+ return err;
+}
+
+/*
+ * Select timing parameters for host.
+ */
+void mmc_set_timing(struct mmc_host *host, unsigned int timing)
+{
+ host->ios.timing = timing;
+ mmc_set_ios(host);
+}
+
+/*
+ * Select appropriate driver type for host.
+ */
+void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
+{
+ host->ios.drv_type = drv_type;
+ mmc_set_ios(host);
+}
+
+int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
+ int card_drv_type, int *drv_type)
+{
+ struct mmc_host *host = card->host;
+ int host_drv_type = SD_DRIVER_TYPE_B;
+
+ *drv_type = 0;
+
+ if (!host->ops->select_drive_strength)
+ return 0;
+
+ /* Use SD definition of driver strength for hosts */
+ if (host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware specific code
+ * return what is possible given the options
+ */
+ return host->ops->select_drive_strength(card, max_dtr,
+ host_drv_type,
+ card_drv_type,
+ drv_type);
+}
+
+/*
+ * Apply power to the MMC stack. This is a two-stage process.
+ * First, we enable power to the card without the clock running.
+ * We then wait a bit for the power to stabilise. Finally,
+ * enable the bus drivers and clock to the card.
+ *
+ * We must _NOT_ enable the clock prior to power stabilising.
+ *
+ * If a host does all the power sequencing itself, ignore the
+ * initial MMC_POWER_UP stage.
+ */
+void mmc_power_up(struct mmc_host *host, u32 ocr)
+{
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
+ mmc_pwrseq_pre_power_on(host);
+
+ host->ios.vdd = fls(ocr) - 1;
+ host->ios.power_mode = MMC_POWER_UP;
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+
+ mmc_set_initial_signal_voltage(host);
+
+ /*
+ * This delay should be sufficient to allow the power supply
+ * to reach the minimum voltage.
+ */
+ mmc_delay(host->ios.power_delay_ms);
+
+ mmc_pwrseq_post_power_on(host);
+
+ host->ios.clock = host->f_init;
+
+ host->ios.power_mode = MMC_POWER_ON;
+ mmc_set_ios(host);
+
+ /*
+	 * This delay must be at least 74 clock cycles, or 1 ms, or the
+ * time required to reach a stable voltage.
+ */
+ mmc_delay(host->ios.power_delay_ms);
+}
+
+void mmc_power_off(struct mmc_host *host)
+{
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return;
+
+ mmc_pwrseq_power_off(host);
+
+ host->ios.clock = 0;
+ host->ios.vdd = 0;
+
+ host->ios.power_mode = MMC_POWER_OFF;
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+
+ /*
+ * Some configurations, such as the 802.11 SDIO card in the OLPC
+ * XO-1.5, require a short delay after poweroff before the card
+ * can be successfully turned on again.
+ */
+ mmc_delay(1);
+}
+
+void mmc_power_cycle(struct mmc_host *host, u32 ocr)
+{
+ mmc_power_off(host);
+ /* Wait at least 1 ms according to SD spec */
+ mmc_delay(1);
+ mmc_power_up(host, ocr);
+}
+
+/*
+ * Cleanup when the last reference to the bus operator is dropped.
+ */
+static void __mmc_release_bus(struct mmc_host *host)
+{
+ WARN_ON(!host->bus_dead);
+
+ host->bus_ops = NULL;
+}
+
+/*
+ * Increase reference count of bus operator
+ */
+static inline void mmc_bus_get(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_refs++;
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Decrease reference count of bus operator and free it if
+ * it is the last reference.
+ */
+static inline void mmc_bus_put(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_refs--;
+ if ((host->bus_refs == 0) && host->bus_ops)
+ __mmc_release_bus(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Assign an mmc bus handler to a host. Only one bus handler may control a
+ * host at any given time.
+ */
+void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
+{
+ unsigned long flags;
+
+ WARN_ON(!host->claimed);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ WARN_ON(host->bus_ops);
+ WARN_ON(host->bus_refs);
+
+ host->bus_ops = ops;
+ host->bus_refs = 1;
+ host->bus_dead = 0;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Remove the current bus handler from a host.
+ */
+void mmc_detach_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ WARN_ON(!host->claimed);
+ WARN_ON(!host->bus_ops);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->bus_dead = 1;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_put(host);
+}
+
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
+{
+ /*
+ * Prevent system sleep for 5s to allow user space to consume the
+ * corresponding uevent. This is especially useful, when CD irq is used
+ * as a system wakeup, but doesn't hurt in other cases.
+ */
+ if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
+ __pm_wakeup_event(host->ws, 5000);
+
+ host->detect_change = 1;
+ mmc_schedule_delayed_work(&host->detect, delay);
+}
+
+/**
+ * mmc_detect_change - process change of state on an MMC socket
+ * @host: host which changed state.
+ * @delay: optional delay to wait before detection (jiffies)
+ *
+ * MMC drivers should call this when they detect a card has been
+ * inserted or removed. The MMC layer will confirm that any
+ * present card is still functional, and initialize any newly
+ * inserted.
+ */
+void mmc_detect_change(struct mmc_host *host, unsigned long delay)
+{
+ _mmc_detect_change(host, delay, true);
+}
+EXPORT_SYMBOL(mmc_detect_change);
+
+void mmc_init_erase(struct mmc_card *card)
+{
+ unsigned int sz;
+
+ if (is_power_of_2(card->erase_size))
+ card->erase_shift = ffs(card->erase_size) - 1;
+ else
+ card->erase_shift = 0;
+
+ /*
+ * It is possible to erase an arbitrarily large area of an SD or MMC
+ * card. That is not desirable because it can take a long time
+	 * (minutes), potentially delaying more important I/O, and also the
+	 * timeout calculations become hugely over-estimated.
+ * Consequently, 'pref_erase' is defined as a guide to limit erases
+ * to that size and alignment.
+ *
+ * For SD cards that define Allocation Unit size, limit erases to one
+ * Allocation Unit at a time.
+	 * For MMC, have a stab at a good value and for modern cards it will
+	 * end up being 4 MiB. Note that if the value is too small, it can end
+ * up taking longer to erase. Also note, erase_size is already set to
+ * High Capacity Erase Size if available when this function is called.
+ */
+ if (mmc_card_sd(card) && card->ssr.au) {
+ card->pref_erase = card->ssr.au;
+ card->erase_shift = ffs(card->ssr.au) - 1;
+ } else if (card->erase_size) {
+ sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
+ if (sz < 128)
+ card->pref_erase = 512 * 1024 / 512;
+ else if (sz < 512)
+ card->pref_erase = 1024 * 1024 / 512;
+ else if (sz < 1024)
+ card->pref_erase = 2 * 1024 * 1024 / 512;
+ else
+ card->pref_erase = 4 * 1024 * 1024 / 512;
+ if (card->pref_erase < card->erase_size)
+ card->pref_erase = card->erase_size;
+ else {
+ sz = card->pref_erase % card->erase_size;
+ if (sz)
+ card->pref_erase += card->erase_size - sz;
+ }
+ } else
+ card->pref_erase = 0;
+}
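+
+/*
+ * Worked example (illustrative): for a 4 GiB SD card without an SSR AU,
+ * sz evaluates to 4096 (the size in MiB), so pref_erase becomes
+ * 4 * 1024 * 1024 / 512 = 8192 sectors (4 MiB), rounded up to a multiple
+ * of erase_size if needed.
+ */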
+
+static bool is_trim_arg(unsigned int arg)
+{
+ return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
+}
+
+static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
+ unsigned int arg, unsigned int qty)
+{
+ unsigned int erase_timeout;
+
+ if (arg == MMC_DISCARD_ARG ||
+ (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
+ erase_timeout = card->ext_csd.trim_timeout;
+ } else if (card->ext_csd.erase_group_def & 1) {
+ /* High Capacity Erase Group Size uses HC timeouts */
+ if (arg == MMC_TRIM_ARG)
+ erase_timeout = card->ext_csd.trim_timeout;
+ else
+ erase_timeout = card->ext_csd.hc_erase_timeout;
+ } else {
+ /* CSD Erase Group Size uses write timeout */
+ unsigned int mult = (10 << card->csd.r2w_factor);
+ unsigned int timeout_clks = card->csd.taac_clks * mult;
+ unsigned int timeout_us;
+
+ /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
+ if (card->csd.taac_ns < 1000000)
+ timeout_us = (card->csd.taac_ns * mult) / 1000;
+ else
+ timeout_us = (card->csd.taac_ns / 1000) * mult;
+
+ /*
+ * ios.clock is only a target. The real clock rate might be
+ * less but not that much less, so fudge it by multiplying by 2.
+ */
+ timeout_clks <<= 1;
+ timeout_us += (timeout_clks * 1000) /
+ (card->host->ios.clock / 1000);
+
+ erase_timeout = timeout_us / 1000;
+
+ /*
+ * Theoretically, the calculation could underflow so round up
+ * to 1ms in that case.
+ */
+ if (!erase_timeout)
+ erase_timeout = 1;
+ }
+
+ /* Multiplier for secure operations */
+ if (arg & MMC_SECURE_ARGS) {
+ if (arg == MMC_SECURE_ERASE_ARG)
+ erase_timeout *= card->ext_csd.sec_erase_mult;
+ else
+ erase_timeout *= card->ext_csd.sec_trim_mult;
+ }
+
+ erase_timeout *= qty;
+
+ /*
+ * Ensure at least a 1 second timeout for SPI as per
+ * 'mmc_set_data_timeout()'
+ */
+ if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
+ erase_timeout = 1000;
+
+ return erase_timeout;
+}
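+
+/*
+ * Worked example (illustrative numbers): with erase_group_def set,
+ * arg == MMC_ERASE_ARG and hc_erase_timeout = 300 ms, erasing qty = 4
+ * erase groups yields erase_timeout = 300 * 4 = 1200 ms.
+ */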
+
+static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
+ unsigned int arg,
+ unsigned int qty)
+{
+ unsigned int erase_timeout;
+
+	/*
+	 * For DISCARD none of the below calculation applies.
+	 * The busy timeout is 250 ms per discard command.
+	 */
+ if (arg == SD_DISCARD_ARG)
+ return SD_DISCARD_TIMEOUT_MS;
+
+ if (card->ssr.erase_timeout) {
+ /* Erase timeout specified in SD Status Register (SSR) */
+ erase_timeout = card->ssr.erase_timeout * qty +
+ card->ssr.erase_offset;
+ } else {
+ /*
+ * Erase timeout not specified in SD Status Register (SSR) so
+ * use 250ms per write block.
+ */
+ erase_timeout = 250 * qty;
+ }
+
+ /* Must not be less than 1 second */
+ if (erase_timeout < 1000)
+ erase_timeout = 1000;
+
+ return erase_timeout;
+}
+
+static unsigned int mmc_erase_timeout(struct mmc_card *card,
+ unsigned int arg,
+ unsigned int qty)
+{
+ if (mmc_card_sd(card))
+ return mmc_sd_erase_timeout(card, arg, qty);
+ else
+ return mmc_mmc_erase_timeout(card, arg, qty);
+}
+
+static int mmc_do_erase(struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command cmd = {};
+ unsigned int qty = 0, busy_timeout = 0;
+ bool use_r1b_resp = false;
+ int err;
+
+ mmc_retune_hold(card->host);
+
+ /*
+ * qty is used to calculate the erase timeout which depends on how many
+ * erase groups (or allocation units in SD terminology) are affected.
+ * We count erasing part of an erase group as one erase group.
+ * For SD, the allocation units are always a power of 2. For MMC, the
+	 * erase group size is almost certainly also a power of 2, but the
+	 * JEDEC standard does not seem to insist on that, so we fall back to
+ * division in that case. SD may not specify an allocation unit size,
+ * in which case the timeout is based on the number of write blocks.
+ *
+ * Note that the timeout for secure trim 2 will only be correct if the
+ * number of erase groups specified is the same as the total of all
+ * preceding secure trim 1 commands. Since the power may have been
+ * lost since the secure trim 1 commands occurred, it is generally
+ * impossible to calculate the secure trim 2 timeout correctly.
+ */
+ if (card->erase_shift)
+ qty += ((to >> card->erase_shift) -
+ (from >> card->erase_shift)) + 1;
+ else if (mmc_card_sd(card))
+ qty += to - from + 1;
+ else
+ qty += ((to / card->erase_size) -
+ (from / card->erase_size)) + 1;
+
+ if (!mmc_card_blockaddr(card)) {
+ from <<= 9;
+ to <<= 9;
+ }
+
+ if (mmc_card_sd(card))
+ cmd.opcode = SD_ERASE_WR_BLK_START;
+ else
+ cmd.opcode = MMC_ERASE_GROUP_START;
+ cmd.arg = from;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_err("mmc_erase: group start error %d, "
+ "status %#x\n", err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ if (mmc_card_sd(card))
+ cmd.opcode = SD_ERASE_WR_BLK_END;
+ else
+ cmd.opcode = MMC_ERASE_GROUP_END;
+ cmd.arg = to;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_err("mmc_erase: group end error %d, status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_ERASE;
+ cmd.arg = arg;
+ busy_timeout = mmc_erase_timeout(card, arg, qty);
+ /*
+ * If the host controller supports busy signalling and the timeout for
+ * the erase operation does not exceed the max_busy_timeout, we should
+	 * use an R1B response. Otherwise we need to prevent the host from doing
+	 * hw busy detection, which is done by converting to an R1 response
+	 * instead. Note, some hosts require R1B, which also means they are on
+	 * their own when it comes to dealing with the busy timeout.
+ */
+ if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
+ card->host->max_busy_timeout &&
+ busy_timeout > card->host->max_busy_timeout) {
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = busy_timeout;
+ use_r1b_resp = true;
+ }
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_err("mmc_erase: erase error %d, status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ if (mmc_host_is_spi(card->host))
+ goto out;
+
+ /*
+	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling shall be
+	 * avoided.
+ */
+ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+ goto out;
+
+ /* Let's poll to find out when the erase operation completes. */
+ err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
+
+out:
+ mmc_retune_release(card->host);
+ return err;
+}
+
+static unsigned int mmc_align_erase_size(struct mmc_card *card,
+ unsigned int *from,
+ unsigned int *to,
+ unsigned int nr)
+{
+ unsigned int from_new = *from, nr_new = nr, rem;
+
+ /*
+ * When the 'card->erase_size' is power of 2, we can use round_up/down()
+ * to align the erase size efficiently.
+ */
+ if (is_power_of_2(card->erase_size)) {
+ unsigned int temp = from_new;
+
+ from_new = round_up(temp, card->erase_size);
+ rem = from_new - temp;
+
+ if (nr_new > rem)
+ nr_new -= rem;
+ else
+ return 0;
+
+ nr_new = round_down(nr_new, card->erase_size);
+ } else {
+ rem = from_new % card->erase_size;
+ if (rem) {
+ rem = card->erase_size - rem;
+ from_new += rem;
+ if (nr_new > rem)
+ nr_new -= rem;
+ else
+ return 0;
+ }
+
+ rem = nr_new % card->erase_size;
+ if (rem)
+ nr_new -= rem;
+ }
+
+ if (nr_new == 0)
+ return 0;
+
+ *to = from_new + nr_new;
+ *from = from_new;
+
+ return nr_new;
+}
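+
+/*
+ * Worked example (illustrative): with erase_size = 1024 sectors (a power
+ * of 2), *from = 1000 and nr = 5000, 'from' is rounded up to 1024, the
+ * remaining 4976 sectors round down to 4096, so *from = 1024, *to = 5120
+ * and 4096 is returned.
+ */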
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument
+ *
+ * Caller must claim host before calling this function.
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+ int err;
+
+ if (!(card->csd.cmdclass & CCC_ERASE))
+ return -EOPNOTSUPP;
+
+ if (!card->erase_size)
+ return -EOPNOTSUPP;
+
+ if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
+ return -EOPNOTSUPP;
+
+ if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
+ !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
+ return -EOPNOTSUPP;
+
+ if (mmc_card_mmc(card) && is_trim_arg(arg) &&
+ !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
+ return -EOPNOTSUPP;
+
+ if (arg == MMC_SECURE_ERASE_ARG) {
+ if (from % card->erase_size || nr % card->erase_size)
+ return -EINVAL;
+ }
+
+ if (arg == MMC_ERASE_ARG)
+ nr = mmc_align_erase_size(card, &from, &to, nr);
+
+ if (nr == 0)
+ return 0;
+
+ if (to <= from)
+ return -EINVAL;
+
+ /* 'from' and 'to' are inclusive */
+ to -= 1;
+
+ /*
+ * Special case where only one erase-group fits in the timeout budget:
+	 * If the region crosses an erase-group boundary in this particular
+	 * case, we will be trimming more than one erase-group, which does not
+	 * fit in the timeout budget of the controller, so we need to split it
+ * and call mmc_do_erase() twice if necessary. This special case is
+ * identified by the card->eg_boundary flag.
+ */
+ rem = card->erase_size - (from % card->erase_size);
+ if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
+ err = mmc_do_erase(card, from, from + rem - 1, arg);
+ from += rem;
+ if ((err) || (to <= from))
+ return err;
+ }
+
+ return mmc_do_erase(card, from, to, arg);
+}
+EXPORT_SYMBOL(mmc_erase);
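+
+/*
+ * Usage sketch (illustrative, hypothetical caller): erasing while holding
+ * the host, as the kernel-doc above requires:
+ *
+ *	mmc_claim_host(card->host);
+ *	err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
+ *	mmc_release_host(card->host);
+ */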
+
+int mmc_can_erase(struct mmc_card *card)
+{
+ if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_erase);
+
+int mmc_can_trim(struct mmc_card *card)
+{
+ if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
+ (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_trim);
+
+int mmc_can_discard(struct mmc_card *card)
+{
+ /*
+	 * As there's no way to detect the discard support bit at v4.5,
+	 * use the s/w feature support field.
+ */
+ if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_discard);
+
+int mmc_can_sanitize(struct mmc_card *card)
+{
+ if (!mmc_can_trim(card) && !mmc_can_erase(card))
+ return 0;
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
+ return 1;
+ return 0;
+}
+
+int mmc_can_secure_erase_trim(struct mmc_card *card)
+{
+ if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_secure_erase_trim);
+
+int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr)
+{
+ if (!card->erase_size)
+ return 0;
+ if (from % card->erase_size || nr % card->erase_size)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_erase_group_aligned);
+
+static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
+ unsigned int arg)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
+ unsigned int last_timeout = 0;
+ unsigned int max_busy_timeout = host->max_busy_timeout ?
+ host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
+
+ if (card->erase_shift) {
+ max_qty = UINT_MAX >> card->erase_shift;
+ min_qty = card->pref_erase >> card->erase_shift;
+ } else if (mmc_card_sd(card)) {
+ max_qty = UINT_MAX;
+ min_qty = card->pref_erase;
+ } else {
+ max_qty = UINT_MAX / card->erase_size;
+ min_qty = card->pref_erase / card->erase_size;
+ }
+
+ /*
+ * We should not only use 'host->max_busy_timeout' as the limitation
+	 * when deciding the max discard sectors. We should pick a balanced
+	 * value that improves the erase speed while keeping the timeout from
+	 * becoming too long.
+ *
+ * Here we set 'card->pref_erase' as the minimal discard sectors no
+	 * matter the size of 'host->max_busy_timeout', but if the
+ * 'host->max_busy_timeout' is large enough for more discard sectors,
+ * then we can continue to increase the max discard sectors until we
+ * get a balance value. In cases when the 'host->max_busy_timeout'
+ * isn't specified, use the default max erase timeout.
+ */
+ do {
+ y = 0;
+ for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
+ timeout = mmc_erase_timeout(card, arg, qty + x);
+
+ if (qty + x > min_qty && timeout > max_busy_timeout)
+ break;
+
+ if (timeout < last_timeout)
+ break;
+ last_timeout = timeout;
+ y = x;
+ }
+ qty += y;
+ } while (y);
+
+ if (!qty)
+ return 0;
+
+ /*
+ * When specifying a sector range to trim, chances are we might cross
+ * an erase-group boundary even if the amount of sectors is less than
+ * one erase-group.
+ * If we can only fit one erase-group in the controller timeout budget,
+ * we have to care that erase-group boundaries are not crossed by a
+ * single trim operation. We flag that special case with "eg_boundary".
+ * In all other cases we can just decrement qty and pretend that we
+ * always touch (qty + 1) erase-groups as a simple optimization.
+ */
+ if (qty == 1)
+ card->eg_boundary = 1;
+ else
+ qty--;
+
+ /* Convert qty to sectors */
+ if (card->erase_shift)
+ max_discard = qty << card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_discard = qty + 1;
+ else
+ max_discard = qty * card->erase_size;
+
+ return max_discard;
+}
+
+unsigned int mmc_calc_max_discard(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, max_trim;
+
+ /*
+ * Without erase_group_def set, MMC erase timeout depends on clock
+	 * frequency, which can change. In that case, the best choice is
+ * just the preferred erase size.
+ */
+ if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
+ return card->pref_erase;
+
+ max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
+ if (mmc_can_trim(card)) {
+ max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
+ if (max_trim < max_discard || max_discard == 0)
+ max_discard = max_trim;
+ } else if (max_discard < card->erase_size) {
+ max_discard = 0;
+ }
+ pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
+ mmc_hostname(host), max_discard, host->max_busy_timeout ?
+ host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
+ return max_discard;
+}
+EXPORT_SYMBOL(mmc_calc_max_discard);
+
+bool mmc_card_is_blockaddr(struct mmc_card *card)
+{
+ return card ? mmc_card_blockaddr(card) : false;
+}
+EXPORT_SYMBOL(mmc_card_is_blockaddr);
+
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
+{
+ struct mmc_command cmd = {};
+
+ if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
+ mmc_card_hs400(card) || mmc_card_hs400es(card))
+ return 0;
+
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = blocklen;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blocklen);
+
+static void mmc_hw_reset_for_init(struct mmc_host *host)
+{
+ mmc_pwrseq_reset(host);
+
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return;
+ host->ops->hw_reset(host);
+}
+
+/**
+ * mmc_hw_reset - reset the card in hardware
+ * @host: MMC host to which the card is attached
+ *
+ * Hard reset the card. This function is only for upper layers, like the
+ * block layer or card drivers. You cannot use it in host drivers (struct
+ * mmc_card might be gone then).
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int mmc_hw_reset(struct mmc_host *host)
+{
+ int ret;
+
+ if (!host->card)
+ return -EINVAL;
+
+ mmc_bus_get(host);
+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->hw_reset) {
+ mmc_bus_put(host);
+ return -EOPNOTSUPP;
+ }
+
+ ret = host->bus_ops->hw_reset(host);
+ mmc_bus_put(host);
+
+ if (ret < 0)
+ pr_warn("%s: tried to HW reset card, got error %d\n",
+ mmc_hostname(host), ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_hw_reset);
+
+int mmc_sw_reset(struct mmc_host *host)
+{
+ int ret;
+
+ if (!host->card)
+ return -EINVAL;
+
+ mmc_bus_get(host);
+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->sw_reset) {
+ mmc_bus_put(host);
+ return -EOPNOTSUPP;
+ }
+
+ ret = host->bus_ops->sw_reset(host);
+ mmc_bus_put(host);
+
+ if (ret)
+ pr_warn("%s: tried to SW reset card, got error %d\n",
+ mmc_hostname(host), ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_sw_reset);
+
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+ host->f_init = freq;
+
+ pr_debug("%s: %s: trying to init card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+
+ mmc_power_up(host, host->ocr_avail);
+
+ /*
+ * Some eMMCs (with VCCQ always on) may not be reset after power up, so
+ * do a hardware reset if possible.
+ */
+ mmc_hw_reset_for_init(host);
+
+ /*
+ * sdio_reset sends CMD52 to reset card. Since we do not know
+ * if the card is being re-initialized, just send it. CMD52
+ * should be ignored by SD/eMMC cards.
+	 * Skip it if we already know that we do not support SDIO commands.
+ */
+ if (!(host->caps2 & MMC_CAP2_NO_SDIO))
+ sdio_reset(host);
+
+ mmc_go_idle(host);
+
+ if (!(host->caps2 & MMC_CAP2_NO_SD))
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ /* Order's important: probe SDIO, then SD, then MMC */
+ if (!(host->caps2 & MMC_CAP2_NO_SDIO))
+ if (!mmc_attach_sdio(host))
+ return 0;
+
+ if (!(host->caps2 & MMC_CAP2_NO_SD))
+ if (!mmc_attach_sd(host))
+ return 0;
+
+ if (!(host->caps2 & MMC_CAP2_NO_MMC))
+ if (!mmc_attach_mmc(host))
+ return 0;
+
+ mmc_power_off(host);
+ return -EIO;
+}
+
+int _mmc_detect_card_removed(struct mmc_host *host)
+{
+ int ret;
+
+ if (!host->card || mmc_card_removed(host->card))
+ return 1;
+
+ ret = host->bus_ops->alive(host);
+
+ /*
+ * Card detect status and alive check may be out of sync if card is
+ * removed slowly, when card detect switch changes while card/slot
+ * pads are still contacted in hardware (refer to "SD Card Mechanical
+ * Addendum, Appendix C: Card Detection Switch"). So reschedule a
+ * detect work 200ms later for this case.
+ */
+ if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
+ mmc_detect_change(host, msecs_to_jiffies(200));
+ pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
+ }
+
+ if (ret) {
+ mmc_card_set_removed(host->card);
+ pr_debug("%s: card remove detected\n", mmc_hostname(host));
+ }
+
+ return ret;
+}
+
+int mmc_detect_card_removed(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int ret;
+
+ WARN_ON(!host->claimed);
+
+ if (!card)
+ return 1;
+
+ if (!mmc_card_is_removable(host))
+ return 0;
+
+ ret = mmc_card_removed(card);
+ /*
+ * The card will be considered unchanged unless we have been asked to
+ * detect a change or host requires polling to provide card detection.
+ */
+ if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
+ return ret;
+
+ host->detect_change = 0;
+ if (!ret) {
+ ret = _mmc_detect_card_removed(host);
+ if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
+ /*
+ * Schedule a detect work as soon as possible to let a
+ * rescan handle the card removal.
+ */
+ cancel_delayed_work(&host->detect);
+ _mmc_detect_change(host, 0, false);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_detect_card_removed);
+
+void mmc_rescan(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
+ int i;
+
+ if (host->rescan_disable)
+ return;
+
+ /* If there is a non-removable card registered, only scan once */
+ if (!mmc_card_is_removable(host) && host->rescan_entered)
+ return;
+ host->rescan_entered = 1;
+
+ if (host->trigger_card_event && host->ops->card_event) {
+ mmc_claim_host(host);
+ host->ops->card_event(host);
+ mmc_release_host(host);
+ host->trigger_card_event = false;
+ }
+
+ mmc_bus_get(host);
+
+ /* Verify a registered card to be functional, else remove it. */
+ if (host->bus_ops && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ host->detect_change = 0;
+
+ /*
+ * Let mmc_bus_put() free the bus/bus_ops if we've found that
+ * the card is no longer present.
+ */
+ mmc_bus_put(host);
+ mmc_bus_get(host);
+
+ /* if there still is a card present, stop here */
+ if (host->bus_ops != NULL) {
+ mmc_bus_put(host);
+ goto out;
+ }
+
+ /*
+ * Only we can add a new handler, so it's safe to
+ * release the lock here.
+ */
+ mmc_bus_put(host);
+
+ mmc_claim_host(host);
+ if (mmc_card_is_removable(host) && host->ops->get_cd &&
+ host->ops->get_cd(host) == 0) {
+ mmc_power_off(host);
+ mmc_release_host(host);
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned int freq = freqs[i];
+ if (freq > host->f_max) {
+ if (i + 1 < ARRAY_SIZE(freqs))
+ continue;
+ freq = host->f_max;
+ }
+ if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
+ break;
+ if (freqs[i] <= host->f_min)
+ break;
+ }
+ mmc_release_host(host);
+
+ out:
+ if (host->caps & MMC_CAP_NEEDS_POLL)
+ mmc_schedule_delayed_work(&host->detect, HZ);
+}
+
+void mmc_start_host(struct mmc_host *host)
+{
+ host->f_init = max(min(freqs[0], host->f_max), host->f_min);
+ host->rescan_disable = 0;
+
+ if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
+ mmc_claim_host(host);
+ mmc_power_up(host, host->ocr_avail);
+ mmc_release_host(host);
+ }
+
+ mmc_gpiod_request_cd_irq(host);
+ _mmc_detect_change(host, 0, false);
+}
+
+void __mmc_stop_host(struct mmc_host *host)
+{
+ if (host->slot.cd_irq >= 0) {
+ mmc_gpio_set_cd_wake(host, false);
+ disable_irq(host->slot.cd_irq);
+ }
+
+ host->rescan_disable = 1;
+ cancel_delayed_work_sync(&host->detect);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+ __mmc_stop_host(host);
+
+ /* clear pm flags now and let card drivers set them as needed */
+ host->pm_flags = 0;
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ mmc_bus_put(host);
+ return;
+ }
+ mmc_bus_put(host);
+
+ mmc_claim_host(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+}
+
+static int __init mmc_init(void)
+{
+ int ret;
+
+ ret = mmc_register_bus();
+ if (ret)
+ return ret;
+
+ ret = mmc_register_host_class();
+ if (ret)
+ goto unregister_bus;
+
+ ret = sdio_register_bus();
+ if (ret)
+ goto unregister_host_class;
+
+ return 0;
+
+unregister_host_class:
+ mmc_unregister_host_class();
+unregister_bus:
+ mmc_unregister_bus();
+ return ret;
+}
+
+static void __exit mmc_exit(void)
+{
+ sdio_unregister_bus();
+ mmc_unregister_host_class();
+ mmc_unregister_bus();
+}
+
+subsys_initcall(mmc_init);
+module_exit(mmc_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
new file mode 100644
index 000000000..a6c814fdb
--- /dev/null
+++ b/drivers/mmc/core/core.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/mmc/core/core.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2007 Pierre Ossman
+ */
+#ifndef _MMC_CORE_CORE_H
+#define _MMC_CORE_CORE_H
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+struct mmc_host;
+struct mmc_card;
+struct mmc_request;
+
+#define MMC_CMD_RETRIES 3
+
+struct mmc_bus_ops {
+ void (*remove)(struct mmc_host *);
+ void (*detect)(struct mmc_host *);
+ int (*pre_suspend)(struct mmc_host *);
+ int (*suspend)(struct mmc_host *);
+ int (*resume)(struct mmc_host *);
+ int (*runtime_suspend)(struct mmc_host *);
+ int (*runtime_resume)(struct mmc_host *);
+ int (*alive)(struct mmc_host *);
+ int (*shutdown)(struct mmc_host *);
+ int (*hw_reset)(struct mmc_host *);
+ int (*sw_reset)(struct mmc_host *);
+ bool (*cache_enabled)(struct mmc_host *);
+};
+
+void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
+void mmc_detach_bus(struct mmc_host *host);
+
+struct device_node *mmc_of_find_child_device(struct mmc_host *host,
+ unsigned func_num);
+
+void mmc_init_erase(struct mmc_card *card);
+
+void mmc_set_chip_select(struct mmc_host *host, int mode);
+void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
+void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
+u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
+int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_host_set_uhs_voltage(struct mmc_host *host);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+void mmc_set_initial_signal_voltage(struct mmc_host *host);
+void mmc_set_timing(struct mmc_host *host, unsigned int timing);
+void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
+int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
+ int card_drv_type, int *drv_type);
+void mmc_power_up(struct mmc_host *host, u32 ocr);
+void mmc_power_off(struct mmc_host *host);
+void mmc_power_cycle(struct mmc_host *host, u32 ocr);
+void mmc_set_initial_state(struct mmc_host *host);
+u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+
+static inline void mmc_delay(unsigned int ms)
+{
+ if (ms <= 20)
+ usleep_range(ms * 1000, ms * 1250);
+ else
+ msleep(ms);
+}
+
+void mmc_rescan(struct work_struct *work);
+void mmc_start_host(struct mmc_host *host);
+void __mmc_stop_host(struct mmc_host *host);
+void mmc_stop_host(struct mmc_host *host);
+
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq);
+int _mmc_detect_card_removed(struct mmc_host *host);
+int mmc_detect_card_removed(struct mmc_host *host);
+
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
+
+/* Module parameters */
+extern bool use_spi_crc;
+
+/* Debugfs information for hosts and cards */
+void mmc_add_host_debugfs(struct mmc_host *host);
+void mmc_remove_host_debugfs(struct mmc_host *host);
+
+void mmc_add_card_debugfs(struct mmc_card *card);
+void mmc_remove_card_debugfs(struct mmc_card *card);
+
+int mmc_execute_tuning(struct mmc_card *card);
+int mmc_hs200_to_hs400(struct mmc_card *card);
+int mmc_hs400_to_hs200(struct mmc_card *card);
+
+void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
+bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
+
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg);
+int mmc_can_erase(struct mmc_card *card);
+int mmc_can_trim(struct mmc_card *card);
+int mmc_can_discard(struct mmc_card *card);
+int mmc_can_sanitize(struct mmc_card *card);
+int mmc_can_secure_erase_trim(struct mmc_card *card);
+int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr);
+unsigned int mmc_calc_max_discard(struct mmc_card *card);
+
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort);
+void mmc_release_host(struct mmc_host *host);
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
+
+/**
+ * mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ *
+ * Claim a host for a set of operations.
+ */
+static inline void mmc_claim_host(struct mmc_host *host)
+{
+ __mmc_claim_host(host, NULL, NULL);
+}
+
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let the
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non-zero, clean up any resources allocated in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
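A sketch of the intended call order (names and control flow are
illustrative, not taken from the queue layer): the next request may be
prepared, e.g. DMA-mapped, while the current one is still executing, and
the completion error is passed to post-processing so pre_req resources
can be torn down:

	static void example_issue_pipelined(struct mmc_host *host,
					    struct mmc_request *cur,
					    struct mmc_request *next)
	{
		mmc_pre_req(host, next);	/* may overlap with 'cur' */
		mmc_wait_for_req_done(host, cur);
		mmc_post_req(host, cur, cur->cmd->error);
	}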
+
+static inline bool mmc_cache_enabled(struct mmc_host *host)
+{
+ if (host->bus_ops->cache_enabled)
+ return host->bus_ops->cache_enabled(host);
+
+ return false;
+}
+
+#endif
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
new file mode 100644
index 000000000..9ec84c86c
--- /dev/null
+++ b/drivers/mmc/core/debugfs.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debugfs support for hosts and cards
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ */
+#include <linux/moduleparam.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/fault-inject.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "mmc_ops.h"
+
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+static char *fail_request;
+module_param(fail_request, charp, 0);
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
+/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ios_show(struct seq_file *s, void *data)
+{
+ static const char *vdd_str[] = {
+ [8] = "2.0",
+ [9] = "2.1",
+ [10] = "2.2",
+ [11] = "2.3",
+ [12] = "2.4",
+ [13] = "2.5",
+ [14] = "2.6",
+ [15] = "2.7",
+ [16] = "2.8",
+ [17] = "2.9",
+ [18] = "3.0",
+ [19] = "3.1",
+ [20] = "3.2",
+ [21] = "3.3",
+ [22] = "3.4",
+ [23] = "3.5",
+ [24] = "3.6",
+ };
+ struct mmc_host *host = s->private;
+ struct mmc_ios *ios = &host->ios;
+ const char *str;
+
+ seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
+ if (host->actual_clock)
+ seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock);
+ seq_printf(s, "vdd:\t\t%u ", ios->vdd);
+ if ((1 << ios->vdd) & MMC_VDD_165_195)
+ seq_printf(s, "(1.65 - 1.95 V)\n");
+ else if (ios->vdd < (ARRAY_SIZE(vdd_str) - 1)
+ && vdd_str[ios->vdd] && vdd_str[ios->vdd + 1])
+ seq_printf(s, "(%s ~ %s V)\n", vdd_str[ios->vdd],
+ vdd_str[ios->vdd + 1]);
+ else
+ seq_printf(s, "(invalid)\n");
+
+ switch (ios->bus_mode) {
+ case MMC_BUSMODE_OPENDRAIN:
+ str = "open drain";
+ break;
+ case MMC_BUSMODE_PUSHPULL:
+ str = "push-pull";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "bus mode:\t%u (%s)\n", ios->bus_mode, str);
+
+ switch (ios->chip_select) {
+ case MMC_CS_DONTCARE:
+ str = "don't care";
+ break;
+ case MMC_CS_HIGH:
+ str = "active high";
+ break;
+ case MMC_CS_LOW:
+ str = "active low";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "chip select:\t%u (%s)\n", ios->chip_select, str);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ str = "off";
+ break;
+ case MMC_POWER_UP:
+ str = "up";
+ break;
+ case MMC_POWER_ON:
+ str = "on";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "power mode:\t%u (%s)\n", ios->power_mode, str);
+ seq_printf(s, "bus width:\t%u (%u bits)\n",
+ ios->bus_width, 1 << ios->bus_width);
+
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ str = "legacy";
+ break;
+ case MMC_TIMING_MMC_HS:
+ str = "mmc high-speed";
+ break;
+ case MMC_TIMING_SD_HS:
+ str = "sd high-speed";
+ break;
+ case MMC_TIMING_UHS_SDR12:
+ str = "sd uhs SDR12";
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ str = "sd uhs SDR25";
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ str = "sd uhs SDR50";
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ str = "sd uhs SDR104";
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ str = "sd uhs DDR50";
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ str = "mmc DDR52";
+ break;
+ case MMC_TIMING_MMC_HS200:
+ str = "mmc HS200";
+ break;
+ case MMC_TIMING_MMC_HS400:
+ str = mmc_card_hs400es(host->card) ?
+ "mmc HS400 enhanced strobe" : "mmc HS400";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str);
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ str = "3.30 V";
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ str = "1.80 V";
+ break;
+ case MMC_SIGNAL_VOLTAGE_120:
+ str = "1.20 V";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "signal voltage:\t%u (%s)\n", ios->signal_voltage, str);
+
+ switch (ios->drv_type) {
+ case MMC_SET_DRIVER_TYPE_A:
+ str = "driver type A";
+ break;
+ case MMC_SET_DRIVER_TYPE_B:
+ str = "driver type B";
+ break;
+ case MMC_SET_DRIVER_TYPE_C:
+ str = "driver type C";
+ break;
+ case MMC_SET_DRIVER_TYPE_D:
+ str = "driver type D";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "driver type:\t%u (%s)\n", ios->drv_type, str);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mmc_ios);
+
+static int mmc_clock_opt_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ *val = host->ios.clock;
+
+ return 0;
+}
+
+static int mmc_clock_opt_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+	/* We need this check because the input value is a u64 */
+ if (val != 0 && (val > host->f_max || val < host->f_min))
+ return -EINVAL;
+
+ mmc_claim_host(host);
+ mmc_set_clock(host, (unsigned int) val);
+ mmc_release_host(host);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
+ "%llu\n");
+
+void mmc_add_host_debugfs(struct mmc_host *host)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(mmc_hostname(host), NULL);
+ host->debugfs_root = root;
+
+ debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops);
+ debugfs_create_x32("caps", S_IRUSR, root, &host->caps);
+ debugfs_create_x32("caps2", S_IRUSR, root, &host->caps2);
+ debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_clock_fops);
+
+#ifdef CONFIG_FAIL_MMC_REQUEST
+ if (fail_request)
+ setup_fault_attr(&fail_default_attr, fail_request);
+ host->fail_mmc_request = fail_default_attr;
+ fault_create_debugfs_attr("fail_mmc_request", root,
+ &host->fail_mmc_request);
+#endif
+}
+
+void mmc_remove_host_debugfs(struct mmc_host *host)
+{
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+void mmc_add_card_debugfs(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ struct dentry *root;
+
+ if (!host->debugfs_root)
+ return;
+
+ root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
+ card->debugfs_root = root;
+
+ debugfs_create_x32("state", S_IRUSR, root, &card->state);
+}
+
+void mmc_remove_card_debugfs(struct mmc_card *card)
+{
+ debugfs_remove_recursive(card->debugfs_root);
+ card->debugfs_root = NULL;
+}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
new file mode 100644
index 000000000..b949a4468
--- /dev/null
+++ b/drivers/mmc/core/host.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mmc/core/host.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2010 Linus Walleij
+ *
+ * MMC host class device management
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pagemap.h>
+#include <linux/pm_wakeup.h>
+#include <linux/export.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "core.h"
+#include "host.h"
+#include "slot-gpio.h"
+#include "pwrseq.h"
+#include "sdio_ops.h"
+
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+
+static DEFINE_IDA(mmc_host_ida);
+
+#ifdef CONFIG_PM_SLEEP
+static int mmc_host_class_prepare(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ /*
+ * It's safe to access the bus_ops pointer, as both userspace and the
+ * workqueue for detecting cards are frozen at this point.
+ */
+ if (!host->bus_ops)
+ return 0;
+
+ /* Validate conditions for system suspend. */
+ if (host->bus_ops->pre_suspend)
+ return host->bus_ops->pre_suspend(host);
+
+ return 0;
+}
+
+static void mmc_host_class_complete(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ _mmc_detect_change(host, 0, false);
+}
+
+static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
+ .prepare = mmc_host_class_prepare,
+ .complete = mmc_host_class_complete,
+};
+
+#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
+#else
+#define MMC_HOST_CLASS_DEV_PM_OPS NULL
+#endif
+
+static void mmc_host_classdev_release(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ wakeup_source_unregister(host->ws);
+ if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
+ ida_simple_remove(&mmc_host_ida, host->index);
+ kfree(host);
+}
+
+static int mmc_host_classdev_shutdown(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ __mmc_stop_host(host);
+ return 0;
+}
+
+static struct class mmc_host_class = {
+ .name = "mmc_host",
+ .dev_release = mmc_host_classdev_release,
+ .shutdown_pre = mmc_host_classdev_shutdown,
+ .pm = MMC_HOST_CLASS_DEV_PM_OPS,
+};
+
+int mmc_register_host_class(void)
+{
+ return class_register(&mmc_host_class);
+}
+
+void mmc_unregister_host_class(void)
+{
+ class_unregister(&mmc_host_class);
+}
+
+void mmc_retune_enable(struct mmc_host *host)
+{
+ host->can_retune = 1;
+ if (host->retune_period)
+ mod_timer(&host->retune_timer,
+ jiffies + host->retune_period * HZ);
+}
+
+/*
+ * Pause re-tuning for a small set of operations. The pause takes effect
+ * after the next command, once re-tuning has first been performed.
+ */
+void mmc_retune_pause(struct mmc_host *host)
+{
+ if (!host->retune_paused) {
+ host->retune_paused = 1;
+ mmc_retune_needed(host);
+ mmc_retune_hold(host);
+ }
+}
+EXPORT_SYMBOL(mmc_retune_pause);
+
+void mmc_retune_unpause(struct mmc_host *host)
+{
+ if (host->retune_paused) {
+ host->retune_paused = 0;
+ mmc_retune_release(host);
+ }
+}
+EXPORT_SYMBOL(mmc_retune_unpause);
+
+void mmc_retune_disable(struct mmc_host *host)
+{
+ mmc_retune_unpause(host);
+ host->can_retune = 0;
+ del_timer_sync(&host->retune_timer);
+ host->retune_now = 0;
+ host->need_retune = 0;
+}
+
+void mmc_retune_timer_stop(struct mmc_host *host)
+{
+ del_timer_sync(&host->retune_timer);
+}
+EXPORT_SYMBOL(mmc_retune_timer_stop);
+
+void mmc_retune_hold(struct mmc_host *host)
+{
+ if (!host->hold_retune)
+ host->retune_now = 1;
+ host->hold_retune += 1;
+}
+
+void mmc_retune_release(struct mmc_host *host)
+{
+ if (host->hold_retune)
+ host->hold_retune -= 1;
+ else
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(mmc_retune_release);
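A sketch of how callers pair these (the function below is invented): hold
before a command sequence that must not be split by re-tuning and release
afterwards; holds nest via the hold_retune count, and the first hold
requests one final re-tune through retune_now:

	static void example_no_retune_section(struct mmc_host *host)
	{
		mmc_retune_hold(host);		/* nested holds are counted */

		/* ... commands that must not be interrupted ... */

		mmc_retune_release(host);	/* WARNs if unbalanced */
	}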
+
+int mmc_retune(struct mmc_host *host)
+{
+ bool return_to_hs400 = false;
+ int err;
+
+ if (host->retune_now)
+ host->retune_now = 0;
+ else
+ return 0;
+
+ if (!host->need_retune || host->doing_retune || !host->card)
+ return 0;
+
+ host->need_retune = 0;
+
+ host->doing_retune = 1;
+
+ if (host->ios.timing == MMC_TIMING_MMC_HS400) {
+ err = mmc_hs400_to_hs200(host->card);
+ if (err)
+ goto out;
+
+ return_to_hs400 = true;
+ }
+
+ err = mmc_execute_tuning(host->card);
+ if (err)
+ goto out;
+
+ if (return_to_hs400)
+ err = mmc_hs200_to_hs400(host->card);
+out:
+ host->doing_retune = 0;
+
+ return err;
+}
+
+static void mmc_retune_timer(struct timer_list *t)
+{
+ struct mmc_host *host = from_timer(host, t, retune_timer);
+
+ mmc_retune_needed(host);
+}
+
+/**
+ * mmc_of_parse() - parse host's device-tree node
+ * @host: host whose node should be parsed.
+ *
+ * To keep the rest of the MMC subsystem unaware of whether DT has been
+ * used to instantiate and configure this host instance or not, we
+ * parse the properties and set respective generic mmc-host flags and
+ * parameters.
+ */
+int mmc_of_parse(struct mmc_host *host)
+{
+ struct device *dev = host->parent;
+ u32 bus_width, drv_type, cd_debounce_delay_ms;
+ int ret;
+
+ if (!dev || !dev_fwnode(dev))
+ return 0;
+
+ /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+ if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
+ dev_dbg(host->parent,
+ "\"bus-width\" property is missing, assuming 1 bit.\n");
+ bus_width = 1;
+ }
+
+ switch (bus_width) {
+ case 8:
+ host->caps |= MMC_CAP_8_BIT_DATA;
+ fallthrough; /* Hosts capable of 8-bit can also do 4 bits */
+ case 4:
+ host->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ break;
+ default:
+ dev_err(host->parent,
+ "Invalid \"bus-width\" value %u!\n", bus_width);
+ return -EINVAL;
+ }
+
+ /* f_max is obtained from the optional "max-frequency" property */
+ device_property_read_u32(dev, "max-frequency", &host->f_max);
+
+ /*
+ * Configure CD and WP pins. They are both by default active low to
+ * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
+ * mmc-gpio helpers are used to attach, configure and use them. If
+ * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
+ * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
+ * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
+ * is set. If the "non-removable" property is found, the
+ * MMC_CAP_NONREMOVABLE capability is set and no card-detection
+ * configuration is performed.
+ */
+
+ /* Parse Card Detection */
+
+ if (device_property_read_bool(dev, "non-removable")) {
+ host->caps |= MMC_CAP_NONREMOVABLE;
+ } else {
+ if (device_property_read_bool(dev, "cd-inverted"))
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
+ if (device_property_read_u32(dev, "cd-debounce-delay-ms",
+ &cd_debounce_delay_ms))
+ cd_debounce_delay_ms = 200;
+
+ if (device_property_read_bool(dev, "broken-cd"))
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ ret = mmc_gpiod_request_cd(host, "cd", 0, false,
+ cd_debounce_delay_ms * 1000);
+ if (!ret)
+ dev_info(host->parent, "Got CD GPIO\n");
+ else if (ret != -ENOENT && ret != -ENOSYS)
+ return ret;
+ }
+
+ /* Parse Write Protection */
+
+ if (device_property_read_bool(dev, "wp-inverted"))
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
+ if (!ret)
+ dev_info(host->parent, "Got WP GPIO\n");
+ else if (ret != -ENOENT && ret != -ENOSYS)
+ return ret;
+
+ if (device_property_read_bool(dev, "disable-wp"))
+ host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
+ if (device_property_read_bool(dev, "cap-sd-highspeed"))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+ host->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (device_property_read_bool(dev, "sd-uhs-sdr12"))
+ host->caps |= MMC_CAP_UHS_SDR12;
+ if (device_property_read_bool(dev, "sd-uhs-sdr25"))
+ host->caps |= MMC_CAP_UHS_SDR25;
+ if (device_property_read_bool(dev, "sd-uhs-sdr50"))
+ host->caps |= MMC_CAP_UHS_SDR50;
+ if (device_property_read_bool(dev, "sd-uhs-sdr104"))
+ host->caps |= MMC_CAP_UHS_SDR104;
+ if (device_property_read_bool(dev, "sd-uhs-ddr50"))
+ host->caps |= MMC_CAP_UHS_DDR50;
+ if (device_property_read_bool(dev, "cap-power-off-card"))
+ host->caps |= MMC_CAP_POWER_OFF_CARD;
+ if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
+ host->caps |= MMC_CAP_HW_RESET;
+ if (device_property_read_bool(dev, "cap-sdio-irq"))
+ host->caps |= MMC_CAP_SDIO_IRQ;
+ if (device_property_read_bool(dev, "full-pwr-cycle"))
+ host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+ if (device_property_read_bool(dev, "full-pwr-cycle-in-suspend"))
+ host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND;
+ if (device_property_read_bool(dev, "keep-power-in-suspend"))
+ host->pm_caps |= MMC_PM_KEEP_POWER;
+ if (device_property_read_bool(dev, "wakeup-source") ||
+ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
+ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
+ host->caps |= MMC_CAP_3_3V_DDR;
+ if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
+ host->caps |= MMC_CAP_1_8V_DDR;
+ if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
+ host->caps |= MMC_CAP_1_2V_DDR;
+ if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
+ host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+ if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
+ host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
+ host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+ if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
+ host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+ if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
+ host->caps2 |= MMC_CAP2_HS400_ES;
+ if (device_property_read_bool(dev, "no-sdio"))
+ host->caps2 |= MMC_CAP2_NO_SDIO;
+ if (device_property_read_bool(dev, "no-sd"))
+ host->caps2 |= MMC_CAP2_NO_SD;
+ if (device_property_read_bool(dev, "no-mmc"))
+ host->caps2 |= MMC_CAP2_NO_MMC;
+
+ /* Must be after "non-removable" check */
+ if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ host->fixed_drv_type = drv_type;
+ else
+ dev_err(host->parent,
+ "can't use fixed driver type, media is removable\n");
+ }
+
+ host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
+ if (host->dsr_req && (host->dsr & ~0xffff)) {
+ dev_err(host->parent,
+ "device tree specified broken value for DSR: 0x%x, ignoring\n",
+ host->dsr);
+ host->dsr_req = 0;
+ }
+
+ device_property_read_u32(dev, "post-power-on-delay-ms",
+ &host->ios.power_delay_ms);
+
+ return mmc_pwrseq_alloc(host);
+}
+
+EXPORT_SYMBOL(mmc_of_parse);
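For context, a minimal and hypothetical host-driver probe showing where
mmc_of_parse() slots in: after mmc_alloc_host() and before mmc_add_host(),
so the DT capabilities land in host->caps before scanning starts. The
driver, example_priv and the error handling are assumptions:

	static int example_mmc_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;
		int ret;

		mmc = mmc_alloc_host(sizeof(struct example_priv), &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		ret = mmc_of_parse(mmc);	/* bus-width, CD/WP GPIOs, caps */
		if (ret)
			goto err_free;

		ret = mmc_add_host(mmc);
		if (ret)
			goto err_free;

		return 0;

	err_free:
		mmc_free_host(mmc);
		return ret;
	}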
+
+/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * Parse the "voltage-ranges" DT property, returning zero if it is not
+ * found, negative errno if the voltage-range specification is invalid,
+ * or one if the voltage-range is specified and successfully parsed.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ if (!voltage_ranges) {
+ pr_debug("%pOF: voltage-ranges unspecified\n", np);
+ return 0;
+ }
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!num_ranges) {
+ pr_err("%pOF: voltage-ranges empty\n", np);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%pOF: voltage-range #%d is invalid\n",
+ np, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
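A usage sketch (the caller and the fallback mask are assumptions): the
return value distinguishes "property absent" (0) from "parsed into the
mask" (1), so a caller can substitute a default OCR mask when the property
is missing:

	static int example_get_ocr(struct device_node *np, u32 *ocr)
	{
		int ret = mmc_of_parse_voltage(np, ocr);

		if (ret < 0)
			return ret;	/* malformed voltage-ranges */
		if (ret == 0)		/* property absent: assume 3.3V */
			*ocr = MMC_VDD_32_33 | MMC_VDD_33_34;

		return 0;
	}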
+
+/**
+ * mmc_first_nonreserved_index() - get the first index that is not reserved
+ */
+static int mmc_first_nonreserved_index(void)
+{
+ int max;
+
+ max = of_alias_get_highest_id("mmc");
+ if (max < 0)
+ return 0;
+
+ return max + 1;
+}
+
+/**
+ * mmc_alloc_host - initialise the per-host structure.
+ * @extra: size of the private data structure
+ * @dev: pointer to host device model structure
+ *
+ * Initialise the per-host structure.
+ */
+struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
+{
+ int index;
+ struct mmc_host *host;
+ int alias_id, min_idx, max_idx;
+
+ host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ /* scanning will be enabled when we're ready */
+ host->rescan_disable = 1;
+
+ alias_id = of_alias_get_id(dev->of_node, "mmc");
+ if (alias_id >= 0) {
+ index = alias_id;
+ } else {
+ min_idx = mmc_first_nonreserved_index();
+ max_idx = 0;
+
+ index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
+ if (index < 0) {
+ kfree(host);
+ return NULL;
+ }
+ }
+
+ host->index = index;
+
+ dev_set_name(&host->class_dev, "mmc%d", host->index);
+ host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
+
+ host->parent = dev;
+ host->class_dev.parent = dev;
+ host->class_dev.class = &mmc_host_class;
+ device_initialize(&host->class_dev);
+ device_enable_async_suspend(&host->class_dev);
+
+ if (mmc_gpio_alloc(host)) {
+ put_device(&host->class_dev);
+ return NULL;
+ }
+
+ spin_lock_init(&host->lock);
+ init_waitqueue_head(&host->wq);
+ INIT_DELAYED_WORK(&host->detect, mmc_rescan);
+ INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
+ timer_setup(&host->retune_timer, mmc_retune_timer, 0);
+
+ /*
+ * By default, hosts do not support SGIO or large requests.
+ * They have to set these according to their abilities.
+ */
+ host->max_segs = 1;
+ host->max_seg_size = PAGE_SIZE;
+
+ host->max_req_size = PAGE_SIZE;
+ host->max_blk_size = 512;
+ host->max_blk_count = PAGE_SIZE / 512;
+
+ host->fixed_drv_type = -EINVAL;
+ host->ios.power_delay_ms = 10;
+ host->ios.power_mode = MMC_POWER_UNDEFINED;
+
+ return host;
+}
+
+EXPORT_SYMBOL(mmc_alloc_host);
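The extra bytes are carved out directly after struct mmc_host, and drivers
conventionally reach them through mmc_priv() from <linux/mmc/host.h>; a
sketch with an invented private struct:

	struct example_priv {		/* hypothetical driver state */
		void __iomem *base;
	};

	/* mmc_priv() returns the 'extra' area allocated by mmc_alloc_host() */
	static struct example_priv *to_example_priv(struct mmc_host *mmc)
	{
		return mmc_priv(mmc);
	}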
+
+static void devm_mmc_host_release(struct device *dev, void *res)
+{
+ mmc_free_host(*(struct mmc_host **)res);
+}
+
+struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra)
+{
+ struct mmc_host **dr, *host;
+
+ dr = devres_alloc(devm_mmc_host_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ host = mmc_alloc_host(extra, dev);
+ if (IS_ERR(host)) {
+ devres_free(dr);
+ return host;
+ }
+
+ *dr = host;
+ devres_add(dev, dr);
+
+ return host;
+}
+EXPORT_SYMBOL(devm_mmc_alloc_host);
+
+static int mmc_validate_host_caps(struct mmc_host *host)
+{
+ if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
+ dev_warn(host->parent, "missing ->enable_sdio_irq() ops\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * mmc_add_host - initialise host hardware
+ * @host: mmc host
+ *
+ * Register the host with the driver model. The host must be
+ * prepared to start servicing requests before this function
+ * completes.
+ */
+int mmc_add_host(struct mmc_host *host)
+{
+ int err;
+
+ err = mmc_validate_host_caps(host);
+ if (err)
+ return err;
+
+ err = device_add(&host->class_dev);
+ if (err)
+ return err;
+
+ led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_add_host_debugfs(host);
+#endif
+
+ mmc_start_host(host);
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_add_host);
+
+/**
+ * mmc_remove_host - remove host hardware
+ * @host: mmc host
+ *
+ * Unregister and remove all cards associated with this host,
+ * and power down the MMC bus. No new requests will be issued
+ * after this function has returned.
+ */
+void mmc_remove_host(struct mmc_host *host)
+{
+ mmc_stop_host(host);
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_remove_host_debugfs(host);
+#endif
+
+ device_del(&host->class_dev);
+
+ led_trigger_unregister_simple(host->led);
+}
+
+EXPORT_SYMBOL(mmc_remove_host);
+
+/**
+ * mmc_free_host - free the host structure
+ * @host: mmc host
+ *
+ * Free the host once all references to it have been dropped.
+ */
+void mmc_free_host(struct mmc_host *host)
+{
+ cancel_delayed_work_sync(&host->detect);
+ mmc_pwrseq_free(host);
+ put_device(&host->class_dev);
+}
+
+EXPORT_SYMBOL(mmc_free_host);
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
new file mode 100644
index 000000000..5e3b9534f
--- /dev/null
+++ b/drivers/mmc/core/host.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/drivers/mmc/core/host.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2007 Pierre Ossman
+ */
+#ifndef _MMC_CORE_HOST_H
+#define _MMC_CORE_HOST_H
+
+#include <linux/mmc/host.h>
+
+int mmc_register_host_class(void);
+void mmc_unregister_host_class(void);
+
+void mmc_retune_enable(struct mmc_host *host);
+void mmc_retune_disable(struct mmc_host *host);
+void mmc_retune_hold(struct mmc_host *host);
+void mmc_retune_release(struct mmc_host *host);
+int mmc_retune(struct mmc_host *host);
+void mmc_retune_pause(struct mmc_host *host);
+void mmc_retune_unpause(struct mmc_host *host);
+
+static inline void mmc_retune_hold_now(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->hold_retune += 1;
+}
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
+static inline int mmc_host_cmd23(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_CMD23;
+}
+
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_DONE_COMPLETE;
+}
+
+static inline int mmc_boot_partition_access(struct mmc_host *host)
+{
+ return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
+}
+
+static inline int mmc_host_uhs(struct mmc_host *host)
+{
+ return host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50) &&
+ host->caps & MMC_CAP_4_BIT_DATA;
+}
+
+static inline bool mmc_card_hs200(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS200;
+}
+
+static inline bool mmc_card_ddr52(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
+}
+
+static inline bool mmc_card_hs400(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS400;
+}
+
+static inline bool mmc_card_hs400es(struct mmc_card *card)
+{
+ return card->host->ios.enhanced_strobe;
+}
+
+#endif
+
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
new file mode 100644
index 000000000..87807ef01
--- /dev/null
+++ b/drivers/mmc/core/mmc.c
@@ -0,0 +1,2306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mmc/core/mmc.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
+ * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "quirks.h"
+#include "sd_ops.h"
+#include "pwrseq.h"
+
+#define DEFAULT_CMD6_TIMEOUT_MS 500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
+
+static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+};
+
+static const unsigned char tran_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+static const unsigned int taac_exp[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
+};
+
+static const unsigned int taac_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+#define UNSTUFF_BITS(resp,start,size) \
+ ({ \
+ const int __size = size; \
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
+ const int __off = 3 - ((start) / 32); \
+ const int __shft = (start) & 31; \
+ u32 __res; \
+ \
+ __res = resp[__off] >> __shft; \
+ if (__size + __shft > 32) \
+ __res |= resp[__off-1] << ((32 - __shft) % 32); \
+ __res & __mask; \
+ })
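A worked example of the macro's addressing, using the manufacturer ID
field decoded below: the 128-bit response is stored as four u32 words with
resp[0] holding bits 127..96, so UNSTUFF_BITS(resp, 104, 24) computes
__off = 3 - 104/32 = 0, __shft = 104 & 31 = 8 and a 24-bit mask, reducing
to:

	/* equivalent to UNSTUFF_BITS(resp, 104, 24) */
	manfid = (resp[0] >> 8) & 0xffffff;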
+
+/*
+ * Given the decoded CSD structure, decode the raw CID to our CID structure.
+ */
+static int mmc_decode_cid(struct mmc_card *card)
+{
+ u32 *resp = card->raw_cid;
+
+ /*
+ * The selection of the format here is based upon published
+	 * specs from SanDisk and from what people have reported.
+ */
+ switch (card->csd.mmca_vsn) {
+ case 0: /* MMC v1.0 - v1.2 */
+ case 1: /* MMC v1.4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
+ card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
+ card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
+ card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
+ card->cid.month = UNSTUFF_BITS(resp, 12, 4);
+ card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ break;
+
+ case 2: /* MMC v2.0 - v2.2 */
+ case 3: /* MMC v3.1 - v3.3 */
+ case 4: /* MMC v4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
+ card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
+ card->cid.month = UNSTUFF_BITS(resp, 12, 4);
+ card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ break;
+
+ default:
+ pr_err("%s: card has unknown MMCA version %d\n",
+ mmc_hostname(card->host), card->csd.mmca_vsn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mmc_set_erase_size(struct mmc_card *card)
+{
+ if (card->ext_csd.erase_group_def & 1)
+ card->erase_size = card->ext_csd.hc_erase_size;
+ else
+ card->erase_size = card->csd.erase_size;
+
+ mmc_init_erase(card);
+}
+
+/*
+ * Given a 128-bit response, decode to our card CSD structure.
+ */
+static int mmc_decode_csd(struct mmc_card *card)
+{
+ struct mmc_csd *csd = &card->csd;
+ unsigned int e, m, a, b;
+ u32 *resp = card->raw_csd;
+
+ /*
+ * We only understand CSD structure v1.1 and v1.2.
+ * v1.2 has extra information in bits 15, 11 and 10.
+ * We also support eMMC v4.4 & v4.41.
+ */
+ csd->structure = UNSTUFF_BITS(resp, 126, 2);
+ if (csd->structure == 0) {
+ pr_err("%s: unrecognised CSD structure version %d\n",
+ mmc_hostname(card->host), csd->structure);
+ return -EINVAL;
+ }
+
+ csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
+ m = UNSTUFF_BITS(resp, 115, 4);
+ e = UNSTUFF_BITS(resp, 112, 3);
+ csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
+ csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
+ e = UNSTUFF_BITS(resp, 47, 3);
+ m = UNSTUFF_BITS(resp, 62, 12);
+ csd->capacity = (1 + m) << (e + 2);
+
+ csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+ csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+ csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+ csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
+ csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
+ csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+ csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+
+ if (csd->write_blkbits >= 9) {
+ a = UNSTUFF_BITS(resp, 42, 5);
+ b = UNSTUFF_BITS(resp, 37, 5);
+ csd->erase_size = (a + 1) * (b + 1);
+ csd->erase_size <<= csd->write_blkbits - 9;
+ }
+
+ return 0;
+}
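To make the capacity arithmetic concrete (field values hypothetical): with
C_SIZE m = 4095 and C_SIZE_MULT e = 7,

	capacity = (1 + 4095) << (7 + 2) = 4096 * 512 blocks

which is exactly the "magic" CSD capacity that high-capacity cards report
(see mmc_read_ext_csd() below); the real density then comes from
EXT_CSD_SEC_CNT in the extended CSD.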
+
+static void mmc_select_card_type(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u8 card_type = card->ext_csd.raw_card_type;
+ u32 caps = host->caps, caps2 = host->caps2;
+ unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
+ unsigned int avail_type = 0;
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_HS_26) {
+ hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS_26;
+ }
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_HS_52) {
+ hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS_52;
+ }
+
+ if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
+ }
+
+ if (caps & MMC_CAP_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
+ }
+
+ if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
+ }
+
+ if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
+ }
+
+ if (caps2 & MMC_CAP2_HS400_1_8V &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
+ }
+
+ if (caps2 & MMC_CAP2_HS400_1_2V &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
+ }
+
+ if ((caps2 & MMC_CAP2_HS400_ES) &&
+ card->ext_csd.strobe_support &&
+ (avail_type & EXT_CSD_CARD_TYPE_HS400))
+ avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
+
+ card->ext_csd.hs_max_dtr = hs_max_dtr;
+ card->ext_csd.hs200_max_dtr = hs200_max_dtr;
+ card->mmc_avail_type = avail_type;
+}
+
+static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
+{
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+
+ /*
+ * Disable these attributes by default
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+ * area offset and size to user by adding sysfs interface.
+ */
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ if (card->ext_csd.partition_setting_completed) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ /*
+ * calculate the enhanced data area offset, in bytes
+ */
+ card->ext_csd.enhanced_area_offset =
+ (((unsigned long long)ext_csd[139]) << 24) +
+ (((unsigned long long)ext_csd[138]) << 16) +
+ (((unsigned long long)ext_csd[137]) << 8) +
+ (((unsigned long long)ext_csd[136]));
+ if (mmc_card_blockaddr(card))
+ card->ext_csd.enhanced_area_offset <<= 9;
+ /*
+ * calculate the enhanced data area size, in kilobytes
+ */
+ card->ext_csd.enhanced_area_size =
+ (ext_csd[142] << 16) + (ext_csd[141] << 8) +
+ ext_csd[140];
+ card->ext_csd.enhanced_area_size *=
+ (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+ card->ext_csd.enhanced_area_size <<= 9;
+ } else {
+ pr_warn("%s: defines enhanced area without partition setting complete\n",
+ mmc_hostname(card->host));
+ }
+ }
+}
+
+static void mmc_part_add(struct mmc_card *card, u64 size,
+ unsigned int part_cfg, char *name, int idx, bool ro,
+ int area_type)
+{
+ card->part[card->nr_parts].size = size;
+ card->part[card->nr_parts].part_cfg = part_cfg;
+ sprintf(card->part[card->nr_parts].name, name, idx);
+ card->part[card->nr_parts].force_ro = ro;
+ card->part[card->nr_parts].area_type = area_type;
+ card->nr_parts++;
+}
+
+static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+{
+ int idx;
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+ u64 part_size;
+
+ /*
+ * General purpose partition feature support --
+ * If ext_csd has the size of general purpose partitions,
+ * set size, part_cfg, partition name in mmc_part.
+ */
+ if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
+ EXT_CSD_PART_SUPPORT_PART_EN) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
+ if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
+ continue;
+ if (card->ext_csd.partition_setting_completed == 0) {
+ pr_warn("%s: has partition size defined without partition complete\n",
+ mmc_hostname(card->host));
+ break;
+ }
+ part_size =
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
+ << 16) +
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
+ << 8) +
+ ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
+ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
+ mmc_part_add(card, part_size << 19,
+ EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
+ "gp%d", idx, false,
+ MMC_BLK_DATA_AREA_GP);
+ }
+ }
+}
+
+/* Minimum partition switch timeout in milliseconds */
+#define MMC_MIN_PART_SWITCH_TIME 300
+
+/*
+ * Decode extended CSD.
+ */
+static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+{
+ int err = 0, idx;
+ u64 part_size;
+ struct device_node *np;
+ bool broken_hpi = false;
+
+ /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
+ if (card->csd.structure == 3) {
+ if (card->ext_csd.raw_ext_csd_structure > 2) {
+ pr_err("%s: unrecognised EXT_CSD structure "
+ "version %d\n", mmc_hostname(card->host),
+ card->ext_csd.raw_ext_csd_structure);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ np = mmc_of_find_child_device(card->host, 0);
+ if (np && of_device_is_compatible(np, "mmc-card"))
+ broken_hpi = of_property_read_bool(np, "broken-hpi");
+ of_node_put(np);
+
+ /*
+ * The EXT_CSD format is meant to be forward compatible. As long
+ * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
+ * are authorized, see JEDEC JESD84-B50 section B.8.
+ */
+ card->ext_csd.rev = ext_csd[EXT_CSD_REV];
+
+ /* fixup device after ext_csd revision field is updated */
+ mmc_fixup_device(card, mmc_ext_csd_fixups);
+
+ card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+ card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+ card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+ card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
+ if (card->ext_csd.rev >= 2) {
+ card->ext_csd.sectors =
+ ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
+ ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
+ ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
+ ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
+
+ /* Cards with density > 2GiB are sector addressed */
+ if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
+ mmc_card_set_blockaddr(card);
+ }
+
+ card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
+ mmc_select_card_type(card);
+
+ card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.raw_erase_timeout_mult =
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.raw_hc_erase_grp_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ if (card->ext_csd.rev >= 3) {
+ u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
+
+ /* EXT_CSD value is in units of 10ms, but we store in ms */
+ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+
+ /* Sleep / awake timeout in 100ns units */
+ if (sa_shift > 0 && sa_shift <= 0x17)
+ card->ext_csd.sa_timeout =
+ 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.erase_group_def =
+ ext_csd[EXT_CSD_ERASE_GROUP_DEF];
+ card->ext_csd.hc_erase_timeout = 300 *
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.hc_erase_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
+
+ card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
+
+ /*
+ * There are two boot regions of equal size, defined in
+ * multiples of 128K.
+ */
+ if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
+ for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
+ part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
+ mmc_part_add(card, part_size,
+ EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
+ "boot%d", idx, true,
+ MMC_BLK_DATA_AREA_BOOT);
+ }
+ }
+ }
+
+ card->ext_csd.raw_hc_erase_gap_size =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+ card->ext_csd.raw_sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.raw_sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.raw_sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
+ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
+ if (card->ext_csd.rev >= 4) {
+ if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
+ EXT_CSD_PART_SETTING_COMPLETED)
+ card->ext_csd.partition_setting_completed = 1;
+ else
+ card->ext_csd.partition_setting_completed = 0;
+
+ mmc_manage_enhanced_area(card, ext_csd);
+
+ mmc_manage_gp_partitions(card, ext_csd);
+
+ card->ext_csd.sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.trim_timeout = 300 *
+ ext_csd[EXT_CSD_TRIM_MULT];
+
+ /*
+ * Note that the call to mmc_part_add above defaults to read
+ * only. If this default assumption is changed, the call must
+ * take into account the value of boot_locked below.
+ */
+ card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
+ card->ext_csd.boot_ro_lockable = true;
+
+ /* Save power class values */
+ card->ext_csd.raw_pwr_cl_52_195 =
+ ext_csd[EXT_CSD_PWR_CL_52_195];
+ card->ext_csd.raw_pwr_cl_26_195 =
+ ext_csd[EXT_CSD_PWR_CL_26_195];
+ card->ext_csd.raw_pwr_cl_52_360 =
+ ext_csd[EXT_CSD_PWR_CL_52_360];
+ card->ext_csd.raw_pwr_cl_26_360 =
+ ext_csd[EXT_CSD_PWR_CL_26_360];
+ card->ext_csd.raw_pwr_cl_200_195 =
+ ext_csd[EXT_CSD_PWR_CL_200_195];
+ card->ext_csd.raw_pwr_cl_200_360 =
+ ext_csd[EXT_CSD_PWR_CL_200_360];
+ card->ext_csd.raw_pwr_cl_ddr_52_195 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
+ card->ext_csd.raw_pwr_cl_ddr_52_360 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
+ card->ext_csd.raw_pwr_cl_ddr_200_360 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
+ }
+
+ if (card->ext_csd.rev >= 5) {
+ /* Adjust production date as per JEDEC JESD84-B451 */
+ if (card->cid.year < 2010)
+ card->cid.year += 16;
+
+ /* check whether the eMMC card supports BKOPS */
+ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ card->ext_csd.bkops = 1;
+ card->ext_csd.man_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+ EXT_CSD_MANUAL_BKOPS_MASK);
+ card->ext_csd.raw_bkops_status =
+ ext_csd[EXT_CSD_BKOPS_STATUS];
+ if (card->ext_csd.man_bkops_en)
+ pr_debug("%s: MAN_BKOPS_EN bit is set\n",
+ mmc_hostname(card->host));
+ card->ext_csd.auto_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+ EXT_CSD_AUTO_BKOPS_MASK);
+ if (card->ext_csd.auto_bkops_en)
+ pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
+ mmc_hostname(card->host));
+ }
+
+ /* check whether the eMMC card supports HPI */
+ if (!mmc_card_broken_hpi(card) &&
+ !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
+ card->ext_csd.hpi = 1;
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+ card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+ else
+ card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+ /*
+ * Indicate the maximum timeout to close
+ * a command interrupted by HPI
+ */
+ card->ext_csd.out_of_int_time =
+ ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+ }
+
+ card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+ card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
+
+ /*
+ * RPMB regions are defined in multiples of 128K.
+ */
+ card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
+ if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
+ mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
+ EXT_CSD_PART_CONFIG_ACC_RPMB,
+ "rpmb", 0, false,
+ MMC_BLK_DATA_AREA_RPMB);
+ }
+ }
+
+ card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
+ if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
+ card->erased_byte = 0xFF;
+ else
+ card->erased_byte = 0x0;
+
+ /* eMMC v4.5 or later */
+ card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
+ if (card->ext_csd.rev >= 6) {
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+ card->ext_csd.generic_cmd6_time = 10 *
+ ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
+ card->ext_csd.power_off_longtime = 10 *
+ ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
+
+ card->ext_csd.cache_size =
+ ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+
+ if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
+ card->ext_csd.data_sector_size = 4096;
+ else
+ card->ext_csd.data_sector_size = 512;
+
+ if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
+ (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
+ card->ext_csd.data_tag_unit_size =
+ ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
+ (card->ext_csd.data_sector_size);
+ } else {
+ card->ext_csd.data_tag_unit_size = 0;
+ }
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
+ } else {
+ card->ext_csd.data_sector_size = 512;
+ }
+
+ /*
+ * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
+ * when accessing a specific field", so use it here if there is no
+ * PARTITION_SWITCH_TIME.
+ */
+ if (!card->ext_csd.part_time)
+ card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
+ /* Some eMMC set the value too low so set a minimum */
+ if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+
+ /* eMMC v5 or later */
+ if (card->ext_csd.rev >= 7) {
+ memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
+ MMC_FIRMWARE_LEN);
+ card->ext_csd.ffu_capable =
+ (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
+ !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+ card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+ card->ext_csd.device_life_time_est_typ_a =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+ card->ext_csd.device_life_time_est_typ_b =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
+ }
+
+ /* eMMC v5.1 or later */
+ if (card->ext_csd.rev >= 8) {
+ card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
+ EXT_CSD_CMDQ_SUPPORTED;
+ card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
+ EXT_CSD_CMDQ_DEPTH_MASK) + 1;
+ /* Exclude inefficiently small queue depths */
+ if (card->ext_csd.cmdq_depth <= 2) {
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ }
+ if (card->ext_csd.cmdq_support) {
+ pr_debug("%s: Command Queue supported depth %u\n",
+ mmc_hostname(card->host),
+ card->ext_csd.cmdq_depth);
+ }
+ card->ext_csd.enhanced_rpmb_supported =
+ (card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
+ }
+out:
+ return err;
+}
+
+static int mmc_read_ext_csd(struct mmc_card *card)
+{
+ u8 *ext_csd;
+ int err;
+
+ if (!mmc_can_ext_csd(card))
+ return 0;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err) {
+ /* If the host or the card can't do the switch,
+ * fail more gracefully. */
+ if ((err != -EINVAL)
+ && (err != -ENOSYS)
+ && (err != -EFAULT))
+ return err;
+
+ /*
+ * High capacity cards should have this "magic" size
+ * stored in their CSD.
+ */
+ if (card->csd.capacity == (4096 * 512)) {
+ pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
+ mmc_hostname(card->host));
+ } else {
+ pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
+ mmc_hostname(card->host));
+ err = 0;
+ }
+
+ return err;
+ }
+
+ err = mmc_decode_ext_csd(card, ext_csd);
+ kfree(ext_csd);
+ return err;
+}
+
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
+{
+ u8 *bw_ext_csd;
+ int err;
+
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ err = mmc_get_ext_csd(card, &bw_ext_csd);
+ if (err)
+ return err;
+
+ /* only compare read only fields */
+ err = !((card->ext_csd.raw_partition_support ==
+ bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
+ (card->ext_csd.raw_erased_mem_count ==
+ bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
+ (card->ext_csd.rev ==
+ bw_ext_csd[EXT_CSD_REV]) &&
+ (card->ext_csd.raw_ext_csd_structure ==
+ bw_ext_csd[EXT_CSD_STRUCTURE]) &&
+ (card->ext_csd.raw_card_type ==
+ bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
+ (card->ext_csd.raw_s_a_timeout ==
+ bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
+ (card->ext_csd.raw_hc_erase_gap_size ==
+ bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
+ (card->ext_csd.raw_erase_timeout_mult ==
+ bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
+ (card->ext_csd.raw_hc_erase_grp_size ==
+ bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
+ (card->ext_csd.raw_sec_trim_mult ==
+ bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
+ (card->ext_csd.raw_sec_erase_mult ==
+ bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
+ (card->ext_csd.raw_sec_feature_support ==
+ bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
+ (card->ext_csd.raw_trim_mult ==
+ bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
+ (card->ext_csd.raw_sectors[0] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+ (card->ext_csd.raw_sectors[1] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+ (card->ext_csd.raw_sectors[2] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+ (card->ext_csd.raw_sectors[3] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
+ (card->ext_csd.raw_pwr_cl_52_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
+ (card->ext_csd.raw_pwr_cl_26_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
+ (card->ext_csd.raw_pwr_cl_52_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
+ (card->ext_csd.raw_pwr_cl_26_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
+ (card->ext_csd.raw_pwr_cl_200_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
+ (card->ext_csd.raw_pwr_cl_200_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
+
+ if (err)
+ err = -EINVAL;
+
+ kfree(bw_ext_csd);
+ return err;
+}
+
+MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+ card->raw_csd[2], card->raw_csd[3]);
+MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
+MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
+MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
+MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
+MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
+MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
+MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+ card->ext_csd.device_life_time_est_typ_a,
+ card->ext_csd.device_life_time_est_typ_b);
+MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+ card->ext_csd.enhanced_area_offset);
+MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
+MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
+ card->ext_csd.enhanced_rpmb_supported);
+MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
+MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
+MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
+
+static ssize_t mmc_fwrev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ if (card->ext_csd.rev < 7) {
+ return sprintf(buf, "0x%x\n", card->cid.fwrev);
+ } else {
+ return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
+ card->ext_csd.fwrev);
+ }
+}
+
+static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
+
+static ssize_t mmc_dsr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sprintf(buf, "0x%x\n", host->dsr);
+ else
+ /* return default DSR value */
+ return sprintf(buf, "0x%x\n", 0x404);
+}
+
+static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+
+static struct attribute *mmc_std_attrs[] = {
+ &dev_attr_cid.attr,
+ &dev_attr_csd.attr,
+ &dev_attr_date.attr,
+ &dev_attr_erase_size.attr,
+ &dev_attr_preferred_erase_size.attr,
+ &dev_attr_fwrev.attr,
+ &dev_attr_ffu_capable.attr,
+ &dev_attr_hwrev.attr,
+ &dev_attr_manfid.attr,
+ &dev_attr_name.attr,
+ &dev_attr_oemid.attr,
+ &dev_attr_prv.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_pre_eol_info.attr,
+ &dev_attr_life_time.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_enhanced_area_offset.attr,
+ &dev_attr_enhanced_area_size.attr,
+ &dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_enhanced_rpmb_supported.attr,
+ &dev_attr_rel_sectors.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_rca.attr,
+ &dev_attr_dsr.attr,
+ &dev_attr_cmdq_en.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mmc_std);
+
+static struct device_type mmc_type = {
+ .groups = mmc_std_groups,
+};
+
+/*
+ * Select the PowerClass for the current bus width.
+ * If a power class is defined for the 4/8 bit bus in the
+ * extended CSD register, select it by executing the
+ * mmc_switch command.
+ */
+static int __mmc_select_powerclass(struct mmc_card *card,
+ unsigned int bus_width)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_ext_csd *ext_csd = &card->ext_csd;
+ unsigned int pwrclass_val = 0;
+ int err = 0;
+
+ switch (1 << host->ios.vdd) {
+ case MMC_VDD_165_195:
+ if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_26_195;
+ else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
+ pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_52_195 :
+ ext_csd->raw_pwr_cl_ddr_52_195;
+ else if (host->ios.clock <= MMC_HS200_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_200_195;
+ break;
+ case MMC_VDD_27_28:
+ case MMC_VDD_28_29:
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ case MMC_VDD_31_32:
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
+ if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_26_360;
+ else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
+ pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_52_360 :
+ ext_csd->raw_pwr_cl_ddr_52_360;
+ else if (host->ios.clock <= MMC_HS200_MAX_DTR)
+ pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_ddr_200_360 :
+ ext_csd->raw_pwr_cl_200_360;
+ break;
+ default:
+ pr_warn("%s: Voltage range not supported for power class\n",
+ mmc_hostname(host));
+ return -EINVAL;
+ }
+
+ if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
+ EXT_CSD_PWR_CL_8BIT_SHIFT;
+ else
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
+ EXT_CSD_PWR_CL_4BIT_SHIFT;
+
+ /* If the power class is different from the default value */
+ if (pwrclass_val > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_CLASS,
+ pwrclass_val,
+ card->ext_csd.generic_cmd6_time);
+ }
+
+ return err;
+}
+
+static int mmc_select_powerclass(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 bus_width, ext_csd_bits;
+ int err, ddr;
+
+ /* Power class selection is supported for versions >= 4.0 */
+ if (!mmc_can_ext_csd(card))
+ return 0;
+
+ bus_width = host->ios.bus_width;
+ /* Power class values are defined only for 4/8 bit bus */
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
+ if (ddr)
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
+ else
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+
+ err = __mmc_select_powerclass(card, ext_csd_bits);
+ if (err)
+ pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
+ mmc_hostname(host), 1 << bus_width, ddr);
+
+ return err;
+}
+
+/*
+ * Set the bus speed for the selected speed mode.
+ */
+static void mmc_set_bus_speed(struct mmc_card *card)
+{
+ unsigned int max_dtr = (unsigned int)-1;
+
+ if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
+ max_dtr > card->ext_csd.hs200_max_dtr)
+ max_dtr = card->ext_csd.hs200_max_dtr;
+ else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
+ max_dtr = card->ext_csd.hs_max_dtr;
+ else if (max_dtr > card->csd.max_dtr)
+ max_dtr = card->csd.max_dtr;
+
+ mmc_set_clock(card->host, max_dtr);
+}
+
+/*
+ * Select the bus width among 4-bit and 8-bit (SDR).
+ * If the bus width is changed successfully, return the selected width value.
+ * Zero is returned instead of an error value if a wide width is not supported.
+ */
+static int mmc_select_bus_width(struct mmc_card *card)
+{
+ static unsigned ext_csd_bits[] = {
+ EXT_CSD_BUS_WIDTH_8,
+ EXT_CSD_BUS_WIDTH_4,
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_8,
+ MMC_BUS_WIDTH_4,
+ };
+ struct mmc_host *host = card->host;
+ unsigned idx, bus_width = 0;
+ int err = 0;
+
+ if (!mmc_can_ext_csd(card) ||
+ !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
+ return 0;
+
+ idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
+
+ /*
+ * Unlike SD, MMC cards don't have a configuration register to report
+ * the supported bus width. So the bus test command should be run to
+ * identify the supported bus width, or the ext_csd values read at the
+ * current bus width should be compared against the ext_csd values
+ * read earlier in 1-bit mode.
+ */
+ for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+ /*
+ * If the host is capable of 8-bit transfer, switch
+ * the device to 8-bit transfer mode. If the mmc
+ * switch command returns an error, fall back to
+ * 4-bit transfer mode. On success, set the
+ * corresponding bus width on the host.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx],
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ continue;
+
+ bus_width = bus_widths[idx];
+ mmc_set_bus_width(host, bus_width);
+
+ /*
+ * If controller can't handle bus width test,
+ * compare ext_csd previously read in 1 bit mode
+ * against ext_csd at new bus width
+ */
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ err = mmc_compare_ext_csds(card, bus_width);
+ else
+ err = mmc_bus_test(card, bus_width);
+
+ if (!err) {
+ err = bus_width;
+ break;
+ } else {
+ pr_warn("%s: switch to bus width %d failed\n",
+ mmc_hostname(host), 1 << bus_width);
+ }
+ }
+
+ return err;
+}
+
+/*
+ * Switch to the high-speed mode
+ */
+static int mmc_select_hs(struct mmc_card *card)
+{
+ int err;
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
+ true, true);
+ if (err)
+ pr_warn("%s: switch to high-speed failed, err:%d\n",
+ mmc_hostname(card->host), err);
+
+ return err;
+}
+
+/*
+ * Activate wide bus and DDR if supported.
+ */
+static int mmc_select_hs_ddr(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 bus_width, ext_csd_bits;
+ int err = 0;
+
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
+ return 0;
+
+ bus_width = host->ios.bus_width;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits,
+ card->ext_csd.generic_cmd6_time,
+ MMC_TIMING_MMC_DDR52,
+ true, true);
+ if (err) {
+ pr_err("%s: switch to bus width %d ddr failed\n",
+ mmc_hostname(host), 1 << bus_width);
+ return err;
+ }
+
+ /*
+ * eMMC cards can support 3.3V to 1.2V i/o (vccq)
+ * signaling.
+ *
+ * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
+ *
+ * 1.8V vccq at 3.3V core voltage (vcc) is not required
+ * in the JEDEC spec for DDR.
+ *
+ * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
+ * every host controller can, e.g. some SDHCI controllers
+ * connected to an eMMC device. Some of these host controllers
+ * still need 1.8V vccq to support DDR mode.
+ *
+ * So the sequence will be:
+ * if (host and device can both support 1.2v IO)
+ * use 1.2v IO;
+ * else if (host and device can both support 1.8v IO)
+ * use 1.8v IO;
+ * so if host and device can only support 3.3V I/O, that is the
+ * last choice.
+ *
+ * WARNING: eMMC rules are NOT the same as SD DDR
+ */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ if (!err)
+ return 0;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
+ host->caps & MMC_CAP_1_8V_DDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* make sure vccq is 3.3V after a switching disaster */
+ if (err)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
+
+ return err;
+}
+
+static int mmc_select_hs400(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_dtr;
+ int err = 0;
+ u8 val;
+
+ /*
+ * HS400 mode requires 8-bit bus width
+ */
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8))
+ return 0;
+
+ /* Switch card to HS mode */
+ val = EXT_CSD_TIMING_HS;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err) {
+ pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ /* Prepare host to downgrade to HS timing */
+ if (host->ops->hs400_downgrade)
+ host->ops->hs400_downgrade(host);
+
+ /* Set host controller to HS timing */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
+ err = mmc_switch_status(card, true);
+ if (err)
+ goto out_err;
+
+ if (host->ops->hs400_prepare_ddr)
+ host->ops->hs400_prepare_ddr(host);
+
+ /* Switch card to DDR */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ EXT_CSD_DDR_BUS_WIDTH_8,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ /* Switch card to HS400 */
+ val = EXT_CSD_TIMING_HS400 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err) {
+ pr_err("%s: switch to hs400 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ /* Set host controller to HS400 timing and frequency */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+ mmc_set_bus_speed(card);
+
+ if (host->ops->hs400_complete)
+ host->ops->hs400_complete(host);
+
+ err = mmc_switch_status(card, true);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
+
+int mmc_hs200_to_hs400(struct mmc_card *card)
+{
+ return mmc_select_hs400(card);
+}
+
+int mmc_hs400_to_hs200(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_dtr;
+ int err;
+ u8 val;
+
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch HS400 to HS DDR */
+ val = EXT_CSD_TIMING_HS;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ val, card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err)
+ goto out_err;
+
+ if (host->ops->hs400_downgrade)
+ host->ops->hs400_downgrade(host);
+
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ err = mmc_switch_status(card, true);
+ if (err)
+ goto out_err;
+
+ /* Switch HS DDR to HS */
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
+ EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
+ 0, false, true);
+ if (err)
+ goto out_err;
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+
+ err = mmc_switch_status(card, true);
+ if (err)
+ goto out_err;
+
+ /* Switch HS to HS200 */
+ val = EXT_CSD_TIMING_HS200 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ val, card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err)
+ goto out_err;
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+
+ /*
+ * For HS200, CRC errors are not a reliable way to know the switch
+ * failed. If there really is a problem, we would expect tuning will
+ * fail and the result ends up the same.
+ */
+ err = mmc_switch_status(card, false);
+ if (err)
+ goto out_err;
+
+ mmc_set_bus_speed(card);
+
+ /* Prepare tuning for HS400 mode. */
+ if (host->ops->prepare_hs400_tuning)
+ host->ops->prepare_hs400_tuning(host, &host->ios);
+
+ return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
+
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type = 0;
+ int fixed_drv_type = card->host->fixed_drv_type;
+
+ card_drv_type = card->ext_csd.raw_driver_strength |
+ mmc_driver_type_mask(0);
+
+ if (fixed_drv_type >= 0)
+ drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
+ ? fixed_drv_type : 0;
+ else
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
+
+ card->drive_strength = drive_strength;
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+}
+
+static int mmc_select_hs400es(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = -EINVAL;
+ u8 val;
+
+ if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+ err = -ENOTSUPP;
+ goto out_err;
+ }
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* If this fails, try again during the next card power cycle */
+ if (err)
+ goto out_err;
+
+ err = mmc_select_bus_width(card);
+ if (err != MMC_BUS_WIDTH_8) {
+ pr_err("%s: switch to 8bit bus width failed, err:%d\n",
+ mmc_hostname(host), err);
+ err = err < 0 ? err : -ENOTSUPP;
+ goto out_err;
+ }
+
+ /* Switch card to HS mode */
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err) {
+ pr_err("%s: switch to hs for hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
+ err = mmc_switch_status(card, true);
+ if (err)
+ goto out_err;
+
+ /* Switch card to DDR with strobe bit */
+ val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ val,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ mmc_select_driver_type(card);
+
+ /* Switch card to HS400 */
+ val = EXT_CSD_TIMING_HS400 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err) {
+ pr_err("%s: switch to hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ /* Set host controller to HS400 timing and frequency */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+
+ /* Have the controller enable the enhanced strobe function */
+ host->ios.enhanced_strobe = true;
+ if (host->ops->hs400_enhanced_strobe)
+ host->ops->hs400_enhanced_strobe(host, &host->ios);
+
+ err = mmc_switch_status(card, true);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
+
+/*
+ * For a device supporting HS200 mode, the following sequence
+ * should be done before executing the tuning process:
+ * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
+ * 2. switch to HS200 mode
+ * 3. set the clock to > 52 MHz and <= 200 MHz
+ */
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int old_timing, old_signal_voltage, old_clock;
+ int err = -EINVAL;
+ u8 val;
+
+ old_signal_voltage = host->ios.signal_voltage;
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* If this fails, try again during the next card power cycle */
+ if (err)
+ return err;
+
+ mmc_select_driver_type(card);
+
+ /*
+ * Set the bus width (4 or 8) according to the host's support,
+ * and switch to HS200 mode if the bus width is set successfully.
+ */
+ err = mmc_select_bus_width(card);
+ if (err > 0) {
+ val = EXT_CSD_TIMING_HS200 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time, 0,
+ false, true);
+ if (err)
+ goto err;
+
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ * NB: We can't move to full (HS200) speeds until after we've
+ * successfully switched over.
+ */
+ old_timing = host->ios.timing;
+ old_clock = host->ios.clock;
+ mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
+
+ /*
+ * For HS200, CRC errors are not a reliable way to know the
+ * switch failed. If there really is a problem, we would expect
+ * tuning will fail and the result ends up the same.
+ */
+ err = mmc_switch_status(card, false);
+
+ /*
+ * mmc_select_timing() assumes timing has not changed if
+ * it is a switch error.
+ */
+ if (err == -EBADMSG) {
+ mmc_set_clock(host, old_clock);
+ mmc_set_timing(host, old_timing);
+ }
+ }
+err:
+ if (err) {
+ /* fall back to the old signal voltage; if that fails, report an error */
+ if (mmc_set_signal_voltage(host, old_signal_voltage))
+ err = -EIO;
+
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ }
+ return err;
+}
+
+/*
+ * Activate High Speed, HS200 or HS400ES mode if supported.
+ */
+static int mmc_select_timing(struct mmc_card *card)
+{
+ int err = 0;
+
+ if (!mmc_can_ext_csd(card))
+ goto bus_speed;
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ err = mmc_select_hs400es(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ err = mmc_select_hs200(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+ err = mmc_select_hs(card);
+
+ if (err && err != -EBADMSG)
+ return err;
+
+bus_speed:
+ /*
+ * Set the bus speed to the selected bus timing.
+ * If no timing is selected, the backwards-compatible mode is the default.
+ */
+ mmc_set_bus_speed(card);
+ return 0;
+}
+
+/*
+ * Execute tuning sequence to seek the proper bus operating
+ * conditions for HS200 and HS400, which sends CMD21 to the device.
+ */
+static int mmc_hs200_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+ /*
+ * Timing should be adjusted to the HS400 target
+ * operation frequency for the tuning process.
+ */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8)
+ if (host->ops->prepare_hs400_tuning)
+ host->ops->prepare_hs400_tuning(host, &host->ios);
+
+ return mmc_execute_tuning(card);
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+ u32 cid[4];
+ u32 rocr;
+
+ WARN_ON(!host->claimed);
+
+ /* Set correct bus mode for MMC before attempting init */
+ if (!mmc_host_is_spi(host))
+ mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+
+ /*
+ * Since we're changing the OCR value, we seem to
+ * need to tell some cards to go back to the idle
+ * state. We wait 1ms to give cards time to
+ * respond.
+ * mmc_go_idle() is also needed for eMMC devices that are asleep.
+ */
+ mmc_go_idle(host);
+
+ /* The extra bit indicates that we support high capacity */
+ err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
+ if (err)
+ goto err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Fetch CID from card.
+ */
+ err = mmc_send_cid(host, cid);
+ if (err)
+ goto err;
+
+ if (oldcard) {
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
+ err = -ENOENT;
+ goto err;
+ }
+
+ card = oldcard;
+ } else {
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, &mmc_type);
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto err;
+ }
+
+ card->ocr = ocr;
+ card->type = MMC_TYPE_MMC;
+ card->rca = 1;
+ memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ }
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
+ * For native busses: set card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_set_relative_addr(card);
+ if (err)
+ goto free_card;
+
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+
+ if (!oldcard) {
+ /*
+ * Fetch CSD from card.
+ */
+ err = mmc_send_csd(card, card->raw_csd);
+ if (err)
+ goto free_card;
+
+ err = mmc_decode_csd(card);
+ if (err)
+ goto free_card;
+ err = mmc_decode_cid(card);
+ if (err)
+ goto free_card;
+ }
+
+ /*
+ * Handle DSR configuration only for cards supporting DSR and
+ * hosts requesting it.
+ */
+ if (card->csd.dsr_imp && host->dsr_req)
+ mmc_set_dsr(host);
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto free_card;
+ }
+
+ if (!oldcard) {
+ /* Read extended CSD. */
+ err = mmc_read_ext_csd(card);
+ if (err)
+ goto free_card;
+
+ /*
+ * If doing byte addressing, check if required to do sector
+ * addressing. Handle the case of <2GB cards needing sector
+ * addressing. See section 8.1 JEDEC Standard JED84-A441;
+ * ocr register has bit 30 set for sector addressing.
+ */
+ if (rocr & BIT(30))
+ mmc_card_set_blockaddr(card);
+
+ /* Erase size depends on CSD and Extended CSD */
+ mmc_set_erase_size(card);
+ }
+
+ /* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
+ if (card->ext_csd.rev >= 3) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_ERASE_GROUP_DEF, 1,
+ card->ext_csd.generic_cmd6_time);
+
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ if (err) {
+ err = 0;
+ /*
+ * Just disable the enhanced area offset & size;
+ * we will try to enable ERASE_GROUP_DEF again
+ * during the next reinit.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ } else {
+ card->ext_csd.erase_group_def = 1;
+ /*
+ * ERASE_GRP_DEF was enabled successfully.
+ * This affects the erase size, so the erase
+ * size needs to be reset here.
+ */
+ mmc_set_erase_size(card);
+ }
+ }
+
+ /*
+ * Ensure eMMC user default partition is enabled
+ */
+ if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
+ card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
+ card->ext_csd.part_config,
+ card->ext_csd.part_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ }
+
+ /*
+ * Enable power_off_notification byte in the ext_csd register
+ */
+ if (card->ext_csd.rev >= 6) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ EXT_CSD_POWER_ON,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ /*
+ * err can be -EBADMSG or 0 here, so check for
+ * success and update the flag.
+ */
+ if (!err)
+ card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
+ }
+
+ /* set erase_arg */
+ if (mmc_can_discard(card))
+ card->erase_arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
+ card->erase_arg = MMC_TRIM_ARG;
+ else
+ card->erase_arg = MMC_ERASE_ARG;
+
+ /*
+ * Select timing interface
+ */
+ err = mmc_select_timing(card);
+ if (err)
+ goto free_card;
+
+ if (mmc_card_hs200(card)) {
+ host->doing_init_tune = 1;
+
+ err = mmc_hs200_tuning(card);
+ if (!err)
+ err = mmc_select_hs400(card);
+
+ host->doing_init_tune = 0;
+
+ if (err)
+ goto free_card;
+
+ } else if (!mmc_card_hs400es(card)) {
+ /* Select the desired bus width optionally */
+ err = mmc_select_bus_width(card);
+ if (err > 0 && mmc_card_hs(card)) {
+ err = mmc_select_hs_ddr(card);
+ if (err)
+ goto free_card;
+ }
+ }
+
+ /*
+ * Choose the power class with selected bus interface
+ */
+ mmc_select_powerclass(card);
+
+ /*
+ * Enable HPI feature (if supported)
+ */
+ if (card->ext_csd.hpi) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HPI_MGMT, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.hpi_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.hpi_en = 1;
+ }
+ }
+
+ /*
+ * A cache size greater than 0 indicates that a cache exists and
+ * can be turned on. Note that some eMMCs from Micron have been
+ * reported to need a ~800 ms timeout when enabling the cache after
+ * sudden power failure tests. Let's extend the timeout to a minimum
+ * of MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
+ */
+ if (card->ext_csd.cache_size > 0) {
+ unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+ timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 1, timeout_ms);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ /*
+ * The cache is turned on successfully only if there was no error.
+ */
+ if (err) {
+ pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
+ }
+
+ /*
+ * Enable Command Queue if supported. Note that Packed Commands cannot
+ * be used with Command Queue.
+ */
+ card->ext_csd.cmdq_en = false;
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ err = mmc_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ err = 0;
+ }
+ }
+ /*
+ * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
+ * disabled for a time, so a flag is needed to indicate whether the
+ * Command Queue should be re-enabled.
+ */
+ card->reenable_cmdq = card->ext_csd.cmdq_en;
+
+ if (host->cqe_ops && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (!err) {
+ host->cqe_enabled = true;
+
+ if (card->ext_csd.cmdq_en) {
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ } else {
+ host->hsq_enabled = true;
+ pr_info("%s: Host Software Queue enabled\n",
+ mmc_hostname(host));
+ }
+ }
+ }
+
+ if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
+ host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ pr_err("%s: Host failed to negotiate down from 3.3V\n",
+ mmc_hostname(host));
+ err = -EINVAL;
+ goto free_card;
+ }
+
+ if (!oldcard)
+ host->card = card;
+
+ return 0;
+
+free_card:
+ if (!oldcard)
+ mmc_remove_card(card);
+err:
+ return err;
+}
+
+static int mmc_can_sleep(struct mmc_card *card)
+{
+ return (card && card->ext_csd.rev >= 3);
+}
+
+static int mmc_sleep(struct mmc_host *host)
+{
+ struct mmc_command cmd = {};
+ struct mmc_card *card = host->card;
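+ /* EXT_CSD S_A_TIMEOUT is in units of 100 ns; dividing by 10000 converts it to ms. */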
+ unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ int err;
+
+ /* Re-tuning can't be done once the card is deselected */
+ mmc_retune_hold(host);
+
+ err = mmc_deselect_cards(host);
+ if (err)
+ goto out_release;
+
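+ /*
+ * CMD5 (SLEEP_AWAKE): the RCA goes in bits 31:16; setting bit 15
+ * requests Sleep, while a cleared bit 15 requests Awake.
+ */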
+ cmd.opcode = MMC_SLEEP_AWAKE;
+ cmd.arg = card->rca << 16;
+ cmd.arg |= 1 << 15;
+
+ /*
+ * If the max_busy_timeout of the host is specified, validate it against
+ * the sleep cmd timeout. A failure means we need to prevent the host
+ * from doing hw busy detection, which is done by converting to an R1
+ * response instead of an R1B. Note, some hosts require R1B, which also
+ * means they are on their own when it comes to dealing with the busy
+ * timeout.
+ */
+ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout)) {
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = timeout_ms;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ goto out_release;
+
+ /*
+ * If the host does not wait while the card signals busy, then we
+ * will have to wait the sleep/awake timeout. Note, we cannot use the
+ * SEND_STATUS command to poll the status because that command (and most
+ * others) is invalid while the card sleeps.
+ */
+ if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ mmc_delay(timeout_ms);
+
+out_release:
+ mmc_retune_release(host);
+ return err;
+}
+
+static int mmc_can_poweroff_notify(const struct mmc_card *card)
+{
+ return card &&
+ mmc_card_mmc(card) &&
+ (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
+}
+
+static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
+{
+ unsigned int timeout = card->ext_csd.generic_cmd6_time;
+ int err;
+
+ /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
+ if (notify_type == EXT_CSD_POWER_OFF_LONG)
+ timeout = card->ext_csd.power_off_longtime;
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout, 0, false, false);
+ if (err)
+ pr_err("%s: Power Off Notification timed out, %u\n",
+ mmc_hostname(card->host), timeout);
+
+ /* Disable the power off notification after the switch operation. */
+ card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
+
+ return err;
+}
+
+/*
+ * Host is being removed. Free up the current card.
+ */
+static void mmc_remove(struct mmc_host *host)
+{
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
+ * Card detection callback from host.
+ */
+static void mmc_detect(struct mmc_host *host)
+{
+ int err;
+
+ mmc_get_card(host->card, NULL);
+
+ /*
+ * Just check if our card has been removed.
+ */
+ err = _mmc_detect_card_removed(host);
+
+ mmc_put_card(host->card, NULL);
+
+ if (err) {
+ mmc_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+static bool _mmc_cache_enabled(struct mmc_host *host)
+{
+ return host->card->ext_csd.cache_size > 0 &&
+ host->card->ext_csd.cache_ctrl & 1;
+}
+
+static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
+{
+ int err = 0;
+ unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
+ EXT_CSD_POWER_OFF_LONG;
+
+ mmc_claim_host(host);
+
+ if (mmc_card_suspended(host->card))
+ goto out;
+
+ err = mmc_flush_cache(host->card);
+ if (err)
+ goto out;
+
+ if (mmc_can_poweroff_notify(host->card) &&
+ ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend ||
+ (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND)))
+ err = mmc_poweroff_notify(host->card, notify_type);
+ else if (mmc_can_sleep(host->card))
+ err = mmc_sleep(host);
+ else if (!mmc_host_is_spi(host))
+ err = mmc_deselect_cards(host);
+
+ if (!err) {
+ mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Suspend callback
+ */
+static int mmc_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_suspend(host, true);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
+}
+
+/*
+ * This function tries to determine if the same card is still present
+ * and, if so, restore all state to it.
+ */
+static int _mmc_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ mmc_claim_host(host);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Shutdown callback
+ */
+static int mmc_shutdown(struct mmc_host *host)
+{
+ int err = 0;
+
+ /*
+ * In a specific case for poweroff notify, we need to resume the card
+ * before we can shut it down properly.
+ */
+ if (mmc_can_poweroff_notify(host->card) &&
+ !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
+ err = _mmc_resume(host);
+
+ if (!err)
+ err = _mmc_suspend(host, false);
+
+ return err;
+}
+
+/*
+ * Callback for resume.
+ */
+static int mmc_resume(struct mmc_host *host)
+{
+ pm_runtime_enable(&host->card->dev);
+ return 0;
+}
+
+/*
+ * Callback for runtime_suspend.
+ */
+static int mmc_runtime_suspend(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ return 0;
+
+ err = _mmc_suspend(host, true);
+ if (err)
+ pr_err("%s: error %d doing aggressive suspend\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
+
+/*
+ * Callback for runtime_resume.
+ */
+static int mmc_runtime_resume(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_resume(host);
+ if (err && err != -ENOMEDIUM)
+ pr_err("%s: error %d doing runtime resume\n",
+ mmc_hostname(host), err);
+
+ return 0;
+}
+
+static int mmc_can_reset(struct mmc_card *card)
+{
+ u8 rst_n_function;
+
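+ /*
+ * EXT_CSD RST_n_FUNCTION: 0x00 = temporarily disabled (default),
+ * 0x01 = permanently enabled, 0x02 = permanently disabled.
+ */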
+ rst_n_function = card->ext_csd.rst_n_function;
+ if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
+ return 0;
+ return 1;
+}
+
+static int _mmc_hw_reset(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ /*
+ * In the case of recovery, we can't expect flushing the cache to work
+ * always, but we have a go and ignore errors.
+ */
+ mmc_flush_cache(host->card);
+
+ if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+ mmc_can_reset(card)) {
+ /* If the card accepts the RST_n signal, send it. */
+ mmc_set_clock(host, host->f_init);
+ host->ops->hw_reset(host);
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ } else {
+ /* Do a brute force power cycle */
+ mmc_power_cycle(host, card->ocr);
+ mmc_pwrseq_reset(host);
+ }
+ return mmc_init_card(host, card->ocr, card);
+}
+
+static const struct mmc_bus_ops mmc_ops = {
+ .remove = mmc_remove,
+ .detect = mmc_detect,
+ .suspend = mmc_suspend,
+ .resume = mmc_resume,
+ .runtime_suspend = mmc_runtime_suspend,
+ .runtime_resume = mmc_runtime_resume,
+ .alive = mmc_alive,
+ .shutdown = mmc_shutdown,
+ .hw_reset = _mmc_hw_reset,
+ .cache_enabled = _mmc_cache_enabled,
+};
+
+/*
+ * Starting point for MMC card init.
+ */
+int mmc_attach_mmc(struct mmc_host *host)
+{
+ int err;
+ u32 ocr, rocr;
+
+ WARN_ON(!host->claimed);
+
+ /* Set correct bus mode for MMC before attempting attach */
+ if (!mmc_host_is_spi(host))
+ mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ mmc_attach_bus(host, &mmc_ops);
+ if (host->ocr_avail_mmc)
+ host->ocr_avail = host->ocr_avail_mmc;
+
+ /*
+ * We need to get OCR a different way for SPI.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_read_ocr(host, 1, &ocr);
+ if (err)
+ goto err;
+ }
+
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Can we support the voltage of the card?
+ */
+ if (!rocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Detect and init the card.
+ */
+ err = mmc_init_card(host, rocr, NULL);
+ if (err)
+ goto err;
+
+ mmc_release_host(host);
+ err = mmc_add_card(host->card);
+ if (err)
+ goto remove_card;
+
+ mmc_claim_host(host);
+ return 0;
+
+remove_card:
+ mmc_remove_card(host->card);
+ mmc_claim_host(host);
+ host->card = NULL;
+err:
+ mmc_detach_bus(host);
+
+ pr_err("%s: error %d whilst initialising MMC card\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
new file mode 100644
index 000000000..b6bb7f7fa
--- /dev/null
+++ b/drivers/mmc/core/mmc_ops.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/mmc_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "mmc_ops.h"
+
+#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
+#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
+
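+/*
+ * Fixed tuning block patterns defined by the specs, returned by the card in
+ * response to CMD19 (SD) or CMD21 (eMMC) for 4-bit and 8-bit bus widths.
+ */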
+static const u8 tuning_blk_pattern_4bit[] = {
+ 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
+};
+
+static const u8 tuning_blk_pattern_8bit[] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+};
+
+int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
+{
+ int err;
+ struct mmc_command cmd = {};
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err)
+ return err;
+
+ /* NOTE: callers are required to understand the difference
+ * between "native" and SPI format status words!
+ */
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mmc_send_status);
+
+int mmc_send_status(struct mmc_card *card, u32 *status)
+{
+ return __mmc_send_status(card, status, MMC_CMD_RETRIES);
+}
+EXPORT_SYMBOL_GPL(mmc_send_status);
+
+static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
+{
+ struct mmc_command cmd = {};
+
+ cmd.opcode = MMC_SELECT_CARD;
+
+ if (card) {
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+ }
+
+ return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+}
+
+int mmc_select_card(struct mmc_card *card)
+{
+
+ return _mmc_select_card(card->host, card);
+}
+
+int mmc_deselect_cards(struct mmc_host *host)
+{
+ return _mmc_select_card(host, NULL);
+}
+
+/*
+ * Write the value specified in the device tree or board code into the optional
+ * 16-bit Driver Stage Register. This can be used to tune the rise/fall times
+ * and drive strength of the DAT and CMD outputs. The actual meaning of a given
+ * value is hardware dependent.
+ * The presence of the DSR register can be determined from the CSD register,
+ * bit 76.
+ */
+int mmc_set_dsr(struct mmc_host *host)
+{
+ struct mmc_command cmd = {};
+
+ cmd.opcode = MMC_SET_DSR;
+
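+ /* CMD4 carries the DSR value in bits 31:16; the low 16 bits are stuff bits. */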
+ cmd.arg = (host->dsr << 16) | 0xffff;
+ cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+}
+
+int mmc_go_idle(struct mmc_host *host)
+{
+ int err;
+ struct mmc_command cmd = {};
+
+ /*
+ * Non-SPI hosts need to prevent chipselect going active during
+ * GO_IDLE; that would put chips into SPI mode. Remind them of
+ * that in case of hardware that won't pull up DAT3/nCS otherwise.
+ *
+ * SPI hosts ignore ios.chip_select; it's managed according to
+ * rules that must accommodate non-MMC slaves which this layer
+ * won't even know about.
+ */
+ if (!mmc_host_is_spi(host)) {
+ mmc_set_chip_select(host, MMC_CS_HIGH);
+ mmc_delay(1);
+ }
+
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ mmc_delay(1);
+
+ if (!mmc_host_is_spi(host)) {
+ mmc_set_chip_select(host, MMC_CS_DONTCARE);
+ mmc_delay(1);
+ }
+
+ host->use_spi_crc = 0;
+
+ return err;
+}
+
+int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd = {};
+ int i, err = 0;
+
+ cmd.opcode = MMC_SEND_OP_COND;
+ cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
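+ /* Poll CMD1 up to 100 times, 10 ms apart (~1 s), until the card clears its busy bit. */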
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ break;
+
+ /* wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd.resp[0] & R1_SPI_IDLE))
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ mmc_delay(10);
+
+ /*
+ * According to eMMC specification v5.1 section 6.4.3, we
+ * should issue CMD1 repeatedly in the idle state until
+ * the eMMC is ready. Otherwise some eMMC devices seem to enter
+ * the inactive mode after mmc_init_card() issued CMD0 when
+ * the eMMC device is busy.
+ */
+ if (!ocr && !mmc_host_is_spi(host))
+ cmd.arg = cmd.resp[0] | BIT(30);
+ }
+
+ if (rocr && !mmc_host_is_spi(host))
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+int mmc_set_relative_addr(struct mmc_card *card)
+{
+ struct mmc_command cmd = {};
+
+ cmd.opcode = MMC_SET_RELATIVE_ADDR;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+}
+
+static int
+mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
+{
+ int err;
+ struct mmc_command cmd = {};
+
+ cmd.opcode = opcode;
+ cmd.arg = arg;
+ cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
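+ /* An R2 response is 128 bits wide; the four response words hold the raw CSD or CID. */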
+ memcpy(cxd, cmd.resp, sizeof(u32) * 4);
+
+ return 0;
+}
+
+/*
+ * NOTE: the caller is required to provide a DMA-capable buffer for buf,
+ * or an on-stack buffer (with some overhead in the callee).
+ */
+static int
+mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
+ u32 opcode, void *buf, unsigned len)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+
+ /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
+ * rely on callers to never use this with "native" calls for reading
+ * CSD or CID. Native versions of those commands use the R2 type,
+ * not R1 plus a data block.
+ */
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = len;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, buf, len);
+
+ if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
+ /*
+ * The spec states that CSD and CID accesses have a timeout
+ * of 64 clock cycles.
+ */
+ data.timeout_ns = 0;
+ data.timeout_clks = 64;
+ } else
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
+{
+ int ret, i;
+ __be32 *csd_tmp;
+
+ csd_tmp = kzalloc(16, GFP_KERNEL);
+ if (!csd_tmp)
+ return -ENOMEM;
+
+ ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < 4; i++)
+ csd[i] = be32_to_cpu(csd_tmp[i]);
+
+err:
+ kfree(csd_tmp);
+ return ret;
+}
+
+int mmc_send_csd(struct mmc_card *card, u32 *csd)
+{
+ if (mmc_host_is_spi(card->host))
+ return mmc_spi_send_csd(card, csd);
+
+ return mmc_send_cxd_native(card->host, card->rca << 16, csd,
+ MMC_SEND_CSD);
+}
+
+static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
+{
+ int ret, i;
+ __be32 *cid_tmp;
+
+ cid_tmp = kzalloc(16, GFP_KERNEL);
+ if (!cid_tmp)
+ return -ENOMEM;
+
+ ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < 4; i++)
+ cid[i] = be32_to_cpu(cid_tmp[i]);
+
+err:
+ kfree(cid_tmp);
+ return ret;
+}
+
+int mmc_send_cid(struct mmc_host *host, u32 *cid)
+{
+ if (mmc_host_is_spi(host))
+ return mmc_spi_send_cid(host, cid);
+
+ return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
+}
+
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
+{
+ int err;
+ u8 *ext_csd;
+
+ if (!card || !new_ext_csd)
+ return -EINVAL;
+
+ if (!mmc_can_ext_csd(card))
+ return -EOPNOTSUPP;
+
+ /*
+ * As the ext_csd is so large and mostly unused, we don't store the
+ * raw block in mmc_card.
+ */
+ ext_csd = kzalloc(512, GFP_KERNEL);
+ if (!ext_csd)
+ return -ENOMEM;
+
+ err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+ 512);
+ if (err)
+ kfree(ext_csd);
+ else
+ *new_ext_csd = ext_csd;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
+
+int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
+{
+ struct mmc_command cmd = {};
+ int err;
+
+ cmd.opcode = MMC_SPI_READ_OCR;
+ cmd.arg = highcap ? (1 << 30) : 0;
+ cmd.flags = MMC_RSP_SPI_R3;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
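+ /* For SPI R3, the host driver returns the 32-bit OCR in resp[1]. */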
+ *ocrp = cmd.resp[1];
+ return err;
+}
+
+int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
+{
+ struct mmc_command cmd = {};
+ int err;
+
+ cmd.opcode = MMC_SPI_CRC_ON_OFF;
+ cmd.flags = MMC_RSP_SPI_R1;
+ cmd.arg = use_crc;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (!err)
+ host->use_spi_crc = use_crc;
+ return err;
+}
+
+static int mmc_switch_status_error(struct mmc_host *host, u32 status)
+{
+ if (mmc_host_is_spi(host)) {
+ if (status & R1_SPI_ILLEGAL_COMMAND)
+ return -EBADMSG;
+ } else {
+ if (R1_STATUS(status))
+ pr_warn("%s: unexpected status %#x after switch\n",
+ mmc_hostname(host), status);
+ if (status & R1_SWITCH_ERROR)
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+/* Caller must hold re-tuning */
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+{
+ u32 status;
+ int err;
+
+ err = mmc_send_status(card, &status);
+ if (!crc_err_fatal && err == -EILSEQ)
+ return 0;
+ if (err)
+ return err;
+
+ return mmc_switch_status_error(card->host, status);
+}
+
+static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
+ enum mmc_busy_cmd busy_cmd, bool *busy)
+{
+ struct mmc_host *host = card->host;
+ u32 status = 0;
+ int err;
+
+ if (busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
+ *busy = host->ops->card_busy(host);
+ return 0;
+ }
+
+ err = mmc_send_status(card, &status);
+ if (retry_crc_err && err == -EILSEQ) {
+ *busy = true;
+ return 0;
+ }
+ if (err)
+ return err;
+
+ switch (busy_cmd) {
+ case MMC_BUSY_CMD6:
+ err = mmc_switch_status_error(card->host, status);
+ break;
+ case MMC_BUSY_ERASE:
+ err = R1_STATUS(status) ? -EIO : 0;
+ break;
+ case MMC_BUSY_HPI:
+ case MMC_BUSY_IO:
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ *busy = !mmc_ready_for_data(status);
+ return 0;
+}
+
+static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool send_status, bool retry_crc_err,
+ enum mmc_busy_cmd busy_cmd)
+{
+ struct mmc_host *host = card->host;
+ int err;
+ unsigned long timeout;
+ unsigned int udelay = 32, udelay_max = 32768;
+ bool expired = false;
+ bool busy = false;
+
+ /*
+ * When we are not allowed to poll using CMD13, and are not
+ * capable of polling using ->card_busy(), rely on waiting the
+ * stated timeout to be sufficient.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ return 0;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
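+ /* Poll with exponential backoff: the delay doubles from 32 us up to 32768 us. */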
+ do {
+ /*
+ * Due to the possibility of being preempted while polling,
+ * check the expiration time first.
+ */
+ expired = time_after(jiffies, timeout);
+
+ err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+ if (err)
+ return err;
+
+ /* Timeout if the device still remains busy. */
+ if (expired && busy) {
+ pr_err("%s: Card stuck being busy! %s\n",
+ mmc_hostname(host), __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* Throttle the polling rate to avoid hogging the CPU. */
+ if (busy) {
+ usleep_range(udelay, udelay * 2);
+ if (udelay < udelay_max)
+ udelay *= 2;
+ }
+ } while (busy);
+
+ return 0;
+}
+
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ enum mmc_busy_cmd busy_cmd)
+{
+ return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+}
+
+/**
+ * __mmc_switch - modify EXT_CSD register
+ * @card: the MMC card associated with the data transfer
+ * @set: cmd set values
+ * @index: EXT_CSD register index
+ * @value: value to program into EXT_CSD register
+ * @timeout_ms: timeout (ms) for operation performed by register write,
+ * timeout of zero implies maximum possible timeout
+ * @timing: new timing to change to
+ * @send_status: send status cmd to poll for busy
+ * @retry_crc_err: retry if there are CRC errors when polling with CMD13 for busy
+ *
+ * Modifies the EXT_CSD register for selected card.
+ */
+int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, unsigned char timing,
+ bool send_status, bool retry_crc_err)
+{
+ struct mmc_host *host = card->host;
+ int err;
+ struct mmc_command cmd = {};
+ bool use_r1b_resp = true;
+ unsigned char old_timing = host->ios.timing;
+
+ mmc_retune_hold(host);
+
+ if (!timeout_ms) {
+ pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+ mmc_hostname(host));
+ timeout_ms = card->ext_csd.generic_cmd6_time;
+ }
+
+ /*
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to an R1 response
+ * instead of an R1B. Note, some hosts require R1B, which also means
+ * they are on their own when it comes to dealing with the busy timeout.
+ */
+ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout))
+ use_r1b_resp = false;
+
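+ /*
+ * CMD6 argument layout: access mode in bits 25:24, EXT_CSD byte index
+ * in bits 23:16, value in bits 15:8 and command set in bits 2:0.
+ */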
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (index << 16) |
+ (value << 8) |
+ set;
+ cmd.flags = MMC_CMD_AC;
+ if (use_r1b_resp) {
+ cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ cmd.busy_timeout = timeout_ms;
+ } else {
+ cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ goto out;
+
+ /* If SPI, or if HW busy detection was used above, then we don't need to poll. */
+ if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
+ mmc_host_is_spi(host))
+ goto out_tim;
+
+ /* Let's try to poll to find out when the command is completed. */
+ err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
+ MMC_BUSY_CMD6);
+ if (err)
+ goto out;
+
+out_tim:
+ /* Switch to the new timing before checking the switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
+
+ if (send_status) {
+ err = mmc_switch_status(card, true);
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
+ }
+out:
+ mmc_retune_release(host);
+
+ return err;
+}
+
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms)
+{
+ return __mmc_switch(card, set, index, value, timeout_ms, 0,
+ true, false);
+}
+EXPORT_SYMBOL_GPL(mmc_switch);
+
+int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ struct mmc_ios *ios = &host->ios;
+ const u8 *tuning_block_pattern;
+ int size, err = 0;
+ u8 *data_buf;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_8) {
+ tuning_block_pattern = tuning_blk_pattern_8bit;
+ size = sizeof(tuning_blk_pattern_8bit);
+ } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ tuning_block_pattern = tuning_blk_pattern_4bit;
+ size = sizeof(tuning_blk_pattern_4bit);
+ } else
+ return -EINVAL;
+
+ data_buf = kzalloc(size, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = size;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+
+ /*
+ * According to the tuning specs, the tuning process normally
+ * takes fewer than 40 executions of CMD19, and the timeout
+ * value should be shorter than 150 ms.
+ */
+ data.timeout_ns = 150 * NSEC_PER_MSEC;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, size);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd_error)
+ *cmd_error = cmd.error;
+
+ if (cmd.error) {
+ err = cmd.error;
+ goto out;
+ }
+
+ if (data.error) {
+ err = data.error;
+ goto out;
+ }
+
+ if (memcmp(data_buf, tuning_block_pattern, size))
+ err = -EIO;
+
+out:
+ kfree(data_buf);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_send_tuning);
+
+int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+{
+ struct mmc_command cmd = {};
+
+ /*
+ * The eMMC specification states that CMD12 can be used to stop a tuning
+ * command, but the SD specification does not, so do nothing unless it is
+ * eMMC.
+ */
+ if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ return 0;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ /*
+ * For drivers that override R1 to R1b, set an arbitrary timeout based
+ * on the tuning timeout, i.e. 150 ms.
+ */
+ cmd.busy_timeout = 150;
+
+ return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+ u8 len)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ u8 *data_buf;
+ u8 *test_buf;
+ int i, err;
+ static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+ static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+ /* dma onto stack is unsafe/nonportable, but callers to this
+ * routine normally provide temporary on-stack buffers ...
+ */
+ data_buf = kmalloc(len, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ if (len == 8)
+ test_buf = testdata_8bit;
+ else if (len == 4)
+ test_buf = testdata_4bit;
+ else {
+ pr_err("%s: Invalid bus_width %d\n",
+ mmc_hostname(host), len);
+ kfree(data_buf);
+ return -EINVAL;
+ }
+
+ if (opcode == MMC_BUS_TEST_W)
+ memcpy(data_buf, test_buf, len);
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+
+ /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
+ * rely on callers to never use this with "native" calls for reading
+ * CSD or CID. Native versions of those commands use the R2 type,
+ * not R1 plus a data block.
+ */
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = len;
+ data.blocks = 1;
+ if (opcode == MMC_BUS_TEST_R)
+ data.flags = MMC_DATA_READ;
+ else
+ data.flags = MMC_DATA_WRITE;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
+ sg_init_one(&sg, data_buf, len);
+ mmc_wait_for_req(host, &mrq);
+ err = 0;
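+ /*
+ * Per the spec, BUS_TEST_R returns the bitwise inverse of the written
+ * pattern, so XOR-ing each byte pair must yield 0xff.
+ */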
+ if (opcode == MMC_BUS_TEST_R) {
+ for (i = 0; i < len / 4; i++)
+ if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+ err = -EIO;
+ break;
+ }
+ }
+ kfree(data_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+ int width;
+
+ if (bus_width == MMC_BUS_WIDTH_8)
+ width = 8;
+ else if (bus_width == MMC_BUS_WIDTH_4)
+ width = 4;
+ else if (bus_width == MMC_BUS_WIDTH_1)
+ return 0; /* no need for test */
+ else
+ return -EINVAL;
+
+ /*
+ * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
+ * is a problem. This improves chances that the test will work.
+ */
+ mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+ return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+}
+
+static int mmc_send_hpi_cmd(struct mmc_card *card)
+{
+ unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
+ struct mmc_host *host = card->host;
+ bool use_r1b_resp = true;
+ struct mmc_command cmd = {};
+ int err;
+
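+ /* The RCA goes in bits 31:16; bit 0 set marks the command as an HPI request. */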
+ cmd.opcode = card->ext_csd.hpi_cmd;
+ cmd.arg = card->rca << 16 | 1;
+
+ /*
+ * Make sure the host's max_busy_timeout fits the needed timeout for HPI.
+ * In case it doesn't, let's instruct the host to avoid HW busy
+ * detection, by using an R1 response instead of R1B.
+ */
+ if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
+ use_r1b_resp = false;
+
+ if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = busy_timeout_ms;
+ } else {
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ use_r1b_resp = false;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ pr_warn("%s: HPI error %d. Command response %#x\n",
+ mmc_hostname(host), err, cmd.resp[0]);
+ return err;
+ }
+
+ /* No need to poll when using HW busy detection. */
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+ return 0;
+
+ /* Let's poll to find out when the HPI request completes. */
+ return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
+}
+
+/**
+ * mmc_interrupt_hpi - Issue a High Priority Interrupt
+ * @card: the MMC card associated with the HPI transfer
+ *
+ * Issue a High Priority Interrupt, and check the card status
+ * until the card is out of the prg-state.
+ */
+static int mmc_interrupt_hpi(struct mmc_card *card)
+{
+ int err;
+ u32 status;
+
+ if (!card->ext_csd.hpi_en) {
+ pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
+ return 1;
+ }
+
+ err = mmc_send_status(card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ goto out;
+ }
+
+ switch (R1_CURRENT_STATE(status)) {
+ case R1_STATE_IDLE:
+ case R1_STATE_READY:
+ case R1_STATE_STBY:
+ case R1_STATE_TRAN:
+ /*
+ * In idle and transfer states, HPI is not needed and the caller
+ * can issue the next intended command immediately
+ */
+ goto out;
+ case R1_STATE_PRG:
+ break;
+ default:
+ /* In all other states, it's illegal to issue HPI */
+ pr_debug("%s: HPI cannot be sent. Card state=%d\n",
+ mmc_hostname(card->host), R1_CURRENT_STATE(status));
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mmc_send_hpi_cmd(card);
+out:
+ return err;
+}
+
+int mmc_can_ext_csd(struct mmc_card *card)
+{
+ return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
+}
+
+static int mmc_read_bkops_status(struct mmc_card *card)
+{
+ int err;
+ u8 *ext_csd;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err)
+ return err;
+
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+ card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+ kfree(ext_csd);
+ return 0;
+}
+
+/**
+ * mmc_run_bkops - Run BKOPS for supported cards
+ * @card: MMC card to run BKOPS for
+ *
+ * Run background operations synchronously for cards that have manual BKOPS
+ * enabled and that report an urgent BKOPS level.
+ */
+void mmc_run_bkops(struct mmc_card *card)
+{
+ int err;
+
+ if (!card->ext_csd.man_bkops_en)
+ return;
+
+ err = mmc_read_bkops_status(card);
+ if (err) {
+ pr_err("%s: Failed to read bkops status: %d\n",
+ mmc_hostname(card->host), err);
+ return;
+ }
+
+ if (!card->ext_csd.raw_bkops_status ||
+ card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
+ return;
+
+ mmc_retune_hold(card->host);
+
+ /*
+ * For urgent BKOPS status, LEVEL_2 and higher, let's execute
+ * synchronously. In the future, we may consider starting BKOPS for
+ * less urgent levels by using an asynchronous background task, when idle.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
+ if (err)
+ pr_warn("%s: Error %d starting bkops\n",
+ mmc_hostname(card->host), err);
+
+ mmc_retune_release(card->host);
+}
+EXPORT_SYMBOL(mmc_run_bkops);
+
+/*
+ * Flush the cache to the non-volatile storage.
+ */
+int mmc_flush_cache(struct mmc_card *card)
+{
+ int err = 0;
+
+ if (mmc_cache_enabled(card->host)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ MMC_CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(card->host), err);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_flush_cache);
+
+static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
+{
+ u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
+ int err;
+
+ if (!card->ext_csd.cmdq_support)
+ return -EOPNOTSUPP;
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
+ val, card->ext_csd.generic_cmd6_time);
+ if (!err)
+ card->ext_csd.cmdq_en = enable;
+
+ return err;
+}
+
+int mmc_cmdq_enable(struct mmc_card *card)
+{
+ return mmc_cmdq_switch(card, true);
+}
+EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
+
+int mmc_cmdq_disable(struct mmc_card *card)
+{
+ return mmc_cmdq_switch(card, false);
+}
+EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
+
+int mmc_sanitize(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err;
+
+ if (!mmc_can_sanitize(card)) {
+ pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
+ return -EOPNOTSUPP;
+ }
+
+ pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
+
+ mmc_retune_hold(host);
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
+ 1, MMC_SANITIZE_TIMEOUT_MS);
+ if (err)
+ pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
+
+ /*
+ * If the sanitize operation timed out, the card is probably still busy
+ * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
+ * it with a HPI command to get back into R1_STATE_TRAN.
+ */
+ if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+ pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
+
+ mmc_retune_release(host);
+
+ pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_sanitize);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
new file mode 100644
index 000000000..e3cbb1ddc
--- /dev/null
+++ b/drivers/mmc/core/mmc_ops.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/drivers/mmc/core/mmc_ops.h
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#ifndef _MMC_MMC_OPS_H
+#define _MMC_MMC_OPS_H
+
+#include <linux/types.h>
+
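+/* Identifies which operation a busy poll waits on; see mmc_poll_for_busy(). */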
+enum mmc_busy_cmd {
+ MMC_BUSY_CMD6,
+ MMC_BUSY_ERASE,
+ MMC_BUSY_HPI,
+ MMC_BUSY_IO,
+};
+
+struct mmc_host;
+struct mmc_card;
+
+int mmc_select_card(struct mmc_card *card);
+int mmc_deselect_cards(struct mmc_host *host);
+int mmc_set_dsr(struct mmc_host *host);
+int mmc_go_idle(struct mmc_host *host);
+int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
+int mmc_set_relative_addr(struct mmc_card *card);
+int mmc_send_csd(struct mmc_card *card, u32 *csd);
+int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
+int mmc_send_status(struct mmc_card *card, u32 *status);
+int mmc_send_cid(struct mmc_host *host, u32 *cid);
+int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
+int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
+int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ enum mmc_busy_cmd busy_cmd);
+int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, unsigned char timing,
+ bool send_status, bool retry_crc_err);
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms);
+void mmc_run_bkops(struct mmc_card *card);
+int mmc_flush_cache(struct mmc_card *card);
+int mmc_cmdq_enable(struct mmc_card *card);
+int mmc_cmdq_disable(struct mmc_card *card);
+int mmc_sanitize(struct mmc_card *card);
+
+#endif
+
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
new file mode 100644
index 000000000..b9b6f0001
--- /dev/null
+++ b/drivers/mmc/core/mmc_test.c
@@ -0,0 +1,3303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2007-2008 Pierre Ossman
+ */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/slab.h>
+
+#include <linux/scatterlist.h>
+#include <linux/swap.h> /* For nr_free_buffer_pages() */
+#include <linux/list.h>
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "mmc_ops.h"
+
+#define RESULT_OK 0
+#define RESULT_FAIL 1
+#define RESULT_UNSUP_HOST 2
+#define RESULT_UNSUP_CARD 3
+
+#define BUFFER_ORDER 2
+#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
+
+#define TEST_ALIGN_END 8
+
+/*
+ * Limit the test area size to the maximum MMC HC erase group size. Note that
+ * the maximum SD allocation unit size is just 4MiB.
+ */
+#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
+
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+ struct page *page;
+ unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+ struct mmc_test_pages *arr;
+ unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ * @sg_areq: scatterlist for non-blocking request
+ */
+struct mmc_test_area {
+ unsigned long max_sz;
+ unsigned int dev_addr;
+ unsigned int max_tfr;
+ unsigned int max_segs;
+ unsigned int max_seg_sz;
+ unsigned int blocks;
+ unsigned int sg_len;
+ struct mmc_test_mem *mem;
+ struct scatterlist *sg;
+ struct scatterlist *sg_areq;
+};
+
+/**
+ * struct mmc_test_transfer_result - transfer results for performance tests.
+ * @link: double-linked list
+ * @count: number of groups of sectors checked
+ * @sectors: number of sectors in each group
+ * @ts: time values of transfer
+ * @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
+ */
+struct mmc_test_transfer_result {
+ struct list_head link;
+ unsigned int count;
+ unsigned int sectors;
+ struct timespec64 ts;
+ unsigned int rate;
+ unsigned int iops;
+};
+
+/**
+ * struct mmc_test_general_result - results for tests.
+ * @link: double-linked list
+ * @card: card under test
+ * @testcase: number of test case
+ * @result: result of test run
+ * @tr_lst: transfer measurements if any as mmc_test_transfer_result
+ */
+struct mmc_test_general_result {
+ struct list_head link;
+ struct mmc_card *card;
+ int testcase;
+ int result;
+ struct list_head tr_lst;
+};
+
+/**
+ * struct mmc_test_dbgfs_file - debugfs related file.
+ * @link: double-linked list
+ * @card: card under test
+ * @file: file created under debugfs
+ */
+struct mmc_test_dbgfs_file {
+ struct list_head link;
+ struct mmc_card *card;
+ struct dentry *file;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ * @gr: pointer to results of current testcase
+ */
+struct mmc_test_card {
+ struct mmc_card *card;
+
+ u8 scratch[BUFFER_SIZE];
+ u8 *buffer;
+#ifdef CONFIG_HIGHMEM
+ struct page *highmem;
+#endif
+ struct mmc_test_area area;
+ struct mmc_test_general_result *gr;
+};
+
+enum mmc_test_prep_media {
+ MMC_TEST_PREP_NONE = 0,
+ MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+ MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+ unsigned int *sg_len;
+ unsigned int *bs;
+ unsigned int len;
+ unsigned int size;
+ bool do_write;
+ bool do_nonblock_req;
+ enum mmc_test_prep_media prepare;
+};
+
+/*******************************************************************/
+/* General helper functions */
+/*******************************************************************/
+
+/*
+ * Configure correct block size in card
+ */
+static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
+{
+ return mmc_set_blocklen(test->card, size);
+}
+
+static bool mmc_test_card_cmd23(struct mmc_card *card)
+{
+ return mmc_card_mmc(card) ||
+ (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
+}
+
+static void mmc_test_prepare_sbc(struct mmc_test_card *test,
+ struct mmc_request *mrq, unsigned int blocks)
+{
+ struct mmc_card *card = test->card;
+
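+ /*
+ * Use CMD23 (SET_BLOCK_COUNT) only when the host, the card and the
+ * opcode all support it; otherwise fall back to an open-ended
+ * transfer terminated by CMD12 (STOP_TRANSMISSION).
+ */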
+ if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
+ !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
+ (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
+ mrq->sbc = NULL;
+ return;
+ }
+
+ mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
+ mrq->sbc->arg = blocks;
+ mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
+}
+
+/*
+ * Fill in the mmc_request structure given a set of transfer parameters.
+ */
+static void mmc_test_prepare_mrq(struct mmc_test_card *test,
+ struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
+{
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
+ return;
+
+ if (blocks > 1) {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
+ } else {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq->cmd->arg = dev_addr;
+ if (!mmc_card_blockaddr(test->card))
+ mrq->cmd->arg <<= 9;
+
+ mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ if (blocks == 1) {
+ mrq->stop = NULL;
+ } else {
+ mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+ mrq->stop->arg = 0;
+ mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ mrq->data->blksz = blksz;
+ mrq->data->blocks = blocks;
+ mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mrq->data->sg = sg;
+ mrq->data->sg_len = sg_len;
+
+ mmc_test_prepare_sbc(test, mrq, blocks);
+
+ mmc_set_data_timeout(mrq->data, test->card);
+}
+
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+ return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
+}
+
+/*
+ * Wait for the card to finish the busy state
+ */
+static int mmc_test_wait_busy(struct mmc_test_card *test)
+{
+ int ret, busy;
+ struct mmc_command cmd = {};
+
+ busy = 0;
+ do {
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = test->card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
+ if (ret)
+ break;
+
+ if (!busy && mmc_test_busy(&cmd)) {
+ busy = 1;
+ if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ pr_info("%s: Warning: Host did not wait for busy state to end.\n",
+ mmc_hostname(test->card->host));
+ }
+ } while (mmc_test_busy(&cmd));
+
+ return ret;
+}
+
+/*
+ * Transfer a single sector of kernel addressable data
+ */
+static int mmc_test_buffer_transfer(struct mmc_test_card *test,
+ u8 *buffer, unsigned addr, unsigned blksz, int write)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
+
+ struct scatterlist sg;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ sg_init_one(&sg, buffer, blksz);
+
+ mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
+
+ mmc_wait_for_req(test->card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return mmc_test_wait_busy(test);
+}
+
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+ if (!mem)
+ return;
+ while (mem->cnt--)
+ __free_pages(mem->arr[mem->cnt].page,
+ mem->arr[mem->cnt].order);
+ kfree(mem->arr);
+ kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz. If memory
+ * is scarce, do not exceed 1/16th of the total lowmem pages. Also do not
+ * exceed the maximum number of segments, and try not to make segments much
+ * bigger than the maximum segment size.
+ */
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+ unsigned long max_sz,
+ unsigned int max_segs,
+ unsigned int max_seg_sz)
+{
+ unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+ unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+ unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
+ unsigned long page_cnt = 0;
+ unsigned long limit = nr_free_buffer_pages() >> 4;
+ struct mmc_test_mem *mem;
+
+ if (max_page_cnt > limit)
+ max_page_cnt = limit;
+ if (min_page_cnt > max_page_cnt)
+ min_page_cnt = max_page_cnt;
+
+ if (max_seg_page_cnt > max_page_cnt)
+ max_seg_page_cnt = max_page_cnt;
+
+ if (max_segs > max_page_cnt)
+ max_segs = max_page_cnt;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return NULL;
+
+ mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
+ if (!mem->arr)
+ goto out_free;
+
+ while (max_page_cnt) {
+ struct page *page;
+ unsigned int order;
+ gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+ __GFP_NORETRY;
+
+ order = get_order(max_seg_page_cnt << PAGE_SHIFT);
+ while (1) {
+ page = alloc_pages(flags, order);
+ if (page || !order)
+ break;
+ order -= 1;
+ }
+ if (!page) {
+ if (page_cnt < min_page_cnt)
+ goto out_free;
+ break;
+ }
+ mem->arr[mem->cnt].page = page;
+ mem->arr[mem->cnt].order = order;
+ mem->cnt += 1;
+ if (max_page_cnt <= (1UL << order))
+ break;
+ max_page_cnt -= 1UL << order;
+ page_cnt += 1UL << order;
+ if (mem->cnt >= max_segs) {
+ if (page_cnt < min_page_cnt)
+ goto out_free;
+ break;
+ }
+ }
+
+ return mem;
+
+out_free:
+ mmc_test_free_mem(mem);
+ return NULL;
+}
+
+/*
+ * Map memory into a scatterlist. Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
+ struct scatterlist *sglist, int repeat,
+ unsigned int max_segs, unsigned int max_seg_sz,
+ unsigned int *sg_len, int min_sg_len)
+{
+ struct scatterlist *sg = NULL;
+ unsigned int i;
+ unsigned long sz = size;
+
+ sg_init_table(sglist, max_segs);
+ if (min_sg_len > max_segs)
+ min_sg_len = max_segs;
+
+ *sg_len = 0;
+ do {
+ for (i = 0; i < mem->cnt; i++) {
+ unsigned long len = PAGE_SIZE << mem->arr[i].order;
+
+ if (min_sg_len && (size / min_sg_len < len))
+ len = ALIGN(size / min_sg_len, 512);
+ if (len > sz)
+ len = sz;
+ if (len > max_seg_sz)
+ len = max_seg_sz;
+ if (sg)
+ sg = sg_next(sg);
+ else
+ sg = sglist;
+ if (!sg)
+ return -EINVAL;
+ sg_set_page(sg, mem->arr[i].page, len, 0);
+ sz -= len;
+ *sg_len += 1;
+ if (!sz)
+ break;
+ }
+ } while (sz && repeat);
+
+ if (sz)
+ return -EINVAL;
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return 0;
+}
+
+/*
+ * Map memory into a scatterlist so that no pages are contiguous. Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+ unsigned long sz,
+ struct scatterlist *sglist,
+ unsigned int max_segs,
+ unsigned int max_seg_sz,
+ unsigned int *sg_len)
+{
+ struct scatterlist *sg = NULL;
+ unsigned int i = mem->cnt, cnt;
+ unsigned long len;
+ void *base, *addr, *last_addr = NULL;
+
+ sg_init_table(sglist, max_segs);
+
+ *sg_len = 0;
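+ /*
+ * Walk the allocations from the last page towards the first, mapping
+ * one page per segment so consecutive segments are never contiguous.
+ */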
+ while (sz) {
+ base = page_address(mem->arr[--i].page);
+ cnt = 1 << mem->arr[i].order;
+ while (sz && cnt) {
+ addr = base + PAGE_SIZE * --cnt;
+ if (last_addr && last_addr + PAGE_SIZE == addr)
+ continue;
+ last_addr = addr;
+ len = PAGE_SIZE;
+ if (len > max_seg_sz)
+ len = max_seg_sz;
+ if (len > sz)
+ len = sz;
+ if (sg)
+ sg = sg_next(sg);
+ else
+ sg = sglist;
+ if (!sg)
+ return -EINVAL;
+ sg_set_page(sg, virt_to_page(addr), len, 0);
+ sz -= len;
+ *sg_len += 1;
+ }
+ if (i == 0)
+ i = mem->cnt;
+ }
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return 0;
+}
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
+{
+ uint64_t ns;
+
+ ns = timespec64_to_ns(ts);
+ bytes *= 1000000000;
+
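+ /* Scale both values down together until ns fits in 32 bits for do_div(). */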
+ while (ns > UINT_MAX) {
+ bytes >>= 1;
+ ns >>= 1;
+ }
+
+ if (!ns)
+ return 0;
+
+ do_div(bytes, (uint32_t)ns);
+
+ return bytes;
+}
+
+/*
+ * Save transfer results for future usage
+ */
+static void mmc_test_save_transfer_result(struct mmc_test_card *test,
+ unsigned int count, unsigned int sectors, struct timespec64 ts,
+ unsigned int rate, unsigned int iops)
+{
+ struct mmc_test_transfer_result *tr;
+
+ if (!test->gr)
+ return;
+
+ tr = kmalloc(sizeof(*tr), GFP_KERNEL);
+ if (!tr)
+ return;
+
+ tr->count = count;
+ tr->sectors = sectors;
+ tr->ts = ts;
+ tr->rate = rate;
+ tr->iops = iops;
+
+ list_add_tail(&tr->link, &test->gr->tr_lst);
+}
+
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+ struct timespec64 *ts1, struct timespec64 *ts2)
+{
+ unsigned int rate, iops, sectors = bytes >> 9;
+ struct timespec64 ts;
+
+ ts = timespec64_sub(*ts2, *ts1);
+
+ rate = mmc_test_rate(bytes, &ts);
+ iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
+
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
+ "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
+ mmc_hostname(test->card->host), sectors, sectors >> 1,
+ (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
+ (u32)ts.tv_nsec, rate / 1000, rate / 1024,
+ iops / 100, iops % 100);
+
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+ unsigned int count, struct timespec64 *ts1,
+ struct timespec64 *ts2)
+{
+ unsigned int rate, iops, sectors = bytes >> 9;
+ uint64_t tot = bytes * count;
+ struct timespec64 ts;
+
+ ts = timespec64_sub(*ts2, *ts1);
+
+ rate = mmc_test_rate(tot, &ts);
+ iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
+
+ pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+ "%llu.%09u seconds (%u kB/s, %u KiB/s, "
+ "%u.%02u IOPS, sg_len %d)\n",
+ mmc_hostname(test->card->host), count, sectors, count,
+ sectors >> 1, (sectors & 1 ? ".5" : ""),
+ (u64)ts.tv_sec, (u32)ts.tv_nsec,
+ rate / 1000, rate / 1024, iops / 100, iops % 100,
+ test->area.sg_len);
+
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
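+ /*
+ * Block-addressed (high capacity) eMMC reports its size in EXT_CSD
+ * sectors; otherwise derive the capacity from the CSD fields.
+ */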
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+ return card->ext_csd.sectors;
+ else
+ return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
+/*******************************************************************/
+/* Test preparation and cleanup */
+/*******************************************************************/
+
+/*
+ * Fill the first couple of sectors of the card with known data
+ * so that bad reads/writes can be detected
+ */
+static int __mmc_test_prepare(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ if (write) {
+ memset(test->buffer, 0xDF, 512);
+ } else {
+ for (i = 0; i < 512; i++)
+ test->buffer[i] = i;
+ }
+
+ for (i = 0; i < BUFFER_SIZE / 512; i++) {
+ ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_prepare_write(struct mmc_test_card *test)
+{
+ return __mmc_test_prepare(test, 1);
+}
+
+static int mmc_test_prepare_read(struct mmc_test_card *test)
+{
+ return __mmc_test_prepare(test, 0);
+}
+
+static int mmc_test_cleanup(struct mmc_test_card *test)
+{
+ int ret, i;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ memset(test->buffer, 0, 512);
+
+ for (i = 0; i < BUFFER_SIZE / 512; i++) {
+ ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*******************************************************************/
+/* Test execution helpers */
+/*******************************************************************/
+
+/*
+ * Modifies the mmc_request to perform the "short transfer" tests
+ */
+static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
+ struct mmc_request *mrq, int write)
+{
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return;
+
+ if (mrq->data->blocks > 1) {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ mrq->stop = NULL;
+ } else {
+ mrq->cmd->opcode = MMC_SEND_STATUS;
+ mrq->cmd->arg = test->card->rca << 16;
+ }
+}
+
+/*
+ * Checks that a normal transfer didn't have any errors
+ */
+static int mmc_test_check_result(struct mmc_test_card *test,
+ struct mmc_request *mrq)
+{
+ int ret;
+
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return -EINVAL;
+
+ ret = 0;
+
+ if (mrq->sbc && mrq->sbc->error)
+ ret = mrq->sbc->error;
+ if (!ret && mrq->cmd->error)
+ ret = mrq->cmd->error;
+ if (!ret && mrq->data->error)
+ ret = mrq->data->error;
+ if (!ret && mrq->stop && mrq->stop->error)
+ ret = mrq->stop->error;
+ if (!ret && mrq->data->bytes_xfered !=
+ mrq->data->blocks * mrq->data->blksz)
+ ret = RESULT_FAIL;
+
+ if (ret == -EINVAL)
+ ret = RESULT_UNSUP_HOST;
+
+ return ret;
+}
+
+/*
+ * Checks that a "short transfer" behaved as expected
+ */
+static int mmc_test_check_broken_result(struct mmc_test_card *test,
+ struct mmc_request *mrq)
+{
+ int ret;
+
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return -EINVAL;
+
+ ret = 0;
+
+ if (!ret && mrq->cmd->error)
+ ret = mrq->cmd->error;
+ if (!ret && mrq->data->error == 0)
+ ret = RESULT_FAIL;
+ if (!ret && mrq->data->error != -ETIMEDOUT)
+ ret = mrq->data->error;
+ if (!ret && mrq->stop && mrq->stop->error)
+ ret = mrq->stop->error;
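+ /*
+ * A broken multi-block request was downgraded to a single-block
+ * command, so at most one block may transfer; a broken single-block
+ * request became SEND_STATUS, so no data at all should transfer.
+ */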
+ if (mrq->data->blocks > 1) {
+ if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
+ ret = RESULT_FAIL;
+ } else {
+ if (!ret && mrq->data->bytes_xfered > 0)
+ ret = RESULT_FAIL;
+ }
+
+ if (ret == -EINVAL)
+ ret = RESULT_UNSUP_HOST;
+
+ return ret;
+}
+
+struct mmc_test_req {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_command status;
+ struct mmc_data data;
+};
+
+/*
+ * Helpers for testing non-blocking transfers
+ */
+static void mmc_test_req_reset(struct mmc_test_req *rq)
+{
+ memset(rq, 0, sizeof(struct mmc_test_req));
+
+ rq->mrq.cmd = &rq->cmd;
+ rq->mrq.data = &rq->data;
+ rq->mrq.stop = &rq->stop;
+}
+
+static struct mmc_test_req *mmc_test_req_alloc(void)
+{
+ struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
+
+ if (rq)
+ mmc_test_req_reset(rq);
+
+ return rq;
+}
+
+static void mmc_test_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int mmc_test_start_areq(struct mmc_test_card *test,
+ struct mmc_request *mrq,
+ struct mmc_request *prev_mrq)
+{
+ struct mmc_host *host = test->card->host;
+ int err = 0;
+
+ if (mrq) {
+ init_completion(&mrq->completion);
+ mrq->done = mmc_test_wait_done;
+ mmc_pre_req(host, mrq);
+ }
+
+ if (prev_mrq) {
+ wait_for_completion(&prev_mrq->completion);
+ err = mmc_test_wait_busy(test);
+ if (!err)
+ err = mmc_test_check_result(test, prev_mrq);
+ }
+
+ if (!err && mrq) {
+ err = mmc_start_request(host, mrq);
+ if (err)
+ mmc_retune_release(host);
+ }
+
+ if (prev_mrq)
+ mmc_post_req(host, prev_mrq, 0);
+
+ if (err && mrq)
+ mmc_post_req(host, mrq, err);
+
+ return err;
+}
+
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+ unsigned int dev_addr, int write,
+ int count)
+{
+ struct mmc_test_req *rq1, *rq2;
+ struct mmc_request *mrq, *prev_mrq;
+ int i;
+ int ret = RESULT_OK;
+ struct mmc_test_area *t = &test->area;
+ struct scatterlist *sg = t->sg;
+ struct scatterlist *sg_areq = t->sg_areq;
+
+ rq1 = mmc_test_req_alloc();
+ rq2 = mmc_test_req_alloc();
+ if (!rq1 || !rq2) {
+ ret = RESULT_FAIL;
+ goto err;
+ }
+
+ mrq = &rq1->mrq;
+ prev_mrq = NULL;
+
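+ /*
+ * Ping-pong between the two requests: prepare and submit the next
+ * transfer while the previous one completes.
+ */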
+ for (i = 0; i < count; i++) {
+ mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
+ mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
+ t->blocks, 512, write);
+ ret = mmc_test_start_areq(test, mrq, prev_mrq);
+ if (ret)
+ goto err;
+
+ if (!prev_mrq)
+ prev_mrq = &rq2->mrq;
+
+ swap(mrq, prev_mrq);
+ swap(sg, sg_areq);
+ dev_addr += t->blocks;
+ }
+
+ ret = mmc_test_start_areq(test, NULL, prev_mrq);
+err:
+ kfree(rq1);
+ kfree(rq2);
+ return ret;
+}
+
+/*
+ * Tests a basic transfer with certain parameters
+ */
+static int mmc_test_simple_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+ unsigned blocks, unsigned blksz, int write)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+
+ mmc_wait_for_req(test->card->host, &mrq);
+
+ mmc_test_wait_busy(test);
+
+ return mmc_test_check_result(test, &mrq);
+}
+
+/*
+ * Tests a transfer where the card will fail completely or partly
+ */
+static int mmc_test_broken_transfer(struct mmc_test_card *test,
+ unsigned blocks, unsigned blksz, int write)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
+
+ struct scatterlist sg;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ sg_init_one(&sg, test->buffer, blocks * blksz);
+
+ mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
+ mmc_test_prepare_broken_mrq(test, &mrq, write);
+
+ mmc_wait_for_req(test->card->host, &mrq);
+
+ mmc_test_wait_busy(test);
+
+ return mmc_test_check_broken_result(test, &mrq);
+}
+
+/*
+ * Does a complete transfer test where data is also validated
+ *
+ * Note: mmc_test_prepare() must have been done before this call
+ */
+static int mmc_test_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+ unsigned blocks, unsigned blksz, int write)
+{
+ int ret, i;
+ unsigned long flags;
+
+ if (write) {
+ for (i = 0; i < blocks * blksz; i++)
+ test->scratch[i] = i;
+ } else {
+ memset(test->scratch, 0, BUFFER_SIZE);
+ }
+ local_irq_save(flags);
+ sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+ local_irq_restore(flags);
+
+ ret = mmc_test_set_blksize(test, blksz);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ if (ret)
+ return ret;
+
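+ /*
+ * For writes, read everything back through the known-good
+ * single-block path and verify the pattern.
+ */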
+ if (write) {
+ int sectors;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
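+ /*
+ * Round up to whole sectors; when exactly aligned, read one extra
+ * sector to verify the write did not spill past its end.
+ */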
+ sectors = (blocks * blksz + 511) / 512;
+ if ((sectors * 512) == (blocks * blksz))
+ sectors++;
+
+ if ((sectors * 512) > BUFFER_SIZE)
+ return -EINVAL;
+
+ memset(test->buffer, 0, sectors * 512);
+
+ for (i = 0; i < sectors; i++) {
+ ret = mmc_test_buffer_transfer(test,
+ test->buffer + i * 512,
+ dev_addr + i, 512, 0);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < blocks * blksz; i++) {
+ if (test->buffer[i] != (u8)i)
+ return RESULT_FAIL;
+ }
+
+ for (; i < sectors * 512; i++) {
+ if (test->buffer[i] != 0xDF)
+ return RESULT_FAIL;
+ }
+ } else {
+ local_irq_save(flags);
+ sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+ local_irq_restore(flags);
+ for (i = 0; i < blocks * blksz; i++) {
+ if (test->scratch[i] != (u8)i)
+ return RESULT_FAIL;
+ }
+ }
+
+ return 0;
+}
+
+/*******************************************************************/
+/* Tests */
+/*******************************************************************/
+
+struct mmc_test_case {
+ const char *name;
+
+ int (*prepare)(struct mmc_test_card *);
+ int (*run)(struct mmc_test_card *);
+ int (*cleanup)(struct mmc_test_card *);
+};
+
+static int mmc_test_basic_write(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
+}
+
+static int mmc_test_basic_read(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
+}
+
+static int mmc_test_verify_write(struct mmc_test_card *test)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+}
+
+static int mmc_test_verify_read(struct mmc_test_card *test)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, test->buffer, 512);
+
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+}
+
+static int mmc_test_multi_write(struct mmc_test_card *test)
+{
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_one(&sg, test->buffer, size);
+
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
+}
+
+static int mmc_test_multi_read(struct mmc_test_card *test)
+{
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_one(&sg, test->buffer, size);
+
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
+}
+
+static int mmc_test_pow2_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.write_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 1; i < 512; i <<= 1) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_pow2_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.read_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 1; i < 512; i <<= 1) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_weird_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.write_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 3; i < 512; i += 7) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_weird_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ if (!test->card->csd.read_partial)
+ return RESULT_UNSUP_CARD;
+
+ for (i = 3; i < 512; i += 7) {
+ sg_init_one(&sg, test->buffer, i);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ for (i = 1; i < TEST_ALIGN_END; i++) {
+ sg_init_one(&sg, test->buffer + i, 512);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ struct scatterlist sg;
+
+ for (i = 1; i < TEST_ALIGN_END; i++) {
+ sg_init_one(&sg, test->buffer + i, 512);
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_multi_write(struct mmc_test_card *test)
+{
+ int ret, i;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ for (i = 1; i < TEST_ALIGN_END; i++) {
+ sg_init_one(&sg, test->buffer + i, size);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_align_multi_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ for (i = 1; i < TEST_ALIGN_END; i++) {
+ sg_init_one(&sg, test->buffer + i, size);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mmc_test_xfersize_write(struct mmc_test_card *test)
+{
+ int ret;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ return mmc_test_broken_transfer(test, 1, 512, 1);
+}
+
+static int mmc_test_xfersize_read(struct mmc_test_card *test)
+{
+ int ret;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ return mmc_test_broken_transfer(test, 1, 512, 0);
+}
+
+static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
+{
+ int ret;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ return mmc_test_broken_transfer(test, 2, 512, 1);
+}
+
+static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
+{
+ int ret;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ return mmc_test_broken_transfer(test, 2, 512, 0);
+}
+
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, 512, 0);
+
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, 512, 0);
+
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, size, 0);
+
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, size, 0);
+
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
+}
+
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+ pr_info("%s: Highmem not configured - test skipped\n",
+ mmc_hostname(test->card->host));
+ return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * Map sz bytes so that they can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
+ int max_scatter, int min_sg_len, bool nonblock)
+{
+ struct mmc_test_area *t = &test->area;
+ int err;
+ unsigned int sg_len = 0;
+
+ t->blocks = sz >> 9;
+
+ if (max_scatter) {
+ err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+ t->max_segs, t->max_seg_sz,
+ &t->sg_len);
+ } else {
+ err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+ t->max_seg_sz, &t->sg_len, min_sg_len);
+ }
+
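+ /*
+ * For non-blocking tests, map the same memory a second time into
+ * sg_areq so that two requests can be in flight at once.
+ */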
+ if (err || !nonblock)
+ goto err;
+
+ if (max_scatter) {
+ err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
+ t->max_segs, t->max_seg_sz,
+ &sg_len);
+ } else {
+ err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
+ t->max_seg_sz, &sg_len, min_sg_len);
+ }
+ if (!err && sg_len != t->sg_len)
+ err = -EINVAL;
+
+err:
+ if (err)
+ pr_info("%s: Failed to map sg list\n",
+ mmc_hostname(test->card->host));
+ return err;
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+ unsigned int dev_addr, int write)
+{
+ struct mmc_test_area *t = &test->area;
+
+ return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+ t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes for multiple transfers.
+ */
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write,
+ int max_scatter, int timed, int count,
+ bool nonblock, int min_sg_len)
+{
+ struct timespec64 ts1, ts2;
+ int ret = 0;
+ int i;
+
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ struct mmc_test_area *t = &test->area;
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
+ ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
+ if (ret)
+ return ret;
+
+ if (timed)
+ ktime_get_ts64(&ts1);
+ if (nonblock) {
+ ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
+ } else {
+ for (i = 0; i < count && ret == 0; i++) {
+ ret = mmc_test_area_transfer(test, dev_addr, write);
+ dev_addr += sz >> 9;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (timed) {
+ ktime_get_ts64(&ts2);
+ mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
+ }
+
+ return 0;
+}
+
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write, int max_scatter,
+ int timed)
+{
+ return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+ timed, 1, false, 0);
+}
+
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+
+ return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+
+ if (!mmc_can_erase(test->card))
+ return 0;
+
+ return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
+ MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+
+ kfree(t->sg);
+ kfree(t->sg_areq);
+ mmc_test_free_mem(t->mem);
+
+ return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long min_sz = 64 * 1024, sz;
+ int ret;
+
+ ret = mmc_test_set_blksize(test, 512);
+ if (ret)
+ return ret;
+
+ /* Make the test area size about 4MiB */
+ sz = (unsigned long)test->card->pref_erase << 9;
+ t->max_sz = sz;
+ while (t->max_sz < 4 * 1024 * 1024)
+ t->max_sz += sz;
+ while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+ t->max_sz -= sz;
+
+ t->max_segs = test->card->host->max_segs;
+ t->max_seg_sz = test->card->host->max_seg_size;
+ t->max_seg_sz -= t->max_seg_sz % 512;
+
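+ /*
+ * Clamp the maximum transfer to the host's block count, request size
+ * and segment limits.
+ */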
+ t->max_tfr = t->max_sz;
+ if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+ t->max_tfr = test->card->host->max_blk_count << 9;
+ if (t->max_tfr > test->card->host->max_req_size)
+ t->max_tfr = test->card->host->max_req_size;
+ if (t->max_tfr / t->max_seg_sz > t->max_segs)
+ t->max_tfr = t->max_segs * t->max_seg_sz;
+
+ /*
+ * Try to allocate enough memory for a max. sized transfer. Less is OK
+ * because the same memory can be mapped into the scatterlist more than
+ * once. Also, take into account the limits imposed on scatterlist
+ * segments by the host driver.
+ */
+ t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
+ t->max_seg_sz);
+ if (!t->mem)
+ return -ENOMEM;
+
+ t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
+ if (!t->sg) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
+ GFP_KERNEL);
+ if (!t->sg_areq) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ t->dev_addr = mmc_test_capacity(test->card) / 2;
+ t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+ if (erase) {
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ goto out_free;
+ }
+
+ if (fill) {
+ ret = mmc_test_area_fill(test);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ mmc_test_area_cleanup(test);
+ return ret;
+}
+
+/*
+ * Prepare for large transfers. Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+ return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers. Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+ return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers. Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+ return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance. Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list. This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+ int max_scatter)
+{
+ struct mmc_test_area *t = &test->area;
+
+ return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
+ max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+ return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ unsigned int dev_addr;
+ int ret;
+
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ dev_addr = t->dev_addr + (sz >> 9);
+ ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+ if (ret)
+ return ret;
+ }
+ sz = t->max_tfr;
+ dev_addr = t->dev_addr;
+ return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ unsigned int dev_addr;
+ int ret;
+
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ dev_addr = t->dev_addr + (sz >> 9);
+ ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+ if (ret)
+ return ret;
+ }
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ sz = t->max_tfr;
+ dev_addr = t->dev_addr;
+ return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ unsigned int dev_addr;
+ struct timespec64 ts1, ts2;
+ int ret;
+
+ if (!mmc_can_trim(test->card))
+ return RESULT_UNSUP_CARD;
+
+ if (!mmc_can_erase(test->card))
+ return RESULT_UNSUP_HOST;
+
+ for (sz = 512; sz < t->max_sz; sz <<= 1) {
+ dev_addr = t->dev_addr + (sz >> 9);
+ ktime_get_ts64(&ts1);
+ ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+ if (ret)
+ return ret;
+ ktime_get_ts64(&ts2);
+ mmc_test_print_rate(test, sz, &ts1, &ts2);
+ }
+ dev_addr = t->dev_addr;
+ ktime_get_ts64(&ts1);
+ ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+ if (ret)
+ return ret;
+ ktime_get_ts64(&ts2);
+ mmc_test_print_rate(test, sz, &ts1, &ts2);
+ return 0;
+}
+
+static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned int dev_addr, i, cnt;
+ struct timespec64 ts1, ts2;
+ int ret;
+
+ cnt = t->max_sz / sz;
+ dev_addr = t->dev_addr;
+ ktime_get_ts64(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ ktime_get_ts64(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ ret = mmc_test_seq_read_perf(test, sz);
+ if (ret)
+ return ret;
+ }
+ sz = t->max_tfr;
+ return mmc_test_seq_read_perf(test, sz);
+}
+
+static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned int dev_addr, i, cnt;
+ struct timespec64 ts1, ts2;
+ int ret;
+
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ cnt = t->max_sz / sz;
+ dev_addr = t->dev_addr;
+ ktime_get_ts64(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ ktime_get_ts64(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ ret = mmc_test_seq_write_perf(test, sz);
+ if (ret)
+ return ret;
+ }
+ sz = t->max_tfr;
+ return mmc_test_seq_write_perf(test, sz);
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ unsigned int dev_addr, i, cnt;
+ struct timespec64 ts1, ts2;
+ int ret;
+
+ if (!mmc_can_trim(test->card))
+ return RESULT_UNSUP_CARD;
+
+ if (!mmc_can_erase(test->card))
+ return RESULT_UNSUP_HOST;
+
+ for (sz = 512; sz <= t->max_sz; sz <<= 1) {
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ ret = mmc_test_area_fill(test);
+ if (ret)
+ return ret;
+ cnt = t->max_sz / sz;
+ dev_addr = t->dev_addr;
+ ktime_get_ts64(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_erase(test->card, dev_addr, sz >> 9,
+ MMC_TRIM_ARG);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ ktime_get_ts64(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ }
+ return 0;
+}
+
+static unsigned int rnd_next = 1;
+
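+/*
+ * Minimal linear congruential generator using the classic C rand()
+ * constants. Deliberately deterministic so that test runs are
+ * reproducible; returns a value in [0, rnd_cnt).
+ */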
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+ uint64_t r;
+
+ rnd_next = rnd_next * 1103515245 + 12345;
+ r = (rnd_next >> 16) & 0x7fff;
+ return (r * rnd_cnt) >> 15;
+}
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+ unsigned long sz)
+{
+ unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+ unsigned int ssz;
+ struct timespec64 ts1, ts2, ts;
+ int ret;
+
+ ssz = sz >> 9;
+
+ rnd_addr = mmc_test_capacity(test->card) / 4;
+ range1 = rnd_addr / test->card->pref_erase;
+ range2 = range1 / ssz;
+
+ ktime_get_ts64(&ts1);
+ for (cnt = 0; cnt < UINT_MAX; cnt++) {
+ ktime_get_ts64(&ts2);
+ ts = timespec64_sub(ts2, ts1);
+ if (ts.tv_sec >= 10)
+ break;
+ ea = mmc_test_rnd_num(range1);
+ if (ea == last_ea)
+ ea -= 1;
+ last_ea = ea;
+ dev_addr = rnd_addr + test->card->pref_erase * ea +
+ ssz * mmc_test_rnd_num(range2);
+ ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+ if (ret)
+ return ret;
+ }
+ if (print)
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned int next;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ /*
+ * When writing, try to get more consistent results by running
+ * the test twice with exactly the same I/O but outputting the
+ * results only for the 2nd run.
+ */
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ ret = mmc_test_rnd_perf(test, write, 1, sz);
+ if (ret)
+ return ret;
+ }
+ sz = t->max_tfr;
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+ unsigned int tot_sz, int max_scatter)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned int dev_addr, i, cnt, sz, ssz;
+ struct timespec64 ts1, ts2;
+ int ret;
+
+ sz = t->max_tfr;
+
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
+ ssz = sz >> 9;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if (tot_sz > dev_addr << 9)
+ tot_sz = dev_addr << 9;
+ cnt = tot_sz / sz;
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+ ktime_get_ts64(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, write,
+ max_scatter, 0);
+ if (ret)
+ return ret;
+ dev_addr += ssz;
+ }
+ ktime_get_ts64(&ts2);
+
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+ return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ for (i = 0; i < 10; i++) {
+ ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 5; i++) {
+ ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 1);
+}
+
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *tdata,
+ unsigned int reqsize, unsigned int size,
+ int min_sg_len)
+{
+ unsigned int dev_addr;
+ struct mmc_test_area *t = &test->area;
+ int ret = 0;
+
+ /* Set up test area */
+ if (size > mmc_test_capacity(test->card) / 2 * 512)
+ size = mmc_test_capacity(test->card) / 2 * 512;
+ if (reqsize > t->max_tfr)
+ reqsize = t->max_tfr;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if (dev_addr & 0xffff0000)
+ dev_addr &= 0xffff0000; /* Round to a 32MiB boundary */
+ else
+ dev_addr &= 0xfffff800; /* Round to a 1MiB boundary */
+ if (!dev_addr)
+ goto err;
+
+ if (reqsize > size)
+ return 0;
+
+ /* prepare test area */
+ if (mmc_can_erase(test->card) &&
+ tdata->prepare & MMC_TEST_PREP_ERASE) {
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_SECURE_ERASE_ARG);
+ if (ret)
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_ERASE_ARG);
+ if (ret)
+ goto err;
+ }
+
+ /* Run test */
+ ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+ tdata->do_write, 0, 1, size / reqsize,
+ tdata->do_nonblock_req, min_sg_len);
+ if (ret)
+ goto err;
+
+ return ret;
+ err:
+ pr_info("[%s] error\n", __func__);
+ return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+ void *pre_req = test->card->host->ops->pre_req;
+ void *post_req = test->card->host->ops->post_req;
+
+ if (rw->do_nonblock_req &&
+ ((!pre_req && post_req) || (pre_req && !post_req))) {
+ pr_info("error: only one of pre/post is defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
+ rw->sg_len[i]);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * eMMC hardware reset.
+ */
+static int mmc_test_reset(struct mmc_test_card *test)
+{
+ struct mmc_card *card = test->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ err = mmc_hw_reset(host);
+ if (!err) {
+ /*
+ * Reset will re-enable the card's command queue, but tests
+ * expect it to be disabled.
+ */
+ if (card->ext_csd.cmdq_en)
+ mmc_cmdq_disable(card);
+ return RESULT_OK;
+ } else if (err == -EOPNOTSUPP) {
+ return RESULT_UNSUP_HOST;
+ }
+
+ return RESULT_FAIL;
+}
+
+static int mmc_test_send_status(struct mmc_test_card *test,
+ struct mmc_command *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(test->card->host))
+ cmd->arg = test->card->rca << 16;
+ cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(test->card->host, cmd, 0);
+}
+
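+/*
+ * Start a data request with cap_cmd_during_tfr set, then issue CMD13
+ * (SEND_STATUS) while the transfer is in progress (repeatedly, when
+ * @repeat_cmd is set), to verify that commands can be sent during an
+ * ongoing data transfer.
+ */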
+static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
+ unsigned int dev_addr, int use_sbc,
+ int repeat_cmd, int write, int use_areq)
+{
+ struct mmc_test_req *rq = mmc_test_req_alloc();
+ struct mmc_host *host = test->card->host;
+ struct mmc_test_area *t = &test->area;
+ struct mmc_request *mrq;
+ unsigned long timeout;
+ bool expired = false;
+ int ret = 0, cmd_ret;
+ u32 status = 0;
+ int count = 0;
+
+ if (!rq)
+ return -ENOMEM;
+
+ mrq = &rq->mrq;
+ if (use_sbc)
+ mrq->sbc = &rq->sbc;
+ mrq->cap_cmd_during_tfr = true;
+
+ mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
+ 512, write);
+
+ if (use_sbc && t->blocks > 1 && !mrq->sbc) {
+ ret = mmc_host_cmd23(host) ?
+ RESULT_UNSUP_CARD :
+ RESULT_UNSUP_HOST;
+ goto out_free;
+ }
+
+ /* Start ongoing data request */
+ if (use_areq) {
+ ret = mmc_test_start_areq(test, mrq, NULL);
+ if (ret)
+ goto out_free;
+ } else {
+ mmc_wait_for_req(host, mrq);
+ }
+
+ timeout = jiffies + msecs_to_jiffies(3000);
+ do {
+ count += 1;
+
+ /* Send status command while data transfer in progress */
+ cmd_ret = mmc_test_send_status(test, &rq->status);
+ if (cmd_ret)
+ break;
+
+ status = rq->status.resp[0];
+ if (status & R1_ERROR) {
+ cmd_ret = -EIO;
+ break;
+ }
+
+ if (mmc_is_req_done(host, mrq))
+ break;
+
+ expired = time_after(jiffies, timeout);
+ if (expired) {
+ pr_info("%s: timeout waiting for Tran state status %#x\n",
+ mmc_hostname(host), status);
+ cmd_ret = -ETIMEDOUT;
+ break;
+ }
+ } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
+
+ /* Wait for data request to complete */
+ if (use_areq) {
+ ret = mmc_test_start_areq(test, NULL, mrq);
+ } else {
+ mmc_wait_for_req_done(test->card->host, mrq);
+ }
+
+	/*
+	 * For a cap_cmd_during_tfr request, the upper layer must send the
+	 * stop command itself, if one is required.
+	 */
+ if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
+ if (ret)
+ mmc_wait_for_cmd(host, mrq->data->stop, 0);
+ else
+ ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
+ }
+
+ if (ret)
+ goto out_free;
+
+ if (cmd_ret) {
+ pr_info("%s: Send Status failed: status %#x, error %d\n",
+ mmc_hostname(test->card->host), status, cmd_ret);
+ }
+
+ ret = mmc_test_check_result(test, mrq);
+ if (ret)
+ goto out_free;
+
+ ret = mmc_test_wait_busy(test);
+ if (ret)
+ goto out_free;
+
+ if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
+ pr_info("%s: %d commands completed during transfer of %u blocks\n",
+ mmc_hostname(test->card->host), count, t->blocks);
+
+ if (cmd_ret)
+ ret = cmd_ret;
+out_free:
+ kfree(rq);
+
+ return ret;
+}
+
+static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
+ unsigned long sz, int use_sbc, int write,
+ int use_areq)
+{
+ struct mmc_test_area *t = &test->area;
+ int ret;
+
+ if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
+ return RESULT_UNSUP_HOST;
+
+ ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
+ if (ret)
+ return ret;
+
+ ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
+ use_areq);
+ if (ret)
+ return ret;
+
+ return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
+ use_areq);
+}
+
+static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
+ int write, int use_areq)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz <= t->max_tfr; sz += 512) {
+ ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
+ use_areq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Commands during read - no Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 0, 0, 0);
+}
+
+/*
+ * Commands during write - no Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 0, 1, 0);
+}
+
+/*
+ * Commands during read - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 0, 0);
+}
+
+/*
+ * Commands during write - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 1, 0);
+}
+
+/*
+ * Commands during non-blocking read - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 0, 1);
+}
+
+/*
+ * Commands during non-blocking write - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
+{
+ return mmc_test_cmds_during_tfr(test, 1, 1, 1);
+}
+
+static const struct mmc_test_case mmc_test_cases[] = {
+ {
+ .name = "Basic write (no data verification)",
+ .run = mmc_test_basic_write,
+ },
+
+ {
+ .name = "Basic read (no data verification)",
+ .run = mmc_test_basic_read,
+ },
+
+ {
+ .name = "Basic write (with data verification)",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_verify_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Basic read (with data verification)",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_verify_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_multi_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_multi_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Power of two block writes",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_pow2_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Power of two block reads",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_pow2_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Weird sized block writes",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_weird_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Weird sized block reads",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_weird_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned multi-block write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_multi_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Badly aligned multi-block read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_multi_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Proper xfer_size at write (start failure)",
+ .run = mmc_test_xfersize_write,
+ },
+
+ {
+ .name = "Proper xfer_size at read (start failure)",
+ .run = mmc_test_xfersize_read,
+ },
+
+ {
+ .name = "Proper xfer_size at write (midway failure)",
+ .run = mmc_test_multi_xfersize_write,
+ },
+
+ {
+ .name = "Proper xfer_size at read (midway failure)",
+ .run = mmc_test_multi_xfersize_read,
+ },
+
+#ifdef CONFIG_HIGHMEM
+
+ {
+ .name = "Highmem write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_write_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Highmem read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_read_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block highmem write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_multi_write_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block highmem read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_multi_read_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+#else
+
+ {
+ .name = "Highmem write",
+ .run = mmc_test_no_highmem,
+ },
+
+ {
+ .name = "Highmem read",
+ .run = mmc_test_no_highmem,
+ },
+
+ {
+ .name = "Multi-block highmem write",
+ .run = mmc_test_no_highmem,
+ },
+
+ {
+ .name = "Multi-block highmem read",
+ .run = mmc_test_no_highmem,
+ },
+
+#endif /* CONFIG_HIGHMEM */
+
+ {
+ .name = "Best-case read performance",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_best_read_performance,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Best-case write performance",
+ .prepare = mmc_test_area_prepare_erase,
+ .run = mmc_test_best_write_performance,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Best-case read performance into scattered pages",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_best_read_perf_max_scatter,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Best-case write performance from scattered pages",
+ .prepare = mmc_test_area_prepare_erase,
+ .run = mmc_test_best_write_perf_max_scatter,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Single read performance by transfer size",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_profile_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Single write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Single trim performance by transfer size",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_profile_trim_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Consecutive read performance by transfer size",
+ .prepare = mmc_test_area_prepare_fill,
+ .run = mmc_test_profile_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Consecutive write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Consecutive trim performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_seq_trim_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Random read performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Random write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential read into scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential write from scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Reset test",
+ .run = mmc_test_reset,
+ },
+
+ {
+ .name = "Commands during read - no Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_read,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during write - no Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_write,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during read - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_read_cmd23,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during write - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_write_cmd23,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during non-blocking read - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_read_cmd23_nonblock,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Commands during non-blocking write - use Set Block Count (CMD23)",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_cmds_during_write_cmd23_nonblock,
+ .cleanup = mmc_test_area_cleanup,
+ },
+};
+
+static DEFINE_MUTEX(mmc_test_lock);
+
+static LIST_HEAD(mmc_test_result);
+
+static void mmc_test_run(struct mmc_test_card *test, int testcase)
+{
+ int i, ret;
+
+ pr_info("%s: Starting tests of card %s...\n",
+ mmc_hostname(test->card->host), mmc_card_id(test->card));
+
+ mmc_claim_host(test->card->host);
+
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
+ struct mmc_test_general_result *gr;
+
+ if (testcase && ((i + 1) != testcase))
+ continue;
+
+ pr_info("%s: Test case %d. %s...\n",
+ mmc_hostname(test->card->host), i + 1,
+ mmc_test_cases[i].name);
+
+ if (mmc_test_cases[i].prepare) {
+ ret = mmc_test_cases[i].prepare(test);
+ if (ret) {
+ pr_info("%s: Result: Prepare stage failed! (%d)\n",
+ mmc_hostname(test->card->host),
+ ret);
+ continue;
+ }
+ }
+
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
+ if (gr) {
+ INIT_LIST_HEAD(&gr->tr_lst);
+
+			/* Assign the data we already know */
+ gr->card = test->card;
+ gr->testcase = i;
+
+			/* Append container to the global result list */
+ list_add_tail(&gr->link, &mmc_test_result);
+
+			/*
+			 * Save a pointer to the created container in our
+			 * private structure.
+			 */
+ test->gr = gr;
+ }
+
+ ret = mmc_test_cases[i].run(test);
+ switch (ret) {
+ case RESULT_OK:
+ pr_info("%s: Result: OK\n",
+ mmc_hostname(test->card->host));
+ break;
+ case RESULT_FAIL:
+ pr_info("%s: Result: FAILED\n",
+ mmc_hostname(test->card->host));
+ break;
+ case RESULT_UNSUP_HOST:
+ pr_info("%s: Result: UNSUPPORTED (by host)\n",
+ mmc_hostname(test->card->host));
+ break;
+ case RESULT_UNSUP_CARD:
+ pr_info("%s: Result: UNSUPPORTED (by card)\n",
+ mmc_hostname(test->card->host));
+ break;
+ default:
+ pr_info("%s: Result: ERROR (%d)\n",
+ mmc_hostname(test->card->host), ret);
+ }
+
+ /* Save the result */
+ if (gr)
+ gr->result = ret;
+
+ if (mmc_test_cases[i].cleanup) {
+ ret = mmc_test_cases[i].cleanup(test);
+ if (ret) {
+ pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
+ mmc_hostname(test->card->host),
+ ret);
+ }
+ }
+ }
+
+ mmc_release_host(test->card->host);
+
+ pr_info("%s: Tests completed.\n",
+ mmc_hostname(test->card->host));
+}
+
+static void mmc_test_free_result(struct mmc_card *card)
+{
+ struct mmc_test_general_result *gr, *grs;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
+ struct mmc_test_transfer_result *tr, *trs;
+
+ if (card && gr->card != card)
+ continue;
+
+ list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
+ list_del(&tr->link);
+ kfree(tr);
+ }
+
+ list_del(&gr->link);
+ kfree(gr);
+ }
+
+ mutex_unlock(&mmc_test_lock);
+}
+
+static LIST_HEAD(mmc_test_file_test);
+
+static int mtf_test_show(struct seq_file *sf, void *data)
+{
+ struct mmc_card *card = (struct mmc_card *)sf->private;
+ struct mmc_test_general_result *gr;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry(gr, &mmc_test_result, link) {
+ struct mmc_test_transfer_result *tr;
+
+ if (gr->card != card)
+ continue;
+
+ seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
+
+ list_for_each_entry(tr, &gr->tr_lst, link) {
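+			/* transfer count, sectors, elapsed time, rate, IOPS */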
+ seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
+ tr->count, tr->sectors,
+ (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
+ tr->rate, tr->iops / 100, tr->iops % 100);
+ }
+ }
+
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+static int mtf_test_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtf_test_show, inode->i_private);
+}
+
+static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *sf = (struct seq_file *)file->private_data;
+ struct mmc_card *card = (struct mmc_card *)sf->private;
+ struct mmc_test_card *test;
+ long testcase;
+ int ret;
+
+ ret = kstrtol_from_user(buf, count, 10, &testcase);
+ if (ret)
+ return ret;
+
+ test = kzalloc(sizeof(*test), GFP_KERNEL);
+ if (!test)
+ return -ENOMEM;
+
+	/*
+	 * Remove all test results associated with the given card, so that
+	 * only data from the most recent run remains.
+	 */
+ mmc_test_free_result(card);
+
+ test->card = card;
+
+ test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+ test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+ if (test->buffer && test->highmem) {
+#else
+ if (test->buffer) {
+#endif
+ mutex_lock(&mmc_test_lock);
+ mmc_test_run(test, testcase);
+ mutex_unlock(&mmc_test_lock);
+ }
+
+#ifdef CONFIG_HIGHMEM
+ __free_pages(test->highmem, BUFFER_ORDER);
+#endif
+ kfree(test->buffer);
+ kfree(test);
+
+ return count;
+}
+
+static const struct file_operations mmc_test_fops_test = {
+ .open = mtf_test_open,
+ .read = seq_read,
+ .write = mtf_test_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+ int i;
+
+ mutex_lock(&mmc_test_lock);
+
+ seq_puts(sf, "0:\tRun all tests\n");
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+ seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
+
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mtf_testlist);
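+
+/*
+ * Usage sketch (debugfs paths are illustrative; the actual path depends on
+ * the host and card names):
+ *
+ *	cat /sys/kernel/debug/mmc0/mmc0:0001/testlist	# list test cases
+ *	echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/test	# run all tests
+ *	echo N > /sys/kernel/debug/mmc0/mmc0:0001/test	# run test case N
+ *	cat /sys/kernel/debug/mmc0/mmc0:0001/test	# read back the results
+ */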
+
+static void mmc_test_free_dbgfs_file(struct mmc_card *card)
+{
+ struct mmc_test_dbgfs_file *df, *dfs;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
+ if (card && df->card != card)
+ continue;
+ debugfs_remove(df->file);
+ list_del(&df->link);
+ kfree(df);
+ }
+
+ mutex_unlock(&mmc_test_lock);
+}
+
+static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
+ const char *name, umode_t mode, const struct file_operations *fops)
+{
+ struct dentry *file = NULL;
+ struct mmc_test_dbgfs_file *df;
+
+ if (card->debugfs_root)
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
+
+ df = kmalloc(sizeof(*df), GFP_KERNEL);
+ if (!df) {
+ debugfs_remove(file);
+ return -ENOMEM;
+ }
+
+ df->card = card;
+ df->file = file;
+
+ list_add(&df->link, &mmc_test_file_test);
+ return 0;
+}
+
+static int mmc_test_register_dbgfs_file(struct mmc_card *card)
+{
+ int ret;
+
+ mutex_lock(&mmc_test_lock);
+
+ ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+ &mmc_test_fops_test);
+ if (ret)
+ goto err;
+
+ ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+ &mtf_testlist_fops);
+ if (ret)
+ goto err;
+
+err:
+ mutex_unlock(&mmc_test_lock);
+
+ return ret;
+}
+
+static int mmc_test_probe(struct mmc_card *card)
+{
+ int ret;
+
+ if (!mmc_card_mmc(card) && !mmc_card_sd(card))
+ return -ENODEV;
+
+ ret = mmc_test_register_dbgfs_file(card);
+ if (ret)
+ return ret;
+
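+	/*
+	 * The test cases drive the card directly, so disable the command
+	 * queue for the duration of testing; mmc_test_remove() re-enables
+	 * it when card->reenable_cmdq is set.
+	 */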
+ if (card->ext_csd.cmdq_en) {
+ mmc_claim_host(card->host);
+ ret = mmc_cmdq_disable(card);
+ mmc_release_host(card->host);
+ if (ret)
+ return ret;
+ }
+
+ dev_info(&card->dev, "Card claimed for testing.\n");
+
+ return 0;
+}
+
+static void mmc_test_remove(struct mmc_card *card)
+{
+ if (card->reenable_cmdq) {
+ mmc_claim_host(card->host);
+ mmc_cmdq_enable(card);
+ mmc_release_host(card->host);
+ }
+ mmc_test_free_result(card);
+ mmc_test_free_dbgfs_file(card);
+}
+
+static void mmc_test_shutdown(struct mmc_card *card)
+{
+}
+
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "mmc_test",
+ },
+ .probe = mmc_test_probe,
+ .remove = mmc_test_remove,
+ .shutdown = mmc_test_shutdown,
+};
+
+static int __init mmc_test_init(void)
+{
+ return mmc_register_driver(&mmc_driver);
+}
+
+static void __exit mmc_test_exit(void)
+{
+ /* Clear stalled data if card is still plugged */
+ mmc_test_free_result(NULL);
+ mmc_test_free_dbgfs_file(NULL);
+
+ mmc_unregister_driver(&mmc_driver);
+}
+
+module_init(mmc_test_init);
+module_exit(mmc_test_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
+MODULE_AUTHOR("Pierre Ossman");
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
new file mode 100644
index 000000000..ef675f364
--- /dev/null
+++ b/drivers/mmc/core/pwrseq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * MMC power sequence management
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
+
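+/*
+ * Look up the "mmc-pwrseq" phandle of the host's parent OF node and bind
+ * the matching registered provider, taking a reference on its module.
+ * Probing is deferred until a matching provider has been registered.
+ */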
+int mmc_pwrseq_alloc(struct mmc_host *host)
+{
+ struct device_node *np;
+ struct mmc_pwrseq *p;
+
+ np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
+ if (!np)
+ return 0;
+
+ mutex_lock(&pwrseq_list_mutex);
+ list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
+ if (p->dev->of_node == np) {
+ if (!try_module_get(p->owner))
+ dev_err(host->parent,
+ "increasing module refcount failed\n");
+ else
+ host->pwrseq = p;
+
+ break;
+ }
+ }
+
+ of_node_put(np);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ if (!host->pwrseq)
+ return -EPROBE_DEFER;
+
+ dev_info(host->parent, "allocated mmc-pwrseq\n");
+
+ return 0;
+}
+
+void mmc_pwrseq_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops->pre_power_on)
+ pwrseq->ops->pre_power_on(host);
+}
+
+void mmc_pwrseq_post_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops->post_power_on)
+ pwrseq->ops->post_power_on(host);
+}
+
+void mmc_pwrseq_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops->power_off)
+ pwrseq->ops->power_off(host);
+}
+
+void mmc_pwrseq_reset(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops->reset)
+ pwrseq->ops->reset(host);
+}
+
+void mmc_pwrseq_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq) {
+ module_put(pwrseq->owner);
+ host->pwrseq = NULL;
+ }
+}
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+ return -EINVAL;
+
+ mutex_lock(&pwrseq_list_mutex);
+ list_add(&pwrseq->pwrseq_node, &pwrseq_list);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ if (pwrseq) {
+ mutex_lock(&pwrseq_list_mutex);
+ list_del(&pwrseq->pwrseq_node);
+ mutex_unlock(&pwrseq_list_mutex);
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
new file mode 100644
index 000000000..f3bb103db
--- /dev/null
+++ b/drivers/mmc/core/pwrseq.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ */
+#ifndef _MMC_CORE_PWRSEQ_H
+#define _MMC_CORE_PWRSEQ_H
+
+#include <linux/types.h>
+
+struct mmc_host;
+struct device;
+struct module;
+
+struct mmc_pwrseq_ops {
+ void (*pre_power_on)(struct mmc_host *host);
+ void (*post_power_on)(struct mmc_host *host);
+ void (*power_off)(struct mmc_host *host);
+ void (*reset)(struct mmc_host *host);
+};
+
+struct mmc_pwrseq {
+ const struct mmc_pwrseq_ops *ops;
+ struct device *dev;
+ struct list_head pwrseq_node;
+ struct module *owner;
+};
+
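+/*
+ * A minimal provider sketch (hypothetical "foo" driver, for illustration
+ * only), following the pattern used by the pwrseq_emmc and pwrseq_simple
+ * drivers:
+ *
+ *	static const struct mmc_pwrseq_ops foo_pwrseq_ops = {
+ *		.pre_power_on	= foo_pre_power_on,
+ *	};
+ *
+ *	pwrseq->ops = &foo_pwrseq_ops;
+ *	pwrseq->dev = dev;
+ *	pwrseq->owner = THIS_MODULE;
+ *	return mmc_pwrseq_register(pwrseq);
+ */
+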
+#ifdef CONFIG_OF
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
+int mmc_pwrseq_alloc(struct mmc_host *host);
+void mmc_pwrseq_pre_power_on(struct mmc_host *host);
+void mmc_pwrseq_post_power_on(struct mmc_host *host);
+void mmc_pwrseq_power_off(struct mmc_host *host);
+void mmc_pwrseq_reset(struct mmc_host *host);
+void mmc_pwrseq_free(struct mmc_host *host);
+
+#else
+
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+static inline void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq) {}
+static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
+static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
+static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
+static inline void mmc_pwrseq_power_off(struct mmc_host *host) {}
+static inline void mmc_pwrseq_reset(struct mmc_host *host) {}
+static inline void mmc_pwrseq_free(struct mmc_host *host) {}
+
+#endif
+
+#endif
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
new file mode 100644
index 000000000..f6dde9edd
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Simple eMMC hardware reset provider
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/reboot.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_emmc {
+ struct mmc_pwrseq pwrseq;
+ struct notifier_block reset_nb;
+ struct gpio_desc *reset_gpio;
+};
+
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
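+/*
+ * Toggle the RST_n line: assert reset for ~1 us, then allow ~200 us for
+ * the card to settle, in line with the eMMC RST_n timing (tRSTW/tRSCA).
+ */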
+static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
+{
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+ udelay(200);
+}
+
+static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(this,
+ struct mmc_pwrseq_emmc, reset_nb);
+ gpiod_set_value(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value(pwrseq->reset_gpio, 0);
+ udelay(200);
+
+ return NOTIFY_DONE;
+}
+
+static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .reset = mmc_pwrseq_emmc_reset,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
+
+ if (!gpiod_cansleep(pwrseq->reset_gpio)) {
+		/*
+		 * Register a restart handler to ensure the eMMC is also reset
+		 * from emergency_reboot(). Priority 255 is the highest, so it
+		 * runs before any other system reboot handler.
+		 */
+ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+ pwrseq->reset_nb.priority = 255;
+ register_restart_handler(&pwrseq->reset_nb);
+ } else {
+ dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
+ }
+
+ pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&pwrseq->reset_nb);
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+ { .compatible = "mmc-pwrseq-emmc",},
+ {/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+ .probe = mmc_pwrseq_emmc_probe,
+ .remove = mmc_pwrseq_emmc_remove,
+ .driver = {
+ .name = "pwrseq_emmc",
+ .of_match_table = mmc_pwrseq_emmc_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
new file mode 100644
index 000000000..68a826f1c
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pwrseq_sd8787.c - power sequence support for Marvell SD8787 BT + Wifi chip
+ *
+ * Copyright (C) 2016 Matt Ranostay <matt@ranostay.consulting>
+ *
+ * Based on the original work pwrseq_simple.c
+ * Copyright (C) 2014 Linaro Ltd
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_sd8787 {
+ struct mmc_pwrseq pwrseq;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *pwrdn_gpio;
+};
+
+#define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
+
+static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+
+ msleep(300);
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+}
+
+static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+}
+
+static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
+ .pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
+ .power_off = mmc_pwrseq_sd8787_power_off,
+};
+
+static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
+ { .compatible = "mmc-pwrseq-sd8787",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
+
+static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->pwrdn_gpio))
+ return PTR_ERR(pwrseq->pwrdn_gpio);
+
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
+
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_sd8787_driver = {
+ .probe = mmc_pwrseq_sd8787_probe,
+ .remove = mmc_pwrseq_sd8787_remove,
+ .driver = {
+ .name = "pwrseq_sd8787",
+ .of_match_table = mmc_pwrseq_sd8787_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_sd8787_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
new file mode 100644
index 000000000..ea4d36705
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * Simple MMC power sequence management
+ */
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/property.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_simple {
+ struct mmc_pwrseq pwrseq;
+ bool clk_enabled;
+ u32 post_power_on_delay_ms;
+ u32 power_off_delay_us;
+ struct clk *ext_clk;
+ struct gpio_descs *reset_gpios;
+};
+
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
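+/* Drive all GPIOs in the "reset" array to the same logical value. */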
+static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
+ int value)
+{
+ struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
+
+ if (!IS_ERR(reset_gpios)) {
+ unsigned long *values;
+ int nvalues = reset_gpios->ndescs;
+
+ values = bitmap_alloc(nvalues, GFP_KERNEL);
+ if (!values)
+ return;
+
+ if (value)
+ bitmap_fill(values, nvalues);
+ else
+ bitmap_zero(values, nvalues);
+
+ gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
+ reset_gpios->info, values);
+
+ kfree(values);
+ }
+}
+
+static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
+
+ if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
+ clk_prepare_enable(pwrseq->ext_clk);
+ pwrseq->clk_enabled = true;
+ }
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+}
+
+static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
+
+ if (pwrseq->post_power_on_delay_ms)
+ msleep(pwrseq->post_power_on_delay_ms);
+}
+
+static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+
+ if (pwrseq->power_off_delay_us)
+ usleep_range(pwrseq->power_off_delay_us,
+ 2 * pwrseq->power_off_delay_us);
+
+ if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
+ clk_disable_unprepare(pwrseq->ext_clk);
+ pwrseq->clk_enabled = false;
+ }
+}
+
+static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
+ .pre_power_on = mmc_pwrseq_simple_pre_power_on,
+ .post_power_on = mmc_pwrseq_simple_post_power_on,
+ .power_off = mmc_pwrseq_simple_power_off,
+};
+
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+ { .compatible = "mmc-pwrseq-simple",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
+ if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
+ return PTR_ERR(pwrseq->ext_clk);
+
+ pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pwrseq->reset_gpios) &&
+ PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
+ PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
+ return PTR_ERR(pwrseq->reset_gpios);
+ }
+
+ device_property_read_u32(dev, "post-power-on-delay-ms",
+ &pwrseq->post_power_on_delay_ms);
+ device_property_read_u32(dev, "power-off-delay-us",
+ &pwrseq->power_off_delay_us);
+
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+ .probe = mmc_pwrseq_simple_probe,
+ .remove = mmc_pwrseq_simple_remove,
+ .driver = {
+ .name = "pwrseq_simple",
+ .of_match_table = mmc_pwrseq_simple_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
new file mode 100644
index 000000000..002426e3c
--- /dev/null
+++ b/drivers/mmc/core/queue.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2006-2007 Pierre Ossman
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "queue.h"
+#include "block.h"
+#include "core.h"
+#include "card.h"
+#include "host.h"
+
+#define MMC_DMA_MAP_MERGE_SEGMENTS 512
+
+static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
+{
+ /* Allow only 1 DCMD at a time */
+ return mq->in_flight[MMC_ISSUE_DCMD];
+}
+
+void mmc_cqe_check_busy(struct mmc_queue *mq)
+{
+ if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
+ mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
+
+ mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
+}
+
+static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_CQE_DCMD;
+}
+
+static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+ struct request *req)
+{
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ return MMC_ISSUE_SYNC;
+ case REQ_OP_FLUSH:
+ return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+ default:
+ return MMC_ISSUE_ASYNC;
+ }
+}
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
+
+ if (mq->use_cqe && !host->hsq_enabled)
+ return mmc_cqe_issue_type(host, req);
+
+ if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
+ return MMC_ISSUE_ASYNC;
+
+ return MMC_ISSUE_SYNC;
+}
+
+static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
+{
+ if (!mq->recovery_needed) {
+ mq->recovery_needed = true;
+ schedule_work(&mq->recovery_work);
+ }
+}
+
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ __mmc_cqe_recovery_notifier(mq);
+ spin_unlock_irqrestore(&mq->lock, flags);
+}
+
+static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool recovery_needed = false;
+
+ switch (issue_type) {
+ case MMC_ISSUE_ASYNC:
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+ mmc_cqe_recovery_notifier(mrq);
+ return BLK_EH_RESET_TIMER;
+ }
+ /* The request has gone already */
+ return BLK_EH_DONE;
+ default:
+ /* Timeout is handled by mmc core */
+ return BLK_EH_RESET_TIMER;
+ }
+}
+
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ bool reserved)
+{
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ unsigned long flags;
+ bool ignore_tout;
+
+ spin_lock_irqsave(&mq->lock, flags);
+ ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
+}
+
+static void mmc_mq_recovery_handler(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ recovery_work);
+ struct request_queue *q = mq->queue;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_get_card(mq->card, &mq->ctx);
+
+ mq->in_recovery = true;
+
+ if (mq->use_cqe && !host->hsq_enabled)
+ mmc_blk_cqe_recovery(mq);
+ else
+ mmc_blk_mq_recovery(mq);
+
+ mq->in_recovery = false;
+
+ spin_lock_irq(&mq->lock);
+ mq->recovery_needed = false;
+ spin_unlock_irq(&mq->lock);
+
+ if (host->hsq_enabled)
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_put_card(mq->card, &mq->ctx);
+
+ blk_mq_run_hw_queues(q, true);
+}
+
+static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
+ if (sg)
+ sg_init_table(sg, sg_len);
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ blk_queue_max_discard_sectors(q, max_discard);
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = SECTOR_SIZE;
+ if (mmc_can_secure_erase_trim(card))
+ blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+}
+
+static unsigned int mmc_get_max_segments(struct mmc_host *host)
+{
+ return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
+ host->max_segs;
+}
+
+/**
+ * __mmc_init_request() - initialize the MMC-specific per-request data
+ * @mq: the mmc queue
+ * @req: the request
+ * @gfp: memory allocation policy
+ */
+static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
+ gfp_t gfp)
+{
+ struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+
+ mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
+ if (!mq_rq->sg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mmc_exit_request(struct request_queue *q, struct request *req)
+{
+ struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+
+ kfree(mq_rq->sg);
+ mq_rq->sg = NULL;
+}
+
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
+}
+
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct mmc_queue *mq = set->driver_data;
+
+ mmc_exit_request(mq->queue, req);
+}
+
+static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ enum mmc_issue_type issue_type;
+ enum mmc_issued issued;
+ bool get_card, cqe_retune_ok;
+ int ret;
+
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+ }
+
+ issue_type = mmc_issue_type(mq, req);
+
+ spin_lock_irq(&mq->lock);
+
+ if (mq->recovery_needed || mq->busy) {
+ spin_unlock_irq(&mq->lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ switch (issue_type) {
+ case MMC_ISSUE_DCMD:
+ if (mmc_cqe_dcmd_busy(mq)) {
+ mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
+ spin_unlock_irq(&mq->lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ case MMC_ISSUE_ASYNC:
+		/*
+		 * For the MMC host software queue, we only allow 2 requests
+		 * in flight, to avoid long latencies.
+		 */
+ if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
+ spin_unlock_irq(&mq->lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ default:
+ /*
+ * Timeouts are handled by mmc core, and we don't have a host
+ * API to abort requests, so we can't handle the timeout anyway.
+ * However, when the timeout happens, blk_mq_complete_request()
+ * no longer works (to stop the request disappearing under us).
+ * To avoid racing with that, set a large timeout.
+ */
+ req->timeout = 600 * HZ;
+ break;
+ }
+
+ /* Parallel dispatch of requests is not supported at the moment */
+ mq->busy = true;
+
+ mq->in_flight[issue_type] += 1;
+ get_card = (mmc_tot_in_flight(mq) == 1);
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
+
+ spin_unlock_irq(&mq->lock);
+
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ req_to_mmc_queue_req(req)->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
+ if (get_card)
+ mmc_get_card(card, &mq->ctx);
+
+ if (mq->use_cqe) {
+ host->retune_now = host->need_retune && cqe_retune_ok &&
+ !host->hold_retune;
+ }
+
+ blk_mq_start_request(req);
+
+ issued = mmc_blk_mq_issue_rq(mq, req);
+
+ switch (issued) {
+ case MMC_REQ_BUSY:
+ ret = BLK_STS_RESOURCE;
+ break;
+ case MMC_REQ_FAILED_TO_START:
+ ret = BLK_STS_IOERR;
+ break;
+ default:
+ ret = BLK_STS_OK;
+ break;
+ }
+
+ if (issued != MMC_REQ_STARTED) {
+ bool put_card = false;
+
+ spin_lock_irq(&mq->lock);
+ mq->in_flight[issue_type] -= 1;
+ if (mmc_tot_in_flight(mq) == 0)
+ put_card = true;
+ mq->busy = false;
+ spin_unlock_irq(&mq->lock);
+ if (put_card)
+ mmc_put_card(card, &mq->ctx);
+ } else {
+ WRITE_ONCE(mq->busy, false);
+ }
+
+ return ret;
+}
+
+static const struct blk_mq_ops mmc_mq_ops = {
+ .queue_rq = mmc_mq_queue_rq,
+ .init_request = mmc_mq_init_request,
+ .exit_request = mmc_mq_exit_request,
+ .complete = mmc_blk_mq_complete,
+ .timeout = mmc_mq_timed_out,
+};
+
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned block_size = 512;
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ if (host->can_dma_map_merge)
+ WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
+ mmc_dev(host)),
+ "merging was advertised but not possible");
+ blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
+
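+	/* eMMC may report a native data sector size of 512 or 4096 bytes */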
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
+ block_size = card->ext_csd.data_sector_size;
+ WARN_ON(block_size != 512 && block_size != 4096);
+ }
+
+ blk_queue_logical_block_size(mq->queue, block_size);
+	/*
+	 * When blk_queue_can_use_dma_map_merging() succeeds, it calls
+	 * blk_queue_virt_boundary(), in which case blk_queue_max_segment_size()
+	 * must not be called as well.
+	 */
+ if (!host->can_dma_map_merge)
+ blk_queue_max_segment_size(mq->queue,
+ round_down(host->max_seg_size, block_size));
+
+ dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
+
+ INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
+ INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
+
+ mutex_init(&mq->complete_lock);
+
+ init_waitqueue_head(&mq->wait);
+}
+
+static inline bool mmc_merge_capable(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
+}
+
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue to
+ *
+ * Initialise an MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int ret;
+
+ mq->card = card;
+ mq->use_cqe = host->cqe_enabled;
+
+ spin_lock_init(&mq->lock);
+
+ memset(&mq->tag_set, 0, sizeof(mq->tag_set));
+ mq->tag_set.ops = &mmc_mq_ops;
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
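+	 * For example (illustrative numbers): a card cmdq_depth of 32 with a
+	 * host cqe_qdepth of 16 yields a queue depth of 16.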
+ */
+ if (mq->use_cqe && !host->hsq_enabled)
+ mq->tag_set.queue_depth =
+ min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
+ mq->tag_set.numa_node = NUMA_NO_NODE;
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ mq->tag_set.nr_hw_queues = 1;
+ mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
+ mq->tag_set.driver_data = mq;
+
+	/*
+	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, so
+	 * host->can_dma_map_merge must be set beforehand for
+	 * mmc_get_max_segments() to return the correct number of segments.
+	 */
+ if (mmc_merge_capable(host) &&
+ host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+ dma_get_merge_boundary(mmc_dev(host)))
+ host->can_dma_map_merge = 1;
+ else
+ host->can_dma_map_merge = 0;
+
+ ret = blk_mq_alloc_tag_set(&mq->tag_set);
+ if (ret)
+ return ret;
+
+ mq->queue = blk_mq_init_queue(&mq->tag_set);
+ if (IS_ERR(mq->queue)) {
+ ret = PTR_ERR(mq->queue);
+ goto free_tag_set;
+ }
+
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
+
+ mq->queue->queuedata = mq;
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+ mmc_setup_queue(mq, card);
+ return 0;
+
+free_tag_set:
+ blk_mq_free_tag_set(&mq->tag_set);
+ return ret;
+}
+
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ blk_mq_quiesce_queue(mq->queue);
+
+ /*
+ * The host remains claimed while there are outstanding requests, so
+ * simply claiming and releasing here ensures there are none.
+ */
+ mmc_claim_host(mq->card->host);
+ mmc_release_host(mq->card->host);
+}
+
+void mmc_queue_resume(struct mmc_queue *mq)
+{
+ blk_mq_unquiesce_queue(mq->queue);
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+
+ /*
+ * The legacy code handled the possibility of being suspended,
+ * so do that here too.
+ */
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
+
+ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&mq->tag_set);
+
+ /*
+ * A request can be completed before the next request, potentially
+ * leaving a complete_work with nothing to do. Such a work item might
+ * still be queued at this point. Flush it.
+ */
+ flush_work(&mq->complete_work);
+
+ mq->card = NULL;
+}
+
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
+{
+ struct request *req = mmc_queue_req_to_req(mqrq);
+
+ return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+}
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
new file mode 100644
index 000000000..fd11491ce
--- /dev/null
+++ b/drivers/mmc/core/queue.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MMC_QUEUE_H
+#define MMC_QUEUE_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+
+enum mmc_issued {
+ MMC_REQ_STARTED,
+ MMC_REQ_BUSY,
+ MMC_REQ_FAILED_TO_START,
+ MMC_REQ_FINISHED,
+};
+
+enum mmc_issue_type {
+ MMC_ISSUE_SYNC,
+ MMC_ISSUE_DCMD,
+ MMC_ISSUE_ASYNC,
+ MMC_ISSUE_MAX,
+};
+
+static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+struct mmc_queue_req;
+
+static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
+{
+ return blk_mq_rq_from_pdu(mqr);
+}
+
+struct mmc_blk_data;
+struct mmc_blk_ioc_data;
+
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+/**
+ * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
+ * @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
+ * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
+ * @MMC_DRV_OP_GET_CARD_STATUS: get card status
+ * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
+ */
+enum mmc_drv_op {
+ MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_IOCTL_RPMB,
+ MMC_DRV_OP_BOOT_WP,
+ MMC_DRV_OP_GET_CARD_STATUS,
+ MMC_DRV_OP_GET_EXT_CSD,
+};
+
+struct mmc_queue_req {
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ enum mmc_drv_op drv_op;
+ int drv_op_result;
+ void *drv_op_data;
+ unsigned int ioc_count;
+ int retries;
+};
+
+struct mmc_queue {
+ struct mmc_card *card;
+ struct mmc_ctx ctx;
+ struct blk_mq_tag_set tag_set;
+ struct mmc_blk_data *blkdata;
+ struct request_queue *queue;
+ spinlock_t lock;
+ int in_flight[MMC_ISSUE_MAX];
+ unsigned int cqe_busy;
+#define MMC_CQE_DCMD_BUSY BIT(0)
+#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool busy;
+ bool use_cqe;
+ bool recovery_needed;
+ bool in_recovery;
+ bool rw_wait;
+ bool waiting;
+ struct work_struct recovery_work;
+ wait_queue_head_t wait;
+ struct request *recovery_req;
+ struct request *complete_req;
+ struct mutex complete_lock;
+ struct work_struct complete_work;
+};
+
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *);
+extern void mmc_cleanup_queue(struct mmc_queue *);
+extern void mmc_queue_suspend(struct mmc_queue *);
+extern void mmc_queue_resume(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
+
+static inline int mmc_tot_in_flight(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_SYNC] +
+ mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+#endif
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
new file mode 100644
index 000000000..afe8d8c5f
--- /dev/null
+++ b/drivers/mmc/core/quirks.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains work-arounds for many known SD/MMC
+ * and SDIO hardware bugs.
+ *
+ * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/mmc/sdio_ids.h>
+
+#include "card.h"
+
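+/*
+ * Each MMC_FIXUP() entry matches cards by CID product name, manufacturer ID
+ * and OEM ID (the CID_*_ANY values act as wildcards), and applies the given
+ * hook, e.g. add_quirk_mmc(), to set the quirk flag on matching cards.
+ */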
+static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+ /* CMD38 argument is passed through EXT_CSD[113] */
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * blacklist what's bad...
+	 * - Certain SanDisk and Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some SD cards lockup while using CMD23 multiblock transfers.
+ */
+ MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some MMC cards need longer data read timeout than indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+ /*
+	 * On some Kingston eMMCs, performing trim can occasionally result in
+	 * unrecoverable data corruption due to a firmware bug.
+ */
+ MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+ MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+	 * Kingston EMMC04G-M627 advertises TRIM, but it does not seem to
+	 * support using it to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+	 * Micron MTFC4GACAJCN-1M advertises TRIM, but it does not seem to
+	 * support using it to offload WRITE_ZEROES.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+	 * Some SD cards report discard support although they don't actually
+	 * support it.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
+ END_FIXUP
+};
+
+static const struct mmc_fixup __maybe_unused mmc_ext_csd_fixups[] = {
+ /*
+ * Certain Hynix eMMC 4.41 cards might get broken when HPI feature
+ * is used so disable the HPI feature for such buggy cards.
+ */
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
+ 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+ /*
+ * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI
+ * feature is used so disable the HPI feature for such buggy cards.
+ */
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
+ 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
+
+ END_FIXUP
+};
+
+static const struct mmc_fixup __maybe_unused sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
+ add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+ add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_F0,
+ add_limit_rate_quirk, 150000000),
+
+ END_FIXUP
+};
+
+static inline void mmc_fixup_device(struct mmc_card *card,
+ const struct mmc_fixup *table)
+{
+ const struct mmc_fixup *f;
+ u64 rev = cid_rev_card(card);
+
+ for (f = table; f->vendor_fixup; f++) {
+ if ((f->manfid == CID_MANFID_ANY ||
+ f->manfid == card->cid.manfid) &&
+ (f->oemid == CID_OEMID_ANY ||
+ f->oemid == card->cid.oemid) &&
+ (f->name == CID_NAME_ANY ||
+ !strncmp(f->name, card->cid.prod_name,
+ sizeof(card->cid.prod_name))) &&
+ (f->cis_vendor == card->cis.vendor ||
+ f->cis_vendor == (u16) SDIO_ANY_ID) &&
+ (f->cis_device == card->cis.device ||
+ f->cis_device == (u16) SDIO_ANY_ID) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
+ rev >= f->rev_start && rev <= f->rev_end) {
+ dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
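
For orientation, a new quirk is wired up in two steps: add an entry to the appropriate table above, then make sure the detection path passes that table to mmc_fixup_device() once the card has been identified (block.c does this with mmc_blk_fixups at probe time). A minimal sketch; the card name "XYZ42" and the chosen quirk are hypothetical:

static const struct mmc_fixup __maybe_unused example_fixups[] = {
	/* Hypothetical eMMC part that misbehaves with CMD23 */
	MMC_FIXUP("XYZ42", CID_MANFID_ANY, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	END_FIXUP
};

/* Run after the CID/CSD/EXT_CSD of @card have been decoded */
static void example_apply_quirks(struct mmc_card *card)
{
	mmc_fixup_device(card, example_fixups);
}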
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
new file mode 100644
index 000000000..4dcbc2281
--- /dev/null
+++ b/drivers/mmc/core/regulator.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions for MMC regulators.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "host.h"
+
+#ifdef CONFIG_REGULATOR
+
+/**
+ * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage range
+ * @vdd_bit: OCR bit number
+ * @min_uV: minimum voltage value (uV)
+ * @max_uV: maximum voltage value (uV)
+ *
+ * This function returns the voltage range corresponding to the provided OCR
+ * bit number. If conversion is not possible, a negative errno value is
+ * returned.
+ */
+static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
+{
+ int tmp;
+
+ if (!vdd_bit)
+ return -EINVAL;
+
+ /*
+ * REVISIT mmc_vddrange_to_ocrmask() may have set some
+ * bits this regulator doesn't quite support ... don't
+ * be too picky, most cards and regulators are OK with
+ * a 0.1V range goof (it's a small error percentage).
+ */
+ tmp = vdd_bit - ilog2(MMC_VDD_165_195);
+ if (tmp == 0) {
+ *min_uV = 1650 * 1000;
+ *max_uV = 1950 * 1000;
+ } else {
+ *min_uV = 1900 * 1000 + tmp * 100 * 1000;
+ *max_uV = *min_uV + 100 * 1000;
+ }
+
+ return 0;
+}
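
To sanity-check the arithmetic: OCR bit 7 (MMC_VDD_165_195) maps to the 1.65-1.95 V window, and each higher bit n maps to the 100 mV window starting at 1.9 V + (n - 7) * 0.1 V, so bit 20 (MMC_VDD_32_33) yields 3.2-3.3 V. A standalone userspace sketch of the same mapping, with a hypothetical helper name:

#include <assert.h>

/* Re-implements the mapping above; OCR bit numbering follows
 * <linux/mmc/host.h>. */
static void ocrbit_to_range(int bit, int *min_uV, int *max_uV)
{
	if (bit == 7) {			/* MMC_VDD_165_195 */
		*min_uV = 1650000;
		*max_uV = 1950000;
	} else {
		*min_uV = 1900000 + (bit - 7) * 100000;
		*max_uV = *min_uV + 100000;
	}
}

int main(void)
{
	int lo, hi;

	ocrbit_to_range(20, &lo, &hi);	/* MMC_VDD_32_33 */
	assert(lo == 3200000 && hi == 3300000);
	return 0;
}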
+
+/**
+ * mmc_regulator_get_ocrmask - return mask of supported voltages
+ * @supply: regulator to use
+ *
+ * This returns either a negative errno, or a mask of voltages that
+ * can be provided to MMC/SD/SDIO devices using the specified voltage
+ * regulator. This would normally be called before registering the
+ * MMC host adapter.
+ */
+static int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ int result = 0;
+ int count;
+ int i;
+ int vdd_uV;
+ int vdd_mV;
+
+ count = regulator_count_voltages(supply);
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ vdd_uV = regulator_list_voltage(supply, i);
+ if (vdd_uV <= 0)
+ continue;
+
+ vdd_mV = vdd_uV / 1000;
+ result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ if (!result) {
+ vdd_uV = regulator_get_voltage(supply);
+ if (vdd_uV <= 0)
+ return vdd_uV;
+
+ vdd_mV = vdd_uV / 1000;
+ result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ return result;
+}
+
+/**
+ * mmc_regulator_set_ocr - set regulator to match host->ios voltage
+ * @mmc: the host to regulate
+ * @supply: regulator to use
+ * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * MMC host drivers may use this to enable or disable a regulator using
+ * a particular supply voltage. This would normally be called from the
+ * set_ios() method.
+ */
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ int result = 0;
+ int min_uV, max_uV;
+
+ if (vdd_bit) {
+ mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
+
+ result = regulator_set_voltage(supply, min_uV, max_uV);
+ if (result == 0 && !mmc->regulator_enabled) {
+ result = regulator_enable(supply);
+ if (!result)
+ mmc->regulator_enabled = true;
+ }
+ } else if (mmc->regulator_enabled) {
+ result = regulator_disable(supply);
+ if (result == 0)
+ mmc->regulator_enabled = false;
+ }
+
+ if (result)
+ dev_err(mmc_dev(mmc),
+ "could not set regulator OCR (%d)\n", result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
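
The expected caller is a host driver's ->set_ios() hook. A hypothetical sketch of the vmmc handling, assuming the driver has already populated mmc->supply via mmc_regulator_get_supply() (see below):

static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	if (IS_ERR(mmc->supply.vmmc))
		return;

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* Non-zero vdd bit: set the voltage and enable the supply */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	case MMC_POWER_OFF:
		/* Zero vdd bit: disable the supply if it was enabled */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;
	default:
		break;
	}
}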
+
+static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ int current_uV;
+
+ /*
+ * Check if supported first to avoid errors since we may try several
+ * signal levels during power up and don't want to show errors.
+ */
+ if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
+ return -EINVAL;
+
+ /*
+ * The voltage is already set, no need to switch.
+ * Return 1 to indicate that no switch happened.
+ */
+ current_uV = regulator_get_voltage(regulator);
+ if (current_uV == target_uV)
+ return 1;
+
+ return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
+ max_uV);
+}
+
+/**
+ * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
+ * @mmc: the host to regulate
+ * @ios: io bus settings
+ *
+ * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
+ * That will match the behavior of old boards where VQMMC and VMMC were supplied
+ * by the same supply. The Bus Operating conditions for 3.3V signaling in the
+ * SD card spec also define VQMMC in terms of VMMC.
+ * If this is not possible we'll try the full 2.7-3.6V of the spec.
+ *
+ * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
+ * requested voltage. This is definitely a good idea for UHS where there's a
+ * separate regulator on the card that's trying to make 1.8V and it's best if
+ * we match.
+ *
+ * This function is expected to be used by a controller's
+ * start_signal_voltage_switch() function.
+ */
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret, volt, min_uV, max_uV;
+
+ /* If no vqmmc supply then we can't change the voltage */
+ if (IS_ERR(mmc->supply.vqmmc))
+ return -EINVAL;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_120:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1100000, 1200000, 1300000);
+ case MMC_SIGNAL_VOLTAGE_180:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1700000, 1800000, 1950000);
+ case MMC_SIGNAL_VOLTAGE_330:
+ ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
+ __func__, volt, max_uV);
+
+ min_uV = max(volt - 300000, 2700000);
+ max_uV = min(max_uV + 200000, 3600000);
+
+ /*
+ * Due to a limitation in the current implementation of
+ * regulator_set_voltage_triplet() which is taking the lowest
+ * voltage possible if below the target, search for a suitable
+ * voltage in two steps and try to stay close to vmmc
+ * with a 0.3V tolerance at first.
+ */
+ ret = mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ min_uV, volt, max_uV);
+ if (ret >= 0)
+ return ret;
+
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 2700000, volt, 3600000);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
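
Note the tri-state return inherited from mmc_regulator_set_voltage_if_supported(): a negative errno on failure, 0 on a successful switch, and 1 when the regulator was already at the target voltage. A hypothetical ->start_signal_voltage_switch() sketch that treats 1 as success:

static int example_start_signal_voltage_switch(struct mmc_host *mmc,
					       struct mmc_ios *ios)
{
	int ret;

	ret = mmc_regulator_set_vqmmc(mmc, ios);
	return ret < 0 ? ret : 0;
}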
+
+#else
+
+static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ return 0;
+}
+
+#endif /* CONFIG_REGULATOR */
+
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or a negative errno. The errno should be handled: it is either a
+ * critical error or -EPROBE_DEFER. 0 means no critical error, but it does not
+ * mean all regulators have been found, because they are all optional. If you
+ * require certain regulators, you need to check separately in your driver
+ * whether they were populated after calling this function.
+ */
+int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret;
+
+ mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
+ mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
+
+ if (IS_ERR(mmc->supply.vmmc)) {
+ if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vmmc regulator found\n");
+ } else {
+ ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
+ }
+
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vqmmc regulator found\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
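
A sketch of the usual probe-time pattern: propagate any non-zero return (in this implementation that is only -EPROBE_DEFER), then check the individual, optional supplies the driver actually depends on:

static int example_probe_regulators(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;	/* typically -EPROBE_DEFER */

	/* Both supplies are optional; check the ones you rely on. */
	if (IS_ERR(mmc->supply.vqmmc))
		dev_dbg(mmc_dev(mmc),
			"no vqmmc, signal voltage switching unavailable\n");

	return 0;
}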
+
+/**
+ * mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. Enables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
+{
+ int ret = 0;
+
+ if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
+ else
+ mmc->vqmmc_enabled = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
+
+/**
+ * mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Disables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
+{
+ if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ mmc->vqmmc_enabled = false;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
new file mode 100644
index 000000000..868b121ce
--- /dev/null
+++ b/drivers/mmc/core/sd.c
@@ -0,0 +1,1400 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/mmc/core/sd.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
+ * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "sd.h"
+#include "sd_ops.h"
+
+static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+};
+
+static const unsigned char tran_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+static const unsigned int taac_exp[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
+};
+
+static const unsigned int taac_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+static const unsigned int sd_au_size[] = {
+ 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
+ SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
+ SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
+ SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
+};
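
As a worked example of how these tables are used below: the default-speed TRAN_SPEED byte 0x32 splits into a rate-unit field e = 2 and a mantissa index m = 6, so max_dtr = tran_exp[2] * tran_mant[6] = 1,000,000 * 25 = 25,000,000 (25 MHz); the High Speed value 0x5A decodes the same way to 50 MHz. Similarly, an AU_SIZE code of 5 in the SD Status register selects sd_au_size[5] = SZ_256K / 512 = 512 sectors.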
+
+#define UNSTUFF_BITS(resp,start,size) \
+ ({ \
+ const int __size = size; \
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
+ const int __off = 3 - ((start) / 32); \
+ const int __shft = (start) & 31; \
+ u32 __res; \
+ \
+ __res = resp[__off] >> __shft; \
+ if (__size + __shft > 32) \
+ __res |= resp[__off-1] << ((32 - __shft) % 32); \
+ __res & __mask; \
+ })
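
The indexing here is easy to get wrong: the 128-bit response is stored as four u32s with resp[0] holding bits [127:96] and resp[3] holding bits [31:0], hence the "3 - (start) / 32" offset. A small self-check sketch, assuming the macro above is in scope (the CID values are illustrative; 0x1b and "SM" happen to be Samsung's IDs):

static void unstuff_bits_selftest(void)
{
	u32 resp[4] = { 0x1b534d00, 0, 0, 0 };

	/* MANFID is CID[127:120], the top byte of resp[0] */
	WARN_ON(UNSTUFF_BITS(resp, 120, 8) != 0x1b);
	/* OEMID is CID[119:104] */
	WARN_ON(UNSTUFF_BITS(resp, 104, 16) != 0x534d);
}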
+
+/*
+ * Given the decoded CSD structure, decode the raw CID to our CID structure.
+ */
+void mmc_decode_cid(struct mmc_card *card)
+{
+ u32 *resp = card->raw_cid;
+
+ /*
+ * SD doesn't currently have a version field so we will
+ * have to assume we can parse this.
+ */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
+ card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
+ card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
+ card->cid.year = UNSTUFF_BITS(resp, 12, 8);
+ card->cid.month = UNSTUFF_BITS(resp, 8, 4);
+
+ card->cid.year += 2000; /* SD cards year offset */
+}
+
+/*
+ * Given a 128-bit response, decode to our card CSD structure.
+ */
+static int mmc_decode_csd(struct mmc_card *card)
+{
+ struct mmc_csd *csd = &card->csd;
+ unsigned int e, m, csd_struct;
+ u32 *resp = card->raw_csd;
+
+ csd_struct = UNSTUFF_BITS(resp, 126, 2);
+
+ switch (csd_struct) {
+ case 0:
+ m = UNSTUFF_BITS(resp, 115, 4);
+ e = UNSTUFF_BITS(resp, 112, 3);
+ csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
+ csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
+ e = UNSTUFF_BITS(resp, 47, 3);
+ m = UNSTUFF_BITS(resp, 62, 12);
+ csd->capacity = (1 + m) << (e + 2);
+
+ csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+ csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+ csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+ csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
+ csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
+ csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+ csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+
+ if (UNSTUFF_BITS(resp, 46, 1)) {
+ csd->erase_size = 1;
+ } else if (csd->write_blkbits >= 9) {
+ csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ csd->erase_size <<= csd->write_blkbits - 9;
+ }
+
+ if (UNSTUFF_BITS(resp, 13, 1))
+ mmc_card_set_readonly(card);
+ break;
+ case 1:
+ /*
+ * This is a block-addressed SDHC or SDXC card. Most
+ * interesting fields are unused and have fixed
+ * values. To avoid getting tripped by buggy cards,
+ * we assume those fixed values ourselves.
+ */
+ mmc_card_set_blockaddr(card);
+
+ csd->taac_ns = 0; /* Unused */
+ csd->taac_clks = 0; /* Unused */
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+ csd->c_size = UNSTUFF_BITS(resp, 48, 22);
+
+ /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
+ if (csd->c_size >= 0xFFFF)
+ mmc_card_set_ext_capacity(card);
+
+ m = UNSTUFF_BITS(resp, 48, 22);
+ csd->capacity = (1 + m) << 10;
+
+ csd->read_blkbits = 9;
+ csd->read_partial = 0;
+ csd->write_misalign = 0;
+ csd->read_misalign = 0;
+ csd->r2w_factor = 4; /* Unused */
+ csd->write_blkbits = 9;
+ csd->write_partial = 0;
+ csd->erase_size = 1;
+
+ if (UNSTUFF_BITS(resp, 13, 1))
+ mmc_card_set_readonly(card);
+ break;
+ default:
+ pr_err("%s: unrecognised CSD structure version %d\n",
+ mmc_hostname(card->host), csd_struct);
+ return -EINVAL;
+ }
+
+ card->erase_size = csd->erase_size;
+
+ return 0;
+}
+
+/*
+ * Given a 64-bit response, decode to our card SCR structure.
+ */
+static int mmc_decode_scr(struct mmc_card *card)
+{
+ struct sd_scr *scr = &card->scr;
+ unsigned int scr_struct;
+ u32 resp[4];
+
+ resp[3] = card->raw_scr[1];
+ resp[2] = card->raw_scr[0];
+
+ scr_struct = UNSTUFF_BITS(resp, 60, 4);
+ if (scr_struct != 0) {
+ pr_err("%s: unrecognised SCR structure version %d\n",
+ mmc_hostname(card->host), scr_struct);
+ return -EINVAL;
+ }
+
+ scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
+ scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
+ if (scr->sda_vsn == SCR_SPEC_VER_2)
+ /* Check if Physical Layer Spec v3.0 is supported */
+ scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
+
+ if (scr->sda_spec3) {
+ scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1);
+ scr->sda_specx = UNSTUFF_BITS(resp, 38, 4);
+ }
+
+ if (UNSTUFF_BITS(resp, 55, 1))
+ card->erased_byte = 0xFF;
+ else
+ card->erased_byte = 0x0;
+
+ if (scr->sda_spec3)
+ scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+
+ /* SD Spec says: any SD Card shall set at least bits 0 and 2 */
+ if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
+ !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Fetch and process SD Status register.
+ */
+static int mmc_read_ssr(struct mmc_card *card)
+{
+ unsigned int au, es, et, eo;
+ __be32 *raw_ssr;
+ u32 resp[4] = {};
+ u8 discard_support;
+ int i;
+
+ if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
+ pr_warn("%s: card lacks mandatory SD Status function\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+ if (!raw_ssr)
+ return -ENOMEM;
+
+ if (mmc_app_sd_status(card, raw_ssr)) {
+ pr_warn("%s: problem reading SD Status register\n",
+ mmc_hostname(card->host));
+ kfree(raw_ssr);
+ return 0;
+ }
+
+ for (i = 0; i < 16; i++)
+ card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+ kfree(raw_ssr);
+
+ /*
+ * UNSTUFF_BITS only works with four u32s so we have to offset the
+ * bitfield positions accordingly.
+ */
+ au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4);
+ if (au) {
+ if (au <= 9 || card->scr.sda_spec3) {
+ card->ssr.au = sd_au_size[au];
+ es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16);
+ et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6);
+ if (es && et) {
+ eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2);
+ card->ssr.erase_timeout = (et * 1000) / es;
+ card->ssr.erase_offset = eo * 1000;
+ }
+ } else {
+ pr_warn("%s: SD Status: Invalid Allocation Unit size\n",
+ mmc_hostname(card->host));
+ }
+ }
+
+ /*
+	 * Starting with SD 5.1, discard is supported if DISCARD_SUPPORT
+	 * (bit 313) is set.
+ */
+ resp[3] = card->raw_ssr[6];
+ discard_support = UNSTUFF_BITS(resp, 313 - 288, 1);
+ card->erase_arg = (card->scr.sda_specx && discard_support) ?
+ SD_DISCARD_ARG : SD_ERASE_ARG;
+
+ return 0;
+}
+
+/*
+ * Fetches and decodes switch information
+ */
+static int mmc_read_switch(struct mmc_card *card)
+{
+ int err;
+ u8 *status;
+
+ if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+ return 0;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH)) {
+ pr_warn("%s: card lacks mandatory switch function, performance might suffer\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ /*
+ * Find out the card's support bits with a mode 0 operation.
+ * The argument does not matter, as the support bits do not
+ * change with the arguments.
+ */
+ err = mmc_sd_switch(card, 0, 0, 0, status);
+ if (err) {
+ /*
+ * If the host or the card can't do the switch,
+ * fail more gracefully.
+ */
+ if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
+ goto out;
+
+ pr_warn("%s: problem reading Bus Speed modes\n",
+ mmc_hostname(card->host));
+ err = 0;
+
+ goto out;
+ }
+
+ if (status[13] & SD_MODE_HIGH_SPEED)
+ card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
+
+ if (card->scr.sda_spec3) {
+ card->sw_caps.sd3_bus_mode = status[13];
+ /* Driver Strengths supported by the card */
+ card->sw_caps.sd3_drv_type = status[9];
+ card->sw_caps.sd3_curr_limit = status[7] | status[6] << 8;
+ }
+
+out:
+ kfree(status);
+
+ return err;
+}
+
+/*
+ * Test if the card supports high-speed mode and, if so, switch to it.
+ */
+int mmc_sd_switch_hs(struct mmc_card *card)
+{
+ int err;
+ u8 *status;
+
+ if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+ return 0;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH))
+ return 0;
+
+ if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
+ return 0;
+
+ if (card->sw_caps.hs_max_dtr == 0)
+ return 0;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ err = mmc_sd_switch(card, 1, 0, HIGH_SPEED_BUS_SPEED, status);
+ if (err)
+ goto out;
+
+ if ((status[16] & 0xF) != HIGH_SPEED_BUS_SPEED) {
+ pr_warn("%s: Problem switching card into high-speed mode!\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else {
+ err = 1;
+ }
+
+out:
+ kfree(status);
+
+ return err;
+}
+
+static int sd_select_driver_type(struct mmc_card *card, u8 *status)
+{
+ int card_drv_type, drive_strength, drv_type;
+ int err;
+
+ card->drive_strength = 0;
+
+ card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B;
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->sw_caps.uhs_max_dtr,
+ card_drv_type, &drv_type);
+
+ if (drive_strength) {
+ err = mmc_sd_switch(card, 1, 2, drive_strength, status);
+ if (err)
+ return err;
+ if ((status[15] & 0xF) != drive_strength) {
+ pr_warn("%s: Problem setting drive strength!\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+ card->drive_strength = drive_strength;
+ }
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+
+ return 0;
+}
+
+static void sd_update_bus_speed_mode(struct mmc_card *card)
+{
+ /*
+	 * If the host doesn't support any of the UHS-I modes, fall back to
+	 * the default speed.
+ */
+ if (!mmc_host_uhs(card->host)) {
+ card->sd_bus_speed = 0;
+ return;
+ }
+
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+ int err;
+ unsigned int timing = 0;
+
+ switch (card->sd_bus_speed) {
+ case UHS_SDR104_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ break;
+ case UHS_DDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ break;
+ case UHS_SDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ break;
+ case UHS_SDR25_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ break;
+ case UHS_SDR12_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ break;
+ default:
+ return 0;
+ }
+
+ err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
+ if (err)
+ return err;
+
+ if ((status[16] & 0xF) != card->sd_bus_speed)
+ pr_warn("%s: Problem setting bus speed mode!\n",
+ mmc_hostname(card->host));
+ else {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ }
+
+ return 0;
+}
+
+/* Get host's max current setting at its current voltage */
+static u32 sd_get_host_max_current(struct mmc_host *host)
+{
+ u32 voltage, max_current;
+
+ voltage = 1 << host->ios.vdd;
+ switch (voltage) {
+ case MMC_VDD_165_195:
+ max_current = host->max_current_180;
+ break;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ max_current = host->max_current_300;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ max_current = host->max_current_330;
+ break;
+ default:
+ max_current = 0;
+ }
+
+ return max_current;
+}
+
+static int sd_set_current_limit(struct mmc_card *card, u8 *status)
+{
+ int current_limit = SD_SET_CURRENT_NO_CHANGE;
+ int err;
+ u32 max_current;
+
+ /*
+ * Current limit switch is only defined for SDR50, SDR104, and DDR50
+ * bus speed modes. For other bus speed modes, we do not change the
+ * current limit.
+ */
+ if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_DDR50_BUS_SPEED))
+ return 0;
+
+ /*
+ * Host has different current capabilities when operating at
+ * different voltages, so find out its max current first.
+ */
+ max_current = sd_get_host_max_current(card->host);
+
+ /*
+ * We only check host's capability here, if we set a limit that is
+ * higher than the card's maximum current, the card will be using its
+	 * maximum current, e.g. if the card's maximum current is 300 mA and
+	 * we set the current limit to 200 mA, the card will draw 200 mA; if
+	 * we set the current limit to 400/600/800 mA, the card will draw its
+	 * maximum of 300 mA from the host.
+ *
+ * The above is incorrect: if we try to set a current limit that is
+ * not supported by the card, the card can rightfully error out the
+ * attempt, and remain at the default current limit. This results
+ * in a 300mA card being limited to 200mA even though the host
+ * supports 800mA. Failures seen with SanDisk 8GB UHS cards with
+ * an iMX6 host. --rmk
+ */
+ if (max_current >= 800 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
+ current_limit = SD_SET_CURRENT_LIMIT_800;
+ else if (max_current >= 600 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
+ current_limit = SD_SET_CURRENT_LIMIT_600;
+ else if (max_current >= 400 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
+ current_limit = SD_SET_CURRENT_LIMIT_400;
+ else if (max_current >= 200 &&
+ card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
+ current_limit = SD_SET_CURRENT_LIMIT_200;
+
+ if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
+ err = mmc_sd_switch(card, 1, 3, current_limit, status);
+ if (err)
+ return err;
+
+ if (((status[15] >> 4) & 0x0F) != current_limit)
+ pr_warn("%s: Problem setting current limit!\n",
+ mmc_hostname(card->host));
+
+ }
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sd_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+ u8 *status;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH))
+ return 0;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ /* Set 4-bit bus width */
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err)
+ goto out;
+
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+
+ /*
+ * Select the bus speed mode depending on host
+ * and card capability.
+ */
+ sd_update_bus_speed_mode(card);
+
+ /* Set the driver strength for the card */
+ err = sd_select_driver_type(card, status);
+ if (err)
+ goto out;
+
+ /* Set current limit for the card */
+ err = sd_set_current_limit(card, status);
+ if (err)
+ goto out;
+
+ /* Set bus speed mode of the card */
+ err = sd_set_bus_speed_mode(card, status);
+ if (err)
+ goto out;
+
+ /*
+ * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
+ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ err = mmc_execute_tuning(card);
+
+ /*
+ * As SD Specifications Part1 Physical Layer Specification
+ * Version 3.01 says, CMD19 tuning is available for unlocked
+ * cards in transfer state of 1.8V signaling mode. The small
+ * difference between v3.00 and 3.01 spec means that CMD19
+ * tuning is also available for DDR50 mode.
+ */
+ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
+ pr_warn("%s: ddr50 tuning failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ }
+ }
+
+out:
+ kfree(status);
+
+ return err;
+}
+
+MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+ card->raw_csd[2], card->raw_csd[3]);
+MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
+MMC_DEV_ATTR(ssr,
+ "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ card->raw_ssr[0], card->raw_ssr[1], card->raw_ssr[2],
+ card->raw_ssr[3], card->raw_ssr[4], card->raw_ssr[5],
+ card->raw_ssr[6], card->raw_ssr[7], card->raw_ssr[8],
+ card->raw_ssr[9], card->raw_ssr[10], card->raw_ssr[11],
+ card->raw_ssr[12], card->raw_ssr[13], card->raw_ssr[14],
+ card->raw_ssr[15]);
+MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
+MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
+MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
+MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
+MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
+MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
+MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
+MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
+
+static ssize_t mmc_dsr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sprintf(buf, "0x%x\n", host->dsr);
+ else
+ /* return default DSR value */
+ return sprintf(buf, "0x%x\n", 0x404);
+}
+
+static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+
+MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
+MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
+MMC_DEV_ATTR(revision, "%u.%u\n", card->major_rev, card->minor_rev);
+
+#define sdio_info_attr(num) \
+static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
+ \
+ if (num > card->num_info) \
+ return -ENODATA; \
+ if (!card->info[num-1][0]) \
+ return 0; \
+ return sprintf(buf, "%s\n", card->info[num-1]); \
+} \
+static DEVICE_ATTR_RO(info##num)
+
+sdio_info_attr(1);
+sdio_info_attr(2);
+sdio_info_attr(3);
+sdio_info_attr(4);
+
+static struct attribute *sd_std_attrs[] = {
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_info1.attr,
+ &dev_attr_info2.attr,
+ &dev_attr_info3.attr,
+ &dev_attr_info4.attr,
+ &dev_attr_cid.attr,
+ &dev_attr_csd.attr,
+ &dev_attr_scr.attr,
+ &dev_attr_ssr.attr,
+ &dev_attr_date.attr,
+ &dev_attr_erase_size.attr,
+ &dev_attr_preferred_erase_size.attr,
+ &dev_attr_fwrev.attr,
+ &dev_attr_hwrev.attr,
+ &dev_attr_manfid.attr,
+ &dev_attr_name.attr,
+ &dev_attr_oemid.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_rca.attr,
+ &dev_attr_dsr.attr,
+ NULL,
+};
+
+static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ /* CIS vendor and device ids, revision and info string are available only for Combo cards */
+ if ((attr == &dev_attr_vendor.attr ||
+ attr == &dev_attr_device.attr ||
+ attr == &dev_attr_revision.attr ||
+ attr == &dev_attr_info1.attr ||
+ attr == &dev_attr_info2.attr ||
+ attr == &dev_attr_info3.attr ||
+ attr == &dev_attr_info4.attr
+ ) && card->type != MMC_TYPE_SD_COMBO)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group sd_std_group = {
+ .attrs = sd_std_attrs,
+ .is_visible = sd_std_is_visible,
+};
+__ATTRIBUTE_GROUPS(sd_std);
+
+struct device_type sd_type = {
+ .groups = sd_std_groups,
+};
+
+/*
+ * Fetch CID from card.
+ */
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
+{
+ int err;
+ u32 max_current;
+ int retries = 10;
+ u32 pocr = ocr;
+
+try_again:
+ if (!retries) {
+ ocr &= ~SD_OCR_S18R;
+ pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
+ }
+
+ /*
+ * Since we're changing the OCR value, we seem to
+ * need to tell some cards to go back to the idle
+ * state. We wait 1ms to give cards time to
+ * respond.
+ */
+ mmc_go_idle(host);
+
+ /*
+	 * If SD_SEND_IF_COND indicates an SD 2.0
+	 * compliant card, we should set bit 30
+	 * of the OCR to indicate that we can handle
+	 * block-addressed SDHC cards.
+ */
+ err = mmc_send_if_cond(host, ocr);
+ if (!err)
+ ocr |= SD_OCR_CCS;
+
+ /*
+ * If the host supports one of UHS-I modes, request the card
+ * to switch to 1.8V signaling level. If the card has failed
+ * repeatedly to switch however, skip this.
+ */
+ if (retries && mmc_host_uhs(host))
+ ocr |= SD_OCR_S18R;
+
+ /*
+ * If the host can supply more than 150mA at current voltage,
+ * XPC should be set to 1.
+ */
+ max_current = sd_get_host_max_current(host);
+ if (max_current > 150)
+ ocr |= SD_OCR_XPC;
+
+ err = mmc_send_app_op_cond(host, ocr, rocr);
+ if (err)
+ return err;
+
+ /*
+ * In case the S18A bit is set in the response, let's start the signal
+ * voltage switch procedure. SPI mode doesn't support CMD11.
+ * Note that, according to the spec, the S18A bit is not valid unless
+	 * the CCS bit is set as well. We deliberately deviate from the spec with
+	 * regard to this, which allows UHS-I to be supported for SDSC cards.
+ */
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
+ err = mmc_set_uhs_voltage(host, pocr);
+ if (err == -EAGAIN) {
+ retries--;
+ goto try_again;
+ } else if (err) {
+ retries = 0;
+ goto try_again;
+ }
+ }
+
+ err = mmc_send_cid(host, cid);
+ return err;
+}
+
+int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
+
+ /*
+ * Fetch CSD from card.
+ */
+ err = mmc_send_csd(card, card->raw_csd);
+ if (err)
+ return err;
+
+ err = mmc_decode_csd(card);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mmc_sd_get_ro(struct mmc_host *host)
+{
+ int ro;
+
+ /*
+	 * Some systems don't feature a write-protect pin and don't need one,
+	 * e.g. because they only have a micro-SD card slot. For such systems,
+ * assume that the SD card is always read-write.
+ */
+ if (host->caps2 & MMC_CAP2_NO_WRITE_PROTECT)
+ return 0;
+
+ if (!host->ops->get_ro)
+ return -1;
+
+ ro = host->ops->get_ro(host);
+
+ return ro;
+}
+
+int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ bool reinit)
+{
+ int err;
+
+ if (!reinit) {
+ /*
+ * Fetch SCR from card.
+ */
+ err = mmc_app_send_scr(card);
+ if (err)
+ return err;
+
+ err = mmc_decode_scr(card);
+ if (err)
+ return err;
+
+ /*
+ * Fetch and process SD Status register.
+ */
+ err = mmc_read_ssr(card);
+ if (err)
+ return err;
+
+ /* Erase init depends on CSD and SSR */
+ mmc_init_erase(card);
+ }
+
+ /*
+	 * Fetch switch information from the card. Note that sd3_bus_mode can
+	 * change if the voltage switch outcome changes, so always do this.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ * This CRC enable is located AFTER the reading of the
+ * card registers because some SDHC cards are not able
+ * to provide valid CRCs for non-512-byte blocks.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Check if read-only switch is active.
+ */
+ if (!reinit) {
+ int ro = mmc_sd_get_ro(host);
+
+ if (ro < 0) {
+ pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n",
+ mmc_hostname(host));
+ } else if (ro > 0) {
+ mmc_card_set_readonly(card);
+ }
+ }
+
+ return 0;
+}
+
+unsigned mmc_sd_get_max_clock(struct mmc_card *card)
+{
+ unsigned max_dtr = (unsigned int)-1;
+
+ if (mmc_card_hs(card)) {
+ if (max_dtr > card->sw_caps.hs_max_dtr)
+ max_dtr = card->sw_caps.hs_max_dtr;
+ } else if (max_dtr > card->csd.max_dtr) {
+ max_dtr = card->csd.max_dtr;
+ }
+
+ return max_dtr;
+}
+
+static bool mmc_sd_card_using_v18(struct mmc_card *card)
+{
+ /*
+ * According to the SD spec., the Bus Speed Mode (function group 1) bits
+ * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
+ * they can be used to determine if the card has already switched to
+ * 1.8V signaling.
+ */
+ return card->sw_caps.sd3_bus_mode &
+ (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+ u32 cid[4];
+ u32 rocr = 0;
+ bool v18_fixup_failed = false;
+
+ WARN_ON(!host->claimed);
+retry:
+ err = mmc_sd_get_cid(host, ocr, cid, &rocr);
+ if (err)
+ return err;
+
+ if (oldcard) {
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
+ return -ENOENT;
+ }
+
+ card = oldcard;
+ } else {
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, &sd_type);
+ if (IS_ERR(card))
+ return PTR_ERR(card);
+
+ card->ocr = ocr;
+ card->type = MMC_TYPE_SD;
+ memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ }
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
+	 * For native buses: get the card's RCA and quit open-drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto free_card;
+ }
+
+ if (!oldcard) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+ goto free_card;
+
+ mmc_decode_cid(card);
+ }
+
+ /*
+	 * Handle DSR configuration only for cards supporting DSR and hosts
+	 * requesting it.
+ */
+ if (card->csd.dsr_imp && host->dsr_req)
+ mmc_set_dsr(host);
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto free_card;
+ }
+
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ if (err)
+ goto free_card;
+
+ /*
+ * If the card has not been power cycled, it may still be using 1.8V
+ * signaling. Detect that situation and try to initialize a UHS-I (1.8V)
+ * transfer mode.
+ */
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ mmc_sd_card_using_v18(card) &&
+ host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
+ }
+ goto cont;
+ }
+
+ /* Initialization sequence for UHS-I cards */
+ if (rocr & SD_ROCR_S18A && mmc_host_uhs(host)) {
+ err = mmc_sd_init_uhs_card(card);
+ if (err)
+ goto free_card;
+ } else {
+ /*
+ * Attempt to change to high-speed (if supported)
+ */
+ err = mmc_sd_switch_hs(card);
+ if (err > 0)
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ else if (err)
+ goto free_card;
+
+ /*
+ * Set bus speed.
+ */
+ mmc_set_clock(host, mmc_sd_get_max_clock(card));
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ if ((host->caps & MMC_CAP_4_BIT_DATA) &&
+ (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err)
+ goto free_card;
+
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ }
+ }
+cont:
+ if (host->cqe_ops && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (!err) {
+ host->cqe_enabled = true;
+ host->hsq_enabled = true;
+ pr_info("%s: Host Software Queue enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
+ if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
+ host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ pr_err("%s: Host failed to negotiate down from 3.3V\n",
+ mmc_hostname(host));
+ err = -EINVAL;
+ goto free_card;
+ }
+
+ host->card = card;
+ return 0;
+
+free_card:
+ if (!oldcard)
+ mmc_remove_card(card);
+
+ return err;
+}
+
+/*
+ * Host is being removed. Free up the current card.
+ */
+static void mmc_sd_remove(struct mmc_host *host)
+{
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_sd_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
+ * Card detection callback from host.
+ */
+static void mmc_sd_detect(struct mmc_host *host)
+{
+ int err;
+
+ mmc_get_card(host->card, NULL);
+
+ /*
+ * Just check if our card has been removed.
+ */
+ err = _mmc_detect_card_removed(host);
+
+ mmc_put_card(host->card, NULL);
+
+ if (err) {
+ mmc_sd_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+static int _mmc_sd_suspend(struct mmc_host *host)
+{
+ int err = 0;
+
+ mmc_claim_host(host);
+
+ if (mmc_card_suspended(host->card))
+ goto out;
+
+ if (!mmc_host_is_spi(host))
+ err = mmc_deselect_cards(host);
+
+ if (!err) {
+ mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for suspend
+ */
+static int mmc_sd_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_sd_suspend(host);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
+}
+
+/*
+ * This function tries to determine if the same card is still present
+ * and, if so, restore all state to it.
+ */
+static int _mmc_sd_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ mmc_claim_host(host);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_sd_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for resume
+ */
+static int mmc_sd_resume(struct mmc_host *host)
+{
+ pm_runtime_enable(&host->card->dev);
+ return 0;
+}
+
+/*
+ * Callback for runtime_suspend.
+ */
+static int mmc_sd_runtime_suspend(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ return 0;
+
+ err = _mmc_sd_suspend(host);
+ if (err)
+ pr_err("%s: error %d doing aggressive suspend\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
+
+/*
+ * Callback for runtime_resume.
+ */
+static int mmc_sd_runtime_resume(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_sd_resume(host);
+ if (err && err != -ENOMEDIUM)
+ pr_err("%s: error %d doing runtime resume\n",
+ mmc_hostname(host), err);
+
+ return 0;
+}
+
+static int mmc_sd_hw_reset(struct mmc_host *host)
+{
+ mmc_power_cycle(host, host->card->ocr);
+ return mmc_sd_init_card(host, host->card->ocr, host->card);
+}
+
+static const struct mmc_bus_ops mmc_sd_ops = {
+ .remove = mmc_sd_remove,
+ .detect = mmc_sd_detect,
+ .runtime_suspend = mmc_sd_runtime_suspend,
+ .runtime_resume = mmc_sd_runtime_resume,
+ .suspend = mmc_sd_suspend,
+ .resume = mmc_sd_resume,
+ .alive = mmc_sd_alive,
+ .shutdown = mmc_sd_suspend,
+ .hw_reset = mmc_sd_hw_reset,
+};
+
+/*
+ * Starting point for SD card init.
+ */
+int mmc_attach_sd(struct mmc_host *host)
+{
+ int err;
+ u32 ocr, rocr;
+
+ WARN_ON(!host->claimed);
+
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ mmc_attach_bus(host, &mmc_sd_ops);
+ if (host->ocr_avail_sd)
+ host->ocr_avail = host->ocr_avail_sd;
+
+ /*
+ * We need to get OCR a different way for SPI.
+ */
+ if (mmc_host_is_spi(host)) {
+ mmc_go_idle(host);
+
+ err = mmc_spi_read_ocr(host, 0, &ocr);
+ if (err)
+ goto err;
+ }
+
+ /*
+	 * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
+	 * these bits as invalid, especially also bit 7.
+ */
+ ocr &= ~0x7FFF;
+
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Can we support the voltage(s) of the card(s)?
+ */
+ if (!rocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Detect and init the card.
+ */
+ err = mmc_sd_init_card(host, rocr, NULL);
+ if (err)
+ goto err;
+
+ mmc_release_host(host);
+ err = mmc_add_card(host->card);
+ if (err)
+ goto remove_card;
+
+ mmc_claim_host(host);
+ return 0;
+
+remove_card:
+ mmc_remove_card(host->card);
+ host->card = NULL;
+ mmc_claim_host(host);
+err:
+ mmc_detach_bus(host);
+
+ pr_err("%s: error %d whilst initialising SD card\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
new file mode 100644
index 000000000..497c026a5
--- /dev/null
+++ b/drivers/mmc/core/sd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MMC_CORE_SD_H
+#define _MMC_CORE_SD_H
+
+#include <linux/types.h>
+
+extern struct device_type sd_type;
+
+struct mmc_host;
+struct mmc_card;
+
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
+int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
+void mmc_decode_cid(struct mmc_card *card);
+int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ bool reinit);
+unsigned mmc_sd_get_max_clock(struct mmc_card *card);
+int mmc_sd_switch_hs(struct mmc_card *card);
+
+#endif
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
new file mode 100644
index 000000000..22bf52829
--- /dev/null
+++ b/drivers/mmc/core/sd_ops.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/sd_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "sd_ops.h"
+
+int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd = {};
+
+ if (WARN_ON(card && card->host != host))
+ return -EINVAL;
+
+ cmd.opcode = MMC_APP_CMD;
+
+ if (card) {
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_BCR;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+	/* Check that the card supports application commands */
+ if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_app_cmd);
+
+static int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
+ struct mmc_command *cmd)
+{
+ struct mmc_request mrq = {};
+ int i, err = -EIO;
+
+ /*
+ * We have to resend MMC_APP_CMD for each attempt so
+ * we cannot use the retries field in mmc_command.
+ */
+ for (i = 0; i <= MMC_CMD_RETRIES; i++) {
+ err = mmc_app_cmd(host, card);
+ if (err) {
+ /* no point in retrying; no APP commands allowed */
+ if (mmc_host_is_spi(host)) {
+ if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
+ break;
+ }
+ continue;
+ }
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ memset(cmd->resp, 0, sizeof(cmd->resp));
+ cmd->retries = 0;
+
+ mrq.cmd = cmd;
+ cmd->data = NULL;
+
+ mmc_wait_for_req(host, &mrq);
+
+ err = cmd->error;
+ if (!cmd->error)
+ break;
+
+ /* no point in retrying illegal APP commands */
+ if (mmc_host_is_spi(host)) {
+ if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
+ break;
+ }
+ }
+
+ return err;
+}
+
+int mmc_app_set_bus_width(struct mmc_card *card, int width)
+{
+ struct mmc_command cmd = {};
+
+ cmd.opcode = SD_APP_SET_BUS_WIDTH;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ switch (width) {
+ case MMC_BUS_WIDTH_1:
+ cmd.arg = SD_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ cmd.arg = SD_BUS_WIDTH_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mmc_wait_for_app_cmd(card->host, card, &cmd);
+}
+
+int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd = {};
+ int i, err = 0;
+
+ cmd.opcode = SD_APP_OP_COND;
+ if (mmc_host_is_spi(host))
+ cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
+ else
+ cmd.arg = ocr;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_app_cmd(host, NULL, &cmd);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ /* otherwise wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd.resp[0] & R1_SPI_IDLE))
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ mmc_delay(10);
+ }
+
+ if (!i)
+ pr_err("%s: card never left busy state\n", mmc_hostname(host));
+
+ if (rocr && !mmc_host_is_spi(host))
+ *rocr = cmd.resp[0];
+
+ return err;
+}
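
Worked out, the polling budget above is 100 iterations of ACMD41 with a 10 ms delay between them, i.e. the card gets roughly one second to clear its busy indication before -ETIMEDOUT is reported.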
+
+int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
+{
+ struct mmc_command cmd = {};
+ int err;
+ static const u8 test_pattern = 0xAA;
+ u8 result_pattern;
+
+ /*
+ * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
+ * before SD_APP_OP_COND. This command will harmlessly fail for
+ * SD 1.0 cards.
+ */
+ cmd.opcode = SD_SEND_IF_COND;
+ cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
+ cmd.flags = MMC_RSP_SPI_R7 | MMC_RSP_R7 | MMC_CMD_BCR;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host))
+ result_pattern = cmd.resp[1] & 0xFF;
+ else
+ result_pattern = cmd.resp[0] & 0xFF;
+
+ if (result_pattern != test_pattern)
+ return -EIO;
+
+ return 0;
+}
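
As a worked example of the argument encoding: for any OCR that advertises a voltage in the 2.7-3.6 V window (ocr & 0xFF8000 is non-zero), the VHS field becomes 1, so the CMD8 argument is (1 << 8) | 0xAA = 0x1AA. A compliant SD 2.0 card echoes both the VHS field and the 0xAA check pattern back in its R7 response, which is what the comparison above verifies.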
+
+int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
+{
+ int err;
+ struct mmc_command cmd = {};
+
+ cmd.opcode = SD_SEND_RELATIVE_ADDR;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ *rca = cmd.resp[0] >> 16;
+
+ return 0;
+}
+
+int mmc_app_send_scr(struct mmc_card *card)
+{
+ int err;
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ __be32 *scr;
+
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ return err;
+
+	/* DMA onto the stack is unsafe/nonportable, so read the SCR into a
+	 * heap-allocated bounce buffer instead.
+ */
+ scr = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
+ if (!scr)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_APP_SEND_SCR;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 8;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, scr, 8);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ card->raw_scr[0] = be32_to_cpu(scr[0]);
+ card->raw_scr[1] = be32_to_cpu(scr[1]);
+
+ kfree(scr);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+int mmc_sd_switch(struct mmc_card *card, int mode, int group,
+ u8 value, u8 *resp)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+
+ /* NOTE: caller guarantees resp is heap-allocated */
+
+ mode = !!mode;
+ value &= 0xF;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_SWITCH;
+ cmd.arg = mode << 31 | 0x00FFFFFF;
+ cmd.arg &= ~(0xF << (group * 4));
+ cmd.arg |= value << (group * 4);
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, resp, 64);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
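
As a worked example of the argument construction: the high-speed switch issued from sd.c (mode = 1, group = 0, value = 1) starts from 1 << 31 | 0x00FFFFFF = 0x80FFFFFF, clears the group-0 nibble to get 0x80FFFFF0, then ORs in the function number, giving 0x80FFFFF1, i.e. "switch function group 1 to function 1 and leave every other group at 0xF (no change)".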
+
+int mmc_app_sd_status(struct mmc_card *card, void *ssr)
+{
+ int err;
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+
+ /* NOTE: caller guarantees ssr is heap-allocated */
+
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ return err;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_APP_SD_STATUS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, ssr, 64);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
new file mode 100644
index 000000000..2194cabfc
--- /dev/null
+++ b/drivers/mmc/core/sd_ops.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/drivers/mmc/core/sd_ops.h
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#ifndef _MMC_SD_OPS_H
+#define _MMC_SD_OPS_H
+
+#include <linux/types.h>
+
+struct mmc_card;
+struct mmc_host;
+
+int mmc_app_set_bus_width(struct mmc_card *card, int width);
+int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
+int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
+int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
+int mmc_app_send_scr(struct mmc_card *card);
+int mmc_sd_switch(struct mmc_card *card, int mode, int group,
+ u8 value, u8 *resp);
+int mmc_app_sd_status(struct mmc_card *card, void *ssr);
+int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
+
+#endif
+
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
new file mode 100644
index 000000000..85c2947ed
--- /dev/null
+++ b/drivers/mmc/core/sdio.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/sdio.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "quirks.h"
+#include "sd.h"
+#include "sdio_bus.h"
+#include "mmc_ops.h"
+#include "sd_ops.h"
+#include "sdio_ops.h"
+#include "sdio_cis.h"
+
+MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
+MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
+MMC_DEV_ATTR(revision, "%u.%u\n", card->major_rev, card->minor_rev);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
+MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
+
+#define sdio_info_attr(num) \
+static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
+ \
+ if (num > card->num_info) \
+ return -ENODATA; \
+ if (!card->info[num-1][0]) \
+ return 0; \
+ return sprintf(buf, "%s\n", card->info[num-1]); \
+} \
+static DEVICE_ATTR_RO(info##num)
+
+sdio_info_attr(1);
+sdio_info_attr(2);
+sdio_info_attr(3);
+sdio_info_attr(4);
+
+static struct attribute *sdio_std_attrs[] = {
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_info1.attr,
+ &dev_attr_info2.attr,
+ &dev_attr_info3.attr,
+ &dev_attr_info4.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_rca.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sdio_std);
+
+static struct device_type sdio_type = {
+ .groups = sdio_std_groups,
+};
+
+static int sdio_read_fbr(struct sdio_func *func)
+{
+ int ret;
+ unsigned char data;
+
+ if (mmc_card_nonstd_func_interface(func->card)) {
+ func->class = SDIO_CLASS_NONE;
+ return 0;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
+ if (ret)
+ goto out;
+
+ data &= 0x0f;
+
+ if (data == 0x0f) {
+ ret = mmc_io_rw_direct(func->card, 0, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF_EXT, 0, &data);
+ if (ret)
+ goto out;
+ }
+
+ func->class = data;
+
+out:
+ return ret;
+}
+
+static int sdio_init_func(struct mmc_card *card, unsigned int fn)
+{
+ int ret;
+ struct sdio_func *func;
+
+ if (WARN_ON(fn > SDIO_MAX_FUNCS))
+ return -EINVAL;
+
+ func = sdio_alloc_func(card);
+ if (IS_ERR(func))
+ return PTR_ERR(func);
+
+ func->num = fn;
+
+ if (!(card->quirks & MMC_QUIRK_NONSTD_SDIO)) {
+ ret = sdio_read_fbr(func);
+ if (ret)
+ goto fail;
+
+ ret = sdio_read_func_cis(func);
+ if (ret)
+ goto fail;
+ } else {
+ func->vendor = func->card->cis.vendor;
+ func->device = func->card->cis.device;
+ func->max_blksize = func->card->cis.blksize;
+ }
+
+ card->sdio_func[fn - 1] = func;
+
+ return 0;
+
+fail:
+ /*
+ * It is okay to remove the function here even though we hold
+ * the host lock as we haven't registered the device yet.
+ */
+ sdio_remove_func(func);
+ return ret;
+}
+
+static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
+{
+ int ret;
+ int cccr_vsn;
+ int uhs = ocr & R4_18V_PRESENT;
+ unsigned char data;
+ unsigned char speed;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data);
+ if (ret)
+ goto out;
+
+ cccr_vsn = data & 0x0f;
+
+ if (cccr_vsn > SDIO_CCCR_REV_3_00) {
+ pr_err("%s: unrecognised CCCR structure version %d\n",
+ mmc_hostname(card->host), cccr_vsn);
+ return -EINVAL;
+ }
+
+ card->cccr.sdio_vsn = (data & 0xf0) >> 4;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CAPS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_CCCR_CAP_SMB)
+ card->cccr.multi_block = 1;
+ if (data & SDIO_CCCR_CAP_LSC)
+ card->cccr.low_speed = 1;
+ if (data & SDIO_CCCR_CAP_4BLS)
+ card->cccr.wide_bus = 1;
+
+ if (cccr_vsn >= SDIO_CCCR_REV_1_10) {
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_POWER, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_POWER_SMPC)
+ card->cccr.high_power = 1;
+ }
+
+ if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (ret)
+ goto out;
+
+ card->scr.sda_spec3 = 0;
+ card->sw_caps.sd3_bus_mode = 0;
+ card->sw_caps.sd3_drv_type = 0;
+ if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
+ card->scr.sda_spec3 = 1;
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_UHS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (mmc_host_uhs(card->host)) {
+ if (data & SDIO_UHS_DDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_DDR50 | SD_MODE_UHS_SDR50
+ | SD_MODE_UHS_SDR25 | SD_MODE_UHS_SDR12;
+
+ if (data & SDIO_UHS_SDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR25
+ | SD_MODE_UHS_SDR12;
+
+ if (data & SDIO_UHS_SDR104)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR104 | SD_MODE_UHS_SDR50
+ | SD_MODE_UHS_SDR25 | SD_MODE_UHS_SDR12;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_DRIVE_SDTA)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
+ if (data & SDIO_DRIVE_SDTC)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
+ if (data & SDIO_DRIVE_SDTD)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+ }
+
+ /* if no uhs mode ensure we check for high speed */
+ if (!card->sw_caps.sd3_bus_mode) {
+ if (speed & SDIO_SPEED_SHS) {
+ card->cccr.high_speed = 1;
+ card->sw_caps.hs_max_dtr = 50000000;
+ } else {
+ card->cccr.high_speed = 0;
+ card->sw_caps.hs_max_dtr = 25000000;
+ }
+ }
+ }
+
+out:
+ return ret;
+}
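
sdio_read_cccr() begins by splitting the SDIO_CCCR_CCCR byte into two version fields. A standalone sketch of that decode, with a hypothetical register value:

#include <stdio.h>

int main(void)
{
	unsigned char cccr_reg = 0x43;		/* hypothetical register value */
	int cccr_vsn = cccr_reg & 0x0f;		/* CCCR/FBR format version */
	int sdio_vsn = (cccr_reg & 0xf0) >> 4;	/* SDIO spec version */

	printf("CCCR format code %d, SDIO spec code %d\n", cccr_vsn, sdio_vsn);
	return 0;
}
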
+
+static int sdio_enable_wide(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ return 0;
+
+ if (card->cccr.low_speed && !card->cccr.wide_bus)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+ pr_warn("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+ mmc_hostname(card->host), ctrl);
+
+ /* set as 4-bit bus width */
+ ctrl &= ~SDIO_BUS_WIDTH_MASK;
+ ctrl |= SDIO_BUS_WIDTH_4BIT;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+/*
+ * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1)
+ * of the card. This may be required on certain setups of boards,
+ * controllers and embedded SDIO devices that do not need the card's
+ * pull-up. As a result, card detection is disabled and power is saved.
+ */
+static int sdio_disable_cd(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!mmc_card_disable_cd(card))
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ ctrl |= SDIO_BUS_CD_DISABLE;
+
+ return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+}
+
+/*
+ * Devices that remain active during a system suspend are
+ * put back into 1-bit mode.
+ */
+static int sdio_disable_wide(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ return 0;
+
+ if (card->cccr.low_speed && !card->cccr.wide_bus)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+ return 0;
+
+ ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+ ctrl |= SDIO_BUS_ASYNC_INT;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+ if (ret)
+ return ret;
+
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);
+
+ return 0;
+}
+
+static int sdio_disable_4bit_bus(struct mmc_card *card)
+{
+ int err;
+
+ if (card->type == MMC_TYPE_SDIO)
+ goto out;
+
+ if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ return 0;
+
+ if (!(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4))
+ return 0;
+
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_1);
+ if (err)
+ return err;
+
+out:
+ return sdio_disable_wide(card);
+}
+
+static int sdio_enable_4bit_bus(struct mmc_card *card)
+{
+ int err;
+
+ err = sdio_enable_wide(card);
+ if (err <= 0)
+ return err;
+ if (card->type == MMC_TYPE_SDIO)
+ goto out;
+
+ if (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) {
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err) {
+ sdio_disable_wide(card);
+ return err;
+ }
+ }
+out:
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+
+ return 0;
+}
+
+/*
+ * Test if the card supports high-speed mode and, if so, switch to it.
+ */
+static int mmc_sdio_switch_hs(struct mmc_card *card, int enable)
+{
+ int ret;
+ u8 speed;
+
+ if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
+ return 0;
+
+ if (!card->cccr.high_speed)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (ret)
+ return ret;
+
+ if (enable)
+ speed |= SDIO_SPEED_EHS;
+ else
+ speed &= ~SDIO_SPEED_EHS;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+/*
+ * Enable SDIO/combo card's high-speed mode. Returns 1 if the switch
+ * succeeded, 0 if high speed is not supported, or a negative error code.
+ */
+static int sdio_enable_hs(struct mmc_card *card)
+{
+ int ret;
+
+ ret = mmc_sdio_switch_hs(card, true);
+ if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ return ret;
+
+ ret = mmc_sd_switch_hs(card);
+ if (ret <= 0)
+ mmc_sdio_switch_hs(card, false);
+
+ return ret;
+}
+
+static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
+{
+ unsigned max_dtr;
+
+ if (mmc_card_hs(card)) {
+ /*
+ * The SDIO specification doesn't mention how
+ * the CIS transfer speed register relates to
+ * high-speed, but it seems that 50 MHz is
+ * mandatory.
+ */
+ max_dtr = 50000000;
+ } else {
+ max_dtr = card->cis.max_dtr;
+ }
+
+ if (card->type == MMC_TYPE_SD_COMBO)
+ max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+
+ return max_dtr;
+}
+
+static unsigned char host_drive_to_sdio_drive(int host_strength)
+{
+ switch (host_strength) {
+ case MMC_SET_DRIVER_TYPE_A:
+ return SDIO_DTSx_SET_TYPE_A;
+ case MMC_SET_DRIVER_TYPE_B:
+ return SDIO_DTSx_SET_TYPE_B;
+ case MMC_SET_DRIVER_TYPE_C:
+ return SDIO_DTSx_SET_TYPE_C;
+ case MMC_SET_DRIVER_TYPE_D:
+ return SDIO_DTSx_SET_TYPE_D;
+ default:
+ return SDIO_DTSx_SET_TYPE_B;
+ }
+}
+
+static void sdio_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type;
+ unsigned char card_strength;
+ int err;
+
+ card->drive_strength = 0;
+
+ card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B;
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->sw_caps.uhs_max_dtr,
+ card_drv_type, &drv_type);
+
+ if (drive_strength) {
+ /* if error just use default for drive strength B */
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
+ &card_strength);
+ if (err)
+ return;
+
+ card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
+ card_strength |= host_drive_to_sdio_drive(drive_strength);
+
+ /* if error default to drive strength B */
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
+ card_strength, NULL);
+ if (err)
+ return;
+ card->drive_strength = drive_strength;
+ }
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+}
+
+static int sdio_set_bus_speed_mode(struct mmc_card *card)
+{
+ unsigned int bus_speed, timing;
+ int err;
+ unsigned char speed;
+ unsigned int max_rate;
+
+ /*
+ * If the host doesn't support any of the UHS-I modes, fall back to
+ * the default speed.
+ */
+ if (!mmc_host_uhs(card->host))
+ return 0;
+
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ bus_speed = SDIO_SPEED_SDR104;
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ bus_speed = SDIO_SPEED_DDR50;
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ bus_speed = SDIO_SPEED_SDR50;
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ bus_speed = SDIO_SPEED_SDR25;
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ }
+
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (err)
+ return err;
+
+ speed &= ~SDIO_SPEED_BSS_MASK;
+ speed |= bus_speed;
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (err)
+ return err;
+
+ max_rate = min_not_zero(card->quirk_max_rate,
+ card->sw_caps.uhs_max_dtr);
+
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, max_rate);
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+
+ if (!card->scr.sda_spec3)
+ return 0;
+
+ /* Switch to wider bus */
+ err = sdio_enable_4bit_bus(card);
+ if (err)
+ goto out;
+
+ /* Set the driver strength for the card */
+ sdio_select_driver_type(card);
+
+ /* Set bus speed mode of the card */
+ err = sdio_set_bus_speed_mode(card);
+ if (err)
+ goto out;
+
+ /*
+ * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
+ err = mmc_execute_tuning(card);
+out:
+ return err;
+}
+
+static int mmc_sdio_pre_init(struct mmc_host *host, u32 ocr,
+ struct mmc_card *card)
+{
+ if (card)
+ mmc_remove_card(card);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ */
+
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, ocr);
+ return mmc_send_io_op_cond(host, 0, NULL);
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+ int retries = 10;
+ u32 rocr = 0;
+ u32 ocr_card = ocr;
+
+ WARN_ON(!host->claimed);
+
+ /* Ask the card whether 1.8V signalling is supported */
+ if (mmc_host_uhs(host))
+ ocr |= R4_18V_PRESENT;
+
+try_again:
+ if (!retries) {
+ pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
+ ocr &= ~R4_18V_PRESENT;
+ }
+
+ /*
+ * Inform the card of the voltage
+ */
+ err = mmc_send_io_op_cond(host, ocr, &rocr);
+ if (err)
+ return err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, &sdio_type);
+ if (IS_ERR(card))
+ return PTR_ERR(card);
+
+ if ((rocr & R4_MEMORY_PRESENT) &&
+ mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
+ card->type = MMC_TYPE_SD_COMBO;
+
+ if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
+ err = -ENOENT;
+ goto mismatch;
+ }
+ } else {
+ card->type = MMC_TYPE_SDIO;
+
+ if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ err = -ENOENT;
+ goto mismatch;
+ }
+ }
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ card->ocr = ocr_card;
+
+ /*
+ * If both the host and the card support UHS-I mode, request the
+ * card to switch to the 1.8V signaling level. Skip the switch when
+ * UHS mode is not enabled, both for compatibility and because some
+ * systems that claim 1.8V signalling in fact do not support it.
+ * Per SDIO spec v3, section 3.1.2, if the voltage is already 1.8V
+ * the card sets S18A to 0 in the R4 response, so the
+ * rocr & R4_18V_PRESENT check below fails even though UHS
+ * initialization should still be attempted; sdio_read_cccr() later
+ * determines which speed modes actually work.
+ */
+ if (rocr & ocr & R4_18V_PRESENT) {
+ err = mmc_set_uhs_voltage(host, ocr_card);
+ if (err == -EAGAIN) {
+ mmc_sdio_pre_init(host, ocr_card, card);
+ retries--;
+ goto try_again;
+ } else if (err) {
+ ocr &= ~R4_18V_PRESENT;
+ }
+ }
+
+ /*
+ * For native busses: set card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto remove;
+
+ /*
+ * Update oldcard with the new RCA received from the SDIO
+ * device -- we're doing this so that it's updated in the
+ * "card" struct when oldcard overwrites that later.
+ */
+ if (oldcard)
+ oldcard->rca = card->rca;
+ }
+
+ /*
+ * Read CSD, before selecting the card
+ */
+ if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+ goto remove;
+
+ mmc_decode_cid(card);
+ }
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto remove;
+ }
+
+ if (card->quirks & MMC_QUIRK_NONSTD_SDIO) {
+ /*
+ * This is a non-standard SDIO device, meaning it doesn't
+ * have any CIA (Common I/O area) registers present.
+ * It's host's responsibility to fill cccr and cis
+ * structures in init_card().
+ */
+ mmc_set_clock(host, card->cis.max_dtr);
+
+ if (card->cccr.high_speed)
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+
+ if (oldcard)
+ mmc_remove_card(card);
+ else
+ host->card = card;
+
+ return 0;
+ }
+
+ /*
+ * Read the common registers. Note that we should try to
+ * validate whether UHS would work or not.
+ */
+ err = sdio_read_cccr(card, ocr);
+ if (err) {
+ mmc_sdio_pre_init(host, ocr_card, card);
+ if (ocr & R4_18V_PRESENT) {
+ /* Retry init sequence, but without R4_18V_PRESENT. */
+ retries = 0;
+ goto try_again;
+ }
+ return err;
+ }
+
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+
+ if (oldcard) {
+ if (card->cis.vendor == oldcard->cis.vendor &&
+ card->cis.device == oldcard->cis.device) {
+ mmc_remove_card(card);
+ card = oldcard;
+ } else {
+ err = -ENOENT;
+ goto mismatch;
+ }
+ }
+
+ mmc_fixup_device(card, sdio_fixup_methods);
+
+ if (card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ /* handle as SDIO-only card if memory init failed */
+ if (err) {
+ mmc_go_idle(host);
+ if (mmc_host_is_spi(host))
+ /* should not fail, as it worked previously */
+ mmc_spi_set_crc(host, use_spi_crc);
+ card->type = MMC_TYPE_SDIO;
+ } else
+ card->dev.type = &sd_type;
+ }
+
+ /*
+ * If needed, disconnect card detection pull-up resistor.
+ */
+ err = sdio_disable_cd(card);
+ if (err)
+ goto remove;
+
+ /* Initialization sequence for UHS-I cards */
+ /* Only if card supports 1.8v and UHS signaling */
+ if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
+ err = mmc_sdio_init_uhs_card(card);
+ if (err)
+ goto remove;
+ } else {
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ else if (err)
+ goto remove;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ err = sdio_enable_4bit_bus(card);
+ if (err)
+ goto remove;
+ }
+
+ if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
+ host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ pr_err("%s: Host failed to negotiate down from 3.3V\n",
+ mmc_hostname(host));
+ err = -EINVAL;
+ goto remove;
+ }
+
+ host->card = card;
+ return 0;
+
+mismatch:
+ pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host));
+remove:
+ if (oldcard != card)
+ mmc_remove_card(card);
+ return err;
+}
+
+static int mmc_sdio_reinit_card(struct mmc_host *host)
+{
+ int ret;
+
+ ret = mmc_sdio_pre_init(host, host->card->ocr, NULL);
+ if (ret)
+ return ret;
+
+ return mmc_sdio_init_card(host, host->card->ocr, host->card);
+}
+
+/*
+ * Host is being removed. Free up the current card.
+ */
+static void mmc_sdio_remove(struct mmc_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->card->sdio_funcs; i++) {
+ if (host->card->sdio_func[i]) {
+ sdio_remove_func(host->card->sdio_func[i]);
+ host->card->sdio_func[i] = NULL;
+ }
+ }
+
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_sdio_alive(struct mmc_host *host)
+{
+ return mmc_select_card(host->card);
+}
+
+/*
+ * Card detection callback from host.
+ */
+static void mmc_sdio_detect(struct mmc_host *host)
+{
+ int err;
+
+ /* Make sure card is powered before detecting it */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ err = pm_runtime_get_sync(&host->card->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&host->card->dev);
+ goto out;
+ }
+ }
+
+ mmc_claim_host(host);
+
+ /*
+ * Just check if our card has been removed.
+ */
+ err = _mmc_detect_card_removed(host);
+
+ mmc_release_host(host);
+
+ /*
+ * Tell PM core it's OK to power off the card now.
+ *
+ * The _sync variant is used in order to ensure that the card
+ * is left powered off in case an error occurred, and the card
+ * is going to be removed.
+ *
+ * Since there is no specific reason to believe a new user
+ * is about to show up at this point, the _sync variant is
+ * desirable anyway.
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_sync(&host->card->dev);
+
+out:
+ if (err) {
+ mmc_sdio_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+/*
+ * SDIO pre_suspend. We need to suspend all functions separately.
+ * Therefore all registered functions must have drivers with suspend
+ * and resume methods. Failing that we simply remove the whole card.
+ */
+static int mmc_sdio_pre_suspend(struct mmc_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->card->sdio_funcs; i++) {
+ struct sdio_func *func = host->card->sdio_func[i];
+ if (func && sdio_func_present(func) && func->dev.driver) {
+ const struct dev_pm_ops *pmops = func->dev.driver->pm;
+ if (!pmops || !pmops->suspend || !pmops->resume)
+ /* force removal of entire card in that case */
+ goto remove;
+ }
+ }
+
+ return 0;
+
+remove:
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "missing suspend/resume ops for non-removable SDIO card\n");
+ /* Don't remove a non-removable card - we can't re-detect it. */
+ return 0;
+ }
+
+ /* Remove the SDIO card and let it be re-detected later on. */
+ mmc_sdio_remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+
+ return 0;
+}
+
+/*
+ * SDIO suspend. Suspend all functions separately.
+ */
+static int mmc_sdio_suspend(struct mmc_host *host)
+{
+ WARN_ON(host->sdio_irqs && !mmc_card_keep_power(host));
+
+ /* Prevent processing of SDIO IRQs in suspended state. */
+ mmc_card_set_suspended(host->card);
+ cancel_delayed_work_sync(&host->sdio_irq_work);
+
+ mmc_claim_host(host);
+
+ if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
+ sdio_disable_4bit_bus(host->card);
+
+ if (!mmc_card_keep_power(host)) {
+ mmc_power_off(host);
+ } else if (host->retune_period) {
+ mmc_retune_timer_stop(host);
+ mmc_retune_needed(host);
+ }
+
+ mmc_release_host(host);
+
+ return 0;
+}
+
+static int mmc_sdio_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ /* Basic card reinitialization. */
+ mmc_claim_host(host);
+
+ /*
+ * Restore power and reinitialize the card when needed. Note that a
+ * removable card is checked from a detect work later on in the resume
+ * process.
+ */
+ if (!mmc_card_keep_power(host)) {
+ mmc_power_up(host, host->card->ocr);
+ /*
+ * Tell runtime PM core we just powered up the card,
+ * since it still believes the card is powered off.
+ * Note that currently runtime PM is only enabled
+ * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_enable(&host->card->dev);
+ }
+ err = mmc_sdio_reinit_card(host);
+ } else if (mmc_card_wake_sdio_irq(host)) {
+ /*
+ * We may have switched to 1-bit mode during suspend; hold
+ * retuning while restoring the bus width, since tuning is only
+ * supported in 4-bit or 8-bit mode.
+ */
+ mmc_retune_hold_now(host);
+ err = sdio_enable_4bit_bus(host->card);
+ mmc_retune_release(host);
+ }
+
+ if (err)
+ goto out;
+
+ /* Allow SDIO IRQs to be processed again. */
+ mmc_card_clr_suspended(host->card);
+
+ if (host->sdio_irqs) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
+ wake_up_process(host->sdio_irq_thread);
+ else if (host->caps & MMC_CAP_SDIO_IRQ)
+ queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ }
+
+out:
+ mmc_release_host(host);
+
+ host->pm_flags &= ~MMC_PM_KEEP_POWER;
+ return err;
+}
+
+static int mmc_sdio_runtime_suspend(struct mmc_host *host)
+{
+ /* No references to the card, cut the power to it. */
+ mmc_claim_host(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+
+ return 0;
+}
+
+static int mmc_sdio_runtime_resume(struct mmc_host *host)
+{
+ int ret;
+
+ /* Restore power and re-initialize. */
+ mmc_claim_host(host);
+ mmc_power_up(host, host->card->ocr);
+ ret = mmc_sdio_reinit_card(host);
+ mmc_release_host(host);
+
+ return ret;
+}
+
+/*
+ * SDIO HW reset
+ *
+ * Returns 0 if the HW reset was executed synchronously, returns 1 if the HW
+ * reset was asynchronously scheduled, else a negative error code.
+ */
+static int mmc_sdio_hw_reset(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ /*
+ * In case the card is shared among multiple func drivers, reset the
+ * card through a rescan work. In this way it will be removed and
+ * re-detected, so that all func drivers become informed about it.
+ */
+ if (atomic_read(&card->sdio_funcs_probed) > 1) {
+ if (mmc_card_removed(card))
+ return 1;
+ host->rescan_entered = 0;
+ mmc_card_set_removed(card);
+ _mmc_detect_change(host, 0, false);
+ return 1;
+ }
+
+ /*
+ * A single func driver has been probed, then let's skip the heavy
+ * hotplug dance above and execute the reset immediately.
+ */
+ mmc_power_cycle(host, card->ocr);
+ return mmc_sdio_reinit_card(host);
+}
+
+static int mmc_sdio_sw_reset(struct mmc_host *host)
+{
+ mmc_set_clock(host, host->f_init);
+ sdio_reset(host);
+ mmc_go_idle(host);
+
+ mmc_set_initial_state(host);
+ mmc_set_initial_signal_voltage(host);
+
+ return mmc_sdio_reinit_card(host);
+}
+
+static const struct mmc_bus_ops mmc_sdio_ops = {
+ .remove = mmc_sdio_remove,
+ .detect = mmc_sdio_detect,
+ .pre_suspend = mmc_sdio_pre_suspend,
+ .suspend = mmc_sdio_suspend,
+ .resume = mmc_sdio_resume,
+ .runtime_suspend = mmc_sdio_runtime_suspend,
+ .runtime_resume = mmc_sdio_runtime_resume,
+ .alive = mmc_sdio_alive,
+ .hw_reset = mmc_sdio_hw_reset,
+ .sw_reset = mmc_sdio_sw_reset,
+};
+
+/*
+ * Starting point for SDIO card init.
+ */
+int mmc_attach_sdio(struct mmc_host *host)
+{
+ int err, i, funcs;
+ u32 ocr, rocr;
+ struct mmc_card *card;
+
+ WARN_ON(!host->claimed);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ mmc_attach_bus(host, &mmc_sdio_ops);
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
+
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Can we support the voltage(s) of the card(s)?
+ */
+ if (!rocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Detect and init the card.
+ */
+ err = mmc_sdio_init_card(host, rocr, NULL);
+ if (err)
+ goto err;
+
+ card = host->card;
+
+ /*
+ * Enable runtime PM only if supported by host+card+board
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ /*
+ * Do not allow runtime suspend until after SDIO function
+ * devices are added.
+ */
+ pm_runtime_get_noresume(&card->dev);
+
+ /*
+ * Let runtime PM core know our card is active
+ */
+ err = pm_runtime_set_active(&card->dev);
+ if (err)
+ goto remove;
+
+ /*
+ * Enable runtime PM for this card
+ */
+ pm_runtime_enable(&card->dev);
+ }
+
+ /*
+ * The number of functions on the card is encoded inside
+ * the ocr.
+ */
+ funcs = (ocr & 0x70000000) >> 28;
+ card->sdio_funcs = 0;
+
+ /*
+ * Initialize (but don't add) all present functions.
+ */
+ for (i = 0; i < funcs; i++, card->sdio_funcs++) {
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+
+ /*
+ * Enable Runtime PM for this func (if supported)
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_enable(&card->sdio_func[i]->dev);
+ }
+
+ /*
+ * First add the card to the driver model...
+ */
+ mmc_release_host(host);
+ err = mmc_add_card(host->card);
+ if (err)
+ goto remove_added;
+
+ /*
+ * ...then the SDIO functions.
+ */
+ for (i = 0; i < funcs; i++) {
+ err = sdio_add_func(host->card->sdio_func[i]);
+ if (err)
+ goto remove_added;
+ }
+
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put(&card->dev);
+
+ mmc_claim_host(host);
+ return 0;
+
+remove:
+ mmc_release_host(host);
+remove_added:
+ /*
+ * The devices are being deleted so it is not necessary to disable
+ * runtime PM. Similarly we also don't pm_runtime_put() the SDIO card
+ * because it needs to be active to remove any function devices that
+ * were probed, and after that it gets deleted.
+ */
+ mmc_sdio_remove(host);
+ mmc_claim_host(host);
+err:
+ mmc_detach_bus(host);
+
+ pr_err("%s: error %d whilst initialising SDIO card\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
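
mmc_attach_sdio() pulls several fields out of the R4 (IO_SEND_OP_COND) response: the ready bit, the number of I/O functions in bits 30:28, the memory-present bit used earlier for combo-card detection, and the I/O voltage window. A standalone sketch with a made-up response word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r4 = 0xA0FF8000;	/* made-up IO_SEND_OP_COND reply */

	int ready = (r4 >> 31) & 1;		/* C: card is ready */
	int funcs = (r4 & 0x70000000) >> 28;	/* number of I/O functions */
	int mem = !!(r4 & 0x08000000);		/* memory present (combo) */
	unsigned int io_ocr = r4 & 0x00FFFFFF;	/* I/O voltage window */

	printf("ready=%d funcs=%d mem=%d ocr=0x%06x\n",
	       ready, funcs, mem, io_ocr);
	return 0;
}
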
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
new file mode 100644
index 000000000..89dd49260
--- /dev/null
+++ b/drivers/mmc/core/sdio_bus.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/sdio_bus.c
+ *
+ * Copyright 2007 Pierre Ossman
+ *
+ * SDIO function driver model
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/acpi.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/of.h>
+
+#include "core.h"
+#include "card.h"
+#include "sdio_cis.h"
+#include "sdio_bus.h"
+
+#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
+
+/* show configuration fields */
+#define sdio_config_attr(field, format_string, args...) \
+static ssize_t \
+field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct sdio_func *func; \
+ \
+ func = dev_to_sdio_func(dev); \
+ return sprintf(buf, format_string, args); \
+} \
+static DEVICE_ATTR_RO(field)
+
+sdio_config_attr(class, "0x%02x\n", func->class);
+sdio_config_attr(vendor, "0x%04x\n", func->vendor);
+sdio_config_attr(device, "0x%04x\n", func->device);
+sdio_config_attr(revision, "%u.%u\n", func->major_rev, func->minor_rev);
+sdio_config_attr(modalias, "sdio:c%02Xv%04Xd%04X\n", func->class, func->vendor, func->device);
+
+#define sdio_info_attr(num) \
+static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct sdio_func *func = dev_to_sdio_func(dev); \
+ \
+ if (num > func->num_info) \
+ return -ENODATA; \
+ if (!func->info[num-1][0]) \
+ return 0; \
+ return sprintf(buf, "%s\n", func->info[num-1]); \
+} \
+static DEVICE_ATTR_RO(info##num)
+
+sdio_info_attr(1);
+sdio_info_attr(2);
+sdio_info_attr(3);
+sdio_info_attr(4);
+
+static struct attribute *sdio_dev_attrs[] = {
+ &dev_attr_class.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_info1.attr,
+ &dev_attr_info2.attr,
+ &dev_attr_info3.attr,
+ &dev_attr_info4.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sdio_dev);
+
+static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ if (id->class != (__u8)SDIO_ANY_ID && id->class != func->class)
+ return NULL;
+ if (id->vendor != (__u16)SDIO_ANY_ID && id->vendor != func->vendor)
+ return NULL;
+ if (id->device != (__u16)SDIO_ANY_ID && id->device != func->device)
+ return NULL;
+ return id;
+}
+
+static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
+ struct sdio_driver *sdrv)
+{
+ const struct sdio_device_id *ids;
+
+ ids = sdrv->id_table;
+
+ if (ids) {
+ while (ids->class || ids->vendor || ids->device) {
+ if (sdio_match_one(func, ids))
+ return ids;
+ ids++;
+ }
+ }
+
+ return NULL;
+}
+
+static int sdio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct sdio_driver *sdrv = to_sdio_driver(drv);
+
+ if (sdio_match_device(func, sdrv))
+ return 1;
+
+ return 0;
+}
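
Matching walks the driver's id_table until the all-zero terminator, treating SDIO_ANY_ID as a wildcard in each field. A sketch of how a function driver might populate such a table (the vendor/device IDs here are made up; real drivers use constants from <linux/mmc/sdio_ids.h>):

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static const struct sdio_device_id example_ids[] = {
	{ SDIO_DEVICE(0x1234, 0x5678) },	/* one vendor/device pair */
	{ SDIO_DEVICE_CLASS(SDIO_CLASS_WLAN) },	/* any WLAN function */
	{ /* all-zero terminator, required by sdio_match_device() */ },
};
MODULE_DEVICE_TABLE(sdio, example_ids);
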
+
+static int
+sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ unsigned int i;
+
+ if (add_uevent_var(env,
+ "SDIO_CLASS=%02X", func->class))
+ return -ENOMEM;
+
+ if (add_uevent_var(env,
+ "SDIO_ID=%04X:%04X", func->vendor, func->device))
+ return -ENOMEM;
+
+ if (add_uevent_var(env,
+ "SDIO_REVISION=%u.%u", func->major_rev, func->minor_rev))
+ return -ENOMEM;
+
+ for (i = 0; i < func->num_info; i++) {
+ if (add_uevent_var(env, "SDIO_INFO%u=%s", i+1, func->info[i]))
+ return -ENOMEM;
+ }
+
+ if (add_uevent_var(env,
+ "MODALIAS=sdio:c%02Xv%04Xd%04X",
+ func->class, func->vendor, func->device))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sdio_bus_probe(struct device *dev)
+{
+ struct sdio_driver *drv = to_sdio_driver(dev->driver);
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ const struct sdio_device_id *id;
+ int ret;
+
+ id = sdio_match_device(func, drv);
+ if (!id)
+ return -ENODEV;
+
+ ret = dev_pm_domain_attach(dev, false);
+ if (ret)
+ return ret;
+
+ atomic_inc(&func->card->sdio_funcs_probed);
+
+ /* Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto disable_runtimepm;
+ }
+
+ /* Set the default block size so the driver is sure it's something
+ * sensible. */
+ sdio_claim_host(func);
+ if (mmc_card_removed(func->card))
+ ret = -ENOMEDIUM;
+ else
+ ret = sdio_set_block_size(func, 0);
+ sdio_release_host(func);
+ if (ret)
+ goto disable_runtimepm;
+
+ ret = drv->probe(func, id);
+ if (ret)
+ goto disable_runtimepm;
+
+ return 0;
+
+disable_runtimepm:
+ atomic_dec(&func->card->sdio_funcs_probed);
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+ dev_pm_domain_detach(dev, false);
+ return ret;
+}
+
+static int sdio_bus_remove(struct device *dev)
+{
+ struct sdio_driver *drv = to_sdio_driver(dev->driver);
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ /* Make sure card is powered before invoking ->remove() */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_get_sync(dev);
+
+ drv->remove(func);
+ atomic_dec(&func->card->sdio_funcs_probed);
+
+ if (func->irq_handler) {
+ pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
+ drv->name);
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+ }
+
+ /* First, undo the increment made directly above */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+
+ /* Then undo the runtime PM settings in sdio_bus_probe() */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_sync(dev);
+
+ dev_pm_domain_detach(dev, false);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sdio_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ NULL
+ )
+};
+
+static struct bus_type sdio_bus_type = {
+ .name = "sdio",
+ .dev_groups = sdio_dev_groups,
+ .match = sdio_bus_match,
+ .uevent = sdio_bus_uevent,
+ .probe = sdio_bus_probe,
+ .remove = sdio_bus_remove,
+ .pm = &sdio_bus_pm_ops,
+};
+
+int sdio_register_bus(void)
+{
+ return bus_register(&sdio_bus_type);
+}
+
+void sdio_unregister_bus(void)
+{
+ bus_unregister(&sdio_bus_type);
+}
+
+/**
+ * sdio_register_driver - register a function driver
+ * @drv: SDIO function driver
+ */
+int sdio_register_driver(struct sdio_driver *drv)
+{
+ drv->drv.name = drv->name;
+ drv->drv.bus = &sdio_bus_type;
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(sdio_register_driver);
+
+/**
+ * sdio_unregister_driver - unregister a function driver
+ * @drv: SDIO function driver
+ */
+void sdio_unregister_driver(struct sdio_driver *drv)
+{
+ drv->drv.bus = &sdio_bus_type;
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(sdio_unregister_driver);
+
+static void sdio_release_func(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+ sdio_free_func_cis(func);
+
+ /*
+ * We have now removed the link to the tuples in the
+ * card structure, so remove the reference.
+ */
+ put_device(&func->card->dev);
+
+ kfree(func->info);
+ kfree(func->tmpbuf);
+ kfree(func);
+}
+
+/*
+ * Allocate and initialise a new SDIO function structure.
+ */
+struct sdio_func *sdio_alloc_func(struct mmc_card *card)
+{
+ struct sdio_func *func;
+
+ func = kzalloc(sizeof(struct sdio_func), GFP_KERNEL);
+ if (!func)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * allocate buffer separately to make sure it's properly aligned for
+ * DMA usage (incl. 64 bit DMA)
+ */
+ func->tmpbuf = kmalloc(4, GFP_KERNEL);
+ if (!func->tmpbuf) {
+ kfree(func);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ func->card = card;
+
+ device_initialize(&func->dev);
+
+ /*
+ * We may link to tuples in the card structure,
+ * so make sure we hold a reference to it.
+ */
+ get_device(&func->card->dev);
+
+ func->dev.parent = &card->dev;
+ func->dev.bus = &sdio_bus_type;
+ func->dev.release = sdio_release_func;
+
+ return func;
+}
+
+#ifdef CONFIG_ACPI
+static void sdio_acpi_set_handle(struct sdio_func *func)
+{
+ struct mmc_host *host = func->card->host;
+ u64 addr = ((u64)host->slotno << 16) | func->num;
+
+ acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr);
+}
+#else
+static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
+#endif
+
+static void sdio_set_of_node(struct sdio_func *func)
+{
+ struct mmc_host *host = func->card->host;
+
+ func->dev.of_node = mmc_of_find_child_device(host, func->num);
+}
+
+/*
+ * Register a new SDIO function with the driver model.
+ */
+int sdio_add_func(struct sdio_func *func)
+{
+ int ret;
+
+ dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
+
+ sdio_set_of_node(func);
+ sdio_acpi_set_handle(func);
+ device_enable_async_suspend(&func->dev);
+ ret = device_add(&func->dev);
+ if (ret == 0)
+ sdio_func_set_present(func);
+
+ return ret;
+}
+
+/*
+ * Unregister a SDIO function with the driver model, and
+ * (eventually) free it.
+ * This function can be called through error paths where sdio_add_func() was
+ * never executed (because a failure occurred at an earlier point).
+ */
+void sdio_remove_func(struct sdio_func *func)
+{
+ if (sdio_func_present(func))
+ device_del(&func->dev);
+
+ of_node_put(func->dev.of_node);
+ put_device(&func->dev);
+}
diff --git a/drivers/mmc/core/sdio_bus.h b/drivers/mmc/core/sdio_bus.h
new file mode 100644
index 000000000..27b8069a7
--- /dev/null
+++ b/drivers/mmc/core/sdio_bus.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/drivers/mmc/core/sdio_bus.h
+ *
+ * Copyright 2007 Pierre Ossman
+ */
+#ifndef _MMC_CORE_SDIO_BUS_H
+#define _MMC_CORE_SDIO_BUS_H
+
+struct mmc_card;
+struct sdio_func;
+
+struct sdio_func *sdio_alloc_func(struct mmc_card *card);
+int sdio_add_func(struct sdio_func *func);
+void sdio_remove_func(struct sdio_func *func);
+
+int sdio_register_bus(void);
+void sdio_unregister_bus(void);
+
+#endif
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
new file mode 100644
index 000000000..ce524f7e1
--- /dev/null
+++ b/drivers/mmc/core/sdio_cis.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/sdio_cis.c
+ *
+ * Author: Nicolas Pitre
+ * Created: June 11, 2007
+ * Copyright: MontaVista Software Inc.
+ *
+ * Copyright 2007 Pierre Ossman
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "sdio_cis.h"
+#include "sdio_ops.h"
+
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
+
+static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ u8 major_rev, minor_rev;
+ unsigned i, nr_strings;
+ char **buffer, *string;
+
+ if (size < 2)
+ return 0;
+
+ major_rev = buf[0];
+ minor_rev = buf[1];
+
+ /*
+ * Find all null-terminated (including zero-length) strings in
+ * the TPLLV1_INFO field. Trailing garbage is ignored.
+ */
+ buf += 2;
+ size -= 2;
+
+ nr_strings = 0;
+ for (i = 0; i < size; i++) {
+ if (buf[i] == 0xff)
+ break;
+ if (buf[i] == 0)
+ nr_strings++;
+ }
+ if (nr_strings == 0)
+ return 0;
+
+ size = i;
+
+ buffer = kzalloc(sizeof(char *) * nr_strings + size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ string = (char *)(buffer + nr_strings);
+
+ for (i = 0; i < nr_strings; i++) {
+ buffer[i] = string;
+ strcpy(string, buf);
+ string += strlen(string) + 1;
+ buf += strlen(buf) + 1;
+ }
+
+ if (func) {
+ func->major_rev = major_rev;
+ func->minor_rev = minor_rev;
+ func->num_info = nr_strings;
+ func->info = (const char **)buffer;
+ } else {
+ card->major_rev = major_rev;
+ card->minor_rev = minor_rev;
+ card->num_info = nr_strings;
+ card->info = (const char **)buffer;
+ }
+
+ return 0;
+}
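
cistpl_vers_1() uses a single allocation to hold both the pointer array and the string data it points into, so one kfree() in sdio_release_func() frees everything. A standalone user-space sketch of the same pattern, with invented string data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const unsigned char buf[] = "Vendor\0Product\0";	/* two strings */
	unsigned int size = sizeof(buf) - 1;
	unsigned int i, nr_strings = 0;

	for (i = 0; i < size; i++)
		if (buf[i] == 0)
			nr_strings++;

	/* one block: pointer array first, then the string bytes */
	char **info = calloc(1, nr_strings * sizeof(char *) + size);
	if (!info)
		return 1;

	char *string = (char *)(info + nr_strings);
	const unsigned char *p = buf;
	for (i = 0; i < nr_strings; i++) {
		info[i] = string;
		strcpy(string, (const char *)p);
		string += strlen(string) + 1;
		p += strlen((const char *)p) + 1;
	}

	for (i = 0; i < nr_strings; i++)
		printf("info[%u] = \"%s\"\n", i, info[i]);
	free(info);	/* frees pointers and string data together */
	return 0;
}
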
+
+static int cistpl_manfid(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ unsigned int vendor, device;
+
+ /* TPLMID_MANF */
+ vendor = buf[0] | (buf[1] << 8);
+
+ /* TPLMID_CARD */
+ device = buf[2] | (buf[3] << 8);
+
+ if (func) {
+ func->vendor = vendor;
+ func->device = device;
+ } else {
+ card->cis.vendor = vendor;
+ card->cis.device = device;
+ }
+
+ return 0;
+}
+
+static const unsigned char speed_val[16] =
+ { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
+static const unsigned int speed_unit[8] =
+ { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
+
+typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
+ const unsigned char *, unsigned);
+
+struct cis_tpl {
+ unsigned char code;
+ unsigned char min_size;
+ tpl_parse_t *parse;
+};
+
+static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
+ const char *tpl_descr,
+ const struct cis_tpl *tpl, int tpl_count,
+ unsigned char code,
+ const unsigned char *buf, unsigned size)
+{
+ int i, ret;
+
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if (tpl->code == code)
+ break;
+ }
+ if (i < tpl_count) {
+ if (size >= tpl->min_size) {
+ if (tpl->parse)
+ ret = tpl->parse(card, func, buf, size);
+ else
+ ret = -EILSEQ; /* known tuple, not parsed */
+ } else {
+ /* invalid tuple */
+ ret = -EINVAL;
+ }
+ if (ret && ret != -EILSEQ && ret != -ENOENT) {
+ pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host), tpl_descr, code, size);
+ }
+ } else {
+ /* unknown tuple */
+ ret = -ENOENT;
+ }
+
+ return ret;
+}
+
+static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ /* Only valid for the common CIS (function 0) */
+ if (func)
+ return -EINVAL;
+
+ /* TPLFE_FN0_BLK_SIZE */
+ card->cis.blksize = buf[1] | (buf[2] << 8);
+
+ /* TPLFE_MAX_TRAN_SPEED */
+ card->cis.max_dtr = speed_val[(buf[3] >> 3) & 15] *
+ speed_unit[buf[3] & 7];
+
+ return 0;
+}
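
The TPLFE_MAX_TRAN_SPEED byte decoded above is a mantissa/exponent pair: bits 6:3 index speed_val and bits 2:0 index speed_unit. A standalone sketch using the same tables; 0x32 (25 Mbit/s) is a value commonly seen on full-speed cards, though the sample here is only illustrative:

#include <stdio.h>

static const unsigned char speed_val[16] =
	{ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
static const unsigned int speed_unit[8] =
	{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };

int main(void)
{
	unsigned char tran_speed = 0x32;
	unsigned int max_dtr = speed_val[(tran_speed >> 3) & 15] *
			       speed_unit[tran_speed & 7];

	printf("max_dtr = %u Hz\n", max_dtr);	/* 25000000 */
	return 0;
}
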
+
+static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ unsigned vsn;
+ unsigned min_size;
+
+ /* Only valid for the individual function's CIS (1-7) */
+ if (!func)
+ return -EINVAL;
+
+ /*
+ * This tuple has a different length depending on the SDIO spec
+ * version.
+ */
+ vsn = func->card->cccr.sdio_vsn;
+ min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
+
+ if (size == 28 && vsn == SDIO_SDIO_REV_1_10) {
+ pr_warn("%s: card has broken SDIO 1.1 CIS, forcing SDIO 1.0\n",
+ mmc_hostname(card->host));
+ vsn = SDIO_SDIO_REV_1_00;
+ } else if (size < min_size) {
+ return -EINVAL;
+ }
+
+ /* TPLFE_MAX_BLK_SIZE */
+ func->max_blksize = buf[12] | (buf[13] << 8);
+
+ /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */
+ if (vsn > SDIO_SDIO_REV_1_00)
+ func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10;
+ else
+ func->enable_timeout = jiffies_to_msecs(HZ);
+
+ return 0;
+}
+
+/*
+ * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
+ *
+ * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
+ * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID as on SDIO
+ * TPLFID_FUNCTION is always hardcoded to 0x0C.
+ */
+static const struct cis_tpl cis_tpl_funce_list[] = {
+ { 0x00, 4, cistpl_funce_common },
+ { 0x01, 0, cistpl_funce_func },
+ { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
+};
+
+static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ if (size < 1)
+ return -EINVAL;
+
+ return cis_tpl_parse(card, func, "CISTPL_FUNCE",
+ cis_tpl_funce_list,
+ ARRAY_SIZE(cis_tpl_funce_list),
+ buf[0], buf, size);
+}
+
+/* Known TPL_CODEs table for CIS tuples */
+static const struct cis_tpl cis_tpl_list[] = {
+ { 0x15, 3, cistpl_vers_1 },
+ { 0x20, 4, cistpl_manfid },
+ { 0x21, 2, /* cistpl_funcid */ },
+ { 0x22, 0, cistpl_funce },
+ { 0x91, 2, /* cistpl_sdio_std */ },
+};
+
+static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+{
+ int ret;
+ struct sdio_func_tuple *this, **prev;
+ unsigned i, ptr = 0;
+
+ /*
+ * Note that this works for the common CIS (function number 0) as
+ * well as a function's CIS, since SDIO_CCCR_CIS and SDIO_FBR_CIS
+ * have the same offset.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned char x, fn;
+
+ if (func)
+ fn = func->num;
+ else
+ fn = 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
+ if (ret)
+ return ret;
+ ptr |= x << (i * 8);
+ }
+
+ if (func)
+ prev = &func->tuples;
+ else
+ prev = &card->tuples;
+
+ if (*prev)
+ return -EINVAL;
+
+ do {
+ unsigned char tpl_code, tpl_link;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
+ if (ret)
+ break;
+
+ /* 0xff means we're done */
+ if (tpl_code == 0xff)
+ break;
+
+ /* null entries have no link field or data */
+ if (tpl_code == 0x00)
+ continue;
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
+ if (ret)
+ break;
+
+ /* a size of 0xff also means we're done */
+ if (tpl_link == 0xff)
+ break;
+
+ this = kmalloc(sizeof(*this) + tpl_link, GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ for (i = 0; i < tpl_link; i++) {
+ ret = mmc_io_rw_direct(card, 0, 0,
+ ptr + i, 0, &this->data[i]);
+ if (ret)
+ break;
+ }
+ if (ret) {
+ kfree(this);
+ break;
+ }
+
+ /* Try to parse the CIS tuple */
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
+ tpl_code, this->data, tpl_link);
+ if (ret == -EILSEQ || ret == -ENOENT) {
+ /*
+ * The tuple is unknown or known but not parsed.
+ * Queue the tuple for the function driver.
+ */
+ this->next = NULL;
+ this->code = tpl_code;
+ this->size = tpl_link;
+ *prev = this;
+ prev = &this->next;
+
+ if (ret == -ENOENT) {
+ if (time_after(jiffies, timeout))
+ break;
+ /* warn about unknown tuples */
+ pr_warn_ratelimited("%s: queuing unknown CIS tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host),
+ tpl_code, tpl_link);
+ }
+
+ /* keep on analyzing tuples */
+ ret = 0;
+ } else {
+ /*
+ * We don't need the tuple anymore if it was
+ * successfully parsed by the SDIO core or if it is
+ * not going to be queued for a driver.
+ */
+ kfree(this);
+ }
+
+ ptr += tpl_link;
+ } while (!ret);
+
+ /*
+ * Link in all unknown tuples found in the common CIS so that
+ * drivers don't have to go digging in two places.
+ */
+ if (func)
+ *prev = card->tuples;
+
+ return ret;
+}
+
+int sdio_read_common_cis(struct mmc_card *card)
+{
+ return sdio_read_cis(card, NULL);
+}
+
+void sdio_free_common_cis(struct mmc_card *card)
+{
+ struct sdio_func_tuple *tuple, *victim;
+
+ tuple = card->tuples;
+
+ while (tuple) {
+ victim = tuple;
+ tuple = tuple->next;
+ kfree(victim);
+ }
+
+ card->tuples = NULL;
+}
+
+int sdio_read_func_cis(struct sdio_func *func)
+{
+ int ret;
+
+ ret = sdio_read_cis(func->card, func);
+ if (ret)
+ return ret;
+
+ /*
+ * Vendor/device id is optional for function CIS, so
+ * copy it from the card structure as needed.
+ */
+ if (func->vendor == 0) {
+ func->vendor = func->card->cis.vendor;
+ func->device = func->card->cis.device;
+ }
+
+ return 0;
+}
+
+void sdio_free_func_cis(struct sdio_func *func)
+{
+ struct sdio_func_tuple *tuple, *victim;
+
+ tuple = func->tuples;
+
+ while (tuple && tuple != func->card->tuples) {
+ victim = tuple;
+ tuple = tuple->next;
+ kfree(victim);
+ }
+
+ func->tuples = NULL;
+}
diff --git a/drivers/mmc/core/sdio_cis.h b/drivers/mmc/core/sdio_cis.h
new file mode 100644
index 000000000..6d76f6fa6
--- /dev/null
+++ b/drivers/mmc/core/sdio_cis.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/drivers/mmc/core/sdio_cis.h
+ *
+ * Author: Nicolas Pitre
+ * Created: June 11, 2007
+ * Copyright: MontaVista Software Inc.
+ */
+
+#ifndef _MMC_SDIO_CIS_H
+#define _MMC_SDIO_CIS_H
+
+struct mmc_card;
+struct sdio_func;
+
+int sdio_read_common_cis(struct mmc_card *card);
+void sdio_free_common_cis(struct mmc_card *card);
+
+int sdio_read_func_cis(struct sdio_func *func);
+void sdio_free_func_cis(struct sdio_func *func);
+
+#endif
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
new file mode 100644
index 000000000..79dbf9021
--- /dev/null
+++ b/drivers/mmc/core/sdio_io.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/sdio_io.c
+ *
+ * Copyright 2007-2008 Pierre Ossman
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "sdio_ops.h"
+#include "core.h"
+#include "card.h"
+#include "host.h"
+
+/**
+ * sdio_claim_host - exclusively claim a bus for a certain SDIO function
+ * @func: SDIO function that will be accessed
+ *
+ * Claim a bus for a set of operations. The SDIO function given
+ * is used to figure out which bus is relevant.
+ */
+void sdio_claim_host(struct sdio_func *func)
+{
+ if (WARN_ON(!func))
+ return;
+
+ mmc_claim_host(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_claim_host);
+
+/**
+ * sdio_release_host - release a bus for a certain SDIO function
+ * @func: SDIO function that was accessed
+ *
+ * Release a bus, allowing others to claim the bus for their
+ * operations.
+ */
+void sdio_release_host(struct sdio_func *func)
+{
+ if (WARN_ON(!func))
+ return;
+
+ mmc_release_host(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_release_host);
+
+/**
+ * sdio_enable_func - enables a SDIO function for usage
+ * @func: SDIO function to enable
+ *
+ * Powers up and activates a SDIO function so that register
+ * access is possible.
+ */
+int sdio_enable_func(struct sdio_func *func)
+{
+ int ret;
+ unsigned char reg;
+ unsigned long timeout;
+
+ if (!func)
+ return -EINVAL;
+
+ pr_debug("SDIO: Enabling device %s...\n", sdio_func_id(func));
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IOEx, 0, &reg);
+ if (ret)
+ goto err;
+
+ reg |= 1 << func->num;
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IOEx, reg, NULL);
+ if (ret)
+ goto err;
+
+ timeout = jiffies + msecs_to_jiffies(func->enable_timeout);
+
+ while (1) {
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg);
+ if (ret)
+ goto err;
+ if (reg & (1 << func->num))
+ break;
+ ret = -ETIME;
+ if (time_after(jiffies, timeout))
+ goto err;
+ }
+
+ pr_debug("SDIO: Enabled device %s\n", sdio_func_id(func));
+
+ return 0;
+
+err:
+ pr_debug("SDIO: Failed to enable device %s\n", sdio_func_id(func));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdio_enable_func);
+
+/**
+ * sdio_disable_func - disable a SDIO function
+ * @func: SDIO function to disable
+ *
+ * Powers down and deactivates a SDIO function. Register access
+ * to this function will fail until the function is reenabled.
+ */
+int sdio_disable_func(struct sdio_func *func)
+{
+ int ret;
+ unsigned char reg;
+
+ if (!func)
+ return -EINVAL;
+
+ pr_debug("SDIO: Disabling device %s...\n", sdio_func_id(func));
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IOEx, 0, &reg);
+ if (ret)
+ goto err;
+
+ reg &= ~(1 << func->num);
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IOEx, reg, NULL);
+ if (ret)
+ goto err;
+
+ pr_debug("SDIO: Disabled device %s\n", sdio_func_id(func));
+
+ return 0;
+
+err:
+ pr_debug("SDIO: Failed to disable device %s\n", sdio_func_id(func));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdio_disable_func);
+
+/**
+ * sdio_set_block_size - set the block size of an SDIO function
+ * @func: SDIO function to change
+ * @blksz: new block size or 0 to use the default.
+ *
+ * The default block size is the largest supported by both the function
+ * and the host, with a maximum of 512 to ensure that arbitrarily sized
+ * data transfers use the optimal (least) number of commands.
+ *
+ * A driver may call this to override the default block size set by the
+ * core. This can be used to set a block size greater than the maximum
+ * reported by the card; it is the driver's responsibility to ensure
+ * that it uses a value the card supports.
+ *
+ * Returns 0 on success, -EINVAL if the host does not support the
+ * requested block size, or -EIO (etc.) if one of the resultant FBR block
+ * size register writes failed.
+ */
+int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
+{
+ int ret;
+
+ if (blksz > func->card->host->max_blk_size)
+ return -EINVAL;
+
+ if (blksz == 0) {
+ blksz = min(func->max_blksize, func->card->host->max_blk_size);
+ blksz = min(blksz, 512u);
+ }
+
+ ret = mmc_io_rw_direct(func->card, 1, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE,
+ blksz & 0xff, NULL);
+ if (ret)
+ return ret;
+ ret = mmc_io_rw_direct(func->card, 1, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE + 1,
+ (blksz >> 8) & 0xff, NULL);
+ if (ret)
+ return ret;
+ func->cur_blksize = blksz;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_set_block_size);
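+
+/*
+ * Usage sketch (illustrative only; the value 256 is an arbitrary
+ * example, and 0 would select the default): drivers typically set the
+ * block size right after enabling the function, with the host claimed:
+ *
+ *	sdio_claim_host(func);
+ *	ret = sdio_enable_func(func);
+ *	if (!ret)
+ *		ret = sdio_set_block_size(func, 256);
+ *	sdio_release_host(func);
+ */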
+
+/*
+ * Calculate the maximum byte mode transfer size
+ */
+static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
+{
+ unsigned mval = func->card->host->max_blk_size;
+
+ if (mmc_blksz_for_byte_mode(func->card))
+ mval = min(mval, func->cur_blksize);
+ else
+ mval = min(mval, func->max_blksize);
+
+ if (mmc_card_broken_byte_mode_512(func->card))
+ return min(mval, 511u);
+
+ return min(mval, 512u); /* maximum size for byte mode */
+}
+
+/*
+ * This is legacy code, which needs to be re-worked some day. Basically we need
+ * to take into account the properties of the host, so as to enable the SDIO func
+ * driver layer to allocate optimal buffers.
+ */
+static inline unsigned int _sdio_align_size(unsigned int sz)
+{
+ /*
+ * FIXME: We don't have a system for the controller to tell
+ * the core about its problems yet, so for now we just 32-bit
+ * align the size.
+ */
+ return ALIGN(sz, 4);
+}
+
+/**
+ * sdio_align_size - pads a transfer size to a more optimal value
+ * @func: SDIO function
+ * @sz: original transfer size
+ *
+ * Pads the original data size with a number of extra bytes in
+ * order to avoid controller bugs and/or performance hits
+ * (e.g. some controllers revert to PIO for certain sizes).
+ *
+ * If possible, it will also adjust the size so that it can be
+ * handled in just a single request.
+ *
+ * Returns the improved size, which might be unmodified.
+ */
+unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
+{
+ unsigned int orig_sz;
+ unsigned int blk_sz, byte_sz;
+ unsigned chunk_sz;
+
+ orig_sz = sz;
+
+ /*
+ * Do a first check with the controller, in case it
+ * wants to increase the size up to a point where it
+ * might need more than one block.
+ */
+ sz = _sdio_align_size(sz);
+
+ /*
+ * If we can still do this with just a byte transfer, then
+ * we're done.
+ */
+ if (sz <= sdio_max_byte_size(func))
+ return sz;
+
+ if (func->card->cccr.multi_block) {
+ /*
+ * Check if the transfer is already block aligned
+ */
+ if ((sz % func->cur_blksize) == 0)
+ return sz;
+
+ /*
+ * Realign it so that it can be done with one request,
+ * and recheck if the controller still likes it.
+ */
+ blk_sz = ((sz + func->cur_blksize - 1) /
+ func->cur_blksize) * func->cur_blksize;
+ blk_sz = _sdio_align_size(blk_sz);
+
+ /*
+ * This value is only good if it is still just
+ * one request.
+ */
+ if ((blk_sz % func->cur_blksize) == 0)
+ return blk_sz;
+
+ /*
+ * We failed to do one request, but at least try to
+ * pad the remainder properly.
+ */
+ byte_sz = _sdio_align_size(sz % func->cur_blksize);
+ if (byte_sz <= sdio_max_byte_size(func)) {
+ blk_sz = sz / func->cur_blksize;
+ return blk_sz * func->cur_blksize + byte_sz;
+ }
+ } else {
+ /*
+ * We need multiple requests, so first check that the
+ * controller can handle the chunk size.
+ */
+ chunk_sz = _sdio_align_size(sdio_max_byte_size(func));
+ if (chunk_sz == sdio_max_byte_size(func)) {
+ /*
+ * Fix up the size of the remainder (if any)
+ */
+ byte_sz = orig_sz % chunk_sz;
+ if (byte_sz)
+ byte_sz = _sdio_align_size(byte_sz);
+
+ return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
+ }
+ }
+
+ /*
+ * The controller is simply incapable of transferring the size
+ * we want in a decent manner, so just return the original size.
+ */
+ return orig_sz;
+}
+EXPORT_SYMBOL_GPL(sdio_align_size);
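+
+/*
+ * Usage sketch (illustrative only; len, addr and the error handling are
+ * hypothetical): a driver would size its bounce buffer with the aligned
+ * value and transfer that much:
+ *
+ *	size = sdio_align_size(func, len);
+ *	buf = kmalloc(size, GFP_KERNEL);
+ *	if (buf)
+ *		ret = sdio_memcpy_fromio(func, buf, addr, size);
+ */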
+
+/*
+ * Split an arbitrarily sized data transfer into several
+ * IO_RW_EXTENDED commands.
+ */
+static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
+ unsigned addr, int incr_addr, u8 *buf, unsigned size)
+{
+ unsigned remainder = size;
+ unsigned max_blocks;
+ int ret;
+
+ if (!func || (func->num > 7))
+ return -EINVAL;
+
+ /* Do the bulk of the transfer using block mode (if supported). */
+ if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
+ /*
+ * Blocks per command is limited by the host's maximum block
+ * count, the host transfer size and the IO_RW_EXTENDED
+ * maximum of 511 blocks.
+ */
+ max_blocks = min(func->card->host->max_blk_count, 511u);
+
+ while (remainder >= func->cur_blksize) {
+ unsigned blocks;
+
+ blocks = remainder / func->cur_blksize;
+ if (blocks > max_blocks)
+ blocks = max_blocks;
+ size = blocks * func->cur_blksize;
+
+ ret = mmc_io_rw_extended(func->card, write,
+ func->num, addr, incr_addr, buf,
+ blocks, func->cur_blksize);
+ if (ret)
+ return ret;
+
+ remainder -= size;
+ buf += size;
+ if (incr_addr)
+ addr += size;
+ }
+ }
+
+ /* Transfer the remainder using byte mode. */
+ while (remainder > 0) {
+ size = min(remainder, sdio_max_byte_size(func));
+
+ /* Indicate byte mode by setting "blocks" = 0 */
+ ret = mmc_io_rw_extended(func->card, write, func->num, addr,
+ incr_addr, buf, 0, size);
+ if (ret)
+ return ret;
+
+ remainder -= size;
+ buf += size;
+ if (incr_addr)
+ addr += size;
+ }
+ return 0;
+}
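+
+/*
+ * Worked example (assuming cur_blksize = 64, multi-block support and a
+ * 512-byte byte-mode limit): a 1000-byte transfer is issued as one
+ * 15-block command (960 bytes) followed by a single 40-byte byte-mode
+ * command.
+ */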
+
+/**
+ * sdio_readb - read a single byte from an SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a single byte from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xff
+ * is returned and @err_ret will contain the error code.
+ */
+u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
+{
+ int ret;
+ u8 val;
+
+ if (!func) {
+ if (err_ret)
+ *err_ret = -EINVAL;
+ return 0xFF;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, func->num, addr, 0, &val);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ return 0xFF;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb);
+
+/**
+ * sdio_writeb - write a single byte to an SDIO function
+ * @func: SDIO function to access
+ * @b: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a single byte to the address space of a given SDIO
+ * function. @err_ret will contain the status of the actual
+ * transfer.
+ */
+void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ if (!func) {
+ if (err_ret)
+ *err_ret = -EINVAL;
+ return;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 1, func->num, addr, b, NULL);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb);
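+
+/*
+ * Usage sketch (illustrative only; MY_REG and MY_BIT are hypothetical):
+ * a read-modify-write of a function register, with the host claimed:
+ *
+ *	u8 val;
+ *	int err;
+ *
+ *	val = sdio_readb(func, MY_REG, &err);
+ *	if (!err)
+ *		sdio_writeb(func, val | MY_BIT, MY_REG, &err);
+ */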
+
+/**
+ * sdio_writeb_readb - write and read a byte from an SDIO function
+ * @func: SDIO function to access
+ * @write_byte: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Performs a RAW (Read after Write) operation as defined by the SDIO
+ * spec: a single byte is written to the address space of a given SDIO
+ * function and the response is read back from the same address, all in
+ * a single request. If there is a problem with the operation, 0xff is
+ * returned and @err_ret will contain the error code.
+ */
+u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+ unsigned int addr, int *err_ret)
+{
+ int ret;
+ u8 val;
+
+ ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
+ write_byte, &val);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ return 0xff;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb_readb);
+
+/**
+ * sdio_memcpy_fromio - read a chunk of memory from an SDIO function
+ * @func: SDIO function to access
+ * @dst: buffer to store the data
+ * @addr: address to begin reading from
+ * @count: number of bytes to read
+ *
+ * Reads from the address space of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
+ unsigned int addr, int count)
+{
+ return sdio_io_rw_ext_helper(func, 0, addr, 1, dst, count);
+}
+EXPORT_SYMBOL_GPL(sdio_memcpy_fromio);
+
+/**
+ * sdio_memcpy_toio - write a chunk of memory to an SDIO function
+ * @func: SDIO function to access
+ * @addr: address to start writing to
+ * @src: buffer that contains the data to write
+ * @count: number of bytes to write
+ *
+ * Writes to the address space of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
+ void *src, int count)
+{
+ return sdio_io_rw_ext_helper(func, 1, addr, 1, src, count);
+}
+EXPORT_SYMBOL_GPL(sdio_memcpy_toio);
+
+/**
+ * sdio_readsb - read from a FIFO on an SDIO function
+ * @func: SDIO function to access
+ * @dst: buffer to store the data
+ * @addr: address of (single byte) FIFO
+ * @count: number of bytes to read
+ *
+ * Reads from the specified FIFO of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr,
+ int count)
+{
+ return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count);
+}
+EXPORT_SYMBOL_GPL(sdio_readsb);
+
+/**
+ * sdio_writesb - write to a FIFO of an SDIO function
+ * @func: SDIO function to access
+ * @addr: address of (single byte) FIFO
+ * @src: buffer that contains the data to write
+ * @count: number of bytes to write
+ *
+ * Writes to the specified FIFO of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_writesb(struct sdio_func *func, unsigned int addr, void *src,
+ int count)
+{
+ return sdio_io_rw_ext_helper(func, 1, addr, 0, src, count);
+}
+EXPORT_SYMBOL_GPL(sdio_writesb);
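+
+/*
+ * Usage sketch (illustrative only; MY_RAM_BASE and MY_FIFO_REG are
+ * hypothetical): sdio_memcpy_fromio() increments the card address as it
+ * goes, while sdio_readsb() keeps reading the same FIFO register:
+ *
+ *	ret = sdio_memcpy_fromio(func, buf, MY_RAM_BASE, len);
+ *	ret = sdio_readsb(func, buf, MY_FIFO_REG, len);
+ */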
+
+/**
+ * sdio_readw - read a 16-bit integer from an SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a 16-bit integer from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xffff
+ * is returned and @err_ret will contain the error code.
+ */
+u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 2);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ return 0xFFFF;
+
+ return le16_to_cpup((__le16 *)func->tmpbuf);
+}
+EXPORT_SYMBOL_GPL(sdio_readw);
+
+/**
+ * sdio_writew - write a 16-bit integer to an SDIO function
+ * @func: SDIO function to access
+ * @b: integer to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a 16-bit integer to the address space of a given SDIO
+ * function. @err_ret will contain the status of the actual
+ * transfer.
+ */
+void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ *(__le16 *)func->tmpbuf = cpu_to_le16(b);
+
+ ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_writew);
+
+/**
+ * sdio_readl - read a 32-bit integer from an SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a 32-bit integer from the address space of a given SDIO
+ * function. If there is a problem reading the address,
+ * 0xffffffff is returned and @err_ret will contain the error
+ * code.
+ */
+u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 4);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ return 0xFFFFFFFF;
+
+ return le32_to_cpup((__le32 *)func->tmpbuf);
+}
+EXPORT_SYMBOL_GPL(sdio_readl);
+
+/**
+ * sdio_writel - write a 32-bit integer to an SDIO function
+ * @func: SDIO function to access
+ * @b: integer to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a 32-bit integer to the address space of a given SDIO
+ * function. @err_ret will contain the status of the actual
+ * transfer.
+ */
+void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ *(__le32 *)func->tmpbuf = cpu_to_le32(b);
+
+ ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_writel);
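+
+/*
+ * Usage sketch (illustrative only; MY_ID_REG is hypothetical): the
+ * 16/32-bit helpers convert from the card's little-endian layout to CPU
+ * byte order, so the caller sees a native integer:
+ *
+ *	u32 id = sdio_readl(func, MY_ID_REG, &err);
+ */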
+
+/**
+ * sdio_f0_readb - read a single byte from SDIO function 0
+ * @func: an SDIO function of the card
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a single byte from the address space of SDIO function 0.
+ * If there is a problem reading the address, 0xff is returned
+ * and @err_ret will contain the error code.
+ */
+unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
+ int *err_ret)
+{
+ int ret;
+ unsigned char val;
+
+ if (!func) {
+ if (err_ret)
+ *err_ret = -EINVAL;
+ return 0xFF;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, addr, 0, &val);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ return 0xFF;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_f0_readb);
+
+/**
+ * sdio_f0_writeb - write a single byte to SDIO function 0
+ * @func: an SDIO function of the card
+ * @b: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a single byte to the address space of SDIO function 0.
+ * @err_ret will contain the status of the actual transfer.
+ *
+ * Only writes to the vendor specific CCCR registers (0xF0 -
+ * 0xFF) are permitted; @err_ret will be set to -EINVAL for
+ * writes outside this range.
+ */
+void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
+ int *err_ret)
+{
+ int ret;
+
+ if (!func) {
+ if (err_ret)
+ *err_ret = -EINVAL;
+ return;
+ }
+
+ if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
+ if (err_ret)
+ *err_ret = -EINVAL;
+ return;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, addr, b, NULL);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_f0_writeb);
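+
+/*
+ * Usage sketch (illustrative only; 0xF0 is the first register of the
+ * vendor range and 0x01 an arbitrary value): writes outside 0xF0-0xFF
+ * fail with -EINVAL unless the card is marked lenient:
+ *
+ *	sdio_f0_writeb(func, 0x01, 0xF0, &err);
+ */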
+
+/**
+ * sdio_get_host_pm_caps - get host power management capabilities
+ * @func: SDIO function attached to host
+ *
+ * Returns a capability bitmask corresponding to power management
+ * features supported by the host controller that the card function
+ * might rely upon during a system suspend. The host doesn't need
+ * to be claimed, nor the function active, for this information to be
+ * obtained.
+ */
+mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func)
+{
+ if (!func)
+ return 0;
+
+ return func->card->host->pm_caps;
+}
+EXPORT_SYMBOL_GPL(sdio_get_host_pm_caps);
+
+/**
+ * sdio_set_host_pm_flags - set wanted host power management capabilities
+ * @func: SDIO function attached to host
+ * @flags: Power Management flags to set
+ *
+ * Set a capability bitmask corresponding to the wanted host controller
+ * power management features for the upcoming suspend state.
+ * This must be called, if needed, each time the suspend method of
+ * the function driver is called, and @flags must contain only bits
+ * that were returned by sdio_get_host_pm_caps().
+ * The host doesn't need to be claimed, nor the function active,
+ * for this information to be set.
+ */
+int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
+{
+ struct mmc_host *host;
+
+ if (!func)
+ return -EINVAL;
+
+ host = func->card->host;
+
+ if (flags & ~host->pm_caps)
+ return -EINVAL;
+
+ /* function suspend methods are serialized, hence no lock needed */
+ host->pm_flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
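+
+/*
+ * Usage sketch (illustrative only; my_suspend is a hypothetical dev_pm
+ * suspend hook): keep the card powered across suspend when the host
+ * supports it:
+ *
+ *	static int my_suspend(struct device *dev)
+ *	{
+ *		struct sdio_func *func = dev_to_sdio_func(dev);
+ *
+ *		if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
+ *			return -ENOSYS;
+ *		return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ *	}
+ */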
+
+/**
+ * sdio_retune_crc_disable - temporarily disable retuning on CRC errors
+ * @func: SDIO function attached to host
+ *
+ * If the SDIO card is known to be in a state where it might produce
+ * CRC errors on the bus in response to commands (such as when we know
+ * it is transitioning between power states), an SDIO function driver can
+ * call this function to temporarily disable the SD/MMC core behavior of
+ * triggering an automatic retuning.
+ *
+ * This function should be called while the host is claimed and the host
+ * should remain claimed until sdio_retune_crc_enable() is called.
+ * Specifically, the expected sequence of calls is:
+ * - sdio_claim_host()
+ * - sdio_retune_crc_disable()
+ * - some number of calls like sdio_writeb() and sdio_readb()
+ * - sdio_retune_crc_enable()
+ * - sdio_release_host()
+ */
+void sdio_retune_crc_disable(struct sdio_func *func)
+{
+ func->card->host->retune_crc_disable = true;
+}
+EXPORT_SYMBOL_GPL(sdio_retune_crc_disable);
+
+/**
+ * sdio_retune_crc_enable - re-enable retuning on CRC errors
+ * @func: SDIO function attached to host
+ *
+ * This is the complement to sdio_retune_crc_disable().
+ */
+void sdio_retune_crc_enable(struct sdio_func *func)
+{
+ func->card->host->retune_crc_disable = false;
+}
+EXPORT_SYMBOL_GPL(sdio_retune_crc_enable);
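+
+/*
+ * Usage sketch (illustrative only; MY_PWR_REG and val are hypothetical),
+ * following the sequence documented above:
+ *
+ *	sdio_claim_host(func);
+ *	sdio_retune_crc_disable(func);
+ *	sdio_writeb(func, val, MY_PWR_REG, &err);
+ *	sdio_retune_crc_enable(func);
+ *	sdio_release_host(func);
+ */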
+
+/**
+ * sdio_retune_hold_now - start deferring retuning requests until release
+ * @func: SDIO function attached to host
+ *
+ * This function can be called if it's currently a bad time to do
+ * a retune of the SDIO card. Retune requests made during this time
+ * will be held and we'll actually do the retune sometime after the
+ * release.
+ *
+ * This function could be useful if an SDIO card is in a power state
+ * where it can respond to a small subset of commands that doesn't
+ * include the retuning command. Care should be taken when using
+ * this function since (presumably) the retuning request we might be
+ * deferring was made for a good reason.
+ *
+ * This function should be called while the host is claimed.
+ */
+void sdio_retune_hold_now(struct sdio_func *func)
+{
+ mmc_retune_hold_now(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_retune_hold_now);
+
+/**
+ * sdio_retune_release - signal that it's OK to retune now
+ * @func: SDIO function attached to host
+ *
+ * This is the complement to sdio_retune_hold_now(). Calling this
+ * function won't make a retune happen right away but will allow
+ * retunes to be scheduled normally.
+ *
+ * This function should be called while the host is claimed.
+ */
+void sdio_retune_release(struct sdio_func *func)
+{
+ mmc_retune_release(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_retune_release);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
new file mode 100644
index 000000000..4b1f7c966
--- /dev/null
+++ b/drivers/mmc/core/sdio_irq.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/sdio_irq.c
+ *
+ * Author: Nicolas Pitre
+ * Created: June 18, 2007
+ * Copyright: MontaVista Software Inc.
+ *
+ * Copyright 2008 Pierre Ossman
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/kthread.h>
+#include <linux/export.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "sdio_ops.h"
+#include "core.h"
+#include "card.h"
+
+static int sdio_get_pending_irqs(struct mmc_host *host, u8 *pending)
+{
+ struct mmc_card *card = host->card;
+ int ret;
+
+ WARN_ON(!host->claimed);
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, pending);
+ if (ret) {
+ pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
+ mmc_card_id(card), ret);
+ return ret;
+ }
+
+ if (*pending && mmc_card_broken_irq_polling(card) &&
+ !(host->caps & MMC_CAP_SDIO_IRQ)) {
+ unsigned char dummy;
+
+ /*
+ * A fake interrupt could be created when we poll the SDIO_CCCR_INTx
+ * register with a Marvell SD8797 card. A dummy CMD52 read to
+ * function 0 register 0xff can avoid this.
+ */
+ mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
+ }
+
+ return 0;
+}
+
+static int process_sdio_pending_irqs(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int i, ret, count;
+ bool sdio_irq_pending = host->sdio_irq_pending;
+ unsigned char pending;
+ struct sdio_func *func;
+
+ /* Don't process SDIO IRQs if the card is suspended. */
+ if (mmc_card_suspended(card))
+ return 0;
+
+ /* Clear the flag to indicate that we have processed the IRQ. */
+ host->sdio_irq_pending = false;
+
+ /*
+ * As an optimization, if there is only one function interrupt
+ * registered and we know an IRQ was signaled, call that IRQ
+ * handler directly. Otherwise do the full probe.
+ */
+ func = card->sdio_single_irq;
+ if (func && sdio_irq_pending) {
+ func->irq_handler(func);
+ return 1;
+ }
+
+ ret = sdio_get_pending_irqs(host, &pending);
+ if (ret)
+ return ret;
+
+ count = 0;
+ for (i = 1; i <= 7; i++) {
+ if (pending & (1 << i)) {
+ func = card->sdio_func[i - 1];
+ if (!func) {
+ pr_warn("%s: pending IRQ for non-existent function\n",
+ mmc_card_id(card));
+ ret = -EINVAL;
+ } else if (func->irq_handler) {
+ func->irq_handler(func);
+ count++;
+ } else {
+ pr_warn("%s: pending IRQ with no handler\n",
+ sdio_func_id(func));
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ if (count)
+ return count;
+
+ return ret;
+}
+
+static void sdio_run_irqs(struct mmc_host *host)
+{
+ mmc_claim_host(host);
+ if (host->sdio_irqs) {
+ process_sdio_pending_irqs(host);
+ if (!host->sdio_irq_pending)
+ host->ops->ack_sdio_irq(host);
+ }
+ mmc_release_host(host);
+}
+
+void sdio_irq_work(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, sdio_irq_work.work);
+
+ sdio_run_irqs(host);
+}
+
+void sdio_signal_irq(struct mmc_host *host)
+{
+ host->sdio_irq_pending = true;
+ queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+}
+EXPORT_SYMBOL_GPL(sdio_signal_irq);
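+
+/*
+ * Usage sketch (illustrative only; MY_CARD_INT is a hypothetical status
+ * bit): a host controller driver that sets MMC_CAP2_SDIO_IRQ_NOTHREAD
+ * calls this from its interrupt handler, and gets ack_sdio_irq() called
+ * back once the pending card IRQs have been processed:
+ *
+ *	if (intmask & MY_CARD_INT)
+ *		sdio_signal_irq(host);
+ */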
+
+static int sdio_irq_thread(void *_host)
+{
+ struct mmc_host *host = _host;
+ unsigned long period, idle_period;
+ int ret;
+
+ sched_set_fifo_low(current);
+
+ /*
+ * We want to allow SDIO cards to work even on non-SDIO-aware
+ * hosts. One thing that a non-SDIO host cannot do is asynchronous
+ * notification of pending SDIO card interrupts, hence we poll for
+ * them in that case.
+ */
+ idle_period = msecs_to_jiffies(10);
+ period = (host->caps & MMC_CAP_SDIO_IRQ) ?
+ MAX_SCHEDULE_TIMEOUT : idle_period;
+
+ pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
+ mmc_hostname(host), period);
+
+ do {
+ /*
+ * We claim the host here on the driver's behalf for a couple of
+ * reasons:
+ *
+ * 1) it is already needed to retrieve the CCCR_INTx;
+ * 2) we want the driver(s) to clear the IRQ condition ASAP;
+ * 3) we need to control the abort condition locally.
+ *
+ * Just like traditional hard IRQ handlers, we expect SDIO
+ * IRQ handlers to be quick and to the point, so that the
+ * holding of the host lock does not cover too much work
+ * that doesn't require that lock to be held.
+ */
+ ret = __mmc_claim_host(host, NULL,
+ &host->sdio_irq_thread_abort);
+ if (ret)
+ break;
+ ret = process_sdio_pending_irqs(host);
+ mmc_release_host(host);
+
+ /*
+ * Give other threads a chance to run in the presence of
+ * errors.
+ */
+ if (ret < 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule_timeout(HZ);
+ set_current_state(TASK_RUNNING);
+ }
+
+ /*
+ * Adaptive polling frequency based on the assumption
+ * that an interrupt will be closely followed by more.
+ * This has a substantial benefit for network devices.
+ */
+ if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
+ if (ret > 0)
+ period /= 2;
+ else {
+ period++;
+ if (period > idle_period)
+ period = idle_period;
+ }
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (host->caps & MMC_CAP_SDIO_IRQ)
+ host->ops->enable_sdio_irq(host, 1);
+ if (!kthread_should_stop())
+ schedule_timeout(period);
+ set_current_state(TASK_RUNNING);
+ } while (!kthread_should_stop());
+
+ if (host->caps & MMC_CAP_SDIO_IRQ)
+ host->ops->enable_sdio_irq(host, 0);
+
+ pr_debug("%s: IRQ thread exiting with code %d\n",
+ mmc_hostname(host), ret);
+
+ return ret;
+}
+
+static int sdio_card_irq_get(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+ WARN_ON(!host->claimed);
+
+ if (!host->sdio_irqs++) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 0);
+ host->sdio_irq_thread =
+ kthread_run(sdio_irq_thread, host,
+ "ksdioirqd/%s", mmc_hostname(host));
+ if (IS_ERR(host->sdio_irq_thread)) {
+ int err = PTR_ERR(host->sdio_irq_thread);
+
+ host->sdio_irqs--;
+ return err;
+ }
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ host->ops->enable_sdio_irq(host, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int sdio_card_irq_put(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+ WARN_ON(!host->claimed);
+
+ if (host->sdio_irqs < 1)
+ return -EINVAL;
+
+ if (!--host->sdio_irqs) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 1);
+ kthread_stop(host->sdio_irq_thread);
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ host->ops->enable_sdio_irq(host, 0);
+ }
+ }
+
+ return 0;
+}
+
+/* If there is only 1 function with an interrupt handler registered, set sdio_single_irq */
+static void sdio_single_irq_set(struct mmc_card *card)
+{
+ struct sdio_func *func;
+ int i;
+
+ card->sdio_single_irq = NULL;
+ if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
+ card->host->sdio_irqs == 1) {
+ for (i = 0; i < card->sdio_funcs; i++) {
+ func = card->sdio_func[i];
+ if (func && func->irq_handler) {
+ card->sdio_single_irq = func;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * sdio_claim_irq - claim the IRQ for an SDIO function
+ * @func: SDIO function
+ * @handler: IRQ handler callback
+ *
+ * Claim and activate the IRQ for the given SDIO function. The provided
+ * handler will be called when that IRQ is asserted. The host is always
+ * already claimed when the handler is called, so the handler should not
+ * call sdio_claim_host() or sdio_release_host().
+ */
+int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
+{
+ int ret;
+ unsigned char reg;
+
+ if (!func)
+ return -EINVAL;
+
+ pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));
+
+ if (func->irq_handler) {
+ pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
+ return -EBUSY;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
+ if (ret)
+ return ret;
+
+ reg |= 1 << func->num;
+
+ reg |= 1; /* Master interrupt enable */
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
+ if (ret)
+ return ret;
+
+ func->irq_handler = handler;
+ ret = sdio_card_irq_get(func->card);
+ if (ret)
+ func->irq_handler = NULL;
+ sdio_single_irq_set(func->card);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdio_claim_irq);
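+
+/*
+ * Usage sketch (illustrative only; my_irq, MY_INT_STATUS and
+ * handle_status are hypothetical): the handler runs with the host
+ * already claimed, so it accesses the card directly:
+ *
+ *	static void my_irq(struct sdio_func *func)
+ *	{
+ *		u8 status = sdio_readb(func, MY_INT_STATUS, NULL);
+ *
+ *		handle_status(func, status);
+ *	}
+ *
+ *	sdio_claim_host(func);
+ *	ret = sdio_claim_irq(func, my_irq);
+ *	sdio_release_host(func);
+ */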
+
+/**
+ * sdio_release_irq - release the IRQ for an SDIO function
+ * @func: SDIO function
+ *
+ * Disable and release the IRQ for the given SDIO function.
+ */
+int sdio_release_irq(struct sdio_func *func)
+{
+ int ret;
+ unsigned char reg;
+
+ if (!func)
+ return -EINVAL;
+
+ pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));
+
+ if (func->irq_handler) {
+ func->irq_handler = NULL;
+ sdio_card_irq_put(func->card);
+ sdio_single_irq_set(func->card);
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~(1 << func->num);
+
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_release_irq);
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
new file mode 100644
index 000000000..4c229dd2b
--- /dev/null
+++ b/drivers/mmc/core/sdio_ops.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/drivers/mmc/core/sdio_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+
+#include "core.h"
+#include "sdio_ops.h"
+
+int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd = {};
+ int i, err = 0;
+
+ cmd.opcode = SD_IO_SEND_OP_COND;
+ cmd.arg = ocr;
+ cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
+
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ /* otherwise wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ /*
+ * Both R1_SPI_IDLE and MMC_CARD_BUSY indicate
+ * an initialized card under SPI, but some cards
+ * (Marvell's) only behave correctly when we look
+ * at this one.
+ */
+ if (cmd.resp[1] & MMC_CARD_BUSY)
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ mmc_delay(10);
+ }
+
+ if (rocr)
+ *rocr = cmd.resp[mmc_host_is_spi(host) ? 1 : 0];
+
+ return err;
+}
+
+static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
+ unsigned addr, u8 in, u8 *out)
+{
+ struct mmc_command cmd = {};
+ int err;
+
+ if (fn > 7)
+ return -EINVAL;
+
+ /* sanity check */
+ if (addr & ~0x1FFFF)
+ return -EINVAL;
+
+ cmd.opcode = SD_IO_RW_DIRECT;
+ cmd.arg = write ? 0x80000000 : 0x00000000;
+ cmd.arg |= fn << 28;
+ cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
+ cmd.arg |= addr << 9;
+ cmd.arg |= in;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host)) {
+ /* host driver already reported errors */
+ } else {
+ if (cmd.resp[0] & R5_ERROR)
+ return -EIO;
+ if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+ return -EINVAL;
+ if (cmd.resp[0] & R5_OUT_OF_RANGE)
+ return -ERANGE;
+ }
+
+ if (out) {
+ if (mmc_host_is_spi(host))
+ *out = (cmd.resp[0] >> 8) & 0xFF;
+ else
+ *out = cmd.resp[0] & 0xFF;
+ }
+
+ return 0;
+}
+
+int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, u8 in, u8 *out)
+{
+ return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
+}
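+
+/*
+ * Worked example of the CMD52 argument encoding built above: reading
+ * CCCR register 0x07 on function 0 gives
+ * arg = (0 << 31) | (0 << 28) | (0x07 << 9) = 0x00000E00.
+ */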
+
+int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg, *sg_ptr;
+ struct sg_table sgtable;
+ unsigned int nents, left_size, i;
+ unsigned int seg_size = card->host->max_seg_size;
+ int err;
+
+ WARN_ON(blksz == 0);
+
+ /* sanity check */
+ if (addr & ~0x1FFFF)
+ return -EINVAL;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.arg = write ? 0x80000000 : 0x00000000;
+ cmd.arg |= fn << 28;
+ cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
+ cmd.arg |= addr << 9;
+ if (blocks == 0)
+ cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
+ else
+ cmd.arg |= 0x08000000 | blocks; /* block mode */
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ data.blksz = blksz;
+ /* Code in host drivers/frameworks assumes that "blocks" is always >= 1 */
+ data.blocks = blocks ? blocks : 1;
+ data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+
+ left_size = data.blksz * data.blocks;
+ nents = DIV_ROUND_UP(left_size, seg_size);
+ if (nents > 1) {
+ if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
+ return -ENOMEM;
+
+ data.sg = sgtable.sgl;
+ data.sg_len = nents;
+
+ for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
+ sg_set_buf(sg_ptr, buf + i * seg_size,
+ min(seg_size, left_size));
+ left_size -= seg_size;
+ }
+ } else {
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, buf, left_size);
+ }
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_pre_req(card->host, &mrq);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error)
+ err = cmd.error;
+ else if (data.error)
+ err = data.error;
+ else if (mmc_host_is_spi(card->host))
+ /* host driver already reported errors */
+ err = 0;
+ else if (cmd.resp[0] & R5_ERROR)
+ err = -EIO;
+ else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+ err = -EINVAL;
+ else if (cmd.resp[0] & R5_OUT_OF_RANGE)
+ err = -ERANGE;
+ else
+ err = 0;
+
+ mmc_post_req(card->host, &mrq, err);
+
+ if (nents > 1)
+ sg_free_table(&sgtable);
+
+ return err;
+}
+
+int sdio_reset(struct mmc_host *host)
+{
+ int ret;
+ u8 abort;
+
+ /* SDIO Simplified Specification V2.0, 4.4 Reset for SDIO */
+
+ ret = mmc_io_rw_direct_host(host, 0, 0, SDIO_CCCR_ABORT, 0, &abort);
+ if (ret)
+ abort = 0x08;
+ else
+ abort |= 0x08;
+
+ return mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
+}
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
new file mode 100644
index 000000000..37f79732a
--- /dev/null
+++ b/drivers/mmc/core/sdio_ops.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * linux/drivers/mmc/core/sdio_ops.h
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ */
+
+#ifndef _MMC_SDIO_OPS_H
+#define _MMC_SDIO_OPS_H
+
+#include <linux/types.h>
+#include <linux/mmc/sdio.h>
+
+struct mmc_host;
+struct mmc_card;
+struct work_struct;
+
+int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
+int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, u8 in, u8* out);
+int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
+int sdio_reset(struct mmc_host *host);
+void sdio_irq_work(struct work_struct *work);
+
+static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
+{
+ u32 addr;
+
+ addr = (arg >> 9) & 0x1FFFF;
+
+ return (opcode == SD_IO_RW_EXTENDED ||
+ (opcode == SD_IO_RW_DIRECT &&
+ !(addr == SDIO_CCCR_ABORT || addr == SDIO_CCCR_SUSPEND)));
+}
+
+#endif
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
new file mode 100644
index 000000000..be4067281
--- /dev/null
+++ b/drivers/mmc/core/sdio_uart.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SDIO UART/GPS driver
+ *
+ * Based on drivers/serial/8250.c and drivers/serial/serial_core.c
+ * by Russell King.
+ *
+ * Author: Nicolas Pitre
+ * Created: June 15, 2007
+ * Copyright: MontaVista Software, Inc.
+ */
+
+/*
+ * Note: Although this driver assumes a 16550A-like UART implementation,
+ * it is not possible to leverage the common 8250/16550 driver, nor the
+ * core UART infrastructure, as they assume direct access to the hardware
+ * registers, often under a spinlock. This is not possible in the SDIO
+ * context as SDIO access functions must be able to sleep.
+ *
+ * Because we need to lock the SDIO host to ensure exclusive access to
+ * the card, we simply rely on that lock to also serialize any
+ * concurrent access to the same port.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+
+#define UART_NR 8 /* Number of UARTs this driver can handle */
+
+
+#define FIFO_SIZE PAGE_SIZE
+#define WAKEUP_CHARS 256
+
+struct uart_icount {
+ __u32 cts;
+ __u32 dsr;
+ __u32 rng;
+ __u32 dcd;
+ __u32 rx;
+ __u32 tx;
+ __u32 frame;
+ __u32 overrun;
+ __u32 parity;
+ __u32 brk;
+};
+
+struct sdio_uart_port {
+ struct tty_port port;
+ unsigned int index;
+ struct sdio_func *func;
+ struct mutex func_lock;
+ struct task_struct *in_sdio_uart_irq;
+ unsigned int regs_offset;
+ struct kfifo xmit_fifo;
+ spinlock_t write_lock;
+ struct uart_icount icount;
+ unsigned int uartclk;
+ unsigned int mctrl;
+ unsigned int rx_mctrl;
+ unsigned int read_status_mask;
+ unsigned int ignore_status_mask;
+ unsigned char x_char;
+ unsigned char ier;
+ unsigned char lcr;
+};
+
+static struct sdio_uart_port *sdio_uart_table[UART_NR];
+static DEFINE_SPINLOCK(sdio_uart_table_lock);
+
+static int sdio_uart_add_port(struct sdio_uart_port *port)
+{
+ int index, ret = -EBUSY;
+
+ mutex_init(&port->func_lock);
+ spin_lock_init(&port->write_lock);
+ if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(&sdio_uart_table_lock);
+ for (index = 0; index < UART_NR; index++) {
+ if (!sdio_uart_table[index]) {
+ port->index = index;
+ sdio_uart_table[index] = port;
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock(&sdio_uart_table_lock);
+
+ return ret;
+}
+
+static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
+{
+ struct sdio_uart_port *port;
+
+ if (index >= UART_NR)
+ return NULL;
+
+ spin_lock(&sdio_uart_table_lock);
+ port = sdio_uart_table[index];
+ if (port)
+ tty_port_get(&port->port);
+ spin_unlock(&sdio_uart_table_lock);
+
+ return port;
+}
+
+static void sdio_uart_port_put(struct sdio_uart_port *port)
+{
+ tty_port_put(&port->port);
+}
+
+static void sdio_uart_port_remove(struct sdio_uart_port *port)
+{
+ struct sdio_func *func;
+
+ spin_lock(&sdio_uart_table_lock);
+ sdio_uart_table[port->index] = NULL;
+ spin_unlock(&sdio_uart_table_lock);
+
+ /*
+ * We're killing a port that potentially still is in use by
+ * the tty layer. Be careful to prevent any further access
+ * to the SDIO function and arrange for the tty layer to
+ * give up on that port ASAP.
+ * Beware: the lock ordering is critical.
+ */
+ mutex_lock(&port->port.mutex);
+ mutex_lock(&port->func_lock);
+ func = port->func;
+ sdio_claim_host(func);
+ port->func = NULL;
+ mutex_unlock(&port->func_lock);
+ /* tty_hangup() is async, so is this safe as-is? */
+ tty_port_tty_hangup(&port->port, false);
+ mutex_unlock(&port->port.mutex);
+ sdio_release_irq(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ sdio_uart_port_put(port);
+}
+
+static int sdio_uart_claim_func(struct sdio_uart_port *port)
+{
+ mutex_lock(&port->func_lock);
+ if (unlikely(!port->func)) {
+ mutex_unlock(&port->func_lock);
+ return -ENODEV;
+ }
+ if (likely(port->in_sdio_uart_irq != current))
+ sdio_claim_host(port->func);
+ mutex_unlock(&port->func_lock);
+ return 0;
+}
+
+static inline void sdio_uart_release_func(struct sdio_uart_port *port)
+{
+ if (likely(port->in_sdio_uart_irq != current))
+ sdio_release_host(port->func);
+}
+
+static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+{
+ unsigned char c;
+ c = sdio_readb(port->func, port->regs_offset + offset, NULL);
+ return c;
+}
+
+static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
+{
+ sdio_writeb(port->func, value, port->regs_offset + offset, NULL);
+}
+
+static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
+{
+ unsigned char status;
+ unsigned int ret;
+
+ /*
+ * FIXME: What stops this losing the delta bits and breaking
+ * sdio_uart_check_modem_status()?
+ */
+ status = sdio_in(port, UART_MSR);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void sdio_uart_write_mctrl(struct sdio_uart_port *port,
+ unsigned int mctrl)
+{
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ sdio_out(port, UART_MCR, mcr);
+}
+
+static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
+ unsigned int set, unsigned int clear)
+{
+ unsigned int old;
+
+ old = port->mctrl;
+ port->mctrl = (old & ~clear) | set;
+ if (old != port->mctrl)
+ sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+#define sdio_uart_set_mctrl(port, x) sdio_uart_update_mctrl(port, x, 0)
+#define sdio_uart_clear_mctrl(port, x) sdio_uart_update_mctrl(port, 0, x)
+
+static void sdio_uart_change_speed(struct sdio_uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned char cval, fcr = 0;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ for (;;) {
+ baud = tty_termios_baud_rate(termios);
+ if (baud == 0)
+ baud = 9600; /* Special case: B0 rate. */
+ if (baud <= port->uartclk)
+ break;
+ /*
+ * Oops, the requested rate is higher than the UART clock
+ * can produce. Try again with the old baud rate if
+ * possible, otherwise default to 9600.
+ */
+ termios->c_cflag &= ~CBAUD;
+ if (old) {
+ termios->c_cflag |= old->c_cflag & CBAUD;
+ old = NULL;
+ } else
+ termios->c_cflag |= B9600;
+ }
+ quot = (2 * port->uartclk + baud) / (2 * baud);
+
+ if (baud < 2400)
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+ else
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+
+ port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ port->ier &= ~UART_IER_MSI;
+ if ((termios->c_cflag & CRTSCTS) || !(termios->c_cflag & CLOCAL))
+ port->ier |= UART_IER_MSI;
+
+ port->lcr = cval;
+
+ sdio_out(port, UART_IER, port->ier);
+ sdio_out(port, UART_LCR, cval | UART_LCR_DLAB);
+ sdio_out(port, UART_DLL, quot & 0xff);
+ sdio_out(port, UART_DLM, quot >> 8);
+ sdio_out(port, UART_LCR, cval);
+ sdio_out(port, UART_FCR, fcr);
+
+ sdio_uart_write_mctrl(port, port->mctrl);
+}
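+
+/*
+ * Worked example of the divisor computation above (values hypothetical):
+ * with uartclk = 921600 and baud = 115200,
+ * quot = (2 * 921600 + 115200) / (2 * 115200) = 8,
+ * i.e. the divisor that yields exactly 115200 baud.
+ */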
+
+static void sdio_uart_start_tx(struct sdio_uart_port *port)
+{
+ if (!(port->ier & UART_IER_THRI)) {
+ port->ier |= UART_IER_THRI;
+ sdio_out(port, UART_IER, port->ier);
+ }
+}
+
+static void sdio_uart_stop_tx(struct sdio_uart_port *port)
+{
+ if (port->ier & UART_IER_THRI) {
+ port->ier &= ~UART_IER_THRI;
+ sdio_out(port, UART_IER, port->ier);
+ }
+}
+
+static void sdio_uart_stop_rx(struct sdio_uart_port *port)
+{
+ port->ier &= ~UART_IER_RLSI;
+ port->read_status_mask &= ~UART_LSR_DR;
+ sdio_out(port, UART_IER, port->ier);
+}
+
+static void sdio_uart_receive_chars(struct sdio_uart_port *port,
+ unsigned int *status)
+{
+ unsigned int ch, flag;
+ int max_count = 256;
+
+ do {
+ ch = sdio_in(port, UART_RX);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+ /*
+ * For statistics only
+ */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ port->icount.brk++;
+ } else if (*status & UART_LSR_PE)
+ port->icount.parity++;
+ else if (*status & UART_LSR_FE)
+ port->icount.frame++;
+ if (*status & UART_LSR_OE)
+ port->icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ *status &= port->read_status_mask;
+ if (*status & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
+ tty_insert_flip_char(&port->port, ch, flag);
+
+ /*
+ * Overrun is special. Since it's reported immediately,
+ * it doesn't affect the current character.
+ */
+ if (*status & ~port->ignore_status_mask & UART_LSR_OE)
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+
+ *status = sdio_in(port, UART_LSR);
+ } while ((*status & UART_LSR_DR) && (max_count-- > 0));
+
+ tty_flip_buffer_push(&port->port);
+}
+
+static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
+{
+ struct kfifo *xmit = &port->xmit_fifo;
+ int count;
+ struct tty_struct *tty;
+ u8 iobuf[16];
+ int len;
+
+ if (port->x_char) {
+ sdio_out(port, UART_TX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ tty = tty_port_tty_get(&port->port);
+
+ if (tty == NULL || !kfifo_len(xmit) ||
+ tty->stopped || tty->hw_stopped) {
+ sdio_uart_stop_tx(port);
+ tty_kref_put(tty);
+ return;
+ }
+
+ len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
+ for (count = 0; count < len; count++) {
+ sdio_out(port, UART_TX, iobuf[count]);
+ port->icount.tx++;
+ }
+
+ len = kfifo_len(xmit);
+ if (len < WAKEUP_CHARS) {
+ tty_wakeup(tty);
+ if (len == 0)
+ sdio_uart_stop_tx(port);
+ }
+ tty_kref_put(tty);
+}
+
+static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
+{
+ int status;
+ struct tty_struct *tty;
+
+ status = sdio_in(port, UART_MSR);
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return;
+
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD) {
+ port->icount.dcd++;
+ /* DCD raise - wake for open */
+ if (status & UART_MSR_DCD)
+ wake_up_interruptible(&port->port.open_wait);
+ else {
+ /* DCD drop - hang up if tty attached */
+ tty_port_tty_hangup(&port->port, false);
+ }
+ }
+ if (status & UART_MSR_DCTS) {
+ port->icount.cts++;
+ tty = tty_port_tty_get(&port->port);
+ if (tty && C_CRTSCTS(tty)) {
+ int cts = (status & UART_MSR_CTS);
+ if (tty->hw_stopped) {
+ if (cts) {
+ tty->hw_stopped = 0;
+ sdio_uart_start_tx(port);
+ tty_wakeup(tty);
+ }
+ } else {
+ if (!cts) {
+ tty->hw_stopped = 1;
+ sdio_uart_stop_tx(port);
+ }
+ }
+ }
+ tty_kref_put(tty);
+ }
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static void sdio_uart_irq(struct sdio_func *func)
+{
+ struct sdio_uart_port *port = sdio_get_drvdata(func);
+ unsigned int iir, lsr;
+
+ /*
+ * In a few places sdio_uart_irq() is called directly instead of
+ * waiting for the actual interrupt to be raised and the SDIO IRQ
+ * thread scheduled in order to reduce latency. However, some
+ * interaction with the tty core may end up calling us back
+ * (serial echo, flow control, etc.) through those same places
+ * causing undesirable effects. Let's stop the recursion here.
+ */
+ if (unlikely(port->in_sdio_uart_irq == current))
+ return;
+
+ iir = sdio_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return;
+
+ port->in_sdio_uart_irq = current;
+ lsr = sdio_in(port, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ sdio_uart_receive_chars(port, &lsr);
+ sdio_uart_check_modem_status(port);
+ if (lsr & UART_LSR_THRE)
+ sdio_uart_transmit_chars(port);
+ port->in_sdio_uart_irq = NULL;
+}
+
+static int uart_carrier_raised(struct tty_port *tport)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ unsigned int ret = sdio_uart_claim_func(port);
+ if (ret) /* Missing hardware shouldn't block for carrier */
+ return 1;
+ ret = sdio_uart_get_mctrl(port);
+ sdio_uart_release_func(port);
+ if (ret & TIOCM_CAR)
+ return 1;
+ return 0;
+}
+
+/**
+ * uart_dtr_rts - port helper to set uart signals
+ * @tport: tty port to be updated
+ * @onoff: set to turn on DTR/RTS
+ *
+ * Called by the tty port helpers when the modem signals need to be
+ * adjusted during open, close and hangup.
+ */
+
+static void uart_dtr_rts(struct tty_port *tport, int onoff)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ int ret = sdio_uart_claim_func(port);
+ if (ret)
+ return;
+ if (onoff == 0)
+ sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ else
+ sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ sdio_uart_release_func(port);
+}
+
+/**
+ * sdio_uart_activate - start up hardware
+ * @tport: tty port to activate
+ * @tty: tty bound to this port
+ *
+ * Activate a tty port. The port locking guarantees us this will be
+ * run exactly once per set of opens, and if successful will see the
+ * shutdown method run exactly once to match. Start up and shutdown are
+ * protected from each other by the internal locking and will not run
+ * at the same time even during a hangup event.
+ *
+ * If we successfully start up the port we take an extra kref as we
+ * will keep it around until shutdown when the kref is dropped.
+ */
+
+static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ int ret;
+
+ /*
+ * Set the TTY IO error marker - we will only clear this
+ * once we have successfully opened the port.
+ */
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ kfifo_reset(&port->xmit_fifo);
+
+ ret = sdio_uart_claim_func(port);
+ if (ret)
+ return ret;
+ ret = sdio_enable_func(port->func);
+ if (ret)
+ goto err1;
+ ret = sdio_claim_irq(port->func, sdio_uart_irq);
+ if (ret)
+ goto err2;
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in sdio_uart_change_speed())
+ */
+ sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+ sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ sdio_out(port, UART_FCR, 0);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) sdio_in(port, UART_LSR);
+ (void) sdio_in(port, UART_RX);
+ (void) sdio_in(port, UART_IIR);
+ (void) sdio_in(port, UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ sdio_out(port, UART_LCR, UART_LCR_WLEN8);
+
+ port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
+ port->mctrl = TIOCM_OUT2;
+
+ sdio_uart_change_speed(port, &tty->termios, NULL);
+
+ if (C_BAUD(tty))
+ sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+ if (C_CRTSCTS(tty))
+ if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
+ tty->hw_stopped = 1;
+
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+
+ /* Kick the IRQ handler once while we're still holding the host lock */
+ sdio_uart_irq(port->func);
+
+ sdio_uart_release_func(port);
+ return 0;
+
+err2:
+ sdio_disable_func(port->func);
+err1:
+ sdio_uart_release_func(port);
+ return ret;
+}
+
+/**
+ * sdio_uart_shutdown - stop hardware
+ * @tport: tty port to shut down
+ *
+ * Deactivate a tty port. The port locking guarantees us this will be
+ * run only if a successful matching activate already ran. The two are
+ * protected from each other by the internal locking and will not run
+ * at the same time even during a hangup event.
+ */
+
+static void sdio_uart_shutdown(struct tty_port *tport)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ int ret;
+
+ ret = sdio_uart_claim_func(port);
+ if (ret)
+ return;
+
+ sdio_uart_stop_rx(port);
+
+ /* Disable interrupts from this port */
+ sdio_release_irq(port->func);
+ port->ier = 0;
+ sdio_out(port, UART_IER, 0);
+
+ sdio_uart_clear_mctrl(port, TIOCM_OUT2);
+
+ /* Disable break condition and FIFOs. */
+ port->lcr &= ~UART_LCR_SBC;
+ sdio_out(port, UART_LCR, port->lcr);
+ sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ sdio_out(port, UART_FCR, 0);
+
+ sdio_disable_func(port->func);
+
+ sdio_uart_release_func(port);
+}
+
+static void sdio_uart_port_destroy(struct tty_port *tport)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ kfifo_free(&port->xmit_fifo);
+ kfree(port);
+}
+
+/**
+ * sdio_uart_install - install method
+ * @driver: the driver in use (sdio_uart in our case)
+ * @tty: the tty being bound
+ *
+ * Look up and bind the tty and the driver together. Initialize
+ * any needed private data (in our case the termios)
+ */
+
+static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ int idx = tty->index;
+ struct sdio_uart_port *port = sdio_uart_port_get(idx);
+ int ret = tty_standard_install(driver, tty);
+
+ if (ret == 0)
+ /* This is the ref sdio_uart_port_get() provided */
+ tty->driver_data = port;
+ else
+ sdio_uart_port_put(port);
+ return ret;
+}
+
+/**
+ * sdio_uart_cleanup - called on the last tty kref drop
+ * @tty: the tty being destroyed
+ *
+ * Called asynchronously when the last reference to the tty is dropped.
+ * We cannot destroy the tty->driver_data port kref until this point
+ */
+
+static void sdio_uart_cleanup(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ tty->driver_data = NULL; /* Bug trap */
+ sdio_uart_port_put(port);
+}
+
+/*
+ * Open/close/hangup is now entirely boilerplate
+ */
+
+static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ return tty_port_open(&port->port, tty, filp);
+}
+
+static void sdio_uart_close(struct tty_struct *tty, struct file * filp)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ tty_port_close(&port->port, tty, filp);
+}
+
+static void sdio_uart_hangup(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ tty_port_hangup(&port->port);
+}
+
+static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int ret;
+
+ if (!port->func)
+ return -ENODEV;
+
+ ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
+ if (!(port->ier & UART_IER_THRI)) {
+ int err = sdio_uart_claim_func(port);
+ if (!err) {
+ sdio_uart_start_tx(port);
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+ } else
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int sdio_uart_write_room(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
+}
+
+static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ return kfifo_len(&port->xmit_fifo);
+}
+
+static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ port->x_char = ch;
+ if (ch && !(port->ier & UART_IER_THRI)) {
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+ sdio_uart_start_tx(port);
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+ }
+}
+
+static void sdio_uart_throttle(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
+ return;
+
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+
+ if (I_IXOFF(tty)) {
+ port->x_char = STOP_CHAR(tty);
+ sdio_uart_start_tx(port);
+ }
+
+ if (C_CRTSCTS(tty))
+ sdio_uart_clear_mctrl(port, TIOCM_RTS);
+
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+}
+
+static void sdio_uart_unthrottle(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+
+ if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
+ return;
+
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (port->x_char) {
+ port->x_char = 0;
+ } else {
+ port->x_char = START_CHAR(tty);
+ sdio_uart_start_tx(port);
+ }
+ }
+
+ if (C_CRTSCTS(tty))
+ sdio_uart_set_mctrl(port, TIOCM_RTS);
+
+ sdio_uart_irq(port->func);
+ sdio_uart_release_func(port);
+}
+
+static void sdio_uart_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ unsigned int cflag = tty->termios.c_cflag;
+
+ if (sdio_uart_claim_func(port) != 0)
+ return;
+
+ sdio_uart_change_speed(port, &tty->termios, old_termios);
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
+ sdio_uart_clear_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+ unsigned int mask = TIOCM_DTR;
+ if (!(cflag & CRTSCTS) || !tty_throttled(tty))
+ mask |= TIOCM_RTS;
+ sdio_uart_set_mctrl(port, mask);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ sdio_uart_start_tx(port);
+ }
+
+ /* Handle turning on CRTSCTS */
+ if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
+ if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
+ tty->hw_stopped = 1;
+ sdio_uart_stop_tx(port);
+ }
+ }
+
+ sdio_uart_release_func(port);
+}
+
+static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int result;
+
+ result = sdio_uart_claim_func(port);
+ if (result != 0)
+ return result;
+
+ if (break_state == -1)
+ port->lcr |= UART_LCR_SBC;
+ else
+ port->lcr &= ~UART_LCR_SBC;
+ sdio_out(port, UART_LCR, port->lcr);
+
+ sdio_uart_release_func(port);
+ return 0;
+}
+
+static int sdio_uart_tiocmget(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int result;
+
+ result = sdio_uart_claim_func(port);
+ if (!result) {
+ result = port->mctrl | sdio_uart_get_mctrl(port);
+ sdio_uart_release_func(port);
+ }
+
+ return result;
+}
+
+static int sdio_uart_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ int result;
+
+ result = sdio_uart_claim_func(port);
+ if (!result) {
+ sdio_uart_update_mctrl(port, set, clear);
+ sdio_uart_release_func(port);
+ }
+
+ return result;
+}
+
+static int sdio_uart_proc_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
+ "", "", "");
+ for (i = 0; i < UART_NR; i++) {
+ struct sdio_uart_port *port = sdio_uart_port_get(i);
+ if (port) {
+ seq_printf(m, "%d: uart:SDIO", i);
+ if (capable(CAP_SYS_ADMIN)) {
+ seq_printf(m, " tx:%d rx:%d",
+ port->icount.tx, port->icount.rx);
+ if (port->icount.frame)
+ seq_printf(m, " fe:%d",
+ port->icount.frame);
+ if (port->icount.parity)
+ seq_printf(m, " pe:%d",
+ port->icount.parity);
+ if (port->icount.brk)
+ seq_printf(m, " brk:%d",
+ port->icount.brk);
+ if (port->icount.overrun)
+ seq_printf(m, " oe:%d",
+ port->icount.overrun);
+ if (port->icount.cts)
+ seq_printf(m, " cts:%d",
+ port->icount.cts);
+ if (port->icount.dsr)
+ seq_printf(m, " dsr:%d",
+ port->icount.dsr);
+ if (port->icount.rng)
+ seq_printf(m, " rng:%d",
+ port->icount.rng);
+ if (port->icount.dcd)
+ seq_printf(m, " dcd:%d",
+ port->icount.dcd);
+ }
+ sdio_uart_port_put(port);
+ seq_putc(m, '\n');
+ }
+ }
+ return 0;
+}
+
+static const struct tty_port_operations sdio_uart_port_ops = {
+ .dtr_rts = uart_dtr_rts,
+ .carrier_raised = uart_carrier_raised,
+ .shutdown = sdio_uart_shutdown,
+ .activate = sdio_uart_activate,
+ .destruct = sdio_uart_port_destroy,
+};
+
+static const struct tty_operations sdio_uart_ops = {
+ .open = sdio_uart_open,
+ .close = sdio_uart_close,
+ .write = sdio_uart_write,
+ .write_room = sdio_uart_write_room,
+ .chars_in_buffer = sdio_uart_chars_in_buffer,
+ .send_xchar = sdio_uart_send_xchar,
+ .throttle = sdio_uart_throttle,
+ .unthrottle = sdio_uart_unthrottle,
+ .set_termios = sdio_uart_set_termios,
+ .hangup = sdio_uart_hangup,
+ .break_ctl = sdio_uart_break_ctl,
+ .tiocmget = sdio_uart_tiocmget,
+ .tiocmset = sdio_uart_tiocmset,
+ .install = sdio_uart_install,
+ .cleanup = sdio_uart_cleanup,
+ .proc_show = sdio_uart_proc_show,
+};
+
+static struct tty_driver *sdio_uart_tty_driver;
+
+static int sdio_uart_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct sdio_uart_port *port;
+ int ret;
+
+ port = kzalloc(sizeof(struct sdio_uart_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ if (func->class == SDIO_CLASS_UART) {
+ pr_warn("%s: need info on UART class basic setup\n",
+ sdio_func_id(func));
+ kfree(port);
+ return -ENOSYS;
+ } else if (func->class == SDIO_CLASS_GPS) {
+ /*
+ * We need tuple 0x91. It contains SUBTPL_SIOREG
+ * and SUBTPL_RCVCAPS.
+ */
+ struct sdio_func_tuple *tpl;
+ for (tpl = func->tuples; tpl; tpl = tpl->next) {
+ if (tpl->code != 0x91)
+ continue;
+ if (tpl->size < 10)
+ continue;
+ if (tpl->data[1] == 0) /* SUBTPL_SIOREG */
+ break;
+ }
+ if (!tpl) {
+ pr_warn("%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+ sdio_func_id(func));
+ kfree(port);
+ return -EINVAL;
+ }
+ pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+ sdio_func_id(func), tpl->data[2], tpl->data[3]);
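+ /* data[4..6]: 24-bit little-endian offset of the UART register window */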
+ port->regs_offset = (tpl->data[4] << 0) |
+ (tpl->data[5] << 8) |
+ (tpl->data[6] << 16);
+ pr_debug("%s: regs offset = 0x%x\n",
+ sdio_func_id(func), port->regs_offset);
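+ /* data[7]: UART clock in multiples of 115200 Hz, with 115200 as fallback */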
+ port->uartclk = tpl->data[7] * 115200;
+ if (port->uartclk == 0)
+ port->uartclk = 115200;
+ pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
+ sdio_func_id(func), port->uartclk,
+ tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
+ } else {
+ kfree(port);
+ return -EINVAL;
+ }
+
+ port->func = func;
+ sdio_set_drvdata(func, port);
+ tty_port_init(&port->port);
+ port->port.ops = &sdio_uart_port_ops;
+
+ ret = sdio_uart_add_port(port);
+ if (ret) {
+ kfree(port);
+ } else {
+ struct device *dev;
+ dev = tty_port_register_device(&port->port,
+ sdio_uart_tty_driver, port->index, &func->dev);
+ if (IS_ERR(dev)) {
+ sdio_uart_port_remove(port);
+ ret = PTR_ERR(dev);
+ }
+ }
+
+ return ret;
+}
+
+static void sdio_uart_remove(struct sdio_func *func)
+{
+ struct sdio_uart_port *port = sdio_get_drvdata(func);
+
+ tty_unregister_device(sdio_uart_tty_driver, port->index);
+ sdio_uart_port_remove(port);
+}
+
+static const struct sdio_device_id sdio_uart_ids[] = {
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_UART) },
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_GPS) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, sdio_uart_ids);
+
+static struct sdio_driver sdio_uart_driver = {
+ .probe = sdio_uart_probe,
+ .remove = sdio_uart_remove,
+ .name = "sdio_uart",
+ .id_table = sdio_uart_ids,
+};
+
+static int __init sdio_uart_init(void)
+{
+ int ret;
+ struct tty_driver *tty_drv;
+
+ sdio_uart_tty_driver = tty_drv = alloc_tty_driver(UART_NR);
+ if (!tty_drv)
+ return -ENOMEM;
+
+ tty_drv->driver_name = "sdio_uart";
+ tty_drv->name = "ttySDIO";
+ tty_drv->major = 0; /* dynamically allocated */
+ tty_drv->minor_start = 0;
+ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_drv->subtype = SERIAL_TYPE_NORMAL;
+ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
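+ /* Default to 4800 8N1, the traditional NMEA-0183 rate for GPS devices */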
+ tty_drv->init_termios = tty_std_termios;
+ tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
+ tty_drv->init_termios.c_ispeed = 4800;
+ tty_drv->init_termios.c_ospeed = 4800;
+ tty_set_operations(tty_drv, &sdio_uart_ops);
+
+ ret = tty_register_driver(tty_drv);
+ if (ret)
+ goto err1;
+
+ ret = sdio_register_driver(&sdio_uart_driver);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ tty_unregister_driver(tty_drv);
+err1:
+ put_tty_driver(tty_drv);
+ return ret;
+}
+
+static void __exit sdio_uart_exit(void)
+{
+ sdio_unregister_driver(&sdio_uart_driver);
+ tty_unregister_driver(sdio_uart_tty_driver);
+ put_tty_driver(sdio_uart_tty_driver);
+}
+
+module_init(sdio_uart_init);
+module_exit(sdio_uart_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
new file mode 100644
index 000000000..05e907451
--- /dev/null
+++ b/drivers/mmc/core/slot-gpio.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic GPIO card-detect helper
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "slot-gpio.h"
+
+struct mmc_gpio {
+ struct gpio_desc *ro_gpio;
+ struct gpio_desc *cd_gpio;
+ irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
+ char *ro_label;
+ char *cd_label;
+ u32 cd_debounce_delay_ms;
+};
+
+static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
+{
+ /* Schedule a card detection after a debounce timeout */
+ struct mmc_host *host = dev_id;
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ host->trigger_card_event = true;
+ mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms));
+
+ return IRQ_HANDLED;
+}
+
+int mmc_gpio_alloc(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = devm_kzalloc(host->parent,
+ sizeof(*ctx), GFP_KERNEL);
+
+ if (ctx) {
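+ /* Default software debounce delay for card-detect events, in ms */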
+ ctx->cd_debounce_delay_ms = 200;
+ ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL,
+ "%s cd", dev_name(host->parent));
+ if (!ctx->cd_label)
+ return -ENOMEM;
+ ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL,
+ "%s ro", dev_name(host->parent));
+ if (!ctx->ro_label)
+ return -ENOMEM;
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
+ }
+
+ return ctx ? 0 : -ENOMEM;
+}
+
+int mmc_gpio_get_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !ctx->ro_gpio)
+ return -ENOSYS;
+
+ return gpiod_get_value_cansleep(ctx->ro_gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_get_ro);
+
+int mmc_gpio_get_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int cansleep;
+
+ if (!ctx || !ctx->cd_gpio)
+ return -ENOSYS;
+
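+ /*
+ * Some GPIO controllers can only be read from sleeping context, so
+ * pick the matching accessor for this descriptor.
+ */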
+ cansleep = gpiod_cansleep(ctx->cd_gpio);
+ return cansleep ?
+ gpiod_get_value_cansleep(ctx->cd_gpio) :
+ gpiod_get_value(ctx->cd_gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_get_cd);
+
+void mmc_gpiod_request_cd_irq(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int irq = -EINVAL;
+ int ret;
+
+ if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
+ return;
+
+ /*
+ * Do not use IRQ if the platform prefers to poll, e.g., because that
+ * IRQ number is already used by another unit and cannot be shared.
+ */
+ if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ irq = gpiod_to_irq(ctx->cd_gpio);
+
+ if (irq >= 0) {
+ if (!ctx->cd_gpio_isr)
+ ctx->cd_gpio_isr = mmc_gpio_cd_irqt;
+ ret = devm_request_threaded_irq(host->parent, irq,
+ NULL, ctx->cd_gpio_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ctx->cd_label, host);
+ if (ret < 0)
+ irq = ret;
+ }
+
+ host->slot.cd_irq = irq;
+
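+ /* Without a usable IRQ, fall back to polling for card insert/remove */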
+ if (irq < 0)
+ host->caps |= MMC_CAP_NEEDS_POLL;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
+
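+/*
+ * Typically called from a host driver's system suspend/resume path to
+ * (de)arm the card-detect IRQ as a system wakeup source.
+ */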
+int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on)
+{
+ int ret = 0;
+
+ if (!(host->caps & MMC_CAP_CD_WAKE) ||
+ host->slot.cd_irq < 0 ||
+ on == host->slot.cd_wake_enabled)
+ return 0;
+
+ if (on) {
+ ret = enable_irq_wake(host->slot.cd_irq);
+ host->slot.cd_wake_enabled = !ret;
+ } else {
+ disable_irq_wake(host->slot.cd_irq);
+ host->slot.cd_wake_enabled = false;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
+
+/*
+ * Register an alternate interrupt service routine for
+ * the card-detect GPIO.
+ */
+void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id))
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ WARN_ON(ctx->cd_gpio_isr);
+ ctx->cd_gpio_isr = isr;
+}
+EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
+
+/**
+ * mmc_gpiod_request_cd - request a gpio descriptor for card-detection
+ * @host: mmc host
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
+ * @debounce: debounce time in microseconds
+ *
+ * Note that this must be called prior to mmc_add_host();
+ * otherwise, the caller must also call mmc_gpiod_request_cd_irq().
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ struct gpio_desc *desc;
+ int ret;
+
+ desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
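+ /*
+ * Prefer hardware debounce; if the controller cannot provide it, fall
+ * back to the software debounce delay applied when the CD IRQ
+ * schedules card detection.
+ */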
+ if (debounce) {
+ ret = gpiod_set_debounce(desc, debounce);
+ if (ret < 0)
+ ctx->cd_debounce_delay_ms = debounce / 1000;
+ }
+
+ /* override forces default (active-low) polarity ... */
+ if (override_active_level && !gpiod_is_active_low(desc))
+ gpiod_toggle_active_low(desc);
+
+ /* ... or active-high */
+ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
+
+ ctx->cd_gpio = desc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_cd);
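+
+/*
+ * Hypothetical host-driver usage sketch (the "cd" con_id and variable
+ * names are illustrative, not mandated by this file): request the CD
+ * GPIO before mmc_add_host() so that the core wires up the card-detect
+ * IRQ itself.
+ *
+ *	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
+ *	if (ret)
+ *		return ret;
+ *	ret = mmc_add_host(mmc);
+ */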
+
+bool mmc_can_gpio_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return !!ctx->cd_gpio;
+}
+EXPORT_SYMBOL(mmc_can_gpio_cd);
+
+/**
+ * mmc_gpiod_request_ro - request a gpio descriptor for write protection
+ * @host: mmc host
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ * @debounce: debounce time in microseconds
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, unsigned int debounce)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ struct gpio_desc *desc;
+ int ret;
+
+ desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (debounce) {
+ ret = gpiod_set_debounce(desc, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
+
+ ctx->ro_gpio = desc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+bool mmc_can_gpio_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return !!ctx->ro_gpio;
+}
+EXPORT_SYMBOL(mmc_can_gpio_ro);
diff --git a/drivers/mmc/core/slot-gpio.h b/drivers/mmc/core/slot-gpio.h
new file mode 100644
index 000000000..546a5843b
--- /dev/null
+++ b/drivers/mmc/core/slot-gpio.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ */
+#ifndef _MMC_CORE_SLOTGPIO_H
+#define _MMC_CORE_SLOTGPIO_H
+
+struct mmc_host;
+
+int mmc_gpio_alloc(struct mmc_host *host);
+
+#endif