author  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit  2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree  848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/memstick
parent  Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/memstick')
-rw-r--r--  drivers/memstick/Kconfig  27
-rw-r--r--  drivers/memstick/Makefile  10
-rw-r--r--  drivers/memstick/core/Kconfig  39
-rw-r--r--  drivers/memstick/core/Makefile  8
-rw-r--r--  drivers/memstick/core/memstick.c  662
-rw-r--r--  drivers/memstick/core/ms_block.c  2344
-rw-r--r--  drivers/memstick/core/ms_block.h  286
-rw-r--r--  drivers/memstick/core/mspro_block.c  1437
-rw-r--r--  drivers/memstick/host/Kconfig  65
-rw-r--r--  drivers/memstick/host/Makefile  10
-rw-r--r--  drivers/memstick/host/jmb38x_ms.c  1030
-rw-r--r--  drivers/memstick/host/r592.c  894
-rw-r--r--  drivers/memstick/host/r592.h  172
-rw-r--r--  drivers/memstick/host/rtsx_pci_ms.c  643
-rw-r--r--  drivers/memstick/host/rtsx_usb_ms.c  869
-rw-r--r--  drivers/memstick/host/tifm_ms.c  683
16 files changed, 9179 insertions, 0 deletions
diff --git a/drivers/memstick/Kconfig b/drivers/memstick/Kconfig
new file mode 100644
index 000000000..e6180135d
--- /dev/null
+++ b/drivers/memstick/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MemoryStick subsystem configuration
+#
+
+menuconfig MEMSTICK
+ tristate "Sony MemoryStick card support"
+ help
+ Sony MemoryStick is a proprietary storage/extension card protocol.
+
+ If you want MemoryStick support, you should say Y here and also
+ to the specific driver for your MemoryStick interface.
+
+if MEMSTICK
+
+config MEMSTICK_DEBUG
+ bool "MemoryStick debugging"
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables MemoryStick core and driver debugging.
+
+
+source "drivers/memstick/core/Kconfig"
+
+source "drivers/memstick/host/Kconfig"
+
+endif # MEMSTICK
diff --git a/drivers/memstick/Makefile b/drivers/memstick/Makefile
new file mode 100644
index 000000000..61ea1d3ab
--- /dev/null
+++ b/drivers/memstick/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the kernel MemoryStick device drivers.
+#
+
+subdir-ccflags-$(CONFIG_MEMSTICK_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_MEMSTICK) += core/
+obj-$(CONFIG_MEMSTICK) += host/
+
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
new file mode 100644
index 000000000..08192fd70
--- /dev/null
+++ b/drivers/memstick/core/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MemoryStick core configuration
+#
+
+comment "MemoryStick drivers"
+
+config MEMSTICK_UNSAFE_RESUME
+ bool "Allow unsafe resume (DANGEROUS)"
+ help
+ If you say Y here, the MemoryStick layer will assume that all
+ cards stayed in their respective slots during the suspend. The
+ normal behaviour is to remove them at suspend and
+ redetect them at resume. Breaking this assumption will
+ in most cases result in data corruption.
+
+ This option is usually just for embedded systems which use
+ a MemoryStick card for rootfs. Most people should say N here.
+
+config MSPRO_BLOCK
+ tristate "MemoryStick Pro block device driver"
+ depends on BLOCK
+ help
+ Say Y here to enable the MemoryStick Pro block device driver
+ support. This provides a block device driver, which you can use
+ to mount the filesystem. Almost everyone wishing MemoryStick
+ support should say Y or M here.
+
+config MS_BLOCK
+ tristate "MemoryStick Standard device driver"
+ depends on BLOCK
+ help
+ Say Y here to enable the MemoryStick Standard device driver
+ support. This provides a block device driver, which you can use
+ to mount the filesystem.
+ This driver works with old (bulky) MemoryStick and MemoryStick Duo
+ but not PRO. Say Y if you have such a card.
+ The driver is new and not yet well tested, thus it can damage your
+ card (even permanently).
diff --git a/drivers/memstick/core/Makefile b/drivers/memstick/core/Makefile
new file mode 100644
index 000000000..6b9b9ba4a
--- /dev/null
+++ b/drivers/memstick/core/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the kernel MemoryStick core.
+#
+
+obj-$(CONFIG_MEMSTICK) += memstick.o
+obj-$(CONFIG_MS_BLOCK) += ms_block.o
+obj-$(CONFIG_MSPRO_BLOCK) += mspro_block.o
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
new file mode 100644
index 000000000..d410e2e78
--- /dev/null
+++ b/drivers/memstick/core/memstick.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sony MemoryStick support
+ *
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * Special thanks to Carlos Corbacho for providing various MemoryStick cards
+ * that made this driver possible.
+ */
+
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#define DRIVER_NAME "memstick"
+
+static unsigned int cmd_retries = 3;
+module_param(cmd_retries, uint, 0644);
+
+static struct workqueue_struct *workqueue;
+static DEFINE_IDR(memstick_host_idr);
+static DEFINE_SPINLOCK(memstick_host_lock);
+
+static int memstick_dev_match(struct memstick_dev *card,
+ struct memstick_device_id *id)
+{
+ if (id->match_flags & MEMSTICK_MATCH_ALL) {
+ if ((id->type == card->id.type)
+ && (id->category == card->id.category)
+ && (id->class == card->id.class))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int memstick_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+ struct memstick_driver *ms_drv = container_of(drv,
+ struct memstick_driver,
+ driver);
+ struct memstick_device_id *ids = ms_drv->id_table;
+
+ if (ids) {
+ while (ids->match_flags) {
+ if (memstick_dev_match(card, ids))
+ return 1;
+ ++ids;
+ }
+ }
+ return 0;
+}
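+
+/*
+ * Illustrative sketch (not part of this file): a card driver advertises the
+ * media it handles through an id table that the matching above walks entry by
+ * entry. my_id_tbl is a hypothetical name; the constants are the ones defined
+ * in <linux/memstick.h>:
+ *
+ *	static struct memstick_device_id my_id_tbl[] = {
+ *		{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_PRO,
+ *		 MEMSTICK_CATEGORY_STORAGE_DUO, MEMSTICK_CLASS_DUO},
+ *		{}
+ *	};
+ *
+ * The table is hooked up through the id_table field of struct memstick_driver.
+ */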
+
+static int memstick_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+
+ if (add_uevent_var(env, "MEMSTICK_TYPE=%02X", card->id.type))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MEMSTICK_CATEGORY=%02X", card->id.category))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MEMSTICK_CLASS=%02X", card->id.class))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int memstick_device_probe(struct device *dev)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+ struct memstick_driver *drv = container_of(dev->driver,
+ struct memstick_driver,
+ driver);
+ int rc = -ENODEV;
+
+ if (dev->driver && drv->probe) {
+ rc = drv->probe(card);
+ if (!rc)
+ get_device(dev);
+ }
+ return rc;
+}
+
+static void memstick_device_remove(struct device *dev)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+ struct memstick_driver *drv = container_of(dev->driver,
+ struct memstick_driver,
+ driver);
+
+ if (dev->driver && drv->remove) {
+ drv->remove(card);
+ card->dev.driver = NULL;
+ }
+
+ put_device(dev);
+}
+
+#ifdef CONFIG_PM
+
+static int memstick_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+ struct memstick_driver *drv = container_of(dev->driver,
+ struct memstick_driver,
+ driver);
+
+ if (dev->driver && drv->suspend)
+ return drv->suspend(card, state);
+ return 0;
+}
+
+static int memstick_device_resume(struct device *dev)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+ struct memstick_driver *drv = container_of(dev->driver,
+ struct memstick_driver,
+ driver);
+
+ if (dev->driver && drv->resume)
+ return drv->resume(card);
+ return 0;
+}
+
+#else
+
+#define memstick_device_suspend NULL
+#define memstick_device_resume NULL
+
+#endif /* CONFIG_PM */
+
+#define MEMSTICK_ATTR(name, format) \
+static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct memstick_dev *card = container_of(dev, struct memstick_dev, \
+ dev); \
+ return sprintf(buf, format, card->id.name); \
+} \
+static DEVICE_ATTR_RO(name);
+
+MEMSTICK_ATTR(type, "%02X");
+MEMSTICK_ATTR(category, "%02X");
+MEMSTICK_ATTR(class, "%02X");
+
+static struct attribute *memstick_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_category.attr,
+ &dev_attr_class.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(memstick_dev);
+
+static struct bus_type memstick_bus_type = {
+ .name = "memstick",
+ .dev_groups = memstick_dev_groups,
+ .match = memstick_bus_match,
+ .uevent = memstick_uevent,
+ .probe = memstick_device_probe,
+ .remove = memstick_device_remove,
+ .suspend = memstick_device_suspend,
+ .resume = memstick_device_resume
+};
+
+static void memstick_free(struct device *dev)
+{
+ struct memstick_host *host = container_of(dev, struct memstick_host,
+ dev);
+ kfree(host);
+}
+
+static struct class memstick_host_class = {
+ .name = "memstick_host",
+ .dev_release = memstick_free
+};
+
+static void memstick_free_card(struct device *dev)
+{
+ struct memstick_dev *card = container_of(dev, struct memstick_dev,
+ dev);
+ kfree(card);
+}
+
+static int memstick_dummy_check(struct memstick_dev *card)
+{
+ return 0;
+}
+
+/**
+ * memstick_detect_change - schedule media detection on memstick host
+ * @host - host to use
+ */
+void memstick_detect_change(struct memstick_host *host)
+{
+ queue_work(workqueue, &host->media_checker);
+}
+EXPORT_SYMBOL(memstick_detect_change);
+
+/**
+ * memstick_next_req - called by host driver to obtain next request to process
+ * @host - host to use
+ * @mrq - pointer to stick the request to
+ *
+ * The host calls this function from the idle state (*mrq == NULL) or after
+ * finishing the previous request (*mrq should point to it). If the previous
+ * request was unsuccessful, it is retried a predetermined number of times.
+ * A return value of 0 means that a new request was assigned to the host.
+ */
+int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq)
+{
+ int rc = -ENXIO;
+
+ if ((*mrq) && (*mrq)->error && host->retries) {
+ (*mrq)->error = rc;
+ host->retries--;
+ return 0;
+ }
+
+ if (host->card && host->card->next_request)
+ rc = host->card->next_request(host->card, mrq);
+
+ if (!rc)
+ host->retries = cmd_retries > 1 ? cmd_retries - 1 : 1;
+ else
+ *mrq = NULL;
+
+ return rc;
+}
+EXPORT_SYMBOL(memstick_next_req);
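+
+/*
+ * Illustrative sketch (not part of this file): a host driver typically drains
+ * the core's requests from its request-processing context like this, where
+ * msh is the struct memstick_host pointer and struct my_host / my_issue_tpc()
+ * are hypothetical:
+ *
+ *	struct my_host *hst = memstick_priv(msh);
+ *
+ *	while (!memstick_next_req(msh, &hst->req))
+ *		hst->req->error = my_issue_tpc(hst, hst->req);
+ *
+ * Passing the finished request back in on the next iteration is what lets the
+ * retry logic above kick in when ->error is set.
+ */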
+
+/**
+ * memstick_new_req - notify the host that some requests are pending
+ * @host - host to use
+ */
+void memstick_new_req(struct memstick_host *host)
+{
+ if (host->card) {
+ host->retries = cmd_retries;
+ reinit_completion(&host->card->mrq_complete);
+ host->request(host);
+ }
+}
+EXPORT_SYMBOL(memstick_new_req);
+
+/**
+ * memstick_init_req_sg - set request fields needed for bulk data transfer
+ * @mrq - request to use
+ * @tpc - memstick Transport Protocol Command
+ * @sg - TPC argument
+ */
+void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
+ const struct scatterlist *sg)
+{
+ mrq->tpc = tpc;
+ if (tpc & 8)
+ mrq->data_dir = WRITE;
+ else
+ mrq->data_dir = READ;
+
+ mrq->sg = *sg;
+ mrq->long_data = 1;
+
+ if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD)
+ mrq->need_card_int = 1;
+ else
+ mrq->need_card_int = 0;
+}
+EXPORT_SYMBOL(memstick_init_req_sg);
+
+/**
+ * memstick_init_req - set request fields needed for short data transfer
+ * @mrq - request to use
+ * @tpc - memstick Transport Protocol Command
+ * @buf - TPC argument buffer
+ * @length - TPC argument size
+ *
+ * The intended use of this function (transfer of data items several bytes
+ * in size) allows us to just copy the value between the request structure
+ * and the user-supplied buffer.
+ */
+void memstick_init_req(struct memstick_request *mrq, unsigned char tpc,
+ const void *buf, size_t length)
+{
+ mrq->tpc = tpc;
+ if (tpc & 8)
+ mrq->data_dir = WRITE;
+ else
+ mrq->data_dir = READ;
+
+ mrq->data_len = length > sizeof(mrq->data) ? sizeof(mrq->data) : length;
+ if (mrq->data_dir == WRITE)
+ memcpy(mrq->data, buf, mrq->data_len);
+
+ mrq->long_data = 0;
+
+ if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD)
+ mrq->need_card_int = 1;
+ else
+ mrq->need_card_int = 0;
+}
+EXPORT_SYMBOL(memstick_init_req);
+
+/*
+ * Functions prefixed with "h_" are protocol callbacks. They can be called from
+ * interrupt context. A return value of 0 means that request processing is
+ * still ongoing, while the special error value -EAGAIN means that the current
+ * request is finished (and the request processor should come back some time
+ * later).
+ */
+
+static int h_memstick_read_dev_id(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct ms_id_register id_reg;
+
+ if (!(*mrq)) {
+ memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
+ sizeof(struct ms_id_register));
+ *mrq = &card->current_mrq;
+ return 0;
+ }
+ if (!(*mrq)->error) {
+ memcpy(&id_reg, (*mrq)->data, sizeof(id_reg));
+ card->id.match_flags = MEMSTICK_MATCH_ALL;
+ card->id.type = id_reg.type;
+ card->id.category = id_reg.category;
+ card->id.class = id_reg.class;
+ dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode);
+ }
+ complete(&card->mrq_complete);
+ return -EAGAIN;
+}
+
+static int h_memstick_set_rw_addr(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ if (!(*mrq)) {
+ memstick_init_req(&card->current_mrq, MS_TPC_SET_RW_REG_ADRS,
+ (char *)&card->reg_addr,
+ sizeof(card->reg_addr));
+ *mrq = &card->current_mrq;
+ return 0;
+ } else {
+ complete(&card->mrq_complete);
+ return -EAGAIN;
+ }
+}
+
+/**
+ * memstick_set_rw_addr - issue SET_RW_REG_ADDR request and wait for it to
+ * complete
+ * @card - media device to use
+ */
+int memstick_set_rw_addr(struct memstick_dev *card)
+{
+ card->next_request = h_memstick_set_rw_addr;
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+
+ return card->current_mrq.error;
+}
+EXPORT_SYMBOL(memstick_set_rw_addr);
+
+static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
+{
+ struct memstick_dev *card = kzalloc(sizeof(struct memstick_dev),
+ GFP_KERNEL);
+ struct memstick_dev *old_card = host->card;
+ struct ms_id_register id_reg;
+
+ if (card) {
+ card->host = host;
+ dev_set_name(&card->dev, "%s", dev_name(&host->dev));
+ card->dev.parent = &host->dev;
+ card->dev.bus = &memstick_bus_type;
+ card->dev.release = memstick_free_card;
+ card->check = memstick_dummy_check;
+
+ card->reg_addr.r_offset = offsetof(struct ms_register, id);
+ card->reg_addr.r_length = sizeof(id_reg);
+ card->reg_addr.w_offset = offsetof(struct ms_register, id);
+ card->reg_addr.w_length = sizeof(id_reg);
+
+ init_completion(&card->mrq_complete);
+
+ host->card = card;
+ if (memstick_set_rw_addr(card))
+ goto err_out;
+
+ card->next_request = h_memstick_read_dev_id;
+ memstick_new_req(host);
+ wait_for_completion(&card->mrq_complete);
+
+ if (card->current_mrq.error)
+ goto err_out;
+ }
+ host->card = old_card;
+ return card;
+err_out:
+ host->card = old_card;
+ kfree_const(card->dev.kobj.name);
+ kfree(card);
+ return NULL;
+}
+
+static int memstick_power_on(struct memstick_host *host)
+{
+ int rc = host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
+
+ if (!rc)
+ rc = host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+
+ return rc;
+}
+
+static void memstick_check(struct work_struct *work)
+{
+ struct memstick_host *host = container_of(work, struct memstick_host,
+ media_checker);
+ struct memstick_dev *card;
+
+ dev_dbg(&host->dev, "memstick_check started\n");
+ pm_runtime_get_noresume(host->dev.parent);
+ mutex_lock(&host->lock);
+ if (!host->card) {
+ if (memstick_power_on(host))
+ goto out_power_off;
+ } else if (host->card->stop)
+ host->card->stop(host->card);
+
+ if (host->removing)
+ goto out_power_off;
+
+ card = memstick_alloc_card(host);
+
+ if (!card) {
+ if (host->card) {
+ device_unregister(&host->card->dev);
+ host->card = NULL;
+ }
+ } else {
+ dev_dbg(&host->dev, "new card %02x, %02x, %02x\n",
+ card->id.type, card->id.category, card->id.class);
+ if (host->card) {
+ if (memstick_set_rw_addr(host->card)
+ || !memstick_dev_match(host->card, &card->id)
+ || !(host->card->check(host->card))) {
+ device_unregister(&host->card->dev);
+ host->card = NULL;
+ } else if (host->card->start)
+ host->card->start(host->card);
+ }
+
+ if (!host->card) {
+ host->card = card;
+ if (device_register(&card->dev)) {
+ put_device(&card->dev);
+ host->card = NULL;
+ }
+ } else {
+ kfree_const(card->dev.kobj.name);
+ kfree(card);
+ }
+ }
+
+out_power_off:
+ if (!host->card)
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+
+ mutex_unlock(&host->lock);
+ pm_runtime_put(host->dev.parent);
+ dev_dbg(&host->dev, "memstick_check finished\n");
+}
+
+/**
+ * memstick_alloc_host - allocate a memstick_host structure
+ * @extra: size of the user private data to allocate
+ * @dev: parent device of the host
+ */
+struct memstick_host *memstick_alloc_host(unsigned int extra,
+ struct device *dev)
+{
+ struct memstick_host *host;
+
+ host = kzalloc(sizeof(struct memstick_host) + extra, GFP_KERNEL);
+ if (host) {
+ mutex_init(&host->lock);
+ INIT_WORK(&host->media_checker, memstick_check);
+ host->dev.class = &memstick_host_class;
+ host->dev.parent = dev;
+ device_initialize(&host->dev);
+ }
+ return host;
+}
+EXPORT_SYMBOL(memstick_alloc_host);
+
+/**
+ * memstick_add_host - start request processing on memstick host
+ * @host - host to use
+ */
+int memstick_add_host(struct memstick_host *host)
+{
+ int rc;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&memstick_host_lock);
+
+ rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT);
+ if (rc >= 0)
+ host->id = rc;
+
+ spin_unlock(&memstick_host_lock);
+ idr_preload_end();
+ if (rc < 0)
+ return rc;
+
+ dev_set_name(&host->dev, "memstick%u", host->id);
+
+ rc = device_add(&host->dev);
+ if (rc) {
+ spin_lock(&memstick_host_lock);
+ idr_remove(&memstick_host_idr, host->id);
+ spin_unlock(&memstick_host_lock);
+ return rc;
+ }
+
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ memstick_detect_change(host);
+ return 0;
+}
+EXPORT_SYMBOL(memstick_add_host);
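+
+/*
+ * Illustrative sketch (not part of this file): a host controller driver wires
+ * itself up from its probe routine roughly like this (struct my_host,
+ * my_submit_req(), my_set_param() and pdev are hypothetical):
+ *
+ *	msh = memstick_alloc_host(sizeof(struct my_host), &pdev->dev);
+ *	if (!msh)
+ *		return -ENOMEM;
+ *	msh->request = my_submit_req;
+ *	msh->set_param = my_set_param;
+ *	err = memstick_add_host(msh);
+ *	if (err)
+ *		memstick_free_host(msh);
+ *
+ * ->request is later kicked by memstick_new_req(); ->set_param handles the
+ * MEMSTICK_POWER and MEMSTICK_INTERFACE settings used throughout this file.
+ */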
+
+/**
+ * memstick_remove_host - stop request processing on memstick host
+ * @host - host to use
+ */
+void memstick_remove_host(struct memstick_host *host)
+{
+ host->removing = 1;
+ flush_workqueue(workqueue);
+ mutex_lock(&host->lock);
+ if (host->card)
+ device_unregister(&host->card->dev);
+ host->card = NULL;
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ mutex_unlock(&host->lock);
+
+ spin_lock(&memstick_host_lock);
+ idr_remove(&memstick_host_idr, host->id);
+ spin_unlock(&memstick_host_lock);
+ device_del(&host->dev);
+}
+EXPORT_SYMBOL(memstick_remove_host);
+
+/**
+ * memstick_free_host - free memstick host
+ * @host - host to use
+ */
+void memstick_free_host(struct memstick_host *host)
+{
+ mutex_destroy(&host->lock);
+ put_device(&host->dev);
+}
+EXPORT_SYMBOL(memstick_free_host);
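+
+/*
+ * Illustrative sketch (not part of this file): on the remove path a host
+ * driver undoes the above in this order:
+ *
+ *	memstick_remove_host(msh);
+ *	memstick_free_host(msh);
+ *
+ * memstick_remove_host() unregisters any attached card and powers the slot
+ * off; memstick_free_host() then drops the final reference on the host.
+ */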
+
+/**
+ * memstick_suspend_host - notify bus driver of host suspension
+ * @host - host to use
+ */
+void memstick_suspend_host(struct memstick_host *host)
+{
+ mutex_lock(&host->lock);
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ mutex_unlock(&host->lock);
+}
+EXPORT_SYMBOL(memstick_suspend_host);
+
+/**
+ * memstick_resume_host - notify bus driver of host resumption
+ * @host - host to use
+ */
+void memstick_resume_host(struct memstick_host *host)
+{
+ int rc = 0;
+
+ mutex_lock(&host->lock);
+ if (host->card)
+ rc = memstick_power_on(host);
+ mutex_unlock(&host->lock);
+
+ if (!rc)
+ memstick_detect_change(host);
+}
+EXPORT_SYMBOL(memstick_resume_host);
+
+int memstick_register_driver(struct memstick_driver *drv)
+{
+ drv->driver.bus = &memstick_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(memstick_register_driver);
+
+void memstick_unregister_driver(struct memstick_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(memstick_unregister_driver);
+
+
+static int __init memstick_init(void)
+{
+ int rc;
+
+ workqueue = create_freezable_workqueue("kmemstick");
+ if (!workqueue)
+ return -ENOMEM;
+
+ rc = bus_register(&memstick_bus_type);
+ if (rc)
+ goto error_destroy_workqueue;
+
+ rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_bus_unregister;
+
+ return 0;
+
+error_bus_unregister:
+ bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
+ destroy_workqueue(workqueue);
+
+ return rc;
+}
+
+static void __exit memstick_exit(void)
+{
+ class_unregister(&memstick_host_class);
+ bus_unregister(&memstick_bus_type);
+ destroy_workqueue(workqueue);
+ idr_destroy(&memstick_host_idr);
+}
+
+module_init(memstick_init);
+module_exit(memstick_exit);
+
+MODULE_AUTHOR("Alex Dubov");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Sony MemoryStick core driver");
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
new file mode 100644
index 000000000..04115cd92
--- /dev/null
+++ b/drivers/memstick/core/ms_block.c
@@ -0,0 +1,2344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * Minor portions of the driver were copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ */
+#define DRIVER_NAME "ms_block"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blk-mq.h>
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include "ms_block.h"
+
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+/*
+ * Copies a section of 'sg_from' starting at offset 'offset' and with length
+ * 'len' to another scatterlist of to_nents entries
+ */
+static size_t msb_sg_copy(struct scatterlist *sg_from,
+ struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
+{
+ size_t copied = 0;
+
+ while (offset > 0) {
+ if (offset >= sg_from->length) {
+ if (sg_is_last(sg_from))
+ return 0;
+
+ offset -= sg_from->length;
+ sg_from = sg_next(sg_from);
+ continue;
+ }
+
+ copied = min(len, sg_from->length - offset);
+ sg_set_page(sg_to, sg_page(sg_from),
+ copied, sg_from->offset + offset);
+
+ len -= copied;
+ offset = 0;
+
+ if (sg_is_last(sg_from) || !len)
+ goto out;
+
+ sg_to = sg_next(sg_to);
+ to_nents--;
+ sg_from = sg_next(sg_from);
+ }
+
+ while (len > sg_from->length && to_nents--) {
+ len -= sg_from->length;
+ copied += sg_from->length;
+
+ sg_set_page(sg_to, sg_page(sg_from),
+ sg_from->length, sg_from->offset);
+
+ if (sg_is_last(sg_from) || !len)
+ goto out;
+
+ sg_from = sg_next(sg_from);
+ sg_to = sg_next(sg_to);
+ }
+
+ if (len && to_nents) {
+ sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
+ copied += len;
+ }
+out:
+ sg_mark_end(sg_to);
+ return copied;
+}
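+
+/*
+ * Worked example (not part of this file): if 'sg_from' maps three 512-byte
+ * pages, then
+ *
+ *	msb_sg_copy(sg_from, sg_to, 2, 512, 512);
+ *
+ * builds 'sg_to' so that it covers only bytes 512..1023 of that mapping (the
+ * second page) and returns 512.
+ */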
+
+/*
+ * Compares a section of 'sg' starting at offset 'offset' and with length 'len'
+ * to a linear buffer of length 'len' at address 'buffer'.
+ * Returns 0 if equal and -1 otherwise.
+ */
+static int msb_sg_compare_to_buffer(struct scatterlist *sg,
+ size_t offset, u8 *buffer, size_t len)
+{
+ int retval = 0, cmplen;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+ if (offset >= miter.length) {
+ offset -= miter.length;
+ continue;
+ }
+
+ cmplen = min(miter.length - offset, len);
+ retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
+ if (retval)
+ break;
+
+ buffer += cmplen;
+ len -= cmplen;
+ offset = 0;
+ }
+
+ if (!retval && len)
+ retval = -1;
+
+ sg_miter_stop(&miter);
+ return retval;
+}
+
+
+/* Get the zone in which the block with logical address 'lba' lives.
+ * Flash is broken into zones.
+ * Each zone consists of 512 eraseblocks, of which 494 are used in the first
+ * zone and 496 in all following zones.
+ * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
+ */
+static int msb_get_zone_from_lba(int lba)
+{
+ if (lba < 494)
+ return 0;
+ return ((lba - 494) / 496) + 1;
+}
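+
+/*
+ * Worked example (not part of this file), following the layout above:
+ *
+ *	msb_get_zone_from_lba(0)   == 0
+ *	msb_get_zone_from_lba(493) == 0    (last block of the 494-block zone #0)
+ *	msb_get_zone_from_lba(494) == 1
+ *	msb_get_zone_from_lba(989) == 1    (later zones hold 496 blocks each)
+ *	msb_get_zone_from_lba(990) == 2
+ */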
+
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+ return pba / MS_BLOCKS_IN_ZONE;
+}
+
+/* Debug test to validate free block counts */
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+ int total_free_blocks = 0;
+ int i;
+
+ if (!debug)
+ return 0;
+
+ for (i = 0; i < msb->zone_count; i++)
+ total_free_blocks += msb->free_block_count[i];
+
+ if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+ msb->block_count) == total_free_blocks)
+ return 0;
+
+ pr_err("BUG: free block counts don't match the bitmap");
+ msb->read_only = true;
+ return -EINVAL;
+}
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ pr_err(
+ "BUG: attempt to mark already used pba %d as used", pba);
+ msb->read_only = true;
+ return;
+ }
+
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+
+ /* No races because all IO is single threaded */
+ __set_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (!test_bit(pba, msb->used_blocks_bitmap)) {
+ pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
+ msb->read_only = true;
+ return;
+ }
+
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+
+ /* No races because all IO is single threaded */
+ __clear_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]++;
+}
+
+/* Invalidate current register window */
+static void msb_invalidate_reg_window(struct msb_data *msb)
+{
+ msb->reg_addr.w_offset = offsetof(struct ms_register, id);
+ msb->reg_addr.w_length = sizeof(struct ms_id_register);
+ msb->reg_addr.r_offset = offsetof(struct ms_register, id);
+ msb->reg_addr.r_length = sizeof(struct ms_id_register);
+ msb->addr_valid = false;
+}
+
+/* Start a state machine */
+static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
+ (struct memstick_dev *card, struct memstick_request **req))
+{
+ struct memstick_dev *card = msb->card;
+
+ WARN_ON(msb->state != -1);
+ msb->int_polling = false;
+ msb->state = 0;
+ msb->exit_error = 0;
+
+ memset(&card->current_mrq, 0, sizeof(card->current_mrq));
+
+ card->next_request = state_func;
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+
+ WARN_ON(msb->state != -1);
+ return msb->exit_error;
+}
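+
+/*
+ * Illustrative sketch (not part of this file): a simple card command is issued
+ * by filling the shadow registers and running the generic command handler,
+ * mirroring what msb_set_overwrite_flag() does further below:
+ *
+ *	msb->regs.param.block_address = cpu_to_be16(pba);
+ *	msb->regs.param.page_address = page;
+ *	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+ *	msb->regs.extra_data.overwrite_flag = flag;
+ *	msb->command_value = MS_CMD_BLOCK_WRITE;
+ *	msb->command_need_oob = true;
+ *	error = msb_run_state_machine(msb, h_msb_send_command);
+ */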
+
+/* State machine handlers call this to exit */
+static int msb_exit_state_machine(struct msb_data *msb, int error)
+{
+ WARN_ON(msb->state == -1);
+
+ msb->state = -1;
+ msb->exit_error = error;
+ msb->card->next_request = h_msb_default_bad;
+
+ /* Invalidate reg window on errors */
+ if (error)
+ msb_invalidate_reg_window(msb);
+
+ complete(&msb->card->mrq_complete);
+ return -ENXIO;
+}
+
+/* read INT register */
+static int msb_read_int_reg(struct msb_data *msb, long timeout)
+{
+ struct memstick_request *mrq = &msb->card->current_mrq;
+
+ WARN_ON(msb->state == -1);
+
+ if (!msb->int_polling) {
+ msb->int_timeout = jiffies +
+ msecs_to_jiffies(timeout == -1 ? 500 : timeout);
+ msb->int_polling = true;
+ } else if (time_after(jiffies, msb->int_timeout)) {
+ mrq->data[0] = MEMSTICK_INT_CMDNAK;
+ return 0;
+ }
+
+ if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
+ mrq->need_card_int && !mrq->error) {
+ mrq->data[0] = mrq->int_reg;
+ mrq->need_card_int = false;
+ return 0;
+ } else {
+ memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+ return 1;
+ }
+}
+
+/* Read a register */
+static int msb_read_regs(struct msb_data *msb, int offset, int len)
+{
+ struct memstick_request *req = &msb->card->current_mrq;
+
+ if (msb->reg_addr.r_offset != offset ||
+ msb->reg_addr.r_length != len || !msb->addr_valid) {
+
+ msb->reg_addr.r_offset = offset;
+ msb->reg_addr.r_length = len;
+ msb->addr_valid = true;
+
+ memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+ &msb->reg_addr, sizeof(msb->reg_addr));
+ return 0;
+ }
+
+ memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
+ return 1;
+}
+
+/* Write a card register */
+static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
+{
+ struct memstick_request *req = &msb->card->current_mrq;
+
+ if (msb->reg_addr.w_offset != offset ||
+ msb->reg_addr.w_length != len || !msb->addr_valid) {
+
+ msb->reg_addr.w_offset = offset;
+ msb->reg_addr.w_length = len;
+ msb->addr_valid = true;
+
+ memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+ &msb->reg_addr, sizeof(msb->reg_addr));
+ return 0;
+ }
+
+ memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
+ return 1;
+}
+
+/* Handler for absence of IO */
+static int h_msb_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ return -ENXIO;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * Writes output to msb->current_sg and takes the sector address from
+ * msb->regs.param. Can also be used to read extra data only; set params
+ * accordingly.
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct scatterlist sg[2];
+ u8 command, intreg;
+
+ if (mrq->error) {
+ dbg("read_page, unknown error");
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+again:
+ switch (msb->state) {
+ case MSB_RP_SEND_BLOCK_ADDRESS:
+ /* msb_write_regs sometimes "fails" because it needs to update
+ * the reg window, and thus it returns request for that.
+ * Then we stay in this state and retry
+ */
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ (unsigned char *)&msb->regs.param))
+ return 0;
+
+ msb->state = MSB_RP_SEND_READ_COMMAND;
+ return 0;
+
+ case MSB_RP_SEND_READ_COMMAND:
+ command = MS_CMD_BLOCK_READ;
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ msb->state = MSB_RP_SEND_INT_REQ;
+ return 0;
+
+ case MSB_RP_SEND_INT_REQ:
+ msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
+ /* If we don't actually need to send the INT read request (only
+ * needed in serial mode), then just fall through
+ */
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ fallthrough;
+
+ case MSB_RP_RECEIVE_INT_REQ_RESULT:
+ intreg = mrq->data[0];
+ msb->regs.status.interrupt = intreg;
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return msb_exit_state_machine(msb, -EIO);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ msb->state = MSB_RP_SEND_INT_REQ;
+ goto again;
+ }
+
+ msb->int_polling = false;
+ msb->state = (intreg & MEMSTICK_INT_ERR) ?
+ MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
+ goto again;
+
+ case MSB_RP_SEND_READ_STATUS_REG:
+ /* read the status register to understand source of the INT_ERR */
+ if (!msb_read_regs(msb,
+ offsetof(struct ms_register, status),
+ sizeof(struct ms_status_register)))
+ return 0;
+
+ msb->state = MSB_RP_RECEIVE_STATUS_REG;
+ return 0;
+
+ case MSB_RP_RECEIVE_STATUS_REG:
+ msb->regs.status = *(struct ms_status_register *)mrq->data;
+ msb->state = MSB_RP_SEND_OOB_READ;
+ fallthrough;
+
+ case MSB_RP_SEND_OOB_READ:
+ if (!msb_read_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register)))
+ return 0;
+
+ msb->state = MSB_RP_RECEIVE_OOB_READ;
+ return 0;
+
+ case MSB_RP_RECEIVE_OOB_READ:
+ msb->regs.extra_data =
+ *(struct ms_extra_data_register *) mrq->data;
+ msb->state = MSB_RP_SEND_READ_DATA;
+ fallthrough;
+
+ case MSB_RP_SEND_READ_DATA:
+ /* Skip that state if we only read the oob */
+ if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
+ msb->state = MSB_RP_RECEIVE_READ_DATA;
+ goto again;
+ }
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+ msb->current_sg_offset,
+ msb->page_size);
+
+ memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
+ msb->state = MSB_RP_RECEIVE_READ_DATA;
+ return 0;
+
+ case MSB_RP_RECEIVE_READ_DATA:
+ if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
+ msb->current_sg_offset += msb->page_size;
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+ dbg("read_page: uncorrectable error");
+ return msb_exit_state_machine(msb, -EBADMSG);
+ }
+
+ if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
+ dbg("read_page: correctable error");
+ msb->current_sg_offset += msb->page_size;
+ return msb_exit_state_machine(msb, -EUCLEAN);
+ } else {
+ dbg("read_page: INT error, but no status error bits");
+ return msb_exit_state_machine(msb, -EIO);
+ }
+ }
+
+ BUG();
+}
+
+/*
+ * Handler for writes of exactly one block.
+ * Takes the address from msb->regs.param.
+ * Writes the same extra data to every page of the block, taken
+ * from msb->regs.extra_data.
+ * Returns -EBADMSG if the write fails due to an uncorrectable error, or -EIO
+ * if the device refuses to take the command or something else goes wrong.
+ */
+static int h_msb_write_block(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct scatterlist sg[2];
+ u8 intreg, command;
+
+ if (mrq->error)
+ return msb_exit_state_machine(msb, mrq->error);
+
+again:
+ switch (msb->state) {
+
+ /* HACK: JMicron handling of TPCs between 8 and
+ * sizeof(memstick_request.data) is broken due to a hardware
+ * bug in the PIO mode that is used for these TPCs.
+ * Therefore split the write.
+ */
+
+ case MSB_WB_SEND_WRITE_PARAMS:
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ &msb->regs.param))
+ return 0;
+
+ msb->state = MSB_WB_SEND_WRITE_OOB;
+ return 0;
+
+ case MSB_WB_SEND_WRITE_OOB:
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register),
+ &msb->regs.extra_data))
+ return 0;
+ msb->state = MSB_WB_SEND_WRITE_COMMAND;
+ return 0;
+
+
+ case MSB_WB_SEND_WRITE_COMMAND:
+ command = MS_CMD_BLOCK_WRITE;
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ msb->state = MSB_WB_SEND_INT_REQ;
+ return 0;
+
+ case MSB_WB_SEND_INT_REQ:
+ msb->state = MSB_WB_RECEIVE_INT_REQ;
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ fallthrough;
+
+ case MSB_WB_RECEIVE_INT_REQ:
+ intreg = mrq->data[0];
+ msb->regs.status.interrupt = intreg;
+
+ /* errors mean out of here, and fast... */
+ if (intreg & (MEMSTICK_INT_CMDNAK))
+ return msb_exit_state_machine(msb, -EIO);
+
+ if (intreg & MEMSTICK_INT_ERR)
+ return msb_exit_state_machine(msb, -EBADMSG);
+
+
+ /* for last page we need to poll CED */
+ if (msb->current_page == msb->pages_in_block) {
+ if (intreg & MEMSTICK_INT_CED)
+ return msb_exit_state_machine(msb, 0);
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+
+ }
+
+ /* for non-last page we need BREQ before writing next chunk */
+ if (!(intreg & MEMSTICK_INT_BREQ)) {
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+ }
+
+ msb->int_polling = false;
+ msb->state = MSB_WB_SEND_WRITE_DATA;
+ fallthrough;
+
+ case MSB_WB_SEND_WRITE_DATA:
+ sg_init_table(sg, ARRAY_SIZE(sg));
+
+ if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+ msb->current_sg_offset,
+ msb->page_size) < msb->page_size)
+ return msb_exit_state_machine(msb, -EIO);
+
+ memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
+ mrq->need_card_int = 1;
+ msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
+ return 0;
+
+ case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
+ msb->current_page++;
+ msb->current_sg_offset += msb->page_size;
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used to send simple IO requests to the device that
+ * consist of a register write + command.
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ u8 intreg;
+
+ if (mrq->error) {
+ dbg("send_command: unknown error");
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+again:
+ switch (msb->state) {
+
+ /* HACK: see h_msb_write_block */
+ case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ &msb->regs.param))
+ return 0;
+ msb->state = MSB_SC_SEND_WRITE_OOB;
+ return 0;
+
+ case MSB_SC_SEND_WRITE_OOB:
+ if (!msb->command_need_oob) {
+ msb->state = MSB_SC_SEND_COMMAND;
+ goto again;
+ }
+
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register),
+ &msb->regs.extra_data))
+ return 0;
+
+ msb->state = MSB_SC_SEND_COMMAND;
+ return 0;
+
+ case MSB_SC_SEND_COMMAND:
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+ msb->state = MSB_SC_SEND_INT_REQ;
+ return 0;
+
+ case MSB_SC_SEND_INT_REQ:
+ msb->state = MSB_SC_RECEIVE_INT_REQ;
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ fallthrough;
+
+ case MSB_SC_RECEIVE_INT_REQ:
+ intreg = mrq->data[0];
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return msb_exit_state_machine(msb, -EIO);
+ if (intreg & MEMSTICK_INT_ERR)
+ return msb_exit_state_machine(msb, -EBADMSG);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ msb->state = MSB_SC_SEND_INT_REQ;
+ goto again;
+ }
+
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ BUG();
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ u8 command = MS_CMD_RESET;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+
+ if (mrq->error)
+ return msb_exit_state_machine(msb, mrq->error);
+
+ switch (msb->state) {
+ case MSB_RS_SEND:
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ mrq->need_card_int = 0;
+ msb->state = MSB_RS_CONFIRM;
+ return 0;
+ case MSB_RS_CONFIRM:
+ return msb_exit_state_machine(msb, 0);
+ }
+ BUG();
+}
+
+/* This handler is used to do serial->parallel switch */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct memstick_host *host = card->host;
+
+ if (mrq->error) {
+ dbg("parallel_switch: error");
+ msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+
+ switch (msb->state) {
+ case MSB_PS_SEND_SWITCH_COMMAND:
+ /* Set the parallel interface on memstick side */
+ msb->regs.param.system |= MEMSTICK_SYS_PAM;
+
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ 1,
+ (unsigned char *)&msb->regs.param))
+ return 0;
+
+ msb->state = MSB_PS_SWICH_HOST;
+ return 0;
+
+ case MSB_PS_SWICH_HOST:
+ /* Set parallel interface on our side + send a dummy request
+ * to see if card responds
+ */
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+ memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+ msb->state = MSB_PS_CONFIRM;
+ return 0;
+
+ case MSB_PS_CONFIRM:
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ BUG();
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb, bool full)
+{
+
+ bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
+ struct memstick_dev *card = msb->card;
+ struct memstick_host *host = card->host;
+ int error;
+
+ /* Reset the card */
+ msb->regs.param.system = MEMSTICK_SYS_BAMD;
+
+ if (full) {
+ error = host->set_param(host,
+ MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ if (error)
+ goto out_error;
+
+ msb_invalidate_reg_window(msb);
+
+ error = host->set_param(host,
+ MEMSTICK_POWER, MEMSTICK_POWER_ON);
+ if (error)
+ goto out_error;
+
+ error = host->set_param(host,
+ MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+ if (error) {
+out_error:
+ dbg("Failed to reset the host controller");
+ msb->read_only = true;
+ return -EFAULT;
+ }
+ }
+
+ error = msb_run_state_machine(msb, h_msb_reset);
+ if (error) {
+ dbg("Failed to reset the card");
+ msb->read_only = true;
+ return -ENODEV;
+ }
+
+ /* Set parallel mode */
+ if (was_parallel)
+ msb_switch_to_parallel(msb);
+ return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+ int error;
+
+ error = msb_run_state_machine(msb, h_msb_parallel_switch);
+ if (error) {
+ pr_err("Switch to parallel failed");
+ msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+ msb_reset(msb, true);
+ return -EFAULT;
+ }
+
+ msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+ return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+ u16 pba, u8 page, u8 flag)
+{
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+ msb->regs.extra_data.overwrite_flag = flag;
+ msb->command_value = MS_CMD_BLOCK_WRITE;
+ msb->command_need_oob = true;
+
+ dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+ flag, pba, page);
+ return msb_run_state_machine(msb, h_msb_send_command);
+}
+
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+ pr_notice("marking pba %d as bad", pba);
+ msb_reset(msb, true);
+ return msb_set_overwrite_flag(
+ msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+ dbg("marking page %d of pba %d as bad", page, pba);
+ msb_reset(msb, true);
+ return msb_set_overwrite_flag(msb,
+ pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+ int error, try;
+
+ if (msb->read_only)
+ return -EROFS;
+
+ dbg_verbose("erasing pba %d", pba);
+
+ for (try = 1; try < 3; try++) {
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = 0;
+ msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+ msb->command_value = MS_CMD_BLOCK_ERASE;
+ msb->command_need_oob = false;
+
+
+ error = msb_run_state_machine(msb, h_msb_send_command);
+ if (!error || msb_reset(msb, true))
+ break;
+ }
+
+ if (error) {
+ pr_err("erase failed, marking pba %d as bad", pba);
+ msb_mark_bad(msb, pba);
+ }
+
+ dbg_verbose("erase success, marking pba %d as unused", pba);
+ msb_mark_block_unused(msb, pba);
+ __set_bit(pba, msb->erased_blocks_bitmap);
+ return error;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+ u16 pba, u8 page, struct ms_extra_data_register *extra,
+ struct scatterlist *sg, int offset)
+{
+ int try, error;
+
+ if (pba == MS_BLOCK_INVALID) {
+ unsigned long flags;
+ struct sg_mapping_iter miter;
+ size_t len = msb->page_size;
+
+ dbg_verbose("read unmapped sector. returning 0xFF");
+
+ local_irq_save(flags);
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+
+ int chunklen;
+
+ if (offset && offset >= miter.length) {
+ offset -= miter.length;
+ continue;
+ }
+
+ chunklen = min(miter.length - offset, len);
+ memset(miter.addr + offset, 0xFF, chunklen);
+ len -= chunklen;
+ offset = 0;
+ }
+
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+
+ if (offset)
+ return -EFAULT;
+
+ if (extra)
+ memset(extra, 0xFF, sizeof(*extra));
+ return 0;
+ }
+
+ if (pba >= msb->block_count) {
+ pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ for (try = 1; try < 3; try++) {
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_PAGE;
+
+ msb->current_sg = sg;
+ msb->current_sg_offset = offset;
+ error = msb_run_state_machine(msb, h_msb_read_page);
+
+
+ if (error == -EUCLEAN) {
+ pr_notice("correctable error on pba %d, page %d",
+ pba, page);
+ error = 0;
+ }
+
+ if (!error && extra)
+ *extra = msb->regs.extra_data;
+
+ if (!error || msb_reset(msb, true))
+ break;
+
+ }
+
+ /* Mark bad pages */
+ if (error == -EBADMSG) {
+ pr_err("uncorrectable error on read of pba %d, page %d",
+ pba, page);
+
+ if (msb->regs.extra_data.overwrite_flag &
+ MEMSTICK_OVERWRITE_PGST0)
+ msb_mark_page_bad(msb, pba, page);
+ return -EBADMSG;
+ }
+
+ if (error)
+ pr_err("read of pba %d, page %d failed with error %d",
+ pba, page, error);
+ return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+ struct ms_extra_data_register *extra)
+{
+ int error;
+
+ BUG_ON(!extra);
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_EXTRA;
+
+ if (pba > msb->block_count) {
+ pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ error = msb_run_state_machine(msb, h_msb_read_page);
+ *extra = msb->regs.extra_data;
+
+ if (error == -EUCLEAN) {
+ pr_notice("correctable error on pba %d, page %d",
+ pba, page);
+ return 0;
+ }
+
+ return error;
+}
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+ struct scatterlist *orig_sg, int offset)
+{
+ struct scatterlist sg;
+ int page = 0, error;
+
+ sg_init_one(&sg, msb->block_buffer, msb->block_size);
+
+ while (page < msb->pages_in_block) {
+
+ error = msb_read_page(msb, pba, page,
+ NULL, &sg, page * msb->page_size);
+ if (error)
+ return error;
+ page++;
+ }
+
+ if (msb_sg_compare_to_buffer(orig_sg, offset,
+ msb->block_buffer, msb->block_size))
+ return -EIO;
+ return 0;
+}
+
+/* Writes exactly one block + oob */
+static int msb_write_block(struct msb_data *msb,
+ u16 pba, u32 lba, struct scatterlist *sg, int offset)
+{
+ int error, current_try = 1;
+
+ BUG_ON(sg->length < msb->page_size);
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (pba == MS_BLOCK_INVALID) {
+ pr_err(
+ "BUG: write: attempt to write MS_BLOCK_INVALID block");
+ return -EINVAL;
+ }
+
+ if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+ pr_err(
+ "BUG: write: attempt to write beyond the end of device");
+ return -EINVAL;
+ }
+
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ pr_err("BUG: write: lba zone mismatch");
+ return -EINVAL;
+ }
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ pr_err("BUG: write: attempt to write to boot blocks!");
+ return -EINVAL;
+ }
+
+ while (1) {
+
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+ msb->regs.param.page_address = 0;
+ msb->regs.param.block_address = cpu_to_be16(pba);
+
+ msb->regs.extra_data.management_flag = 0xFF;
+ msb->regs.extra_data.overwrite_flag = 0xF8;
+ msb->regs.extra_data.logical_address = cpu_to_be16(lba);
+
+ msb->current_sg = sg;
+ msb->current_sg_offset = offset;
+ msb->current_page = 0;
+
+ error = msb_run_state_machine(msb, h_msb_write_block);
+
+ /* The sector we just wrote to is assumed erased since its pba
+ * was erased. If it wasn't erased, the write will succeed
+ * and will just clear the bits that were set in the block;
+ * thus we test that what we have written
+ * matches what we expect.
+ * We do trust the blocks that we erased ourselves.
+ */
+ if (!error && (verify_writes ||
+ !test_bit(pba, msb->erased_blocks_bitmap)))
+ error = msb_verify_block(msb, pba, sg, offset);
+
+ if (!error)
+ break;
+
+ if (current_try > 1 || msb_reset(msb, true))
+ break;
+
+ pr_err("write failed, trying to erase the pba %d", pba);
+ error = msb_erase_block(msb, pba);
+ if (error)
+ break;
+
+ current_try++;
+ }
+ return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+ u16 pos;
+ int pba = zone * MS_BLOCKS_IN_ZONE;
+ int i;
+
+ get_random_bytes(&pos, sizeof(pos));
+
+ if (!msb->free_block_count[zone]) {
+ pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ pos %= msb->free_block_count[zone];
+
+ dbg_verbose("have %d choices for a free block, selected randomly: %d",
+ msb->free_block_count[zone], pos);
+
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba);
+ for (i = 0; i < pos; ++i)
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba + 1);
+
+ dbg_verbose("result of the free blocks scan: pba %d", pba);
+
+ if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+ pr_err("BUG: can't get a free block");
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ msb_mark_block_used(msb, pba);
+ return pba;
+}
+
+static int msb_update_block(struct msb_data *msb, u16 lba,
+ struct scatterlist *sg, int offset)
+{
+ u16 pba, new_pba;
+ int error, try;
+
+ pba = msb->lba_to_pba_table[lba];
+ dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
+
+ if (pba != MS_BLOCK_INVALID) {
+ dbg_verbose("setting the update flag on the block");
+ msb_set_overwrite_flag(msb, pba, 0,
+ 0xFF & ~MEMSTICK_OVERWRITE_UDST);
+ }
+
+ for (try = 0; try < 3; try++) {
+ new_pba = msb_get_free_block(msb,
+ msb_get_zone_from_lba(lba));
+
+ if (new_pba == MS_BLOCK_INVALID) {
+ error = -EIO;
+ goto out;
+ }
+
+ dbg_verbose("block update: writing updated block to the pba %d",
+ new_pba);
+ error = msb_write_block(msb, new_pba, lba, sg, offset);
+ if (error == -EBADMSG) {
+ msb_mark_bad(msb, new_pba);
+ continue;
+ }
+
+ if (error)
+ goto out;
+
+ dbg_verbose("block update: erasing the old block");
+ msb_erase_block(msb, pba);
+ msb->lba_to_pba_table[lba] = new_pba;
+ return 0;
+ }
+out:
+ if (error) {
+ pr_err("block update error after %d tries, switching to r/o mode", try);
+ msb->read_only = true;
+ }
+ return error;
+}
+
+/* Converts endianness in the boot block for easy use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+ p->header.block_id = be16_to_cpu(p->header.block_id);
+ p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+ p->entry.disabled_block.start_addr
+ = be32_to_cpu(p->entry.disabled_block.start_addr);
+ p->entry.disabled_block.data_size
+ = be32_to_cpu(p->entry.disabled_block.data_size);
+ p->entry.cis_idi.start_addr
+ = be32_to_cpu(p->entry.cis_idi.start_addr);
+ p->entry.cis_idi.data_size
+ = be32_to_cpu(p->entry.cis_idi.data_size);
+ p->attr.block_size = be16_to_cpu(p->attr.block_size);
+ p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+ p->attr.number_of_effective_blocks
+ = be16_to_cpu(p->attr.number_of_effective_blocks);
+ p->attr.page_size = be16_to_cpu(p->attr.page_size);
+ p->attr.memory_manufacturer_code
+ = be16_to_cpu(p->attr.memory_manufacturer_code);
+ p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+ p->attr.implemented_capacity
+ = be16_to_cpu(p->attr.implemented_capacity);
+ p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+ p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+ int pba = 0;
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ struct ms_boot_page *page;
+
+ msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+ msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+ msb->boot_block_count = 0;
+
+ dbg_verbose("Start of a scan for the boot blocks");
+
+ if (!msb->boot_page) {
+ page = kmalloc_array(2, sizeof(struct ms_boot_page),
+ GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ msb->boot_page = page;
+ } else
+ page = msb->boot_page;
+
+ msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+ for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
+
+ sg_init_one(&sg, page, sizeof(*page));
+ if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
+ dbg("boot scan: can't read pba %d", pba);
+ continue;
+ }
+
+ if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+ dbg("management flag doesn't indicate boot block %d",
+ pba);
+ continue;
+ }
+
+ if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+ dbg("the pba at %d doesn't contain boot block ID", pba);
+ continue;
+ }
+
+ msb_fix_boot_page_endianness(page);
+ msb->boot_block_locations[msb->boot_block_count] = pba;
+
+ page++;
+ msb->boot_block_count++;
+
+ if (msb->boot_block_count == 2)
+ break;
+ }
+
+ if (!msb->boot_block_count) {
+ pr_err("media doesn't contain master page, aborting");
+ return -EIO;
+ }
+
+ dbg_verbose("End of scan for boot blocks");
+ return 0;
+}
+
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+ struct ms_boot_page *boot_block;
+ struct scatterlist sg;
+ u16 *buffer = NULL;
+ int offset = 0;
+ int i, error = 0;
+ int data_size, data_offset, page, page_offset, size_to_read;
+ u16 pba;
+
+ BUG_ON(block_nr > 1);
+ boot_block = &msb->boot_page[block_nr];
+ pba = msb->boot_block_locations[block_nr];
+
+ if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+ return -EINVAL;
+
+ data_size = boot_block->entry.disabled_block.data_size;
+ data_offset = sizeof(struct ms_boot_page) +
+ boot_block->entry.disabled_block.start_addr;
+ if (!data_size)
+ return 0;
+
+ page = data_offset / msb->page_size;
+ page_offset = data_offset % msb->page_size;
+ size_to_read =
+ DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+ msb->page_size;
+
+ dbg("reading bad block of boot block at pba %d, offset %d len %d",
+ pba, data_offset, data_size);
+
+ buffer = kzalloc(size_to_read, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Read the buffer */
+ sg_init_one(&sg, buffer, size_to_read);
+
+ while (offset < size_to_read) {
+ error = msb_read_page(msb, pba, page, NULL, &sg, offset);
+ if (error)
+ goto out;
+
+ page++;
+ offset += msb->page_size;
+
+ if (page == msb->pages_in_block) {
+ pr_err(
+ "bad block table extends beyond the boot block");
+ break;
+ }
+ }
+
+ /* Process the bad block table */
+ for (i = page_offset; i < data_size / sizeof(u16); i++) {
+
+ u16 bad_block = be16_to_cpu(buffer[i]);
+
+ if (bad_block >= msb->block_count) {
+ dbg("bad block table contains invalid block %d",
+ bad_block);
+ continue;
+ }
+
+ if (test_bit(bad_block, msb->used_blocks_bitmap)) {
+ dbg("duplicate bad block %d in the table",
+ bad_block);
+ continue;
+ }
+
+ dbg("block %d is marked as factory bad", bad_block);
+ msb_mark_block_used(msb, bad_block);
+ }
+out:
+ kfree(buffer);
+ return error;
+}
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+ int i;
+
+ if (msb->ftl_initialized)
+ return 0;
+
+ msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
+ msb->logical_block_count = msb->zone_count * 496 - 2;
+
+ msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->lba_to_pba_table =
+ kmalloc_array(msb->logical_block_count, sizeof(u16),
+ GFP_KERNEL);
+
+ if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+ !msb->erased_blocks_bitmap) {
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < msb->zone_count; i++)
+ msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+ memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+ msb->logical_block_count * sizeof(u16));
+
+ dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
+ msb->zone_count, msb->logical_block_count);
+
+ msb->ftl_initialized = true;
+ return 0;
+}
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+ u16 pba, lba, other_block;
+ u8 overwrite_flag, management_flag, other_overwrite_flag;
+ int error;
+ struct ms_extra_data_register extra;
+ u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+
+ if (!overwrite_flags)
+ return -ENOMEM;
+
+ dbg("Start of media scanning");
+ for (pba = 0; pba < msb->block_count; pba++) {
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ dbg_verbose("pba %05d -> [boot block]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ dbg_verbose("pba %05d -> [factory bad]", pba);
+ continue;
+ }
+
+ memset(&extra, 0, sizeof(extra));
+ error = msb_read_oob(msb, pba, 0, &extra);
+
+ /* can't trust the page if we can't read the oob */
+ if (error == -EBADMSG) {
+ pr_notice(
+ "oob of pba %d damaged, will try to erase it", pba);
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ } else if (error) {
+ pr_err("unknown error %d on read of oob of pba %d - aborting",
+ error, pba);
+
+ kfree(overwrite_flags);
+ return error;
+ }
+
+ lba = be16_to_cpu(extra.logical_address);
+ management_flag = extra.management_flag;
+ overwrite_flag = extra.overwrite_flag;
+ overwrite_flags[pba] = overwrite_flag;
+
+ /* Skip bad blocks */
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+ dbg("pba %05d -> [BAD]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ /* Skip system/drm blocks */
+ if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
+ MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
+ dbg("pba %05d -> [reserved management flag %02x]",
+ pba, management_flag);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ /* Erase temporary tables */
+ if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+ dbg("pba %05d -> [temp table] - will erase", pba);
+
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ if (lba == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [free]", pba);
+ continue;
+ }
+
+ msb_mark_block_used(msb, pba);
+
+		/* Block has an LBA that does not match its zone */
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ pr_notice("pba %05d -> [bad lba %05d] - will erase",
+ pba, lba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ /* No collisions - great */
+ if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ other_block = msb->lba_to_pba_table[lba];
+ other_overwrite_flag = overwrite_flags[other_block];
+
+ pr_notice("Collision between pba %d and pba %d",
+ pba, other_block);
+
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+ pr_notice("pba %d is marked as stable, use it", pba);
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+ pr_notice("pba %d is marked as stable, use it",
+ other_block);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+		pr_notice("collision between blocks %d and %d, neither is marked stable, erasing pba %d",
+ pba, other_block, other_block);
+
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ }
+
+ dbg("End of media scanning");
+ kfree(overwrite_flags);
+ return 0;
+}
+
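+/* Cache flush timer expired - ask the IO worker to flush the write cache */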
+static void msb_cache_flush_timer(struct timer_list *t)
+{
+ struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
+
+ msb->need_flush_cache = true;
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+
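+/* Drop the contents of the write cache without writing them back */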
+static void msb_cache_discard(struct msb_data *msb)
+{
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return;
+
+ del_timer_sync(&msb->cache_flush_timer);
+
+ dbg_verbose("Discarding the write cache");
+ msb->cache_block_lba = MS_BLOCK_INVALID;
+ bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+}
+
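+/* Allocate the one-block write cache and set up its flush timer */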
+static int msb_cache_init(struct msb_data *msb)
+{
+ timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
+
+ if (!msb->cache)
+ msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->cache)
+ return -ENOMEM;
+
+ msb_cache_discard(msb);
+ return 0;
+}
+
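+/*
+ * Write the cached block back to the media: pages that were never placed in
+ * the cache are first read back from the old physical block, the whole block
+ * is then rewritten, and pages whose old contents could not be read are
+ * marked as damaged.
+ */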
+static int msb_cache_flush(struct msb_data *msb)
+{
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ int page, offset, error;
+ u16 pba, lba;
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return 0;
+
+ lba = msb->cache_block_lba;
+ pba = msb->lba_to_pba_table[lba];
+
+ dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
+ pba, msb->cache_block_lba);
+
+	sg_init_one(&sg, msb->cache, msb->block_size);
+
+ /* Read all missing pages in cache */
+ for (page = 0; page < msb->pages_in_block; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ offset = page * msb->page_size;
+
+ dbg_verbose("reading non-present sector %d of cache block %d",
+ page, lba);
+ error = msb_read_page(msb, pba, page, &extra, &sg, offset);
+
+ /* Bad pages are copied with 00 page status */
+ if (error == -EBADMSG) {
+ pr_err("read error on sector %d, contents probably damaged", page);
+ continue;
+ }
+
+ if (error)
+ return error;
+
+ if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+ MEMSTICK_OV_PG_NORMAL) {
+ dbg("page %d is marked as bad", page);
+ continue;
+ }
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ }
+
+ /* Write the cache now */
+ error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
+ pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+	/* Mark pages whose contents could not be recovered as damaged */
+ if (!error) {
+ for (page = 0; page < msb->pages_in_block; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ dbg("marking page %d as containing damaged data",
+ page);
+ msb_set_overwrite_flag(msb,
+				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+ }
+ }
+
+ msb_cache_discard(msb);
+ return error;
+}
+
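+/*
+ * Put one page of a write request into the cache.  If the cache currently
+ * holds a different block, it is flushed first; with add_to_cache_only set,
+ * the page is only stored when its block is already cached.
+ */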
+static int msb_cache_write(struct msb_data *msb, int lba,
+ int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
+{
+ int error;
+ struct scatterlist sg_tmp[10];
+
+ if (msb->read_only)
+ return -EROFS;
+
+	if (add_to_cache_only &&
+	    (msb->cache_block_lba == MS_BLOCK_INVALID ||
+	     lba != msb->cache_block_lba))
+		return 0;
+
+	/* If we need to write a different block, flush the cache first */
+ if (msb->cache_block_lba != MS_BLOCK_INVALID &&
+ lba != msb->cache_block_lba) {
+ dbg_verbose("first flush the cache");
+ error = msb_cache_flush(msb);
+ if (error)
+ return error;
+ }
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID) {
+ msb->cache_block_lba = lba;
+ mod_timer(&msb->cache_flush_timer,
+ jiffies + msecs_to_jiffies(cache_flush_timeout));
+ }
+
+	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
+
+ sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+ msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+
+ sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
+ msb->cache + page * msb->page_size, msb->page_size);
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ return 0;
+}
+
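+/*
+ * Read one page, either from the write cache (if the page is cached and
+ * valid) or from the media; pages read from the media are opportunistically
+ * added to the cache.
+ */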
+static int msb_cache_read(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, int offset)
+{
+ int pba = msb->lba_to_pba_table[lba];
+ struct scatterlist sg_tmp[10];
+ int error = 0;
+
+ if (lba == msb->cache_block_lba &&
+ test_bit(page, &msb->valid_cache_bitmap)) {
+
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+ lba, pba, page);
+
+ sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+ msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
+ offset, msb->page_size);
+ sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
+ msb->cache + msb->page_size * page,
+ msb->page_size);
+ } else {
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+ lba, pba, page);
+
+ error = msb_read_page(msb, pba, page, NULL, sg, offset);
+ if (error)
+ return error;
+
+ msb_cache_write(msb, lba, page, true, sg, offset);
+ }
+ return error;
+}
+
+/* Emulated CHS geometry table
+ * The exact contents aren't that important; any values that still
+ * cover the whole disk would do. The 64 MB entry is what Windows
+ * reports for my 64M memstick.
+ */
+
+static const struct chs_entry chs_table[] = {
+/* size  sectors  cylinders  heads */
+ { 4, 16, 247, 2 },
+ { 8, 16, 495, 2 },
+ { 16, 16, 495, 4 },
+ { 32, 16, 991, 4 },
+ { 64, 16, 991, 8 },
+ {128, 16, 991, 16 },
+ { 0 }
+};
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ struct ms_boot_page *boot_block;
+ int error = 0, i, raw_size_in_megs;
+
+ msb->caps = 0;
+
+ if (card->id.class >= MEMSTICK_CLASS_ROM &&
+ card->id.class <= MEMSTICK_CLASS_ROM)
+ msb->read_only = true;
+
+ msb->state = -1;
+ error = msb_reset(msb, false);
+ if (error)
+ return error;
+
+	/* Due to a bug in the JMicron host driver written by Alex Dubov,
+	 * its serial mode barely works, so we switch to parallel mode
+	 * right away.
+	 */
+ if (host->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+
+ msb->page_size = sizeof(struct ms_boot_page);
+
+ /* Read the boot page */
+ error = msb_read_boot_blocks(msb);
+ if (error)
+ return -EIO;
+
+ boot_block = &msb->boot_page[0];
+
+	/* Save the interesting attributes from the boot page */
+ msb->block_count = boot_block->attr.number_of_blocks;
+ msb->page_size = boot_block->attr.page_size;
+
+ msb->pages_in_block = boot_block->attr.block_size * 2;
+ msb->block_size = msb->page_size * msb->pages_in_block;
+
+ if ((size_t)msb->page_size > PAGE_SIZE) {
+		/* this isn't supported by Linux at all anyway */
+		dbg("device page size %d isn't supported", msb->page_size);
+ return -EINVAL;
+ }
+
+ msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->block_buffer)
+ return -ENOMEM;
+
+ raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+ for (i = 0; chs_table[i].size; i++) {
+
+ if (chs_table[i].size != raw_size_in_megs)
+ continue;
+
+ msb->geometry.cylinders = chs_table[i].cyl;
+ msb->geometry.heads = chs_table[i].head;
+ msb->geometry.sectors = chs_table[i].sec;
+ break;
+ }
+
+ if (boot_block->attr.transfer_supporting == 1)
+ msb->caps |= MEMSTICK_CAP_PAR4;
+
+ if (boot_block->attr.device_type & 0x03)
+ msb->read_only = true;
+
+ dbg("Total block count = %d", msb->block_count);
+ dbg("Each block consists of %d pages", msb->pages_in_block);
+ dbg("Page size = %d bytes", msb->page_size);
+ dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
+ dbg("Read only: %d", msb->read_only);
+
+#if 0
+ /* Now we can switch the interface */
+ if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+#endif
+
+ error = msb_cache_init(msb);
+ if (error)
+ return error;
+
+ error = msb_ftl_initialize(msb);
+ if (error)
+ return error;
+
+ /* Read the bad block table */
+ error = msb_read_bad_block_table(msb, 0);
+
+ if (error && error != -ENOMEM) {
+ dbg("failed to read bad block table from primary boot block, trying from backup");
+ error = msb_read_bad_block_table(msb, 1);
+ }
+
+ if (error)
+ return error;
+
+ /* *drum roll* Scan the media */
+ error = msb_ftl_scan(msb);
+ if (error) {
+ pr_err("Scan of media failed");
+ return error;
+ }
+
+	return 0;
+}
+
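+/*
+ * Service a write request: spans that cover a whole block are written
+ * directly with msb_update_block(), everything else goes through the write
+ * cache one page at a time.
+ */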
+static int msb_do_write_request(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
+{
+ int error = 0;
+ off_t offset = 0;
+ *sucessfuly_written = 0;
+
+ while (offset < len) {
+ if (page == 0 && len - offset >= msb->block_size) {
+
+ if (msb->cache_block_lba == lba)
+ msb_cache_discard(msb);
+
+ dbg_verbose("Writing whole lba %d", lba);
+ error = msb_update_block(msb, lba, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->block_size;
+ *sucessfuly_written += msb->block_size;
+ lba++;
+ continue;
+ }
+
+ error = msb_cache_write(msb, lba, page, false, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->page_size;
+ *sucessfuly_written += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
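+/* Service a read request one page at a time, going through the cache */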
+static int msb_do_read_request(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, int len, int *sucessfuly_read)
+{
+ int error = 0;
+ int offset = 0;
+ *sucessfuly_read = 0;
+
+ while (offset < len) {
+
+ error = msb_cache_read(msb, lba, page, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->page_size;
+ *sucessfuly_read += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
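+/*
+ * The IO worker: flushes the write cache when asked to, and processes the
+ * currently queued block request, translating its sector range into logical
+ * blocks and pages.
+ */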
+static void msb_io_work(struct work_struct *work)
+{
+ struct msb_data *msb = container_of(work, struct msb_data, io_work);
+ int page, error, len;
+ sector_t lba;
+ struct scatterlist *sg = msb->prealloc_sg;
+ struct request *req;
+
+ dbg_verbose("IO: work started");
+
+ while (1) {
+ spin_lock_irq(&msb->q_lock);
+
+ if (msb->need_flush_cache) {
+ msb->need_flush_cache = false;
+ spin_unlock_irq(&msb->q_lock);
+ msb_cache_flush(msb);
+ continue;
+ }
+
+ req = msb->req;
+ if (!req) {
+			dbg_verbose("IO: no more requests, exiting");
+ spin_unlock_irq(&msb->q_lock);
+ return;
+ }
+
+ spin_unlock_irq(&msb->q_lock);
+
+ /* process the request */
+ dbg_verbose("IO: processing new request");
+ blk_rq_map_sg(msb->queue, req, sg);
+
+ lba = blk_rq_pos(req);
+
+ sector_div(lba, msb->page_size / 512);
+ page = sector_div(lba, msb->pages_in_block);
+
+ if (rq_data_dir(msb->req) == READ)
+ error = msb_do_read_request(msb, lba, page, sg,
+ blk_rq_bytes(req), &len);
+ else
+ error = msb_do_write_request(msb, lba, page, sg,
+ blk_rq_bytes(req), &len);
+
+ if (len && !blk_update_request(req, BLK_STS_OK, len)) {
+ __blk_mq_end_request(req, BLK_STS_OK);
+ spin_lock_irq(&msb->q_lock);
+ msb->req = NULL;
+ spin_unlock_irq(&msb->q_lock);
+ }
+
+ if (error && msb->req) {
+ blk_status_t ret = errno_to_blk_status(error);
+
+ dbg_verbose("IO: ending one sector of the request with error");
+ blk_mq_end_request(req, ret);
+ spin_lock_irq(&msb->q_lock);
+ msb->req = NULL;
+ spin_unlock_irq(&msb->q_lock);
+ }
+
+ if (msb->req)
+ dbg_verbose("IO: request still pending");
+ }
+}
+
+static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
+static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
+
+static void msb_data_clear(struct msb_data *msb)
+{
+ kfree(msb->boot_page);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->cache);
+ msb->card = NULL;
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ struct msb_data *msb = bdev->bd_disk->private_data;
+ *geo = msb->geometry;
+ return 0;
+}
+
+static void msb_bd_free_disk(struct gendisk *disk)
+{
+ struct msb_data *msb = disk->private_data;
+
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ mutex_unlock(&msb_disk_lock);
+
+ kfree(msb);
+}
+
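+/*
+ * blk-mq .queue_rq handler: refuse requests on a dead card, accept at most
+ * one request at a time and kick the IO worker.
+ */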
+static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct memstick_dev *card = hctx->queue->queuedata;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct request *req = bd->rq;
+
+ dbg_verbose("Submit request");
+
+ spin_lock_irq(&msb->q_lock);
+
+ if (msb->card_dead) {
+ dbg("Refusing requests on removed card");
+
+ WARN_ON(!msb->io_queue_stopped);
+
+ spin_unlock_irq(&msb->q_lock);
+ blk_mq_start_request(req);
+ return BLK_STS_IOERR;
+ }
+
+ if (msb->req) {
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
+ blk_mq_start_request(req);
+ msb->req = req;
+
+ if (!msb->io_queue_stopped)
+ queue_work(msb->io_queue, &msb->io_work);
+
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_OK;
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+
+ return (msb->card_dead == 0);
+}
+
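+/* Stop the block queue and the IO worker, requeueing any request in flight */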
+static void msb_stop(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ dbg("Stopping all msblock IO");
+
+ blk_mq_stop_hw_queues(msb->queue);
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->io_queue_stopped = true;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ del_timer_sync(&msb->cache_flush_timer);
+ flush_workqueue(msb->io_queue);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ if (msb->req) {
+ blk_mq_requeue_request(msb->req, false);
+ msb->req = NULL;
+ }
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+}
+
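+/* Restart IO after msb_stop() (e.g. on resume) and schedule a cache flush */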
+static void msb_start(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ dbg("Resuming IO from msblock");
+
+ msb_invalidate_reg_window(msb);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ if (!msb->io_queue_stopped || msb->card_dead) {
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+	/* Kick the cache flush anyway, it's harmless */
+ msb->need_flush_cache = true;
+ msb->io_queue_stopped = false;
+
+ blk_mq_start_hw_queues(msb->queue);
+
+	queue_work(msb->io_queue, &msb->io_work);
+}
+
+static const struct block_device_operations msb_bdops = {
+ .owner = THIS_MODULE,
+ .getgeo = msb_bd_getgeo,
+ .free_disk = msb_bd_free_disk,
+};
+
+static const struct blk_mq_ops msb_mq_ops = {
+ .queue_rq = msb_queue_rq,
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ int rc;
+ unsigned long capacity;
+
+ mutex_lock(&msb_disk_lock);
+ msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
+ mutex_unlock(&msb_disk_lock);
+
+ if (msb->disk_id < 0)
+ return msb->disk_id;
+
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (rc)
+ goto out_release_id;
+
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ if (IS_ERR(msb->disk)) {
+ rc = PTR_ERR(msb->disk);
+ goto out_free_tag_set;
+ }
+ msb->queue = msb->disk->queue;
+
+ blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+ blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+ blk_queue_max_segment_size(msb->queue,
+ MS_BLOCK_MAX_PAGES * msb->page_size);
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+ sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
+ msb->disk->fops = &msb_bdops;
+ msb->disk->private_data = msb;
+
+ capacity = msb->pages_in_block * msb->logical_block_count;
+ capacity *= (msb->page_size / 512);
+ set_capacity(msb->disk, capacity);
+ dbg("Set total disk size to %lu sectors", capacity);
+
+ msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
+ if (!msb->io_queue) {
+ rc = -ENOMEM;
+ goto out_cleanup_disk;
+ }
+
+ INIT_WORK(&msb->io_work, msb_io_work);
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+ if (msb->read_only)
+ set_disk_ro(msb->disk, 1);
+
+ msb_start(card);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_destroy_workqueue;
+ dbg("Disk added");
+ return 0;
+
+out_destroy_workqueue:
+ destroy_workqueue(msb->io_queue);
+out_cleanup_disk:
+ put_disk(msb->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&msb->tag_set);
+out_release_id:
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ mutex_unlock(&msb_disk_lock);
+ return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+ struct msb_data *msb;
+ int rc = 0;
+
+ msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!msb)
+ return -ENOMEM;
+ memstick_set_drvdata(card, msb);
+ msb->card = card;
+ spin_lock_init(&msb->q_lock);
+
+ rc = msb_init_card(card);
+ if (rc)
+ goto out_free;
+
+ rc = msb_init_disk(card);
+ if (!rc) {
+ card->check = msb_check_card;
+ card->stop = msb_stop;
+ card->start = msb_start;
+ return 0;
+ }
+out_free:
+ memstick_set_drvdata(card, NULL);
+ msb_data_clear(msb);
+ kfree(msb);
+ return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ if (!msb->io_queue_stopped)
+ msb_stop(card);
+
+ dbg("Removing the disk device");
+
+	/* From now on, refuse both unhandled and newly queued requests */
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->card_dead = true;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
+
+ /* Remove the disk */
+ del_gendisk(msb->disk);
+ blk_mq_free_tag_set(&msb->tag_set);
+ msb->queue = NULL;
+
+ mutex_lock(&msb_disk_lock);
+ msb_data_clear(msb);
+ mutex_unlock(&msb_disk_lock);
+
+ put_disk(msb->disk);
+ memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+ msb_stop(card);
+ return 0;
+}
+
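+/*
+ * Without CONFIG_MEMSTICK_UNSAFE_RESUME the card is simply declared dead on
+ * resume.  Otherwise it is re-initialized into a temporary msb_data instance
+ * and compared against the pre-suspend state (boot page, FTL tables, block
+ * counts); the old state is kept alive only if everything matches.
+ */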
+static int msb_resume(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct msb_data *new_msb = NULL;
+ bool card_dead = true;
+
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+ msb->card_dead = true;
+ return 0;
+#endif
+ mutex_lock(&card->host->lock);
+
+ new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!new_msb)
+ goto out;
+
+ new_msb->card = card;
+ memstick_set_drvdata(card, new_msb);
+ spin_lock_init(&new_msb->q_lock);
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+ if (msb_init_card(card))
+ goto out;
+
+ if (msb->block_size != new_msb->block_size)
+ goto out;
+
+ if (memcmp(msb->boot_page, new_msb->boot_page,
+ sizeof(struct ms_boot_page)))
+ goto out;
+
+ if (msb->logical_block_count != new_msb->logical_block_count ||
+ memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+ msb->logical_block_count))
+ goto out;
+
+ if (msb->block_count != new_msb->block_count ||
+ !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count))
+ goto out;
+
+ card_dead = false;
+out:
+ if (card_dead)
+ dbg("Card was removed/replaced during suspend");
+
+ msb->card_dead = card_dead;
+ memstick_set_drvdata(card, msb);
+
+ if (new_msb) {
+ msb_data_clear(new_msb);
+ kfree(new_msb);
+ }
+
+ msb_start(card);
+ mutex_unlock(&card->host->lock);
+ return 0;
+}
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_FLASH},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_ROM},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_RO},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_WP},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+ MEMSTICK_CLASS_DUO},
+ {}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+
+static struct memstick_driver msb_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = msb_id_tbl,
+ .probe = msb_probe,
+ .remove = msb_remove,
+ .suspend = msb_suspend,
+ .resume = msb_resume
+};
+
+static int __init msb_init(void)
+{
+ int rc = memstick_register_driver(&msb_driver);
+
+ if (rc)
+ pr_err("failed to register memstick driver (error %d)\n", rc);
+
+ return rc;
+}
+
+static void __exit msb_exit(void)
+{
+ memstick_unregister_driver(&msb_driver);
+ idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+ "Cache flush timeout in msec (1000 default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
new file mode 100644
index 000000000..7058f9aef
--- /dev/null
+++ b/drivers/memstick/core/ms_block.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * Minor portions of the driver are copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * The MemoryStick data structures were also copied from an old broken
+ * driver by the same author; they probably come from the MS specification.
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS 32
+#define MS_BLOCK_MAX_PAGES ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID 0x0001
+#define MS_BLOCK_INVALID 0xffff
+#define MS_MAX_ZONES 16
+#define MS_BLOCKS_IN_ZONE 512
+
+#define MS_BLOCK_MAP_LINE_SZ 16
+#define MS_BLOCK_PART_SHIFT 3
+
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+ MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+ MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | \
+ MEMSTICK_OVERWRITE_PGST0 | \
+ MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGEMENT_FLAG_NORMAL \
+ (MEMSTICK_MANAGEMENT_SYSFLG | \
+ MEMSTICK_MANAGEMENT_SCMS1 | \
+	 MEMSTICK_MANAGEMENT_SCMS0)
+
+struct ms_boot_header {
+ unsigned short block_id;
+ unsigned short format_reserved;
+ unsigned char reserved0[184];
+ unsigned char data_entry;
+ unsigned char reserved1[179];
+} __packed;
+
+
+struct ms_system_item {
+ unsigned int start_addr;
+ unsigned int data_size;
+ unsigned char data_type_id;
+ unsigned char reserved[3];
+} __packed;
+
+struct ms_system_entry {
+ struct ms_system_item disabled_block;
+ struct ms_system_item cis_idi;
+ unsigned char reserved[24];
+} __packed;
+
+struct ms_boot_attr_info {
+ unsigned char memorystick_class;
+ unsigned char format_unique_value1;
+ unsigned short block_size;
+ unsigned short number_of_blocks;
+ unsigned short number_of_effective_blocks;
+ unsigned short page_size;
+ unsigned char extra_data_size;
+ unsigned char format_unique_value2;
+ unsigned char assembly_time[8];
+ unsigned char format_unique_value3;
+ unsigned char serial_number[3];
+ unsigned char assembly_manufacturer_code;
+ unsigned char assembly_model_code[3];
+ unsigned short memory_manufacturer_code;
+ unsigned short memory_device_code;
+ unsigned short implemented_capacity;
+ unsigned char format_unique_value4[2];
+ unsigned char vcc;
+ unsigned char vpp;
+ unsigned short controller_number;
+ unsigned short controller_function;
+ unsigned char reserved0[9];
+ unsigned char transfer_supporting;
+ unsigned short format_unique_value5;
+ unsigned char format_type;
+ unsigned char memorystick_application;
+ unsigned char device_type;
+ unsigned char reserved1[22];
+ unsigned char format_uniqure_value6[2];
+ unsigned char reserved2[15];
+} __packed;
+
+struct ms_cis_idi {
+ unsigned short general_config;
+ unsigned short logical_cylinders;
+ unsigned short reserved0;
+ unsigned short logical_heads;
+ unsigned short track_size;
+ unsigned short page_size;
+ unsigned short pages_per_track;
+ unsigned short msw;
+ unsigned short lsw;
+ unsigned short reserved1;
+ unsigned char serial_number[20];
+ unsigned short buffer_type;
+ unsigned short buffer_size_increments;
+ unsigned short long_command_ecc;
+ unsigned char firmware_version[28];
+ unsigned char model_name[18];
+ unsigned short reserved2[5];
+ unsigned short pio_mode_number;
+ unsigned short dma_mode_number;
+ unsigned short field_validity;
+ unsigned short current_logical_cylinders;
+ unsigned short current_logical_heads;
+ unsigned short current_pages_per_track;
+ unsigned int current_page_capacity;
+ unsigned short mutiple_page_setting;
+ unsigned int addressable_pages;
+ unsigned short single_word_dma;
+ unsigned short multi_word_dma;
+ unsigned char reserved3[128];
+} __packed;
+
+
+struct ms_boot_page {
+ struct ms_boot_header header;
+ struct ms_system_entry entry;
+ struct ms_boot_attr_info attr;
+} __packed;
+
+struct msb_data {
+ struct memstick_dev *card;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ spinlock_t q_lock;
+ struct blk_mq_tag_set tag_set;
+ struct hd_geometry geometry;
+ struct attribute_group attr_group;
+ struct request *req;
+ int caps;
+ int disk_id;
+
+ /* IO */
+ struct workqueue_struct *io_queue;
+ bool io_queue_stopped;
+ struct work_struct io_work;
+ bool card_dead;
+
+ /* Media properties */
+ struct ms_boot_page *boot_page;
+ u16 boot_block_locations[2];
+ int boot_block_count;
+
+ bool read_only;
+ unsigned short page_size;
+ int block_size;
+ int pages_in_block;
+ int zone_count;
+ int block_count;
+ int logical_block_count;
+
+ /* FTL tables */
+ unsigned long *used_blocks_bitmap;
+ unsigned long *erased_blocks_bitmap;
+ u16 *lba_to_pba_table;
+ int free_block_count[MS_MAX_ZONES];
+ bool ftl_initialized;
+
+ /* Cache */
+ unsigned char *cache;
+ unsigned long valid_cache_bitmap;
+ int cache_block_lba;
+ bool need_flush_cache;
+ struct timer_list cache_flush_timer;
+
+ /* Preallocated buffers */
+ unsigned char *block_buffer;
+ struct scatterlist prealloc_sg[MS_BLOCK_MAX_SEGS+1];
+
+
+ /* handler's local data */
+ struct ms_register_addr reg_addr;
+ bool addr_valid;
+
+ u8 command_value;
+ bool command_need_oob;
+ struct scatterlist *current_sg;
+ int current_sg_offset;
+
+ struct ms_register regs;
+ int current_page;
+
+ int state;
+ int exit_error;
+ bool int_polling;
+ unsigned long int_timeout;
+
+};
+
+enum msb_readpage_states {
+ MSB_RP_SEND_BLOCK_ADDRESS = 0,
+ MSB_RP_SEND_READ_COMMAND,
+
+ MSB_RP_SEND_INT_REQ,
+ MSB_RP_RECEIVE_INT_REQ_RESULT,
+
+ MSB_RP_SEND_READ_STATUS_REG,
+ MSB_RP_RECEIVE_STATUS_REG,
+
+ MSB_RP_SEND_OOB_READ,
+ MSB_RP_RECEIVE_OOB_READ,
+
+ MSB_RP_SEND_READ_DATA,
+ MSB_RP_RECEIVE_READ_DATA,
+};
+
+enum msb_write_block_states {
+ MSB_WB_SEND_WRITE_PARAMS = 0,
+ MSB_WB_SEND_WRITE_OOB,
+ MSB_WB_SEND_WRITE_COMMAND,
+
+ MSB_WB_SEND_INT_REQ,
+ MSB_WB_RECEIVE_INT_REQ,
+
+ MSB_WB_SEND_WRITE_DATA,
+ MSB_WB_RECEIVE_WRITE_CONFIRMATION,
+};
+
+enum msb_send_command_states {
+ MSB_SC_SEND_WRITE_PARAMS,
+ MSB_SC_SEND_WRITE_OOB,
+ MSB_SC_SEND_COMMAND,
+
+ MSB_SC_SEND_INT_REQ,
+ MSB_SC_RECEIVE_INT_REQ,
+
+};
+
+enum msb_reset_states {
+ MSB_RS_SEND,
+ MSB_RS_CONFIRM,
+};
+
+enum msb_par_switch_states {
+ MSB_PS_SEND_SWITCH_COMMAND,
+ MSB_PS_SWICH_HOST,
+ MSB_PS_CONFIRM,
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned char sec;
+ unsigned short cyl;
+ unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb, bool full);
+
+static int h_msb_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq);
+
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ pr_err(format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+
+#endif
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
new file mode 100644
index 000000000..61cf75d4a
--- /dev/null
+++ b/drivers/memstick/core/mspro_block.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sony MemoryStick Pro storage support
+ *
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * Special thanks to Carlos Corbacho for providing various MemoryStick cards
+ * that made this driver possible.
+ */
+
+#include <linux/blk-mq.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/memstick.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "mspro_block"
+
+static int major;
+module_param(major, int, 0644);
+
+#define MSPRO_BLOCK_MAX_SEGS 32
+#define MSPRO_BLOCK_MAX_PAGES ((2 << 16) - 1)
+
+#define MSPRO_BLOCK_SIGNATURE 0xa5c3
+#define MSPRO_BLOCK_MAX_ATTRIBUTES 41
+
+#define MSPRO_BLOCK_PART_SHIFT 3
+
+enum {
+ MSPRO_BLOCK_ID_SYSINFO = 0x10,
+ MSPRO_BLOCK_ID_MODELNAME = 0x15,
+ MSPRO_BLOCK_ID_MBR = 0x20,
+ MSPRO_BLOCK_ID_PBR16 = 0x21,
+ MSPRO_BLOCK_ID_PBR32 = 0x22,
+ MSPRO_BLOCK_ID_SPECFILEVALUES1 = 0x25,
+ MSPRO_BLOCK_ID_SPECFILEVALUES2 = 0x26,
+ MSPRO_BLOCK_ID_DEVINFO = 0x30
+};
+
+struct mspro_sys_attr {
+ size_t size;
+ void *data;
+ unsigned char id;
+ char name[32];
+ struct device_attribute dev_attr;
+};
+
+struct mspro_attr_entry {
+ __be32 address;
+ __be32 size;
+ unsigned char id;
+ unsigned char reserved[3];
+} __attribute__((packed));
+
+struct mspro_attribute {
+ __be16 signature;
+ unsigned short version;
+ unsigned char count;
+ unsigned char reserved[11];
+ struct mspro_attr_entry entries[];
+} __attribute__((packed));
+
+struct mspro_sys_info {
+ unsigned char class;
+ unsigned char reserved0;
+ __be16 block_size;
+ __be16 block_count;
+ __be16 user_block_count;
+ __be16 page_size;
+ unsigned char reserved1[2];
+ unsigned char assembly_date[8];
+ __be32 serial_number;
+ unsigned char assembly_maker_code;
+ unsigned char assembly_model_code[3];
+ __be16 memory_maker_code;
+ __be16 memory_model_code;
+ unsigned char reserved2[4];
+ unsigned char vcc;
+ unsigned char vpp;
+ __be16 controller_number;
+ __be16 controller_function;
+ __be16 start_sector;
+ __be16 unit_size;
+ unsigned char ms_sub_class;
+ unsigned char reserved3[4];
+ unsigned char interface_type;
+ __be16 controller_code;
+ unsigned char format_type;
+ unsigned char reserved4;
+ unsigned char device_type;
+ unsigned char reserved5[7];
+ unsigned char mspro_id[16];
+ unsigned char reserved6[16];
+} __attribute__((packed));
+
+struct mspro_mbr {
+ unsigned char boot_partition;
+ unsigned char start_head;
+ unsigned char start_sector;
+ unsigned char start_cylinder;
+ unsigned char partition_type;
+ unsigned char end_head;
+ unsigned char end_sector;
+ unsigned char end_cylinder;
+ unsigned int start_sectors;
+ unsigned int sectors_per_partition;
+} __attribute__((packed));
+
+struct mspro_specfile {
+ char name[8];
+ char ext[3];
+ unsigned char attr;
+ unsigned char reserved[10];
+ unsigned short time;
+ unsigned short date;
+ unsigned short cluster;
+ unsigned int size;
+} __attribute__((packed));
+
+struct mspro_devinfo {
+ __be16 cylinders;
+ __be16 heads;
+ __be16 bytes_per_track;
+ __be16 bytes_per_sector;
+ __be16 sectors_per_track;
+ unsigned char reserved[6];
+} __attribute__((packed));
+
+struct mspro_block_data {
+ struct memstick_dev *card;
+ unsigned int caps;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ struct request *block_req;
+ struct blk_mq_tag_set tag_set;
+ spinlock_t q_lock;
+
+ unsigned short page_size;
+ unsigned short cylinders;
+ unsigned short heads;
+ unsigned short sectors_per_track;
+
+ unsigned char system;
+ unsigned char read_only:1,
+ eject:1,
+ data_dir:1,
+ active:1;
+ unsigned char transfer_cmd;
+
+ int (*mrq_handler)(struct memstick_dev *card,
+ struct memstick_request **mrq);
+
+
+ /* Default request setup function for data access method preferred by
+ * this host instance.
+ */
+ void (*setup_transfer)(struct memstick_dev *card,
+ u64 offset, size_t length);
+
+ struct attribute_group attr_group;
+
+ struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS];
+ unsigned int seg_count;
+ unsigned int current_seg;
+ unsigned int current_page;
+};
+
+static DEFINE_IDR(mspro_block_disk_idr);
+static DEFINE_MUTEX(mspro_block_disk_lock);
+
+static int mspro_block_complete_req(struct memstick_dev *card, int error);
+
+/*** Block device ***/
+
+static void mspro_block_bd_free_disk(struct gendisk *disk)
+{
+ struct mspro_block_data *msb = disk->private_data;
+ int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
+
+ mutex_lock(&mspro_block_disk_lock);
+ idr_remove(&mspro_block_disk_idr, disk_id);
+ mutex_unlock(&mspro_block_disk_lock);
+
+ kfree(msb);
+}
+
+static int mspro_block_bd_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ struct mspro_block_data *msb = bdev->bd_disk->private_data;
+
+ geo->heads = msb->heads;
+ geo->sectors = msb->sectors_per_track;
+ geo->cylinders = msb->cylinders;
+
+ return 0;
+}
+
+static const struct block_device_operations ms_block_bdops = {
+ .owner = THIS_MODULE,
+ .getgeo = mspro_block_bd_getgeo,
+ .free_disk = mspro_block_bd_free_disk,
+};
+
+/*** Information ***/
+
+static struct mspro_sys_attr *mspro_from_sysfs_attr(struct attribute *attr)
+{
+ struct device_attribute *dev_attr
+ = container_of(attr, struct device_attribute, attr);
+ return container_of(dev_attr, struct mspro_sys_attr, dev_attr);
+}
+
+static const char *mspro_block_attr_name(unsigned char tag)
+{
+ switch (tag) {
+ case MSPRO_BLOCK_ID_SYSINFO:
+ return "attr_sysinfo";
+ case MSPRO_BLOCK_ID_MODELNAME:
+ return "attr_modelname";
+ case MSPRO_BLOCK_ID_MBR:
+ return "attr_mbr";
+ case MSPRO_BLOCK_ID_PBR16:
+ return "attr_pbr16";
+ case MSPRO_BLOCK_ID_PBR32:
+ return "attr_pbr32";
+ case MSPRO_BLOCK_ID_SPECFILEVALUES1:
+ return "attr_specfilevalues1";
+ case MSPRO_BLOCK_ID_SPECFILEVALUES2:
+ return "attr_specfilevalues2";
+ case MSPRO_BLOCK_ID_DEVINFO:
+ return "attr_devinfo";
+ default:
+ return NULL;
+ }
+}
+
+typedef ssize_t (*sysfs_show_t)(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer);
+
+static ssize_t mspro_block_attr_show_default(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ struct mspro_sys_attr *s_attr = container_of(attr,
+ struct mspro_sys_attr,
+ dev_attr);
+
+ ssize_t cnt, rc = 0;
+
+ for (cnt = 0; cnt < s_attr->size; cnt++) {
+ if (cnt && !(cnt % 16)) {
+ if (PAGE_SIZE - rc)
+ buffer[rc++] = '\n';
+ }
+
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "%02x ",
+ ((unsigned char *)s_attr->data)[cnt]);
+ }
+ return rc;
+}
+
+static ssize_t mspro_block_attr_show_sysinfo(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ struct mspro_sys_attr *x_attr = container_of(attr,
+ struct mspro_sys_attr,
+ dev_attr);
+ struct mspro_sys_info *x_sys = x_attr->data;
+ ssize_t rc = 0;
+ int date_tz = 0, date_tz_f = 0;
+
+ if (x_sys->assembly_date[0] > 0x80U) {
+ date_tz = (~x_sys->assembly_date[0]) + 1;
+ date_tz_f = date_tz & 3;
+ date_tz >>= 2;
+ date_tz = -date_tz;
+ date_tz_f *= 15;
+ } else if (x_sys->assembly_date[0] < 0x80U) {
+ date_tz = x_sys->assembly_date[0];
+ date_tz_f = date_tz & 3;
+ date_tz >>= 2;
+ date_tz_f *= 15;
+ }
+
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n",
+ x_sys->class);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "block size: %x\n",
+ be16_to_cpu(x_sys->block_size));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "block count: %x\n",
+ be16_to_cpu(x_sys->block_count));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "user block count: %x\n",
+ be16_to_cpu(x_sys->user_block_count));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n",
+ be16_to_cpu(x_sys->page_size));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: "
+ "GMT%+d:%d %04u-%02u-%02u %02u:%02u:%02u\n",
+ date_tz, date_tz_f,
+ be16_to_cpup((__be16 *)&x_sys->assembly_date[1]),
+ x_sys->assembly_date[3], x_sys->assembly_date[4],
+ x_sys->assembly_date[5], x_sys->assembly_date[6],
+ x_sys->assembly_date[7]);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "serial number: %x\n",
+ be32_to_cpu(x_sys->serial_number));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc,
+ "assembly maker code: %x\n",
+ x_sys->assembly_maker_code);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly model code: "
+ "%02x%02x%02x\n", x_sys->assembly_model_code[0],
+ x_sys->assembly_model_code[1],
+ x_sys->assembly_model_code[2]);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "memory maker code: %x\n",
+ be16_to_cpu(x_sys->memory_maker_code));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "memory model code: %x\n",
+ be16_to_cpu(x_sys->memory_model_code));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "vcc: %x\n",
+ x_sys->vcc);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "vpp: %x\n",
+ x_sys->vpp);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller number: %x\n",
+ be16_to_cpu(x_sys->controller_number));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc,
+ "controller function: %x\n",
+ be16_to_cpu(x_sys->controller_function));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sector: %x\n",
+ be16_to_cpu(x_sys->start_sector));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "unit size: %x\n",
+ be16_to_cpu(x_sys->unit_size));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sub class: %x\n",
+ x_sys->ms_sub_class);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "interface type: %x\n",
+ x_sys->interface_type);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller code: %x\n",
+ be16_to_cpu(x_sys->controller_code));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "format type: %x\n",
+ x_sys->format_type);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "device type: %x\n",
+ x_sys->device_type);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "mspro id: %s\n",
+ x_sys->mspro_id);
+ return rc;
+}
+
+static ssize_t mspro_block_attr_show_modelname(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ struct mspro_sys_attr *s_attr = container_of(attr,
+ struct mspro_sys_attr,
+ dev_attr);
+
+ return scnprintf(buffer, PAGE_SIZE, "%s", (char *)s_attr->data);
+}
+
+static ssize_t mspro_block_attr_show_mbr(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ struct mspro_sys_attr *x_attr = container_of(attr,
+ struct mspro_sys_attr,
+ dev_attr);
+ struct mspro_mbr *x_mbr = x_attr->data;
+ ssize_t rc = 0;
+
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "boot partition: %x\n",
+ x_mbr->boot_partition);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start head: %x\n",
+ x_mbr->start_head);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sector: %x\n",
+ x_mbr->start_sector);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cylinder: %x\n",
+ x_mbr->start_cylinder);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "partition type: %x\n",
+ x_mbr->partition_type);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end head: %x\n",
+ x_mbr->end_head);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end sector: %x\n",
+ x_mbr->end_sector);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end cylinder: %x\n",
+ x_mbr->end_cylinder);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sectors: %x\n",
+ x_mbr->start_sectors);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc,
+ "sectors per partition: %x\n",
+ x_mbr->sectors_per_partition);
+ return rc;
+}
+
+static ssize_t mspro_block_attr_show_specfile(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ struct mspro_sys_attr *x_attr = container_of(attr,
+ struct mspro_sys_attr,
+ dev_attr);
+ struct mspro_specfile *x_spfile = x_attr->data;
+ char name[9], ext[4];
+ ssize_t rc = 0;
+
+ memcpy(name, x_spfile->name, 8);
+ name[8] = 0;
+ memcpy(ext, x_spfile->ext, 3);
+ ext[3] = 0;
+
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "name: %s\n", name);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "ext: %s\n", ext);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "attribute: %x\n",
+ x_spfile->attr);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "time: %d:%d:%d\n",
+ x_spfile->time >> 11,
+ (x_spfile->time >> 5) & 0x3f,
+ (x_spfile->time & 0x1f) * 2);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "date: %d-%d-%d\n",
+ (x_spfile->date >> 9) + 1980,
+ (x_spfile->date >> 5) & 0xf,
+ x_spfile->date & 0x1f);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cluster: %x\n",
+ x_spfile->cluster);
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "size: %x\n",
+ x_spfile->size);
+ return rc;
+}
+
+static ssize_t mspro_block_attr_show_devinfo(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ struct mspro_sys_attr *x_attr = container_of(attr,
+ struct mspro_sys_attr,
+ dev_attr);
+ struct mspro_devinfo *x_devinfo = x_attr->data;
+ ssize_t rc = 0;
+
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "cylinders: %x\n",
+ be16_to_cpu(x_devinfo->cylinders));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "heads: %x\n",
+ be16_to_cpu(x_devinfo->heads));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "bytes per track: %x\n",
+ be16_to_cpu(x_devinfo->bytes_per_track));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "bytes per sector: %x\n",
+ be16_to_cpu(x_devinfo->bytes_per_sector));
+ rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sectors per track: %x\n",
+ be16_to_cpu(x_devinfo->sectors_per_track));
+ return rc;
+}
+
+static sysfs_show_t mspro_block_attr_show(unsigned char tag)
+{
+ switch (tag) {
+ case MSPRO_BLOCK_ID_SYSINFO:
+ return mspro_block_attr_show_sysinfo;
+ case MSPRO_BLOCK_ID_MODELNAME:
+ return mspro_block_attr_show_modelname;
+ case MSPRO_BLOCK_ID_MBR:
+ return mspro_block_attr_show_mbr;
+ case MSPRO_BLOCK_ID_SPECFILEVALUES1:
+ case MSPRO_BLOCK_ID_SPECFILEVALUES2:
+ return mspro_block_attr_show_specfile;
+ case MSPRO_BLOCK_ID_DEVINFO:
+ return mspro_block_attr_show_devinfo;
+ default:
+ return mspro_block_attr_show_default;
+ }
+}
+
+/*** Protocol handlers ***/
+
+/*
+ * Functions prefixed with "h_" are protocol callbacks. They can be called from
+ * interrupt context. A return value of 0 means that request processing is
+ * still ongoing, while the special error value -EAGAIN means that the current
+ * request is finished (and the request processor should come back some time
+ * later).
+ */
+
+static int h_mspro_block_req_init(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+
+ *mrq = &card->current_mrq;
+ card->next_request = msb->mrq_handler;
+ return 0;
+}
+
+static int h_mspro_block_default(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ return mspro_block_complete_req(card, (*mrq)->error);
+}
+
+static int h_mspro_block_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ return -ENXIO;
+}
+
+static int h_mspro_block_get_ro(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+
+ if (!(*mrq)->error) {
+ if ((*mrq)->data[offsetof(struct ms_status_register, status0)]
+ & MEMSTICK_STATUS0_WP)
+ msb->read_only = 1;
+ else
+ msb->read_only = 0;
+ }
+
+ return mspro_block_complete_req(card, (*mrq)->error);
+}
+
+static int h_mspro_block_wait_for_ced(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]);
+
+ if (!(*mrq)->error) {
+ if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR))
+ (*mrq)->error = -EFAULT;
+ else if (!((*mrq)->data[0] & MEMSTICK_INT_CED))
+ return 0;
+ }
+
+ return mspro_block_complete_req(card, (*mrq)->error);
+}
+
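+/*
+ * Main data transfer state machine: steps through WRITE_REG -> SET_CMD ->
+ * GET_INT -> READ/WRITE_LONG_DATA one page at a time, using the INT register
+ * to decide whether to transfer more data, wait for CED or stop.
+ */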
+static int h_mspro_block_transfer_data(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ unsigned char t_val = 0;
+ struct scatterlist t_sg = { 0 };
+ size_t t_offset;
+
+ if ((*mrq)->error)
+ return mspro_block_complete_req(card, (*mrq)->error);
+
+ switch ((*mrq)->tpc) {
+ case MS_TPC_WRITE_REG:
+ memstick_init_req(*mrq, MS_TPC_SET_CMD, &msb->transfer_cmd, 1);
+ (*mrq)->need_card_int = 1;
+ return 0;
+ case MS_TPC_SET_CMD:
+ t_val = (*mrq)->int_reg;
+ memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1);
+ if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT)
+ goto has_int_reg;
+ return 0;
+ case MS_TPC_GET_INT:
+ t_val = (*mrq)->data[0];
+has_int_reg:
+ if (t_val & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) {
+ t_val = MSPRO_CMD_STOP;
+ memstick_init_req(*mrq, MS_TPC_SET_CMD, &t_val, 1);
+ card->next_request = h_mspro_block_default;
+ return 0;
+ }
+
+ if (msb->current_page
+ == (msb->req_sg[msb->current_seg].length
+ / msb->page_size)) {
+ msb->current_page = 0;
+ msb->current_seg++;
+
+ if (msb->current_seg == msb->seg_count) {
+ if (t_val & MEMSTICK_INT_CED) {
+ return mspro_block_complete_req(card,
+ 0);
+ } else {
+ card->next_request
+ = h_mspro_block_wait_for_ced;
+ memstick_init_req(*mrq, MS_TPC_GET_INT,
+ NULL, 1);
+ return 0;
+ }
+ }
+ }
+
+ if (!(t_val & MEMSTICK_INT_BREQ)) {
+ memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1);
+ return 0;
+ }
+
+ t_offset = msb->req_sg[msb->current_seg].offset;
+ t_offset += msb->current_page * msb->page_size;
+
+ sg_set_page(&t_sg,
+ nth_page(sg_page(&(msb->req_sg[msb->current_seg])),
+ t_offset >> PAGE_SHIFT),
+ msb->page_size, offset_in_page(t_offset));
+
+ memstick_init_req_sg(*mrq, msb->data_dir == READ
+ ? MS_TPC_READ_LONG_DATA
+ : MS_TPC_WRITE_LONG_DATA,
+ &t_sg);
+ (*mrq)->need_card_int = 1;
+ return 0;
+ case MS_TPC_READ_LONG_DATA:
+ case MS_TPC_WRITE_LONG_DATA:
+ msb->current_page++;
+ if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) {
+ t_val = (*mrq)->int_reg;
+ goto has_int_reg;
+ } else {
+ memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1);
+ return 0;
+ }
+
+ default:
+ BUG();
+ }
+}
+
+/*** Transfer setup functions for different access methods. ***/
+
+/** Setup data transfer request for SET_CMD TPC with arguments in card
+ * registers.
+ *
+ * @card Current media instance
+ * @offset Target data offset in bytes
+ * @length Required transfer length in bytes.
+ */
+static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
+ size_t length)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct mspro_param_register param = {
+ .system = msb->system,
+ .data_count = cpu_to_be16((uint16_t)(length / msb->page_size)),
+ /* ISO C90 warning precludes direct initialization for now. */
+ .data_address = 0,
+ .tpc_param = 0
+ };
+
+ do_div(offset, msb->page_size);
+ param.data_address = cpu_to_be32((uint32_t)offset);
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_transfer_data;
+ memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+ &param, sizeof(param));
+}
+
+/*** Data transfer ***/
+
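+/*
+ * Map the current block request into req_sg and start the transfer; if the
+ * mapping yields no segments, the current chunk is failed with
+ * BLK_STS_RESOURCE.
+ */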
+static int mspro_block_issue_req(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ u64 t_off;
+ unsigned int count;
+
+ while (true) {
+ msb->current_page = 0;
+ msb->current_seg = 0;
+ msb->seg_count = blk_rq_map_sg(msb->block_req->q,
+ msb->block_req,
+ msb->req_sg);
+
+ if (!msb->seg_count) {
+ unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
+ bool chunk;
+
+ chunk = blk_update_request(msb->block_req,
+ BLK_STS_RESOURCE,
+ bytes);
+ if (chunk)
+ continue;
+ __blk_mq_end_request(msb->block_req,
+ BLK_STS_RESOURCE);
+ msb->block_req = NULL;
+ return -EAGAIN;
+ }
+
+ t_off = blk_rq_pos(msb->block_req);
+ t_off <<= 9;
+ count = blk_rq_bytes(msb->block_req);
+
+ msb->setup_transfer(card, t_off, count);
+
+ msb->data_dir = rq_data_dir(msb->block_req);
+ msb->transfer_cmd = msb->data_dir == READ
+ ? MSPRO_CMD_READ_DATA
+ : MSPRO_CMD_WRITE_DATA;
+
+ memstick_new_req(card->host);
+ return 0;
+ }
+}
+
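+/*
+ * Finish (part of) the current block request: account for the bytes actually
+ * transferred, either continue with the next chunk or end the request, and
+ * complete card->mrq_complete once request processing has stopped.
+ */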
+static int mspro_block_complete_req(struct memstick_dev *card, int error)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ int cnt;
+ bool chunk;
+ unsigned int t_len = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ dev_dbg(&card->dev, "complete %d, %d\n", msb->block_req ? 1 : 0,
+ error);
+
+ if (msb->block_req) {
+ /* Nothing to do - not really an error */
+ if (error == -EAGAIN)
+ error = 0;
+
+ if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ if (msb->data_dir == READ) {
+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ t_len += msb->req_sg[cnt].length
+ / msb->page_size;
+
+ if (msb->current_page)
+ t_len += msb->current_page - 1;
+
+ t_len *= msb->page_size;
+ }
+ }
+ } else
+ t_len = blk_rq_bytes(msb->block_req);
+
+ dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
+
+ if (error && !t_len)
+ t_len = blk_rq_cur_bytes(msb->block_req);
+
+ chunk = blk_update_request(msb->block_req,
+ errno_to_blk_status(error), t_len);
+ if (chunk) {
+ error = mspro_block_issue_req(card);
+ if (!error)
+ goto out;
+ } else {
+ __blk_mq_end_request(msb->block_req,
+ errno_to_blk_status(error));
+ msb->block_req = NULL;
+ }
+ } else {
+ if (!error)
+ error = -EAGAIN;
+ }
+
+ card->next_request = h_mspro_block_default_bad;
+ complete_all(&card->mrq_complete);
+out:
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return error;
+}
+
+static void mspro_block_stop(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ int rc = 0;
+ unsigned long flags;
+
+ while (1) {
+ spin_lock_irqsave(&msb->q_lock, flags);
+ if (!msb->block_req) {
+ blk_mq_stop_hw_queues(msb->queue);
+ rc = 1;
+ }
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ if (rc)
+ break;
+
+ wait_for_completion(&card->mrq_complete);
+ }
+}
+
+static void mspro_block_start(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+
+ blk_mq_start_hw_queues(msb->queue);
+}
+
+static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct memstick_dev *card = hctx->queue->queuedata;
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+
+ spin_lock_irq(&msb->q_lock);
+
+ if (msb->block_req) {
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
+ if (msb->eject) {
+ spin_unlock_irq(&msb->q_lock);
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
+ }
+
+ msb->block_req = bd->rq;
+ blk_mq_start_request(bd->rq);
+
+ if (mspro_block_issue_req(card))
+ msb->block_req = NULL;
+
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_OK;
+}
+
+/*** Initialization ***/
+
+static int mspro_block_wait_for_ced(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_wait_for_ced;
+ memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1);
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+ return card->current_mrq.error;
+}
+
+static int mspro_block_set_interface(struct memstick_dev *card,
+ unsigned char sys_reg)
+{
+ struct memstick_host *host = card->host;
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct mspro_param_register param = {
+ .system = sys_reg,
+ .data_count = 0,
+ .data_address = 0,
+ .tpc_param = 0
+ };
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_default;
+ memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
+ sizeof(param));
+ memstick_new_req(host);
+ wait_for_completion(&card->mrq_complete);
+ return card->current_mrq.error;
+}
+
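+/*
+ * Switch the card to 4-bit parallel mode and, if supported, upgrade to 8-bit
+ * mode; on an interface error fall back to serial mode and retry without the
+ * 8-bit capability.
+ */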
+static int mspro_block_switch_interface(struct memstick_dev *card)
+{
+ struct memstick_host *host = card->host;
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ int rc = 0;
+
+try_again:
+ if (msb->caps & MEMSTICK_CAP_PAR4)
+ rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR4);
+ else
+ return 0;
+
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: could not switch to 4-bit mode, error %d\n",
+ dev_name(&card->dev), rc);
+ return 0;
+ }
+
+ msb->system = MEMSTICK_SYS_PAR4;
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+ printk(KERN_INFO "%s: switching to 4-bit parallel mode\n",
+ dev_name(&card->dev));
+
+ if (msb->caps & MEMSTICK_CAP_PAR8) {
+ rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR8);
+
+ if (!rc) {
+ msb->system = MEMSTICK_SYS_PAR8;
+ host->set_param(host, MEMSTICK_INTERFACE,
+ MEMSTICK_PAR8);
+ printk(KERN_INFO
+ "%s: switching to 8-bit parallel mode\n",
+ dev_name(&card->dev));
+ } else
+ printk(KERN_WARNING
+ "%s: could not switch to 8-bit mode, error %d\n",
+ dev_name(&card->dev), rc);
+ }
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_default;
+ memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1);
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+ rc = card->current_mrq.error;
+
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: interface error, trying to fall back to serial\n",
+ dev_name(&card->dev));
+ msb->system = MEMSTICK_SYS_SERIAL;
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ msleep(10);
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+
+ rc = memstick_set_rw_addr(card);
+ if (!rc)
+ rc = mspro_block_set_interface(card, msb->system);
+
+ if (!rc) {
+ msleep(150);
+ rc = mspro_block_wait_for_ced(card);
+ if (rc)
+ return rc;
+
+ if (msb->caps & MEMSTICK_CAP_PAR8) {
+ msb->caps &= ~MEMSTICK_CAP_PAR8;
+ goto try_again;
+ }
+ }
+ }
+ return rc;
+}
+
+/* Memory allocated for attributes by this function should be freed by
+ * mspro_block_data_clear, no matter if the initialization process succeeded
+ * or failed.
+ */
+static int mspro_block_read_attributes(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct mspro_attribute *attr = NULL;
+ struct mspro_sys_attr *s_attr = NULL;
+ unsigned char *buffer = NULL;
+ int cnt, rc, attr_count;
+	/* While physical device offsets, represented here by attr_offset and
+	 * attr_len, would normally need large numeric types, we can be sure
+	 * that the attributes are close enough to the beginning of the device
+	 * to save ourselves some trouble.
+ */
+ unsigned int addr, attr_offset = 0, attr_len = msb->page_size;
+
+ attr = kmalloc(msb->page_size, GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ sg_init_one(&msb->req_sg[0], attr, msb->page_size);
+ msb->seg_count = 1;
+ msb->current_seg = 0;
+ msb->current_page = 0;
+ msb->data_dir = READ;
+ msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
+
+ msb->setup_transfer(card, attr_offset, attr_len);
+
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+ if (card->current_mrq.error) {
+ rc = card->current_mrq.error;
+ goto out_free_attr;
+ }
+
+ if (be16_to_cpu(attr->signature) != MSPRO_BLOCK_SIGNATURE) {
+ printk(KERN_ERR "%s: unrecognized device signature %x\n",
+ dev_name(&card->dev), be16_to_cpu(attr->signature));
+ rc = -ENODEV;
+ goto out_free_attr;
+ }
+
+ if (attr->count > MSPRO_BLOCK_MAX_ATTRIBUTES) {
+ printk(KERN_WARNING "%s: way too many attribute entries\n",
+ dev_name(&card->dev));
+ attr_count = MSPRO_BLOCK_MAX_ATTRIBUTES;
+ } else
+ attr_count = attr->count;
+
+ msb->attr_group.attrs = kcalloc(attr_count + 1,
+ sizeof(*msb->attr_group.attrs),
+ GFP_KERNEL);
+ if (!msb->attr_group.attrs) {
+ rc = -ENOMEM;
+ goto out_free_attr;
+ }
+ msb->attr_group.name = "media_attributes";
+
+ buffer = kmemdup(attr, attr_len, GFP_KERNEL);
+ if (!buffer) {
+ rc = -ENOMEM;
+ goto out_free_attr;
+ }
+
+ for (cnt = 0; cnt < attr_count; ++cnt) {
+ s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
+ if (!s_attr) {
+ rc = -ENOMEM;
+ goto out_free_buffer;
+ }
+
+ msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr;
+ addr = be32_to_cpu(attr->entries[cnt].address);
+ s_attr->size = be32_to_cpu(attr->entries[cnt].size);
+ dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, "
+ "size %zx\n", cnt, attr->entries[cnt].id, addr,
+ s_attr->size);
+ s_attr->id = attr->entries[cnt].id;
+ if (mspro_block_attr_name(s_attr->id))
+ snprintf(s_attr->name, sizeof(s_attr->name), "%s",
+ mspro_block_attr_name(attr->entries[cnt].id));
+ else
+ snprintf(s_attr->name, sizeof(s_attr->name),
+ "attr_x%02x", attr->entries[cnt].id);
+
+ sysfs_attr_init(&s_attr->dev_attr.attr);
+ s_attr->dev_attr.attr.name = s_attr->name;
+ s_attr->dev_attr.attr.mode = S_IRUGO;
+ s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
+
+ if (!s_attr->size)
+ continue;
+
+ s_attr->data = kmalloc(s_attr->size, GFP_KERNEL);
+ if (!s_attr->data) {
+ rc = -ENOMEM;
+ goto out_free_buffer;
+ }
+
+ if (((addr / msb->page_size) == (attr_offset / msb->page_size))
+ && (((addr + s_attr->size - 1) / msb->page_size)
+ == (attr_offset / msb->page_size))) {
+ memcpy(s_attr->data, buffer + addr % msb->page_size,
+ s_attr->size);
+ continue;
+ }
+
+ attr_offset = (addr / msb->page_size) * msb->page_size;
+
+ if ((attr_offset + attr_len) < (addr + s_attr->size)) {
+ kfree(buffer);
+ attr_len = (((addr + s_attr->size) / msb->page_size)
+ + 1 ) * msb->page_size - attr_offset;
+ buffer = kmalloc(attr_len, GFP_KERNEL);
+ if (!buffer) {
+ rc = -ENOMEM;
+ goto out_free_attr;
+ }
+ }
+
+ sg_init_one(&msb->req_sg[0], buffer, attr_len);
+ msb->seg_count = 1;
+ msb->current_seg = 0;
+ msb->current_page = 0;
+ msb->data_dir = READ;
+ msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
+
+ dev_dbg(&card->dev, "reading attribute range %x, %x\n",
+ attr_offset, attr_len);
+
+ msb->setup_transfer(card, attr_offset, attr_len);
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+ if (card->current_mrq.error) {
+ rc = card->current_mrq.error;
+ goto out_free_buffer;
+ }
+
+ memcpy(s_attr->data, buffer + addr % msb->page_size,
+ s_attr->size);
+ }
+
+ rc = 0;
+out_free_buffer:
+ kfree(buffer);
+out_free_attr:
+ kfree(attr);
+ return rc;
+}
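+
+/*
+ * Editorial sketch, not part of the upstream driver: the loop above keeps a
+ * page-aligned window [attr_offset, attr_offset + attr_len) of the attribute
+ * area buffered and issues a new MSPRO_CMD_READ_ATRB only when an entry falls
+ * outside of it.  The two helpers below (hypothetical names, illustration
+ * only) restate that window arithmetic in isolation.
+ */
+static inline bool mspro_attr_fits_window(unsigned int addr, size_t size,
+					  unsigned int win_off,
+					  unsigned int page_size)
+{
+	/* reusable without a new read only if the entry both starts and
+	 * ends inside the page that win_off points into */
+	return (addr / page_size) == (win_off / page_size) &&
+	       ((addr + size - 1) / page_size) == (win_off / page_size);
+}
+
+static inline unsigned int mspro_attr_window_len(unsigned int addr,
+						 size_t size,
+						 unsigned int page_size)
+{
+	/* the new window starts at the page containing addr and is rounded
+	 * up to whole pages past the end of the entry */
+	unsigned int win_off = (addr / page_size) * page_size;
+
+	return ((addr + size) / page_size + 1) * page_size - win_off;
+}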
+
+static int mspro_block_init_card(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ int rc = 0;
+
+ msb->system = MEMSTICK_SYS_SERIAL;
+ msb->setup_transfer = h_mspro_block_setup_cmd;
+
+ card->reg_addr.r_offset = offsetof(struct mspro_register, status);
+ card->reg_addr.r_length = sizeof(struct ms_status_register);
+ card->reg_addr.w_offset = offsetof(struct mspro_register, param);
+ card->reg_addr.w_length = sizeof(struct mspro_param_register);
+
+ if (memstick_set_rw_addr(card))
+ return -EIO;
+
+ msb->caps = host->caps;
+
+ msleep(150);
+ rc = mspro_block_wait_for_ced(card);
+ if (rc)
+ return rc;
+
+ rc = mspro_block_switch_interface(card);
+ if (rc)
+ return rc;
+
+ dev_dbg(&card->dev, "card activated\n");
+ if (msb->system != MEMSTICK_SYS_SERIAL)
+ msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_get_ro;
+ memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+ sizeof(struct ms_status_register));
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+ if (card->current_mrq.error)
+ return card->current_mrq.error;
+
+ dev_dbg(&card->dev, "card r/w status %d\n", msb->read_only ? 0 : 1);
+
+ msb->page_size = 512;
+ rc = mspro_block_read_attributes(card);
+ if (rc)
+ return rc;
+
+ dev_dbg(&card->dev, "attributes loaded\n");
+ return 0;
+}
+
+static const struct blk_mq_ops mspro_mq_ops = {
+ .queue_rq = mspro_queue_rq,
+};
+
+static int mspro_block_init_disk(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct mspro_devinfo *dev_info = NULL;
+ struct mspro_sys_info *sys_info = NULL;
+ struct mspro_sys_attr *s_attr = NULL;
+ int rc, disk_id;
+ unsigned long capacity;
+
+ for (rc = 0; msb->attr_group.attrs[rc]; ++rc) {
+ s_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[rc]);
+
+ if (s_attr->id == MSPRO_BLOCK_ID_DEVINFO)
+ dev_info = s_attr->data;
+ else if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO)
+ sys_info = s_attr->data;
+ }
+
+ if (!dev_info || !sys_info)
+ return -ENODEV;
+
+ msb->cylinders = be16_to_cpu(dev_info->cylinders);
+ msb->heads = be16_to_cpu(dev_info->heads);
+ msb->sectors_per_track = be16_to_cpu(dev_info->sectors_per_track);
+
+ msb->page_size = be16_to_cpu(sys_info->unit_size);
+
+ mutex_lock(&mspro_block_disk_lock);
+ disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL);
+ mutex_unlock(&mspro_block_disk_lock);
+ if (disk_id < 0)
+ return disk_id;
+
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (rc)
+ goto out_release_id;
+
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ if (IS_ERR(msb->disk)) {
+ rc = PTR_ERR(msb->disk);
+ goto out_free_tag_set;
+ }
+ msb->queue = msb->disk->queue;
+
+ blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
+ blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+ blk_queue_max_segment_size(msb->queue,
+ MSPRO_BLOCK_MAX_PAGES * msb->page_size);
+
+ msb->disk->major = major;
+ msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT;
+ msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT;
+ msb->disk->fops = &ms_block_bdops;
+ msb->disk->private_data = msb;
+
+ sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
+
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+ capacity = be16_to_cpu(sys_info->user_block_count);
+ capacity *= be16_to_cpu(sys_info->block_size);
+ capacity *= msb->page_size >> 9;
+ set_capacity(msb->disk, capacity);
+ dev_dbg(&card->dev, "capacity set %ld\n", capacity);
+
+ if (msb->read_only)
+ set_disk_ro(msb->disk, true);
+
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
+ msb->active = 1;
+ return 0;
+
+out_cleanup_disk:
+ put_disk(msb->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&msb->tag_set);
+out_release_id:
+ mutex_lock(&mspro_block_disk_lock);
+ idr_remove(&mspro_block_disk_idr, disk_id);
+ mutex_unlock(&mspro_block_disk_lock);
+ return rc;
+}
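+
+/*
+ * Editorial sketch, not part of the upstream driver: the capacity set above
+ * is expressed in 512-byte sectors -- user_block_count blocks, each of
+ * block_size pages, each page page_size bytes, with '>> 9' converting bytes
+ * per page into sectors per page.  For purely illustrative numbers,
+ * 1008 blocks * 512 pages * (512 >> 9) = 516096 sectors, about 252 MiB.
+ * A standalone restatement (hypothetical helper):
+ */
+static inline unsigned long mspro_capacity_sectors(unsigned int user_block_count,
+						   unsigned int block_size,
+						   unsigned int page_size)
+{
+	unsigned long capacity = user_block_count;
+
+	capacity *= block_size;		/* -> pages */
+	capacity *= page_size >> 9;	/* -> 512-byte sectors */
+	return capacity;
+}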
+
+static void mspro_block_data_clear(struct mspro_block_data *msb)
+{
+ int cnt;
+ struct mspro_sys_attr *s_attr;
+
+ if (msb->attr_group.attrs) {
+ for (cnt = 0; msb->attr_group.attrs[cnt]; ++cnt) {
+ s_attr = mspro_from_sysfs_attr(msb->attr_group
+ .attrs[cnt]);
+ kfree(s_attr->data);
+ kfree(s_attr);
+ }
+ kfree(msb->attr_group.attrs);
+ }
+
+ msb->card = NULL;
+}
+
+static int mspro_block_check_card(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+
+ return (msb->active == 1);
+}
+
+static int mspro_block_probe(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb;
+ int rc = 0;
+
+ msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL);
+ if (!msb)
+ return -ENOMEM;
+ memstick_set_drvdata(card, msb);
+ msb->card = card;
+ spin_lock_init(&msb->q_lock);
+
+ rc = mspro_block_init_card(card);
+
+ if (rc)
+ goto out_free;
+
+ rc = sysfs_create_group(&card->dev.kobj, &msb->attr_group);
+ if (rc)
+ goto out_free;
+
+ rc = mspro_block_init_disk(card);
+ if (!rc) {
+ card->check = mspro_block_check_card;
+ card->stop = mspro_block_stop;
+ card->start = mspro_block_start;
+ return 0;
+ }
+
+ sysfs_remove_group(&card->dev.kobj, &msb->attr_group);
+out_free:
+ memstick_set_drvdata(card, NULL);
+ mspro_block_data_clear(msb);
+ kfree(msb);
+ return rc;
+}
+
+static void mspro_block_remove(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->eject = 1;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
+
+ del_gendisk(msb->disk);
+ dev_dbg(&card->dev, "mspro block remove\n");
+
+ blk_mq_free_tag_set(&msb->tag_set);
+ msb->queue = NULL;
+
+ sysfs_remove_group(&card->dev.kobj, &msb->attr_group);
+
+ mutex_lock(&mspro_block_disk_lock);
+ mspro_block_data_clear(msb);
+ mutex_unlock(&mspro_block_disk_lock);
+
+ put_disk(msb->disk);
+ memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ blk_mq_stop_hw_queues(msb->queue);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->active = 0;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ return 0;
+}
+
+static int mspro_block_resume(struct memstick_dev *card)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ int rc = 0;
+
+#ifdef CONFIG_MEMSTICK_UNSAFE_RESUME
+
+ struct mspro_block_data *new_msb;
+ struct memstick_host *host = card->host;
+ struct mspro_sys_attr *s_attr, *r_attr;
+ unsigned char cnt;
+
+ mutex_lock(&host->lock);
+ new_msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL);
+ if (!new_msb) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+
+ new_msb->card = card;
+ memstick_set_drvdata(card, new_msb);
+ rc = mspro_block_init_card(card);
+ if (rc)
+ goto out_free;
+
+ for (cnt = 0; new_msb->attr_group.attrs[cnt]
+ && msb->attr_group.attrs[cnt]; ++cnt) {
+ s_attr = mspro_from_sysfs_attr(new_msb->attr_group.attrs[cnt]);
+ r_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[cnt]);
+
+ if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO
+ && r_attr->id == s_attr->id) {
+ if (memcmp(s_attr->data, r_attr->data, s_attr->size))
+ break;
+
+ msb->active = 1;
+ break;
+ }
+ }
+
+out_free:
+ memstick_set_drvdata(card, msb);
+ mspro_block_data_clear(new_msb);
+ kfree(new_msb);
+out_unlock:
+ mutex_unlock(&host->lock);
+
+#endif /* CONFIG_MEMSTICK_UNSAFE_RESUME */
+
+ blk_mq_start_hw_queues(msb->queue);
+ return rc;
+}
+
+#else
+
+#define mspro_block_suspend NULL
+#define mspro_block_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id mspro_block_id_tbl[] = {
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_PRO, MEMSTICK_CATEGORY_STORAGE_DUO,
+ MEMSTICK_CLASS_DUO},
+ {}
+};
+
+
+static struct memstick_driver mspro_block_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = mspro_block_id_tbl,
+ .probe = mspro_block_probe,
+ .remove = mspro_block_remove,
+ .suspend = mspro_block_suspend,
+ .resume = mspro_block_resume
+};
+
+static int __init mspro_block_init(void)
+{
+ int rc = -ENOMEM;
+
+ rc = register_blkdev(major, DRIVER_NAME);
+ if (rc < 0) {
+ printk(KERN_ERR DRIVER_NAME ": failed to register "
+ "major %d, error %d\n", major, rc);
+ return rc;
+ }
+ if (!major)
+ major = rc;
+
+ rc = memstick_register_driver(&mspro_block_driver);
+ if (rc)
+ unregister_blkdev(major, DRIVER_NAME);
+ return rc;
+}
+
+static void __exit mspro_block_exit(void)
+{
+ memstick_unregister_driver(&mspro_block_driver);
+ unregister_blkdev(major, DRIVER_NAME);
+ idr_destroy(&mspro_block_disk_idr);
+}
+
+module_init(mspro_block_init);
+module_exit(mspro_block_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("Sony MemoryStickPro block device driver");
+MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl);
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
new file mode 100644
index 000000000..4113343da
--- /dev/null
+++ b/drivers/memstick/host/Kconfig
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MemoryStick host controller drivers
+#
+
+comment "MemoryStick Host Controller Drivers"
+
+config MEMSTICK_TIFM_MS
+	tristate "TI Flash Media MemoryStick Interface support"
+ depends on PCI
+ select TIFM_CORE
+ help
+ Say Y here if you want to be able to access MemoryStick cards with
+ the Texas Instruments(R) Flash Media card reader, found in many
+ laptops.
+ This option 'selects' (turns on, enables) 'TIFM_CORE', but you
+	  probably also need an appropriate card reader host adapter, such as
+ 'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
+ (TIFM_7XX1)'.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tifm_ms.
+
+config MEMSTICK_JMICRON_38X
+ tristate "JMicron JMB38X MemoryStick interface support"
+	depends on PCI
+	help
+ Say Y here if you want to be able to access MemoryStick cards with
+ the JMicron(R) JMB38X MemoryStick card reader.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jmb38x_ms.
+
+config MEMSTICK_R592
+ tristate "Ricoh R5C592 MemoryStick interface support"
+	depends on PCI
+	help
+	  Say Y here if you want to be able to access MemoryStick cards with
+	  the Ricoh R5C592 MemoryStick card reader (which is part of a 5-in-1
+	  multifunction reader).
+
+ To compile this driver as a module, choose M here: the module will
+ be called r592.
+
+config MEMSTICK_REALTEK_PCI
+ tristate "Realtek PCI-E Memstick Card Interface Driver"
+ depends on MISC_RTSX_PCI
+ help
+	  Say Y here to include driver code to support the Memstick card
+	  interface of Realtek PCI-E card readers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rtsx_pci_ms.
+
+config MEMSTICK_REALTEK_USB
+ tristate "Realtek USB Memstick Card Interface Driver"
+ depends on MISC_RTSX_USB
+ help
+	  Say Y here to include driver code to support the Memstick card
+	  interface of Realtek RTS5129/39 series USB card readers.
+
+ To compile this driver as a module, choose M here: the module will
+	  be called rtsx_usb_ms.
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
new file mode 100644
index 000000000..1abaa03ee
--- /dev/null
+++ b/drivers/memstick/host/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for MemoryStick host controller drivers
+#
+
+obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o
+obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o
+obj-$(CONFIG_MEMSTICK_R592) += r592.o
+obj-$(CONFIG_MEMSTICK_REALTEK_PCI) += rtsx_pci_ms.o
+obj-$(CONFIG_MEMSTICK_REALTEK_USB) += rtsx_usb_ms.o
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
new file mode 100644
index 000000000..21cb2a786
--- /dev/null
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * jmb38x_ms.c - JMicron jmb38x MemoryStick card reader
+ *
+ * Copyright (C) 2008 Alex Dubov <oakad@yahoo.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/memstick.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "jmb38x_ms"
+
+static bool no_dma;
+module_param(no_dma, bool, 0644);
+
+enum {
+ DMA_ADDRESS = 0x00,
+ BLOCK = 0x04,
+ DMA_CONTROL = 0x08,
+ TPC_P0 = 0x0c,
+ TPC_P1 = 0x10,
+ TPC = 0x14,
+ HOST_CONTROL = 0x18,
+ DATA = 0x1c,
+ STATUS = 0x20,
+ INT_STATUS = 0x24,
+ INT_STATUS_ENABLE = 0x28,
+ INT_SIGNAL_ENABLE = 0x2c,
+ TIMER = 0x30,
+ TIMER_CONTROL = 0x34,
+ PAD_OUTPUT_ENABLE = 0x38,
+ PAD_PU_PD = 0x3c,
+ CLOCK_DELAY = 0x40,
+ ADMA_ADDRESS = 0x44,
+ CLOCK_CONTROL = 0x48,
+ LED_CONTROL = 0x4c,
+ VERSION = 0x50
+};
+
+struct jmb38x_ms_host {
+ struct jmb38x_ms *chip;
+ void __iomem *addr;
+ spinlock_t lock;
+ struct tasklet_struct notify;
+ int id;
+ char host_id[32];
+ int irq;
+ unsigned int block_pos;
+ unsigned long timeout_jiffies;
+ struct timer_list timer;
+ struct memstick_host *msh;
+ struct memstick_request *req;
+ unsigned char cmd_flags;
+ unsigned char io_pos;
+ unsigned char ifmode;
+ unsigned int io_word[2];
+};
+
+struct jmb38x_ms {
+ struct pci_dev *pdev;
+ int host_cnt;
+ struct memstick_host *hosts[];
+};
+
+#define BLOCK_COUNT_MASK 0xffff0000
+#define BLOCK_SIZE_MASK 0x00000fff
+
+#define DMA_CONTROL_ENABLE 0x00000001
+
+#define TPC_DATA_SEL 0x00008000
+#define TPC_DIR 0x00004000
+#define TPC_WAIT_INT 0x00002000
+#define TPC_GET_INT 0x00000800
+#define TPC_CODE_SZ_MASK 0x00000700
+#define TPC_DATA_SZ_MASK 0x00000007
+
+#define HOST_CONTROL_TDELAY_EN 0x00040000
+#define HOST_CONTROL_HW_OC_P 0x00010000
+#define HOST_CONTROL_RESET_REQ 0x00008000
+#define HOST_CONTROL_REI 0x00004000
+#define HOST_CONTROL_LED 0x00000400
+#define HOST_CONTROL_FAST_CLK 0x00000200
+#define HOST_CONTROL_RESET 0x00000100
+#define HOST_CONTROL_POWER_EN 0x00000080
+#define HOST_CONTROL_CLOCK_EN 0x00000040
+#define HOST_CONTROL_REO 0x00000008
+#define HOST_CONTROL_IF_SHIFT 4
+
+#define HOST_CONTROL_IF_SERIAL 0x0
+#define HOST_CONTROL_IF_PAR4 0x1
+#define HOST_CONTROL_IF_PAR8 0x3
+
+#define STATUS_BUSY 0x00080000
+#define STATUS_MS_DAT7 0x00040000
+#define STATUS_MS_DAT6 0x00020000
+#define STATUS_MS_DAT5 0x00010000
+#define STATUS_MS_DAT4 0x00008000
+#define STATUS_MS_DAT3 0x00004000
+#define STATUS_MS_DAT2 0x00002000
+#define STATUS_MS_DAT1 0x00001000
+#define STATUS_MS_DAT0 0x00000800
+#define STATUS_HAS_MEDIA 0x00000400
+#define STATUS_FIFO_EMPTY 0x00000200
+#define STATUS_FIFO_FULL 0x00000100
+#define STATUS_MS_CED 0x00000080
+#define STATUS_MS_ERR 0x00000040
+#define STATUS_MS_BRQ 0x00000020
+#define STATUS_MS_CNK 0x00000001
+
+#define INT_STATUS_TPC_ERR 0x00080000
+#define INT_STATUS_CRC_ERR 0x00040000
+#define INT_STATUS_TIMER_TO 0x00020000
+#define INT_STATUS_HSK_TO 0x00010000
+#define INT_STATUS_ANY_ERR 0x00008000
+#define INT_STATUS_FIFO_WRDY 0x00000080
+#define INT_STATUS_FIFO_RRDY 0x00000040
+#define INT_STATUS_MEDIA_OUT 0x00000010
+#define INT_STATUS_MEDIA_IN 0x00000008
+#define INT_STATUS_DMA_BOUNDARY 0x00000004
+#define INT_STATUS_EOTRAN 0x00000002
+#define INT_STATUS_EOTPC 0x00000001
+
+#define INT_STATUS_ALL 0x000f801f
+
+#define PAD_OUTPUT_ENABLE_MS 0x0F3F
+
+#define PAD_PU_PD_OFF 0x7FFF0000
+#define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
+#define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
+
+#define CLOCK_CONTROL_BY_MMIO 0x00000008
+#define CLOCK_CONTROL_40MHZ 0x00000001
+#define CLOCK_CONTROL_50MHZ 0x00000002
+#define CLOCK_CONTROL_60MHZ 0x00000010
+#define CLOCK_CONTROL_62_5MHZ 0x00000004
+#define CLOCK_CONTROL_OFF 0x00000000
+
+#define PCI_CTL_CLOCK_DLY_ADDR 0x000000b0
+
+enum {
+ CMD_READY = 0x01,
+ FIFO_READY = 0x02,
+ REG_DATA = 0x04,
+ DMA_DATA = 0x08
+};
+
+static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host,
+ unsigned char *buf, unsigned int length)
+{
+ unsigned int off = 0;
+
+ while (host->io_pos && length) {
+ buf[off++] = host->io_word[0] & 0xff;
+ host->io_word[0] >>= 8;
+ length--;
+ host->io_pos--;
+ }
+
+ if (!length)
+ return off;
+
+ while (!(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) {
+ if (length < 4)
+ break;
+ *(unsigned int *)(buf + off) = __raw_readl(host->addr + DATA);
+ length -= 4;
+ off += 4;
+ }
+
+ if (length
+ && !(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) {
+ host->io_word[0] = readl(host->addr + DATA);
+ for (host->io_pos = 4; host->io_pos; --host->io_pos) {
+ buf[off++] = host->io_word[0] & 0xff;
+ host->io_word[0] >>= 8;
+ length--;
+ if (!length)
+ break;
+ }
+ }
+
+ return off;
+}
+
+static unsigned int jmb38x_ms_read_reg_data(struct jmb38x_ms_host *host,
+ unsigned char *buf,
+ unsigned int length)
+{
+ unsigned int off = 0;
+
+ while (host->io_pos > 4 && length) {
+ buf[off++] = host->io_word[0] & 0xff;
+ host->io_word[0] >>= 8;
+ length--;
+ host->io_pos--;
+ }
+
+ if (!length)
+ return off;
+
+ while (host->io_pos && length) {
+ buf[off++] = host->io_word[1] & 0xff;
+ host->io_word[1] >>= 8;
+ length--;
+ host->io_pos--;
+ }
+
+ return off;
+}
+
+static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host,
+ unsigned char *buf,
+ unsigned int length)
+{
+ unsigned int off = 0;
+
+ if (host->io_pos) {
+ while (host->io_pos < 4 && length) {
+ host->io_word[0] |= buf[off++] << (host->io_pos * 8);
+ host->io_pos++;
+ length--;
+ }
+ }
+
+ if (host->io_pos == 4
+ && !(STATUS_FIFO_FULL & readl(host->addr + STATUS))) {
+ writel(host->io_word[0], host->addr + DATA);
+ host->io_pos = 0;
+ host->io_word[0] = 0;
+ } else if (host->io_pos) {
+ return off;
+ }
+
+ if (!length)
+ return off;
+
+ while (!(STATUS_FIFO_FULL & readl(host->addr + STATUS))) {
+ if (length < 4)
+ break;
+
+ __raw_writel(*(unsigned int *)(buf + off),
+ host->addr + DATA);
+ length -= 4;
+ off += 4;
+ }
+
+ switch (length) {
+ case 3:
+ host->io_word[0] |= buf[off + 2] << 16;
+ host->io_pos++;
+ fallthrough;
+ case 2:
+ host->io_word[0] |= buf[off + 1] << 8;
+ host->io_pos++;
+ fallthrough;
+ case 1:
+ host->io_word[0] |= buf[off];
+ host->io_pos++;
+ }
+
+ off += host->io_pos;
+
+ return off;
+}
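+
+/*
+ * Editorial sketch, not part of the upstream driver: the switch above packs
+ * up to three trailing bytes into io_word[0] least-significant byte first,
+ * mirroring how the read path unpacks them.  A reduced helper (hypothetical
+ * name, illustration only):
+ */
+static inline unsigned int jmb38x_ms_pack_word(const unsigned char *buf,
+					       unsigned int len)
+{
+	unsigned int word = 0, i;
+
+	/* byte 0 lands in bits 0-7, byte 1 in bits 8-15, and so on */
+	for (i = 0; i < len && i < 4; ++i)
+		word |= (unsigned int)buf[i] << (8 * i);
+	return word;
+}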
+
+static unsigned int jmb38x_ms_write_reg_data(struct jmb38x_ms_host *host,
+ unsigned char *buf,
+ unsigned int length)
+{
+ unsigned int off = 0;
+
+ while (host->io_pos < 4 && length) {
+ host->io_word[0] &= ~(0xff << (host->io_pos * 8));
+ host->io_word[0] |= buf[off++] << (host->io_pos * 8);
+ host->io_pos++;
+ length--;
+ }
+
+ if (!length)
+ return off;
+
+ while (host->io_pos < 8 && length) {
+ host->io_word[1] &= ~(0xff << (host->io_pos * 8));
+ host->io_word[1] |= buf[off++] << (host->io_pos * 8);
+ host->io_pos++;
+ length--;
+ }
+
+ return off;
+}
+
+static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
+{
+ unsigned int length;
+ unsigned int off;
+ unsigned int t_size, p_cnt;
+ unsigned char *buf;
+ struct page *pg;
+ unsigned long flags = 0;
+
+ if (host->req->long_data) {
+ length = host->req->sg.length - host->block_pos;
+ off = host->req->sg.offset + host->block_pos;
+ } else {
+ length = host->req->data_len - host->block_pos;
+ off = 0;
+ }
+
+ while (length) {
+ unsigned int p_off;
+
+ if (host->req->long_data) {
+ pg = nth_page(sg_page(&host->req->sg),
+ off >> PAGE_SHIFT);
+ p_off = offset_in_page(off);
+ p_cnt = PAGE_SIZE - p_off;
+ p_cnt = min(p_cnt, length);
+
+ local_irq_save(flags);
+ buf = kmap_atomic(pg) + p_off;
+ } else {
+ buf = host->req->data + host->block_pos;
+ p_cnt = host->req->data_len - host->block_pos;
+ }
+
+ if (host->req->data_dir == WRITE)
+ t_size = !(host->cmd_flags & REG_DATA)
+ ? jmb38x_ms_write_data(host, buf, p_cnt)
+ : jmb38x_ms_write_reg_data(host, buf, p_cnt);
+ else
+ t_size = !(host->cmd_flags & REG_DATA)
+ ? jmb38x_ms_read_data(host, buf, p_cnt)
+ : jmb38x_ms_read_reg_data(host, buf, p_cnt);
+
+ if (host->req->long_data) {
+ kunmap_atomic(buf - p_off);
+ local_irq_restore(flags);
+ }
+
+ if (!t_size)
+ break;
+ host->block_pos += t_size;
+ length -= t_size;
+ off += t_size;
+ }
+
+ if (!length && host->req->data_dir == WRITE) {
+ if (host->cmd_flags & REG_DATA) {
+ writel(host->io_word[0], host->addr + TPC_P0);
+ writel(host->io_word[1], host->addr + TPC_P1);
+ } else if (host->io_pos) {
+ writel(host->io_word[0], host->addr + DATA);
+ }
+ }
+
+ return length;
+}
+
+static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
+{
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+ unsigned int data_len, cmd, t_val;
+
+ if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) {
+ dev_dbg(&msh->dev, "no media status\n");
+ host->req->error = -ETIME;
+ return host->req->error;
+ }
+
+ dev_dbg(&msh->dev, "control %08x\n", readl(host->addr + HOST_CONTROL));
+ dev_dbg(&msh->dev, "status %08x\n", readl(host->addr + INT_STATUS));
+ dev_dbg(&msh->dev, "hstatus %08x\n", readl(host->addr + STATUS));
+
+ host->cmd_flags = 0;
+ host->block_pos = 0;
+ host->io_pos = 0;
+ host->io_word[0] = 0;
+ host->io_word[1] = 0;
+
+ cmd = host->req->tpc << 16;
+ cmd |= TPC_DATA_SEL;
+
+ if (host->req->data_dir == READ)
+ cmd |= TPC_DIR;
+
+ if (host->req->need_card_int) {
+ if (host->ifmode == MEMSTICK_SERIAL)
+ cmd |= TPC_GET_INT;
+ else
+ cmd |= TPC_WAIT_INT;
+ }
+
+ if (!no_dma)
+ host->cmd_flags |= DMA_DATA;
+
+ if (host->req->long_data) {
+ data_len = host->req->sg.length;
+ } else {
+ data_len = host->req->data_len;
+ host->cmd_flags &= ~DMA_DATA;
+ }
+
+ if (data_len <= 8) {
+ cmd &= ~(TPC_DATA_SEL | 0xf);
+ host->cmd_flags |= REG_DATA;
+ cmd |= data_len & 0xf;
+ host->cmd_flags &= ~DMA_DATA;
+ }
+
+ if (host->cmd_flags & DMA_DATA) {
+ if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1,
+ host->req->data_dir == READ
+ ? DMA_FROM_DEVICE
+ : DMA_TO_DEVICE)) {
+ host->req->error = -ENOMEM;
+ return host->req->error;
+ }
+ data_len = sg_dma_len(&host->req->sg);
+ writel(sg_dma_address(&host->req->sg),
+ host->addr + DMA_ADDRESS);
+ writel(((1 << 16) & BLOCK_COUNT_MASK)
+ | (data_len & BLOCK_SIZE_MASK),
+ host->addr + BLOCK);
+ writel(DMA_CONTROL_ENABLE, host->addr + DMA_CONTROL);
+ } else if (!(host->cmd_flags & REG_DATA)) {
+ writel(((1 << 16) & BLOCK_COUNT_MASK)
+ | (data_len & BLOCK_SIZE_MASK),
+ host->addr + BLOCK);
+ t_val = readl(host->addr + INT_STATUS_ENABLE);
+ t_val |= host->req->data_dir == READ
+ ? INT_STATUS_FIFO_RRDY
+ : INT_STATUS_FIFO_WRDY;
+
+ writel(t_val, host->addr + INT_STATUS_ENABLE);
+ writel(t_val, host->addr + INT_SIGNAL_ENABLE);
+ } else {
+ cmd &= ~(TPC_DATA_SEL | 0xf);
+ host->cmd_flags |= REG_DATA;
+ cmd |= data_len & 0xf;
+
+ if (host->req->data_dir == WRITE) {
+ jmb38x_ms_transfer_data(host);
+ writel(host->io_word[0], host->addr + TPC_P0);
+ writel(host->io_word[1], host->addr + TPC_P1);
+ }
+ }
+
+ mod_timer(&host->timer, jiffies + host->timeout_jiffies);
+ writel(HOST_CONTROL_LED | readl(host->addr + HOST_CONTROL),
+ host->addr + HOST_CONTROL);
+ host->req->error = 0;
+
+ writel(cmd, host->addr + TPC);
+ dev_dbg(&msh->dev, "executing TPC %08x, len %x\n", cmd, data_len);
+
+ return 0;
+}
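+
+/*
+ * Editorial sketch, not part of the upstream driver: the TPC word written to
+ * the TPC register above carries the opcode in bits 16 and up, the direction
+ * and interrupt flags, and either TPC_DATA_SEL (FIFO/DMA transfer) or the
+ * byte count in the low nibble (short register transfer of up to 8 bytes).
+ * A compact restatement using the defines above (hypothetical helper):
+ */
+static inline unsigned int jmb38x_ms_build_tpc(unsigned char tpc, bool is_read,
+					       bool want_int, bool serial,
+					       unsigned int reg_len)
+{
+	unsigned int cmd = (unsigned int)tpc << 16;
+
+	if (reg_len && reg_len <= 8)
+		cmd |= reg_len & 0xf;	/* register transfer, length in bytes */
+	else
+		cmd |= TPC_DATA_SEL;	/* data moves through the FIFO */
+	if (is_read)
+		cmd |= TPC_DIR;
+	if (want_int)
+		cmd |= serial ? TPC_GET_INT : TPC_WAIT_INT;
+	return cmd;
+}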
+
+static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
+{
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+ unsigned int t_val = 0;
+ int rc;
+
+ del_timer(&host->timer);
+
+ dev_dbg(&msh->dev, "c control %08x\n",
+ readl(host->addr + HOST_CONTROL));
+ dev_dbg(&msh->dev, "c status %08x\n",
+ readl(host->addr + INT_STATUS));
+ dev_dbg(&msh->dev, "c hstatus %08x\n", readl(host->addr + STATUS));
+
+ host->req->int_reg = readl(host->addr + STATUS) & 0xff;
+
+ writel(0, host->addr + BLOCK);
+ writel(0, host->addr + DMA_CONTROL);
+
+ if (host->cmd_flags & DMA_DATA) {
+ dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1,
+ host->req->data_dir == READ
+ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ t_val = readl(host->addr + INT_STATUS_ENABLE);
+ if (host->req->data_dir == READ)
+ t_val &= ~INT_STATUS_FIFO_RRDY;
+ else
+ t_val &= ~INT_STATUS_FIFO_WRDY;
+
+ writel(t_val, host->addr + INT_STATUS_ENABLE);
+ writel(t_val, host->addr + INT_SIGNAL_ENABLE);
+ }
+
+ writel((~HOST_CONTROL_LED) & readl(host->addr + HOST_CONTROL),
+ host->addr + HOST_CONTROL);
+
+ if (!last) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ } while (!rc && jmb38x_ms_issue_cmd(msh));
+ } else {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ if (!rc)
+ host->req->error = -ETIME;
+ } while (!rc);
+ }
+}
+
+static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
+{
+ struct memstick_host *msh = dev_id;
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+ unsigned int irq_status;
+
+ spin_lock(&host->lock);
+ irq_status = readl(host->addr + INT_STATUS);
+ dev_dbg(&host->chip->pdev->dev, "irq_status = %08x\n", irq_status);
+ if (irq_status == 0 || irq_status == (~0)) {
+ spin_unlock(&host->lock);
+ return IRQ_NONE;
+ }
+
+ if (host->req) {
+ if (irq_status & INT_STATUS_ANY_ERR) {
+ if (irq_status & INT_STATUS_CRC_ERR)
+ host->req->error = -EILSEQ;
+ else if (irq_status & INT_STATUS_TPC_ERR) {
+ dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n");
+ jmb38x_ms_complete_cmd(msh, 0);
+ } else
+ host->req->error = -ETIME;
+ } else {
+ if (host->cmd_flags & DMA_DATA) {
+ if (irq_status & INT_STATUS_EOTRAN)
+ host->cmd_flags |= FIFO_READY;
+ } else {
+ if (irq_status & (INT_STATUS_FIFO_RRDY
+ | INT_STATUS_FIFO_WRDY))
+ jmb38x_ms_transfer_data(host);
+
+ if (irq_status & INT_STATUS_EOTRAN) {
+ jmb38x_ms_transfer_data(host);
+ host->cmd_flags |= FIFO_READY;
+ }
+ }
+
+ if (irq_status & INT_STATUS_EOTPC) {
+ host->cmd_flags |= CMD_READY;
+ if (host->cmd_flags & REG_DATA) {
+ if (host->req->data_dir == READ) {
+ host->io_word[0]
+ = readl(host->addr
+ + TPC_P0);
+ host->io_word[1]
+ = readl(host->addr
+ + TPC_P1);
+ host->io_pos = 8;
+
+ jmb38x_ms_transfer_data(host);
+ }
+ host->cmd_flags |= FIFO_READY;
+ }
+ }
+ }
+ }
+
+ if (irq_status & (INT_STATUS_MEDIA_IN | INT_STATUS_MEDIA_OUT)) {
+ dev_dbg(&host->chip->pdev->dev, "media changed\n");
+ memstick_detect_change(msh);
+ }
+
+ writel(irq_status, host->addr + INT_STATUS);
+
+ if (host->req
+ && (((host->cmd_flags & CMD_READY)
+ && (host->cmd_flags & FIFO_READY))
+ || host->req->error))
+ jmb38x_ms_complete_cmd(msh, 0);
+
+ spin_unlock(&host->lock);
+ return IRQ_HANDLED;
+}
+
+static void jmb38x_ms_abort(struct timer_list *t)
+{
+ struct jmb38x_ms_host *host = from_timer(host, t, timer);
+ struct memstick_host *msh = host->msh;
+ unsigned long flags;
+
+ dev_dbg(&host->chip->pdev->dev, "abort\n");
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->req) {
+ host->req->error = -ETIME;
+ jmb38x_ms_complete_cmd(msh, 0);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void jmb38x_ms_req_tasklet(unsigned long data)
+{
+ struct memstick_host *msh = (struct memstick_host *)data;
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->req) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ dev_dbg(&host->chip->pdev->dev, "tasklet req %d\n", rc);
+ } while (!rc && jmb38x_ms_issue_cmd(msh));
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void jmb38x_ms_dummy_submit(struct memstick_host *msh)
+{
+ return;
+}
+
+static void jmb38x_ms_submit_req(struct memstick_host *msh)
+{
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+
+ tasklet_schedule(&host->notify);
+}
+
+static int jmb38x_ms_reset(struct jmb38x_ms_host *host)
+{
+ int cnt;
+
+ writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN
+ | readl(host->addr + HOST_CONTROL),
+ host->addr + HOST_CONTROL);
+
+ for (cnt = 0; cnt < 20; ++cnt) {
+ if (!(HOST_CONTROL_RESET_REQ
+ & readl(host->addr + HOST_CONTROL)))
+ goto reset_next;
+
+ ndelay(20);
+ }
+ dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
+
+reset_next:
+ writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
+ | readl(host->addr + HOST_CONTROL),
+ host->addr + HOST_CONTROL);
+
+ for (cnt = 0; cnt < 20; ++cnt) {
+ if (!(HOST_CONTROL_RESET
+ & readl(host->addr + HOST_CONTROL)))
+ goto reset_ok;
+
+ ndelay(20);
+ }
+ dev_dbg(&host->chip->pdev->dev, "reset timeout\n");
+ return -EIO;
+
+reset_ok:
+ writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
+ writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
+ return 0;
+}
+
+static int jmb38x_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param,
+ int value)
+{
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+ unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
+ unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0;
+ int rc = 0;
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ if (value == MEMSTICK_POWER_ON) {
+ rc = jmb38x_ms_reset(host);
+ if (rc)
+ return rc;
+
+ host_ctl = 7;
+ host_ctl |= HOST_CONTROL_POWER_EN
+ | HOST_CONTROL_CLOCK_EN;
+ writel(host_ctl, host->addr + HOST_CONTROL);
+
+ writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
+ : PAD_PU_PD_ON_MS_SOCK0,
+ host->addr + PAD_PU_PD);
+
+ writel(PAD_OUTPUT_ENABLE_MS,
+ host->addr + PAD_OUTPUT_ENABLE);
+
+ msleep(10);
+ dev_dbg(&host->chip->pdev->dev, "power on\n");
+ } else if (value == MEMSTICK_POWER_OFF) {
+ host_ctl &= ~(HOST_CONTROL_POWER_EN
+ | HOST_CONTROL_CLOCK_EN);
+ writel(host_ctl, host->addr + HOST_CONTROL);
+ writel(0, host->addr + PAD_OUTPUT_ENABLE);
+ writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD);
+ dev_dbg(&host->chip->pdev->dev, "power off\n");
+ } else
+ return -EINVAL;
+ break;
+ case MEMSTICK_INTERFACE:
+ dev_dbg(&host->chip->pdev->dev,
+ "Set Host Interface Mode to %d\n", value);
+ host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI |
+ HOST_CONTROL_REO);
+ host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P;
+ host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
+
+ if (value == MEMSTICK_SERIAL) {
+ host_ctl |= HOST_CONTROL_IF_SERIAL
+ << HOST_CONTROL_IF_SHIFT;
+ host_ctl |= HOST_CONTROL_REI;
+ clock_ctl |= CLOCK_CONTROL_40MHZ;
+ clock_delay = 0;
+ } else if (value == MEMSTICK_PAR4) {
+ host_ctl |= HOST_CONTROL_FAST_CLK;
+ host_ctl |= HOST_CONTROL_IF_PAR4
+ << HOST_CONTROL_IF_SHIFT;
+ host_ctl |= HOST_CONTROL_REO;
+ clock_ctl |= CLOCK_CONTROL_40MHZ;
+ clock_delay = 4;
+ } else if (value == MEMSTICK_PAR8) {
+ host_ctl |= HOST_CONTROL_FAST_CLK;
+ host_ctl |= HOST_CONTROL_IF_PAR8
+ << HOST_CONTROL_IF_SHIFT;
+ clock_ctl |= CLOCK_CONTROL_50MHZ;
+ clock_delay = 0;
+ } else
+ return -EINVAL;
+
+ writel(host_ctl, host->addr + HOST_CONTROL);
+ writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL);
+ writel(clock_ctl, host->addr + CLOCK_CONTROL);
+ pci_write_config_byte(host->chip->pdev,
+ PCI_CTL_CLOCK_DLY_ADDR + 1,
+ clock_delay);
+ host->ifmode = value;
+ break;
+ }
+ return 0;
+}
+
+#define PCI_PMOS0_CONTROL 0xae
+#define PMOS0_ENABLE 0x01
+#define PMOS0_OVERCURRENT_LEVEL_2_4V 0x06
+#define PMOS0_EN_OVERCURRENT_DEBOUNCE 0x40
+#define PMOS0_SW_LED_POLARITY_ENABLE 0x80
+#define PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \
+ PMOS0_OVERCURRENT_LEVEL_2_4V)
+#define PCI_PMOS1_CONTROL 0xbd
+#define PMOS1_ACTIVE_BITS 0x4a
+#define PCI_CLOCK_CTL 0xb9
+
+static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag)
+{
+ unsigned char val;
+
+ pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val);
+ if (flag)
+ val |= PMOS0_ACTIVE_BITS;
+ else
+ val &= ~PMOS0_ACTIVE_BITS;
+ pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val);
+ dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val);
+
+ if (pci_resource_flags(pdev, 1)) {
+ pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val);
+ if (flag)
+ val |= PMOS1_ACTIVE_BITS;
+ else
+ val &= ~PMOS1_ACTIVE_BITS;
+ pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val);
+ dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val);
+ }
+
+ pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val);
+ pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f);
+ pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01);
+ dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n");
+
+ return 0;
+}
+
+static int __maybe_unused jmb38x_ms_suspend(struct device *dev)
+{
+ struct jmb38x_ms *jm = dev_get_drvdata(dev);
+
+ int cnt;
+
+ for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
+ if (!jm->hosts[cnt])
+ break;
+ memstick_suspend_host(jm->hosts[cnt]);
+ }
+
+ device_wakeup_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused jmb38x_ms_resume(struct device *dev)
+{
+ struct jmb38x_ms *jm = dev_get_drvdata(dev);
+ int rc;
+
+ jmb38x_ms_pmos(to_pci_dev(dev), 1);
+
+ for (rc = 0; rc < jm->host_cnt; ++rc) {
+ if (!jm->hosts[rc])
+ break;
+ memstick_resume_host(jm->hosts[rc]);
+ memstick_detect_change(jm->hosts[rc]);
+ }
+
+ return 0;
+}
+
+static int jmb38x_ms_count_slots(struct pci_dev *pdev)
+{
+ int cnt, rc = 0;
+
+ for (cnt = 0; cnt < PCI_STD_NUM_BARS; ++cnt) {
+ if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt)))
+ break;
+
+ if (256 != pci_resource_len(pdev, cnt))
+ break;
+
+ ++rc;
+ }
+ return rc;
+}
+
+static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
+{
+ struct memstick_host *msh;
+ struct jmb38x_ms_host *host;
+
+ msh = memstick_alloc_host(sizeof(struct jmb38x_ms_host),
+ &jm->pdev->dev);
+ if (!msh)
+ return NULL;
+
+ host = memstick_priv(msh);
+ host->msh = msh;
+ host->chip = jm;
+ host->addr = ioremap(pci_resource_start(jm->pdev, cnt),
+ pci_resource_len(jm->pdev, cnt));
+ if (!host->addr)
+ goto err_out_free;
+
+ spin_lock_init(&host->lock);
+ host->id = cnt;
+ snprintf(host->host_id, sizeof(host->host_id), DRIVER_NAME ":slot%d",
+ host->id);
+ host->irq = jm->pdev->irq;
+ host->timeout_jiffies = msecs_to_jiffies(1000);
+
+ tasklet_init(&host->notify, jmb38x_ms_req_tasklet, (unsigned long)msh);
+ msh->request = jmb38x_ms_submit_req;
+ msh->set_param = jmb38x_ms_set_param;
+
+ msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8;
+
+ timer_setup(&host->timer, jmb38x_ms_abort, 0);
+
+ if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id,
+ msh))
+ return msh;
+
+ iounmap(host->addr);
+err_out_free:
+ memstick_free_host(msh);
+ return NULL;
+}
+
+static void jmb38x_ms_free_host(struct memstick_host *msh)
+{
+ struct jmb38x_ms_host *host = memstick_priv(msh);
+
+ free_irq(host->irq, msh);
+ iounmap(host->addr);
+ memstick_free_host(msh);
+}
+
+static int jmb38x_ms_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct jmb38x_ms *jm;
+ int pci_dev_busy = 0;
+ int rc, cnt;
+
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRIVER_NAME);
+ if (rc) {
+ pci_dev_busy = 1;
+ goto err_out;
+ }
+
+ jmb38x_ms_pmos(pdev, 1);
+
+ cnt = jmb38x_ms_count_slots(pdev);
+ if (!cnt) {
+ rc = -ENODEV;
+ pci_dev_busy = 1;
+ goto err_out_int;
+ }
+
+ jm = kzalloc(struct_size(jm, hosts, cnt), GFP_KERNEL);
+ if (!jm) {
+ rc = -ENOMEM;
+ goto err_out_int;
+ }
+
+ jm->pdev = pdev;
+ jm->host_cnt = cnt;
+ pci_set_drvdata(pdev, jm);
+
+ for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
+ jm->hosts[cnt] = jmb38x_ms_alloc_host(jm, cnt);
+ if (!jm->hosts[cnt])
+ break;
+
+ rc = memstick_add_host(jm->hosts[cnt]);
+
+ if (rc) {
+ jmb38x_ms_free_host(jm->hosts[cnt]);
+ jm->hosts[cnt] = NULL;
+ break;
+ }
+ }
+
+ if (cnt)
+ return 0;
+
+ rc = -ENODEV;
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(jm);
+err_out_int:
+ pci_release_regions(pdev);
+err_out:
+ if (!pci_dev_busy)
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void jmb38x_ms_remove(struct pci_dev *dev)
+{
+ struct jmb38x_ms *jm = pci_get_drvdata(dev);
+ struct jmb38x_ms_host *host;
+ int cnt;
+ unsigned long flags;
+
+ for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
+ if (!jm->hosts[cnt])
+ break;
+
+ host = memstick_priv(jm->hosts[cnt]);
+
+ jm->hosts[cnt]->request = jmb38x_ms_dummy_submit;
+ tasklet_kill(&host->notify);
+ writel(0, host->addr + INT_SIGNAL_ENABLE);
+ writel(0, host->addr + INT_STATUS_ENABLE);
+ dev_dbg(&jm->pdev->dev, "interrupts off\n");
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->req) {
+ host->req->error = -ETIME;
+ jmb38x_ms_complete_cmd(jm->hosts[cnt], 1);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ memstick_remove_host(jm->hosts[cnt]);
+ dev_dbg(&jm->pdev->dev, "host removed\n");
+
+ jmb38x_ms_free_host(jm->hosts[cnt]);
+ }
+
+ jmb38x_ms_pmos(dev, 0);
+
+ pci_set_drvdata(dev, NULL);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ kfree(jm);
+}
+
+static struct pci_device_id jmb38x_ms_id_tbl[] = {
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) },
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) },
+ { }
+};
+
+static SIMPLE_DEV_PM_OPS(jmb38x_ms_pm_ops, jmb38x_ms_suspend, jmb38x_ms_resume);
+
+static struct pci_driver jmb38x_ms_driver = {
+ .name = DRIVER_NAME,
+ .id_table = jmb38x_ms_id_tbl,
+ .probe = jmb38x_ms_probe,
+ .remove = jmb38x_ms_remove,
+ .driver.pm = &jmb38x_ms_pm_ops,
+};
+
+module_pci_driver(jmb38x_ms_driver);
+
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl);
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
new file mode 100644
index 000000000..461f5ffd0
--- /dev/null
+++ b/drivers/memstick/host/r592.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 - Maxim Levitsky
+ * driver for Ricoh memstick readers
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <asm/byteorder.h>
+#include <linux/swab.h>
+#include "r592.h"
+
+static bool r592_enable_dma = 1;
+static int debug;
+
+static const char *tpc_names[] = {
+ "MS_TPC_READ_MG_STATUS",
+ "MS_TPC_READ_LONG_DATA",
+ "MS_TPC_READ_SHORT_DATA",
+ "MS_TPC_READ_REG",
+ "MS_TPC_READ_QUAD_DATA",
+ "INVALID",
+ "MS_TPC_GET_INT",
+ "MS_TPC_SET_RW_REG_ADRS",
+ "MS_TPC_EX_SET_CMD",
+ "MS_TPC_WRITE_QUAD_DATA",
+ "MS_TPC_WRITE_REG",
+ "MS_TPC_WRITE_SHORT_DATA",
+ "MS_TPC_WRITE_LONG_DATA",
+ "MS_TPC_SET_CMD",
+};
+
+/**
+ * memstick_debug_get_tpc_name - debug helper that returns string for
+ * a TPC number
+ */
+static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc)
+{
+ return tpc_names[tpc-1];
+}
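+
+/*
+ * Editorial sketch, not part of the upstream driver: the helper above assumes
+ * TPC opcodes are 1-based and contiguous, matching the table.  A defensive
+ * variant (hypothetical, illustration only) would bound-check first:
+ */
+static inline const char *memstick_debug_get_tpc_name_checked(int tpc)
+{
+	if (tpc < 1 || tpc > (int)ARRAY_SIZE(tpc_names))
+		return "UNKNOWN";
+	return tpc_names[tpc - 1];
+}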
+
+/* Read a register */
+static inline u32 r592_read_reg(struct r592_device *dev, int address)
+{
+ u32 value = readl(dev->mmio + address);
+ dbg_reg("reg #%02d == 0x%08x", address, value);
+ return value;
+}
+
+/* Write a register */
+static inline void r592_write_reg(struct r592_device *dev,
+ int address, u32 value)
+{
+ dbg_reg("reg #%02d <- 0x%08x", address, value);
+ writel(value, dev->mmio + address);
+}
+
+/* Reads a big endian DWORD register */
+static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address)
+{
+ u32 value = __raw_readl(dev->mmio + address);
+ dbg_reg("reg #%02d == 0x%08x", address, value);
+ return be32_to_cpu(value);
+}
+
+/* Writes a big endian DWORD register */
+static inline void r592_write_reg_raw_be(struct r592_device *dev,
+ int address, u32 value)
+{
+ dbg_reg("reg #%02d <- 0x%08x", address, value);
+ __raw_writel(cpu_to_be32(value), dev->mmio + address);
+}
+
+/* Set specific bits in a register (little endian) */
+static inline void r592_set_reg_mask(struct r592_device *dev,
+ int address, u32 mask)
+{
+ u32 reg = readl(dev->mmio + address);
+ dbg_reg("reg #%02d |= 0x%08x (old =0x%08x)", address, mask, reg);
+	writel(reg | mask, dev->mmio + address);
+}
+
+/* Clear specific bits in a register (little endian) */
+static inline void r592_clear_reg_mask(struct r592_device *dev,
+ int address, u32 mask)
+{
+ u32 reg = readl(dev->mmio + address);
+ dbg_reg("reg #%02d &= 0x%08x (old = 0x%08x, mask = 0x%08x)",
+ address, ~mask, reg, mask);
+ writel(reg & ~mask, dev->mmio + address);
+}
+
+
+/* Wait for status bits while checking for errors */
+static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 reg = r592_read_reg(dev, R592_STATUS);
+
+ if ((reg & mask) == wanted_mask)
+ return 0;
+
+ while (time_before(jiffies, timeout)) {
+
+ reg = r592_read_reg(dev, R592_STATUS);
+
+ if ((reg & mask) == wanted_mask)
+ return 0;
+
+ if (reg & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR))
+ return -EIO;
+
+ cpu_relax();
+ }
+ return -ETIME;
+}
+
+
+/* Enable/disable device */
+static int r592_enable_device(struct r592_device *dev, bool enable)
+{
+ dbg("%sabling the device", enable ? "en" : "dis");
+
+ if (enable) {
+
+ /* Power up the card */
+ r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1);
+
+ /* Perform a reset */
+ r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
+
+ msleep(100);
+ } else
+ /* Power down the card */
+ r592_write_reg(dev, R592_POWER, 0);
+
+ return 0;
+}
+
+/* Set serial/parallel mode */
+static int r592_set_mode(struct r592_device *dev, bool parallel_mode)
+{
+ if (!parallel_mode) {
+ dbg("switching to serial mode");
+
+ /* Set serial mode */
+ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL);
+
+ r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20);
+
+ } else {
+ dbg("switching to parallel mode");
+
+		/* This setting should be set _before_ the switch TPC */
+ r592_set_reg_mask(dev, R592_POWER, R592_POWER_20);
+
+ r592_clear_reg_mask(dev, R592_IO,
+ R592_IO_SERIAL1 | R592_IO_SERIAL2);
+
+ /* Set the parallel mode now */
+ r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL);
+ }
+
+ dev->parallel_mode = parallel_mode;
+ return 0;
+}
+
+/* Perform a controller reset without powering down the card */
+static void r592_host_reset(struct r592_device *dev)
+{
+ r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
+ msleep(100);
+ r592_set_mode(dev, dev->parallel_mode);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Disable all hardware interrupts */
+static void r592_clear_interrupts(struct r592_device *dev)
+{
+ /* Disable & ACK all interrupts */
+ r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK);
+ r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK);
+}
+#endif
+
+/* Tests if there is a CRC error */
+static int r592_test_io_error(struct r592_device *dev)
+{
+ if (!(r592_read_reg(dev, R592_STATUS) &
+ (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR)))
+ return 0;
+
+ return -EIO;
+}
+
+/* Ensure that FIFO is ready for use */
+static int r592_test_fifo_empty(struct r592_device *dev)
+{
+ if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
+ return 0;
+
+ dbg("FIFO not ready, trying to reset the device");
+ r592_host_reset(dev);
+
+ if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
+ return 0;
+
+ message("FIFO still not ready, giving up");
+ return -EIO;
+}
+
+/* Activates the DMA transfer to/from the FIFO */
+static void r592_start_dma(struct r592_device *dev, bool is_write)
+{
+ unsigned long flags;
+ u32 reg;
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ /* Ack interrupts (just in case) + enable them */
+ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
+ r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
+
+ /* Set DMA address */
+ r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg));
+
+ /* Enable the DMA */
+ reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS);
+ reg |= R592_FIFO_DMA_SETTINGS_EN;
+
+ if (!is_write)
+ reg |= R592_FIFO_DMA_SETTINGS_DIR;
+ else
+ reg &= ~R592_FIFO_DMA_SETTINGS_DIR;
+ r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg);
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+/* Cleans up DMA-related settings */
+static void r592_stop_dma(struct r592_device *dev, int error)
+{
+ r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS,
+ R592_FIFO_DMA_SETTINGS_EN);
+
+	/* This is only a precaution */
+ r592_write_reg(dev, R592_FIFO_DMA,
+ dev->dummy_dma_page_physical_address);
+
+ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
+ r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
+ dev->dma_error = error;
+}
+
+/* Test if hardware supports DMA */
+static void r592_check_dma(struct r592_device *dev)
+{
+ dev->dma_capable = r592_enable_dma &&
+ (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) &
+ R592_FIFO_DMA_SETTINGS_CAP);
+}
+
+/* Transfers fifo contents in/out using DMA */
+static int r592_transfer_fifo_dma(struct r592_device *dev)
+{
+ int len, sg_count;
+ bool is_write;
+
+ if (!dev->dma_capable || !dev->req->long_data)
+ return -EINVAL;
+
+ len = dev->req->sg.length;
+ is_write = dev->req->data_dir == WRITE;
+
+ if (len != R592_LFIFO_SIZE)
+ return -EINVAL;
+
+ dbg_verbose("doing dma transfer");
+
+ dev->dma_error = 0;
+ reinit_completion(&dev->dma_done);
+
+	/* TODO: hidden assumption about nents being always 1 */
+ sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
+ message("problem in dma_map_sg");
+ return -EIO;
+ }
+
+ r592_start_dma(dev, is_write);
+
+ /* Wait for DMA completion */
+ if (!wait_for_completion_timeout(
+ &dev->dma_done, msecs_to_jiffies(1000))) {
+ message("DMA timeout");
+ r592_stop_dma(dev, -ETIMEDOUT);
+ }
+
+ dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ return dev->dma_error;
+}
+
+/*
+ * Writes the FIFO in 4 byte chunks.
+ * If the length isn't 4 byte aligned, the remaining bytes are put into a
+ * spill fifo to be written later.
+ * Use r592_flush_fifo_write to flush that fifo after the last write.
+ */
+static void r592_write_fifo_pio(struct r592_device *dev,
+ unsigned char *buffer, int len)
+{
+ /* flush spill from former write */
+ if (!kfifo_is_empty(&dev->pio_fifo)) {
+
+ u8 tmp[4] = {0};
+ int copy_len = kfifo_in(&dev->pio_fifo, buffer, len);
+
+ if (!kfifo_is_full(&dev->pio_fifo))
+ return;
+ len -= copy_len;
+ buffer += copy_len;
+
+ copy_len = kfifo_out(&dev->pio_fifo, tmp, 4);
+ WARN_ON(copy_len != 4);
+ r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp);
+ }
+
+ WARN_ON(!kfifo_is_empty(&dev->pio_fifo));
+
+ /* write full dwords */
+ while (len >= 4) {
+ r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
+ buffer += 4;
+ len -= 4;
+ }
+
+ /* put remaining bytes to the spill */
+ if (len)
+ kfifo_in(&dev->pio_fifo, buffer, len);
+}
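+
+/*
+ * Editorial sketch, not part of the upstream driver: the kfifo above acts as
+ * a 4-byte staging buffer -- bytes accumulate until a whole dword is there,
+ * which is then written out in one go, and r592_flush_fifo_write() zero-pads
+ * and emits whatever is left after the final call.  A minimal restatement
+ * with a plain array (hypothetical names, illustration only):
+ */
+struct r592_pio_spill {
+	u8 buf[4];
+	unsigned int used;
+};
+
+/* stage one byte; returns true and fills *word (same reinterpretation as
+ * '*(u32 *)tmp' above) once four bytes have been collected */
+static inline bool r592_spill_feed(struct r592_pio_spill *s, u8 byte, u32 *word)
+{
+	s->buf[s->used++] = byte;
+	if (s->used < 4)
+		return false;
+	*word = *(u32 *)s->buf;
+	s->used = 0;
+	return true;
+}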
+
+/* Flushes the temporary FIFO used to make aligned DWORD writes */
+static void r592_flush_fifo_write(struct r592_device *dev)
+{
+ int ret;
+ u8 buffer[4] = { 0 };
+
+ if (kfifo_is_empty(&dev->pio_fifo))
+ return;
+
+ ret = kfifo_out(&dev->pio_fifo, buffer, 4);
+ /* intentionally ignore __must_check return code */
+ (void)ret;
+ r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
+}
+
+/*
+ * Reads the FIFO in 4 byte chunks.
+ * If the input doesn't fit the buffer, the leftover bytes of the last dword
+ * are kept in the spill buffer so they aren't lost; on the final read they
+ * are simply thrown away.
+ */
+static void r592_read_fifo_pio(struct r592_device *dev,
+ unsigned char *buffer, int len)
+{
+ u8 tmp[4];
+
+ /* Read from last spill */
+ if (!kfifo_is_empty(&dev->pio_fifo)) {
+ int bytes_copied =
+ kfifo_out(&dev->pio_fifo, buffer, min(4, len));
+ buffer += bytes_copied;
+ len -= bytes_copied;
+
+ if (!kfifo_is_empty(&dev->pio_fifo))
+ return;
+ }
+
+ /* Reads dwords from FIFO */
+ while (len >= 4) {
+ *(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
+ buffer += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ *(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
+ kfifo_in(&dev->pio_fifo, tmp, 4);
+ len -= kfifo_out(&dev->pio_fifo, buffer, len);
+ }
+
+ WARN_ON(len);
+ return;
+}
+
+/* Transfers actual data using PIO. */
+static int r592_transfer_fifo_pio(struct r592_device *dev)
+{
+ unsigned long flags;
+
+ bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ struct sg_mapping_iter miter;
+
+ kfifo_reset(&dev->pio_fifo);
+
+ if (!dev->req->long_data) {
+ if (is_write) {
+ r592_write_fifo_pio(dev, dev->req->data,
+ dev->req->data_len);
+ r592_flush_fifo_write(dev);
+ } else
+ r592_read_fifo_pio(dev, dev->req->data,
+ dev->req->data_len);
+ return 0;
+ }
+
+ local_irq_save(flags);
+ sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
+ (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));
+
+	/* Do the transfer fifo <-> memory */
+ while (sg_miter_next(&miter))
+ if (is_write)
+ r592_write_fifo_pio(dev, miter.addr, miter.length);
+ else
+ r592_read_fifo_pio(dev, miter.addr, miter.length);
+
+
+	/* Write the last few non-aligned bytes */
+ if (is_write)
+ r592_flush_fifo_write(dev);
+
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+ return 0;
+}
+
+/* Executes one TPC (data is read/written from small or large fifo) */
+static void r592_execute_tpc(struct r592_device *dev)
+{
+ bool is_write;
+ int len, error;
+ u32 status, reg;
+
+ if (!dev->req) {
+ message("BUG: tpc execution without request!");
+ return;
+ }
+
+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ len = dev->req->long_data ?
+ dev->req->sg.length : dev->req->data_len;
+
+ /* Ensure that FIFO can hold the input data */
+ if (len > R592_LFIFO_SIZE) {
+		message("IO: hardware doesn't support TPCs longer than 512");
+ error = -ENOSYS;
+ goto out;
+ }
+
+ if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) {
+ dbg("IO: refusing to send TPC because card is absent");
+ error = -ENODEV;
+ goto out;
+ }
+
+ dbg("IO: executing %s LEN=%d",
+ memstick_debug_get_tpc_name(dev->req->tpc), len);
+
+ /* Set IO direction */
+ if (is_write)
+ r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
+ else
+ r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
+
+
+ error = r592_test_fifo_empty(dev);
+ if (error)
+ goto out;
+
+ /* Transfer write data */
+ if (is_write) {
+ error = r592_transfer_fifo_dma(dev);
+ if (error == -EINVAL)
+ error = r592_transfer_fifo_pio(dev);
+ }
+
+ if (error)
+ goto out;
+
+ /* Trigger the TPC */
+ reg = (len << R592_TPC_EXEC_LEN_SHIFT) |
+ (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) |
+ R592_TPC_EXEC_BIG_FIFO;
+
+ r592_write_reg(dev, R592_TPC_EXEC, reg);
+
+ /* Wait for TPC completion */
+ status = R592_STATUS_RDY;
+ if (dev->req->need_card_int)
+ status |= R592_STATUS_CED;
+
+ error = r592_wait_status(dev, status, status);
+ if (error) {
+ message("card didn't respond");
+ goto out;
+ }
+
+ /* Test IO errors */
+ error = r592_test_io_error(dev);
+ if (error) {
+ dbg("IO error");
+ goto out;
+ }
+
+ /* Read data from FIFO */
+ if (!is_write) {
+ error = r592_transfer_fifo_dma(dev);
+ if (error == -EINVAL)
+ error = r592_transfer_fifo_pio(dev);
+ }
+
+	/* Read the INT register. This could be shortened with shifts, but
+	 * this way it is more readable */
+ if (dev->parallel_mode && dev->req->need_card_int) {
+
+ dev->req->int_reg = 0;
+ status = r592_read_reg(dev, R592_STATUS);
+
+ if (status & R592_STATUS_P_CMDNACK)
+ dev->req->int_reg |= MEMSTICK_INT_CMDNAK;
+ if (status & R592_STATUS_P_BREQ)
+ dev->req->int_reg |= MEMSTICK_INT_BREQ;
+ if (status & R592_STATUS_P_INTERR)
+ dev->req->int_reg |= MEMSTICK_INT_ERR;
+ if (status & R592_STATUS_P_CED)
+ dev->req->int_reg |= MEMSTICK_INT_CED;
+ }
+
+ if (error)
+ dbg("FIFO read error");
+out:
+ dev->req->error = error;
+ r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED);
+ return;
+}
+
+/* Main request processing thread */
+static int r592_process_thread(void *data)
+{
+ int error;
+ struct r592_device *dev = (struct r592_device *)data;
+ unsigned long flags;
+
+ while (!kthread_should_stop()) {
+ spin_lock_irqsave(&dev->io_thread_lock, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ error = memstick_next_req(dev->host, &dev->req);
+ spin_unlock_irqrestore(&dev->io_thread_lock, flags);
+
+ if (error) {
+ if (error == -ENXIO || error == -EAGAIN) {
+ dbg_verbose("IO: done IO, sleeping");
+ } else {
+ dbg("IO: unknown error from "
+ "memstick_next_req %d", error);
+ }
+
+ if (kthread_should_stop())
+ set_current_state(TASK_RUNNING);
+
+ schedule();
+ } else {
+ set_current_state(TASK_RUNNING);
+ r592_execute_tpc(dev);
+ }
+ }
+ return 0;
+}
+
+/* Reprogram the chip to detect a change in card state:
+ * e.g. if a card is detected, arm it to detect removal, and vice versa */
+static void r592_update_card_detect(struct r592_device *dev)
+{
+ u32 reg = r592_read_reg(dev, R592_REG_MSC);
+ bool card_detected = reg & R592_REG_MSC_PRSNT;
+
+ dbg("update card detect. card state: %s", card_detected ?
+ "present" : "absent");
+
+ reg &= ~((R592_REG_MSC_IRQ_REMOVE | R592_REG_MSC_IRQ_INSERT) << 16);
+
+ if (card_detected)
+ reg |= (R592_REG_MSC_IRQ_REMOVE << 16);
+ else
+ reg |= (R592_REG_MSC_IRQ_INSERT << 16);
+
+ r592_write_reg(dev, R592_REG_MSC, reg);
+}
+
+/* Timer routine that fires shortly after the last card detection event */
+static void r592_detect_timer(struct timer_list *t)
+{
+ struct r592_device *dev = from_timer(dev, t, detect_timer);
+ r592_update_card_detect(dev);
+ memstick_detect_change(dev->host);
+}
+
+/* Interrupt handler */
+static irqreturn_t r592_irq(int irq, void *data)
+{
+ struct r592_device *dev = (struct r592_device *)data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 reg;
+ u16 irq_enable, irq_status;
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ reg = r592_read_reg(dev, R592_REG_MSC);
+ irq_enable = reg >> 16;
+ irq_status = reg & 0xFFFF;
+
+ /* Ack the interrupts */
+ reg &= ~irq_status;
+ r592_write_reg(dev, R592_REG_MSC, reg);
+
+ /* Get the IRQ status minus bits that aren't enabled */
+ irq_status &= (irq_enable);
+
+	/* Due to a limitation of the memstick core, we don't look at bits
+	 * that indicate that the card was removed/inserted and/or is present */
+ if (irq_status & (R592_REG_MSC_IRQ_INSERT | R592_REG_MSC_IRQ_REMOVE)) {
+
+ bool card_was_added = irq_status & R592_REG_MSC_IRQ_INSERT;
+ ret = IRQ_HANDLED;
+
+ message("IRQ: card %s", card_was_added ? "added" : "removed");
+
+ mod_timer(&dev->detect_timer,
+ jiffies + msecs_to_jiffies(card_was_added ? 500 : 50));
+ }
+
+ if (irq_status &
+ (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)) {
+ ret = IRQ_HANDLED;
+
+ if (irq_status & R592_REG_MSC_FIFO_DMA_ERR) {
+ message("IRQ: DMA error");
+ error = -EIO;
+ } else {
+ dbg_verbose("IRQ: dma done");
+ error = 0;
+ }
+
+ r592_stop_dma(dev, error);
+ complete(&dev->dma_done);
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ return ret;
+}
+
+/* External interface: set settings */
+static int r592_set_param(struct memstick_host *host,
+ enum memstick_param param, int value)
+{
+ struct r592_device *dev = memstick_priv(host);
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ switch (value) {
+ case MEMSTICK_POWER_ON:
+ return r592_enable_device(dev, true);
+ case MEMSTICK_POWER_OFF:
+ return r592_enable_device(dev, false);
+ default:
+ return -EINVAL;
+ }
+ case MEMSTICK_INTERFACE:
+ switch (value) {
+ case MEMSTICK_SERIAL:
+ return r592_set_mode(dev, 0);
+ case MEMSTICK_PAR4:
+ return r592_set_mode(dev, 1);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+/* External interface: submit requests */
+static void r592_submit_req(struct memstick_host *host)
+{
+ struct r592_device *dev = memstick_priv(host);
+ unsigned long flags;
+
+ if (dev->req)
+ return;
+
+ spin_lock_irqsave(&dev->io_thread_lock, flags);
+ if (wake_up_process(dev->io_thread))
+ dbg_verbose("IO thread woken to process requests");
+ spin_unlock_irqrestore(&dev->io_thread_lock, flags);
+}
+
+static const struct pci_device_id r592_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0592), },
+ { },
+};
+
+/* Main entry */
+static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int error = -ENOMEM;
+ struct memstick_host *host;
+ struct r592_device *dev;
+
+ /* Allocate memory */
+ host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev);
+ if (!host)
+ goto error1;
+
+ dev = memstick_priv(host);
+ dev->host = host;
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ /* pci initialization */
+ error = pci_enable_device(pdev);
+ if (error)
+ goto error2;
+
+ pci_set_master(pdev);
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error3;
+
+ error = pci_request_regions(pdev, DRV_NAME);
+ if (error)
+ goto error3;
+
+ dev->mmio = pci_ioremap_bar(pdev, 0);
+ if (!dev->mmio) {
+ error = -ENOMEM;
+ goto error4;
+ }
+
+ dev->irq = pdev->irq;
+ spin_lock_init(&dev->irq_lock);
+ spin_lock_init(&dev->io_thread_lock);
+ init_completion(&dev->dma_done);
+ INIT_KFIFO(dev->pio_fifo);
+ timer_setup(&dev->detect_timer, r592_detect_timer, 0);
+
+ /* Host initialization */
+ host->caps = MEMSTICK_CAP_PAR4;
+ host->request = r592_submit_req;
+ host->set_param = r592_set_param;
+ r592_check_dma(dev);
+
+ dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io");
+ if (IS_ERR(dev->io_thread)) {
+ error = PTR_ERR(dev->io_thread);
+ goto error5;
+ }
+
+	/* This is just a precaution, so don't fail */
+ dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &dev->dummy_dma_page_physical_address, GFP_KERNEL);
+	r592_stop_dma(dev, 0);
+
+ error = request_irq(dev->irq, &r592_irq, IRQF_SHARED,
+ DRV_NAME, dev);
+ if (error)
+ goto error6;
+
+ r592_update_card_detect(dev);
+ error = memstick_add_host(host);
+ if (error)
+ goto error7;
+
+ message("driver successfully loaded");
+ return 0;
+error7:
+ free_irq(dev->irq, dev);
+error6:
+ if (dev->dummy_dma_page)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+
+ kthread_stop(dev->io_thread);
+error5:
+ iounmap(dev->mmio);
+error4:
+ pci_release_regions(pdev);
+error3:
+ pci_disable_device(pdev);
+error2:
+ memstick_free_host(host);
+error1:
+ return error;
+}
+
+static void r592_remove(struct pci_dev *pdev)
+{
+ int error = 0;
+ struct r592_device *dev = pci_get_drvdata(pdev);
+
+	/* Stop the processing thread.
+	   This ensures that we won't take any more requests */
+ kthread_stop(dev->io_thread);
+ del_timer_sync(&dev->detect_timer);
+ r592_enable_device(dev, false);
+
+ while (!error && dev->req) {
+ dev->req->error = -ETIME;
+ error = memstick_next_req(dev->host, &dev->req);
+ }
+ memstick_remove_host(dev->host);
+
+ if (dev->dummy_dma_page)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+
+ free_irq(dev->irq, dev);
+ iounmap(dev->mmio);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ memstick_free_host(dev->host);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int r592_suspend(struct device *core_dev)
+{
+ struct r592_device *dev = dev_get_drvdata(core_dev);
+
+ r592_clear_interrupts(dev);
+ memstick_suspend_host(dev->host);
+ del_timer_sync(&dev->detect_timer);
+ return 0;
+}
+
+static int r592_resume(struct device *core_dev)
+{
+ struct r592_device *dev = dev_get_drvdata(core_dev);
+
+ r592_clear_interrupts(dev);
+ r592_enable_device(dev, false);
+ memstick_resume_host(dev->host);
+ r592_update_card_detect(dev);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
+
+MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl);
+
+static struct pci_driver r592_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r592_pci_id_tbl,
+ .probe = r592_probe,
+ .remove = r592_remove,
+ .driver.pm = &r592_pm_ops,
+};
+
+module_pci_driver(r592_pci_driver);
+
+module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-3)");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh R5C592 Memstick/Memstick PRO card reader driver");
diff --git a/drivers/memstick/host/r592.h b/drivers/memstick/host/r592.h
new file mode 100644
index 000000000..c161db70c
--- /dev/null
+++ b/drivers/memstick/host/r592.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2010 - Maxim Levitsky
+ * driver for Ricoh memstick readers
+ */
+
+#ifndef R592_H
+
+#include <linux/memstick.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/ctype.h>
+
+/* write to this reg (number,len) triggers TPC execution */
+#define R592_TPC_EXEC 0x00
+#define R592_TPC_EXEC_LEN_SHIFT 16 /* Bits 16..25 are TPC len */
+#define R592_TPC_EXEC_BIG_FIFO (1 << 26) /* If bit 26 is set, large fifo is used (reg 48) */
+#define R592_TPC_EXEC_TPC_SHIFT 28 /* Bits 28..31 are the TPC number */
+
+
+/* Window for the small TPC fifo (big endian) */
+/* Reads and writes are always done in 8 byte chunks */
+/* Not used in the driver, because the large fifo does a better job */
+#define R592_SFIFO 0x08
+
+
+/* Status register (ms int, small fifo, IO)*/
+#define R592_STATUS 0x10
+ /* Parallel INT bits */
+#define R592_STATUS_P_CMDNACK (1 << 16) /* INT reg: NACK (parallel mode) */
+#define R592_STATUS_P_BREQ (1 << 17) /* INT reg: card ready (parallel mode)*/
+#define R592_STATUS_P_INTERR (1 << 18) /* INT reg: int error (parallel mode)*/
+#define R592_STATUS_P_CED (1 << 19) /* INT reg: command done (parallel mode) */
+
+ /* Fifo status */
+#define R592_STATUS_SFIFO_FULL (1 << 20) /* Small Fifo almost full (last chunk is written) */
+#define R592_STATUS_SFIFO_EMPTY (1 << 21) /* Small Fifo empty */
+
+ /* Error detection via CRC */
+#define R592_STATUS_SEND_ERR (1 << 24) /* Send failed */
+#define R592_STATUS_RECV_ERR (1 << 25) /* Receive failed */
+
+ /* Card state */
+#define R592_STATUS_RDY (1 << 28) /* RDY signal received */
+#define R592_STATUS_CED (1 << 29) /* INT: Command done (serial mode)*/
+#define R592_STATUS_SFIFO_INPUT (1 << 30) /* Small fifo received data*/
+
+#define R592_SFIFO_SIZE 32 /* total size of small fifo is 32 bytes */
+#define R592_SFIFO_PACKET 8 /* packet size of small fifo */
+
+/* IO control */
+#define R592_IO 0x18
+#define R592_IO_16 (1 << 16) /* Set by default, can be cleared */
+#define R592_IO_18 (1 << 18) /* Set by default, can be cleared */
+#define R592_IO_SERIAL1 (1 << 20) /* Set by default, can be cleared, (cleared on parallel) */
+#define R592_IO_22 (1 << 22) /* Set by default, can be cleared */
+#define R592_IO_DIRECTION (1 << 24) /* TPC direction (1 write 0 read) */
+#define R592_IO_26 (1 << 26) /* Set by default, can be cleared */
+#define R592_IO_SERIAL2 (1 << 30) /* Set by default, can be cleared (cleared on parallel), serial doesn't work if unset */
+#define R592_IO_RESET (1 << 31) /* Reset, sets defaults*/
+
+
+/* Turns hardware on/off */
+#define R592_POWER 0x20 /* bits 0-7 writeable */
+#define R592_POWER_0 (1 << 0) /* set on start, cleared on stop - must be set*/
+#define R592_POWER_1 (1 << 1) /* set on start, cleared on stop - must be set*/
+#define R592_POWER_3 (1 << 3) /* must be clear */
+#define R592_POWER_20 (1 << 5) /* set before switch to parallel */
+
+/* IO mode*/
+#define R592_IO_MODE 0x24
+#define R592_IO_MODE_SERIAL 1
+#define R592_IO_MODE_PARALLEL 3
+
+
+/* IRQ,card detection,large fifo (first word irq status, second enable) */
+/* IRQs are ACKed by clearing the bits */
+#define R592_REG_MSC 0x28
+#define R592_REG_MSC_PRSNT (1 << 1) /* card present (only status) */
+#define R592_REG_MSC_IRQ_INSERT (1 << 8) /* detect insert / card inserted */
+#define R592_REG_MSC_IRQ_REMOVE (1 << 9) /* detect removal / card removed */
+#define R592_REG_MSC_FIFO_EMPTY (1 << 10) /* fifo is empty */
+#define R592_REG_MSC_FIFO_DMA_DONE (1 << 11) /* dma enable / dma done */
+
+#define R592_REG_MSC_FIFO_USER_ORN (1 << 12) /* set if software reads empty fifo (if R592_REG_MSC_FIFO_EMPTY is set) */
+#define R592_REG_MSC_FIFO_MISMATH (1 << 13) /* set if amount of data in fifo doesn't match amount in TPC */
+#define R592_REG_MSC_FIFO_DMA_ERR (1 << 14) /* IO failure */
+#define R592_REG_MSC_LED (1 << 15) /* clear to turn led off (only status)*/
+
+#define DMA_IRQ_ACK_MASK \
+ (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)
+
+#define DMA_IRQ_EN_MASK (DMA_IRQ_ACK_MASK << 16)
+
+#define IRQ_ALL_ACK_MASK 0x00007F00
+#define IRQ_ALL_EN_MASK (IRQ_ALL_ACK_MASK << 16)
+
+/* DMA address for large FIFO read/writes*/
+#define R592_FIFO_DMA 0x2C
+
+/* PIO access to large FIFO (512 bytes) (big endian)*/
+#define R592_FIFO_PIO 0x30
+#define R592_LFIFO_SIZE 512 /* large fifo size */
+
+
+/* large FIFO DMA settings */
+#define R592_FIFO_DMA_SETTINGS 0x34
+#define R592_FIFO_DMA_SETTINGS_EN (1 << 0) /* DMA enabled */
+#define R592_FIFO_DMA_SETTINGS_DIR (1 << 1) /* DMA direction (1 read, 0 write) */
+#define R592_FIFO_DMA_SETTINGS_CAP (1 << 24) /* DMA is available */
+
+/* Maybe just a delay */
+/* Bits 17..19 are just a number */
+/* Software sets bit 16, then waits for the hardware to set bit 20 */
+/* The time to wait is about 50 spins * 2 ^ (bits 17..19) */
+/* Seems safe to just ignore */
+/* Probably a debug register */
+#define R592_REG38 0x38
+#define R592_REG38_CHANGE (1 << 16) /* Start bit */
+#define R592_REG38_DONE (1 << 20) /* HW set this after the delay */
+#define R592_REG38_SHIFT 17
+
+/* Debug register, written (0xABCDEF00) when an error happens - not used */
+#define R592_REG_3C 0x3C
+
+struct r592_device {
+ struct pci_dev *pci_dev;
+ struct memstick_host *host; /* host backpointer */
+ struct memstick_request *req; /* current request */
+
+ /* Registers, IRQ */
+ void __iomem *mmio;
+ int irq;
+ spinlock_t irq_lock;
+ spinlock_t io_thread_lock;
+ struct timer_list detect_timer;
+
+ struct task_struct *io_thread;
+ bool parallel_mode;
+
+ DECLARE_KFIFO(pio_fifo, u8, sizeof(u32));
+
+ /* DMA area */
+ int dma_capable;
+ int dma_error;
+ struct completion dma_done;
+ void *dummy_dma_page;
+ dma_addr_t dummy_dma_page_physical_address;
+
+};
+
+#define DRV_NAME "r592"
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG DRV_NAME \
+ ": " format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+#define dbg_reg(format, ...) __dbg(3, format, ## __VA_ARGS__)
+
+#endif
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
new file mode 100644
index 000000000..15720a4af
--- /dev/null
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Realtek PCI-Express Memstick Card Interface driver
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/memstick.h>
+#include <linux/rtsx_pci.h>
+#include <asm/unaligned.h>
+
+struct realtek_pci_ms {
+ struct platform_device *pdev;
+ struct rtsx_pcr *pcr;
+ struct memstick_host *msh;
+ struct memstick_request *req;
+
+ struct mutex host_mutex;
+ struct work_struct handle_req;
+
+ u8 ssc_depth;
+ unsigned int clock;
+ unsigned char ifmode;
+ bool eject;
+};
+
+static inline struct device *ms_dev(struct realtek_pci_ms *host)
+{
+ return &(host->pdev->dev);
+}
+
+static inline void ms_clear_error(struct realtek_pci_ms *host)
+{
+ rtsx_pci_write_register(host->pcr, CARD_STOP,
+ MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR);
+}
+
+#ifdef DEBUG
+
+static void ms_print_debug_regs(struct realtek_pci_ms *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ u16 i;
+ u8 *ptr;
+
+ /* Print MS host internal registers */
+ rtsx_pci_init_cmd(pcr);
+ for (i = 0xFD40; i <= 0xFD44; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
+ for (i = 0xFD52; i <= 0xFD69; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
+ rtsx_pci_send_cmd(pcr, 100);
+
+ ptr = rtsx_pci_get_cmd_data(pcr);
+ for (i = 0xFD40; i <= 0xFD44; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+ for (i = 0xFD52; i <= 0xFD69; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+}
+
+#else
+
+#define ms_print_debug_regs(host)
+
+#endif
+
+static int ms_power_on(struct realtek_pci_ms *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_MS);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
+ MS_CLK_EN, MS_CLK_EN);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_MS_CARD);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_on(pcr, RTSX_MS_CARD);
+ if (err < 0)
+ return err;
+
+	/* Wait for MS power to stabilize */
+ msleep(150);
+
+ err = rtsx_pci_write_register(pcr, CARD_OE,
+ MS_OUTPUT_EN, MS_OUTPUT_EN);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int ms_power_off(struct realtek_pci_ms *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+}
+
+static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
+ u8 tpc, u8 cfg, struct scatterlist *sg)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ unsigned int length = sg->length;
+ u16 sec_cnt = (u16)(length / 512);
+ u8 val, trans_mode, dma_dir;
+ struct memstick_dev *card = host->msh->card;
+ bool pro_card = card->id.type == MEMSTICK_TYPE_PRO;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n",
+ __func__, tpc, (data_dir == READ) ? "READ" : "WRITE",
+ length);
+
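+	/* MS PRO cards use the controller's auto transfer modes; legacy cards use the normal modes */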
+ if (data_dir == READ) {
+ dma_dir = DMA_DIR_FROM_CARD;
+ trans_mode = pro_card ? MS_TM_AUTO_READ : MS_TM_NORMAL_READ;
+ } else {
+ dma_dir = DMA_DIR_TO_CARD;
+ trans_mode = pro_card ? MS_TM_AUTO_WRITE : MS_TM_NORMAL_WRITE;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ if (pro_card) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
+ 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
+ 0xFF, (u8)sec_cnt);
+ }
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ DMA_DONE_INT, DMA_DONE_INT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(length >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(length >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(length >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)length);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_pci_send_cmd_no_wait(pcr);
+
+ err = rtsx_pci_transfer_data(pcr, sg, 1, data_dir == READ, 10000);
+ if (err < 0) {
+ ms_clear_error(host);
+ return err;
+ }
+
+ rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
+ if (pro_card) {
+ if (val & (MS_INT_CMDNK | MS_INT_ERR |
+ MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+ } else {
+ if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ms_write_bytes(struct realtek_pci_ms *host, u8 tpc,
+ u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err, i;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
+
+ if (!data)
+ return -EINVAL;
+
+ rtsx_pci_init_cmd(pcr);
+
+ for (i = 0; i < cnt; i++)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, data[i]);
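+	/* Pad an odd byte count with a dummy 0xFF byte */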
+ if (cnt % 2)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, 0xFF);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+ if (int_reg)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 5000);
+ if (err < 0) {
+ u8 val;
+
+ rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
+ dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
+
+ if (int_reg)
+ *int_reg = val & 0x0F;
+
+ ms_print_debug_regs(host);
+
+ ms_clear_error(host);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR)
+ return -EIO;
+ } else {
+ if (!(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return -EIO;
+ }
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ if (int_reg) {
+ u8 *ptr = rtsx_pci_get_cmd_data(pcr) + 1;
+ *int_reg = *ptr & 0x0F;
+ }
+
+ return 0;
+}
+
+static int ms_read_bytes(struct realtek_pci_ms *host, u8 tpc,
+ u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err, i;
+ u8 *ptr;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
+
+ if (!data)
+ return -EINVAL;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+ for (i = 0; i < cnt - 1; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+ if (cnt % 2)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0);
+ else
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD,
+ PPBUF_BASE2 + cnt - 1, 0, 0);
+ if (int_reg)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 5000);
+ if (err < 0) {
+ u8 val;
+
+ rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
+ dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
+
+ if (int_reg)
+ *int_reg = val & 0x0F;
+
+ ms_print_debug_regs(host);
+
+ ms_clear_error(host);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR)
+ return -EIO;
+ } else {
+ if (!(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return -EIO;
+ }
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ ptr = rtsx_pci_get_cmd_data(pcr) + 1;
+ for (i = 0; i < cnt; i++)
+ data[i] = *ptr++;
+
+ if (int_reg)
+ *int_reg = *ptr & 0x0F;
+
+ return 0;
+}
+
+static int rtsx_pci_ms_issue_cmd(struct realtek_pci_ms *host)
+{
+ struct memstick_request *req = host->req;
+ int err = 0;
+ u8 cfg = 0, int_reg;
+
+ dev_dbg(ms_dev(host), "%s\n", __func__);
+
+ if (req->need_card_int) {
+ if (host->ifmode != MEMSTICK_SERIAL)
+ cfg = WAIT_INT;
+ }
+
+ if (req->long_data) {
+ err = ms_transfer_data(host, req->data_dir,
+ req->tpc, cfg, &(req->sg));
+ } else {
+ if (req->data_dir == READ) {
+ err = ms_read_bytes(host, req->tpc, cfg,
+ req->data_len, req->data, &int_reg);
+ } else {
+ err = ms_write_bytes(host, req->tpc, cfg,
+ req->data_len, req->data, &int_reg);
+ }
+ }
+ if (err < 0)
+ return err;
+
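+	/* In serial mode the INT bits must be fetched with a separate GET_INT TPC */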
+ if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) {
+ err = ms_read_bytes(host, MS_TPC_GET_INT,
+ NO_WAIT_INT, 1, &int_reg, NULL);
+ if (err < 0)
+ return err;
+ }
+
+ if (req->need_card_int) {
+ dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", int_reg);
+
+ if (int_reg & MS_INT_CMDNK)
+ req->int_reg |= MEMSTICK_INT_CMDNAK;
+ if (int_reg & MS_INT_BREQ)
+ req->int_reg |= MEMSTICK_INT_BREQ;
+ if (int_reg & MS_INT_ERR)
+ req->int_reg |= MEMSTICK_INT_ERR;
+ if (int_reg & MS_INT_CED)
+ req->int_reg |= MEMSTICK_INT_CED;
+ }
+
+ return 0;
+}
+
+static void rtsx_pci_ms_handle_req(struct work_struct *work)
+{
+ struct realtek_pci_ms *host = container_of(work,
+ struct realtek_pci_ms, handle_req);
+ struct rtsx_pcr *pcr = host->pcr;
+ struct memstick_host *msh = host->msh;
+ int rc;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ rtsx_pci_switch_clock(host->pcr, host->clock, host->ssc_depth,
+ false, true, false);
+ rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, MS_MOD_SEL);
+ rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_MS);
+
+ if (!host->req) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ dev_dbg(ms_dev(host), "next req %d\n", rc);
+
+ if (!rc)
+ host->req->error = rtsx_pci_ms_issue_cmd(host);
+ } while (!rc);
+ }
+
+ mutex_unlock(&pcr->pcr_mutex);
+}
+
+static void rtsx_pci_ms_request(struct memstick_host *msh)
+{
+ struct realtek_pci_ms *host = memstick_priv(msh);
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD))
+ return;
+
+ schedule_work(&host->handle_req);
+}
+
+static int rtsx_pci_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param, int value)
+{
+ struct realtek_pci_ms *host = memstick_priv(msh);
+ struct rtsx_pcr *pcr = host->pcr;
+ unsigned int clock = 0;
+ u8 ssc_depth = 0;
+ int err;
+
+ dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
+ __func__, param, value);
+
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD);
+ if (err)
+ return err;
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ if (value == MEMSTICK_POWER_ON)
+ err = ms_power_on(host);
+ else if (value == MEMSTICK_POWER_OFF)
+ err = ms_power_off(host);
+ else
+ return -EINVAL;
+ break;
+
+ case MEMSTICK_INTERFACE:
+ if (value == MEMSTICK_SERIAL) {
+ clock = 19000000;
+ ssc_depth = RTSX_SSC_DEPTH_500K;
+
+ err = rtsx_pci_write_register(pcr, MS_CFG, 0x58,
+ MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT);
+ if (err < 0)
+ return err;
+ } else if (value == MEMSTICK_PAR4) {
+ clock = 39000000;
+ ssc_depth = RTSX_SSC_DEPTH_1M;
+
+ err = rtsx_pci_write_register(pcr, MS_CFG,
+ 0x58, MS_BUS_WIDTH_4 | PUSH_TIME_ODD);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ err = rtsx_pci_switch_clock(pcr, clock,
+ ssc_depth, false, true, false);
+ if (err < 0)
+ return err;
+
+ host->ssc_depth = ssc_depth;
+ host->clock = clock;
+ host->ifmode = value;
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int rtsx_pci_ms_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+ struct memstick_host *msh = host->msh;
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ memstick_suspend_host(msh);
+ return 0;
+}
+
+static int rtsx_pci_ms_resume(struct platform_device *pdev)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+ struct memstick_host *msh = host->msh;
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ memstick_resume_host(msh);
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define rtsx_pci_ms_suspend NULL
+#define rtsx_pci_ms_resume NULL
+
+#endif /* CONFIG_PM */
+
+static void rtsx_pci_ms_card_event(struct platform_device *pdev)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+
+ memstick_detect_change(host->msh);
+}
+
+static int rtsx_pci_ms_drv_probe(struct platform_device *pdev)
+{
+ struct memstick_host *msh;
+ struct realtek_pci_ms *host;
+ struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pdev->dev.platform_data;
+ int rc;
+
+ if (!handle)
+ return -ENXIO;
+
+ pcr = handle->pcr;
+ if (!pcr)
+ return -ENXIO;
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek PCI-E Memstick controller found\n");
+
+ msh = memstick_alloc_host(sizeof(*host), &pdev->dev);
+ if (!msh)
+ return -ENOMEM;
+
+ host = memstick_priv(msh);
+ host->pcr = pcr;
+ host->msh = msh;
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+ pcr->slots[RTSX_MS_CARD].p_dev = pdev;
+ pcr->slots[RTSX_MS_CARD].card_event = rtsx_pci_ms_card_event;
+
+ mutex_init(&host->host_mutex);
+
+ INIT_WORK(&host->handle_req, rtsx_pci_ms_handle_req);
+ msh->request = rtsx_pci_ms_request;
+ msh->set_param = rtsx_pci_ms_set_param;
+ msh->caps = MEMSTICK_CAP_PAR4;
+
+ rc = memstick_add_host(msh);
+ if (rc) {
+ memstick_free_host(msh);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+ struct rtsx_pcr *pcr;
+ struct memstick_host *msh;
+ int rc;
+
+ if (!host)
+ return 0;
+
+ pcr = host->pcr;
+ pcr->slots[RTSX_MS_CARD].p_dev = NULL;
+ pcr->slots[RTSX_MS_CARD].card_event = NULL;
+ msh = host->msh;
+ host->eject = true;
+ cancel_work_sync(&host->handle_req);
+
+ mutex_lock(&host->host_mutex);
+ if (host->req) {
+ dev_dbg(&(pdev->dev),
+ "%s: Controller removed during transfer\n",
+ dev_name(&msh->dev));
+
+ rtsx_pci_complete_unfinished_transfer(pcr);
+
+ host->req->error = -ENOMEDIUM;
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ if (!rc)
+ host->req->error = -ENOMEDIUM;
+ } while (!rc);
+ }
+ mutex_unlock(&host->host_mutex);
+
+ memstick_remove_host(msh);
+ memstick_free_host(msh);
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek PCI-E Memstick controller has been removed\n");
+
+ return 0;
+}
+
+static struct platform_device_id rtsx_pci_ms_ids[] = {
+ {
+ .name = DRV_NAME_RTSX_PCI_MS,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rtsx_pci_ms_ids);
+
+static struct platform_driver rtsx_pci_ms_driver = {
+ .probe = rtsx_pci_ms_drv_probe,
+ .remove = rtsx_pci_ms_drv_remove,
+ .id_table = rtsx_pci_ms_ids,
+ .suspend = rtsx_pci_ms_suspend,
+ .resume = rtsx_pci_ms_resume,
+ .driver = {
+ .name = DRV_NAME_RTSX_PCI_MS,
+ },
+};
+module_platform_driver(rtsx_pci_ms_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
+MODULE_DESCRIPTION("Realtek PCI-E Memstick Card Host Driver");
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
new file mode 100644
index 000000000..29271ad47
--- /dev/null
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Realtek USB Memstick Card Interface driver
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Roger Tseng <rogerable@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/memstick.h>
+#include <linux/kthread.h>
+#include <linux/rtsx_usb.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <asm/unaligned.h>
+
+struct rtsx_usb_ms {
+ struct platform_device *pdev;
+ struct rtsx_ucr *ucr;
+ struct memstick_host *msh;
+ struct memstick_request *req;
+
+ struct mutex host_mutex;
+ struct work_struct handle_req;
+ struct delayed_work poll_card;
+
+ u8 ssc_depth;
+ unsigned int clock;
+ int power_mode;
+ unsigned char ifmode;
+ bool eject;
+ bool system_suspending;
+};
+
+static inline struct device *ms_dev(struct rtsx_usb_ms *host)
+{
+ return &(host->pdev->dev);
+}
+
+static inline void ms_clear_error(struct rtsx_usb_ms *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ rtsx_usb_ep0_write_register(ucr, CARD_STOP,
+ MS_STOP | MS_CLR_ERR,
+ MS_STOP | MS_CLR_ERR);
+
+ rtsx_usb_clear_dma_err(ucr);
+ rtsx_usb_clear_fsm_err(ucr);
+}
+
+#ifdef DEBUG
+
+static void ms_print_debug_regs(struct rtsx_usb_ms *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ u16 i;
+ u8 *ptr;
+
+ /* Print MS host internal registers */
+ rtsx_usb_init_cmd(ucr);
+
+ /* MS_CFG to MS_INT_REG */
+ for (i = 0xFD40; i <= 0xFD44; i++)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, i, 0, 0);
+
+ /* CARD_SHARE_MODE to CARD_GPIO */
+ for (i = 0xFD51; i <= 0xFD56; i++)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, i, 0, 0);
+
+ /* CARD_PULL_CTLx */
+ for (i = 0xFD60; i <= 0xFD65; i++)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, i, 0, 0);
+
+ /* CARD_DATA_SOURCE, CARD_SELECT, CARD_CLK_EN, CARD_PWR_CTL */
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_DATA_SOURCE, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_SELECT, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_CLK_EN, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_PWR_CTL, 0, 0);
+
+ rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+ rtsx_usb_get_rsp(ucr, 21, 100);
+
+ ptr = ucr->rsp_buf;
+ for (i = 0xFD40; i <= 0xFD44; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+ for (i = 0xFD51; i <= 0xFD56; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+ for (i = 0xFD60; i <= 0xFD65; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_DATA_SOURCE, *(ptr++));
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_SELECT, *(ptr++));
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_CLK_EN, *(ptr++));
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", CARD_PWR_CTL, *(ptr++));
+}
+
+#else
+
+static void ms_print_debug_regs(struct rtsx_usb_ms *host)
+{
+}
+
+#endif
+
+static int ms_pull_ctl_disable_lqfp48(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int ms_pull_ctl_disable_qfn24(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int ms_pull_ctl_enable_lqfp48(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int ms_pull_ctl_enable_qfn24(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int ms_power_on(struct rtsx_usb_ms *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+
+ dev_dbg(ms_dev(host), "%s\n", __func__);
+
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_MS);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN,
+ MS_CLK_EN, MS_CLK_EN);
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err < 0)
+ return err;
+
+ if (CHECK_PKG(ucr, LQFP48))
+ err = ms_pull_ctl_enable_lqfp48(ucr);
+ else
+ err = ms_pull_ctl_enable_qfn24(ucr);
+ if (err < 0)
+ return err;
+
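+	/* Power up in two stages: partial power first, then full power after a short delay */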
+ err = rtsx_usb_write_register(ucr, CARD_PWR_CTL,
+ POWER_MASK, PARTIAL_POWER_ON);
+ if (err)
+ return err;
+
+ usleep_range(800, 1000);
+
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK, POWER_ON);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE,
+ MS_OUTPUT_EN, MS_OUTPUT_EN);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int ms_power_off(struct rtsx_usb_ms *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+
+ dev_dbg(ms_dev(host), "%s\n", __func__);
+
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
+
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err < 0)
+ return err;
+
+ if (CHECK_PKG(ucr, LQFP48))
+ return ms_pull_ctl_disable_lqfp48(ucr);
+
+ return ms_pull_ctl_disable_qfn24(ucr);
+}
+
+static int ms_transfer_data(struct rtsx_usb_ms *host, unsigned char data_dir,
+ u8 tpc, u8 cfg, struct scatterlist *sg)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ unsigned int length = sg->length;
+ u16 sec_cnt = (u16)(length / 512);
+ u8 trans_mode, dma_dir, flag;
+ unsigned int pipe;
+ struct memstick_dev *card = host->msh->card;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n",
+ __func__, tpc, (data_dir == READ) ? "READ" : "WRITE",
+ length);
+
+ if (data_dir == READ) {
+ flag = MODE_CDIR;
+ dma_dir = DMA_DIR_FROM_CARD;
+ if (card->id.type != MEMSTICK_TYPE_PRO)
+ trans_mode = MS_TM_NORMAL_READ;
+ else
+ trans_mode = MS_TM_AUTO_READ;
+ pipe = usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN);
+ } else {
+ flag = MODE_CDOR;
+ dma_dir = DMA_DIR_TO_CARD;
+ if (card->id.type != MEMSTICK_TYPE_PRO)
+ trans_mode = MS_TM_NORMAL_WRITE;
+ else
+ trans_mode = MS_TM_AUTO_WRITE;
+ pipe = usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT);
+ }
+
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ if (card->id.type == MEMSTICK_TYPE_PRO) {
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
+ 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
+ 0xFF, (u8)sec_cnt);
+ }
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC3,
+ 0xFF, (u8)(length >> 24));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC2,
+ 0xFF, (u8)(length >> 16));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC1,
+ 0xFF, (u8)(length >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC0, 0xFF,
+ (u8)length);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL,
+ 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ err = rtsx_usb_send_cmd(ucr, flag | STAGE_MS_STATUS, 100);
+ if (err)
+ return err;
+
+ err = rtsx_usb_transfer_data(ucr, pipe, sg, length,
+ 1, NULL, 10000);
+ if (err)
+ goto err_out;
+
+ err = rtsx_usb_get_rsp(ucr, 3, 15000);
+ if (err)
+ goto err_out;
+
+ if (ucr->rsp_buf[0] & MS_TRANSFER_ERR ||
+ ucr->rsp_buf[1] & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
+ err = -EIO;
+ goto err_out;
+ }
+ return 0;
+err_out:
+ ms_clear_error(host);
+ return err;
+}
+
+static int ms_write_bytes(struct rtsx_usb_ms *host, u8 tpc,
+ u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err, i;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
+
+ rtsx_usb_init_cmd(ucr);
+
+ for (i = 0; i < cnt; i++)
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, data[i]);
+
+ if (cnt % 2)
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, 0xFF);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ err = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+ if (err)
+ return err;
+
+ err = rtsx_usb_get_rsp(ucr, 2, 5000);
+ if (err || (ucr->rsp_buf[0] & MS_TRANSFER_ERR)) {
+ u8 val;
+
+ rtsx_usb_ep0_read_register(ucr, MS_TRANS_CFG, &val);
+ dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
+
+ if (int_reg)
+ *int_reg = val & 0x0F;
+
+ ms_print_debug_regs(host);
+
+ ms_clear_error(host);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR)
+ return -EIO;
+ } else {
+ if (!(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return -EIO;
+ }
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ if (int_reg)
+ *int_reg = ucr->rsp_buf[1] & 0x0F;
+
+ return 0;
+}
+
+static int ms_read_bytes(struct rtsx_usb_ms *host, u8 tpc,
+ u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err, i;
+ u8 *ptr;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
+
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+ for (i = 0; i < cnt - 1; i++)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+ if (cnt % 2)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0);
+ else
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD,
+ PPBUF_BASE2 + cnt - 1, 0, 0);
+
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ err = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+ if (err)
+ return err;
+
+ err = rtsx_usb_get_rsp(ucr, cnt + 2, 5000);
+ if (err || (ucr->rsp_buf[0] & MS_TRANSFER_ERR)) {
+ u8 val;
+
+ rtsx_usb_ep0_read_register(ucr, MS_TRANS_CFG, &val);
+ dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
+
+ if (int_reg && (host->ifmode != MEMSTICK_SERIAL))
+ *int_reg = val & 0x0F;
+
+ ms_print_debug_regs(host);
+
+ ms_clear_error(host);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR)
+ return -EIO;
+ } else {
+ if (!(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return -EIO;
+ }
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ ptr = ucr->rsp_buf + 1;
+ for (i = 0; i < cnt; i++)
+ data[i] = *ptr++;
+
+
+ if (int_reg && (host->ifmode != MEMSTICK_SERIAL))
+ *int_reg = *ptr & 0x0F;
+
+ return 0;
+}
+
+static int rtsx_usb_ms_issue_cmd(struct rtsx_usb_ms *host)
+{
+ struct memstick_request *req = host->req;
+ int err = 0;
+ u8 cfg = 0, int_reg;
+
+ dev_dbg(ms_dev(host), "%s\n", __func__);
+
+ if (req->need_card_int) {
+ if (host->ifmode != MEMSTICK_SERIAL)
+ cfg = WAIT_INT;
+ }
+
+ if (req->long_data) {
+ err = ms_transfer_data(host, req->data_dir,
+ req->tpc, cfg, &(req->sg));
+ } else {
+ if (req->data_dir == READ)
+ err = ms_read_bytes(host, req->tpc, cfg,
+ req->data_len, req->data, &int_reg);
+ else
+ err = ms_write_bytes(host, req->tpc, cfg,
+ req->data_len, req->data, &int_reg);
+ }
+ if (err < 0)
+ return err;
+
+ if (req->need_card_int) {
+ if (host->ifmode == MEMSTICK_SERIAL) {
+ err = ms_read_bytes(host, MS_TPC_GET_INT,
+ NO_WAIT_INT, 1, &req->int_reg, NULL);
+ if (err < 0)
+ return err;
+ } else {
+
+ if (int_reg & MS_INT_CMDNK)
+ req->int_reg |= MEMSTICK_INT_CMDNAK;
+ if (int_reg & MS_INT_BREQ)
+ req->int_reg |= MEMSTICK_INT_BREQ;
+ if (int_reg & MS_INT_ERR)
+ req->int_reg |= MEMSTICK_INT_ERR;
+ if (int_reg & MS_INT_CED)
+ req->int_reg |= MEMSTICK_INT_CED;
+ }
+ dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", req->int_reg);
+ }
+
+ return 0;
+}
+
+static void rtsx_usb_ms_handle_req(struct work_struct *work)
+{
+ struct rtsx_usb_ms *host = container_of(work,
+ struct rtsx_usb_ms, handle_req);
+ struct rtsx_ucr *ucr = host->ucr;
+ struct memstick_host *msh = host->msh;
+ int rc;
+
+ if (!host->req) {
+ pm_runtime_get_sync(ms_dev(host));
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ dev_dbg(ms_dev(host), "next req %d\n", rc);
+
+ if (!rc) {
+ mutex_lock(&ucr->dev_mutex);
+
+ if (rtsx_usb_card_exclusive_check(ucr,
+ RTSX_USB_MS_CARD))
+ host->req->error = -EIO;
+ else
+ host->req->error =
+ rtsx_usb_ms_issue_cmd(host);
+
+ mutex_unlock(&ucr->dev_mutex);
+
+ dev_dbg(ms_dev(host), "req result %d\n",
+ host->req->error);
+ }
+ } while (!rc);
+ pm_runtime_put_sync(ms_dev(host));
+ }
+
+}
+
+static void rtsx_usb_ms_request(struct memstick_host *msh)
+{
+ struct rtsx_usb_ms *host = memstick_priv(msh);
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ if (!host->eject)
+ schedule_work(&host->handle_req);
+}
+
+static int rtsx_usb_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param, int value)
+{
+ struct rtsx_usb_ms *host = memstick_priv(msh);
+ struct rtsx_ucr *ucr = host->ucr;
+ unsigned int clock = 0;
+ u8 ssc_depth = 0;
+ int err;
+
+ dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
+ __func__, param, value);
+
+ pm_runtime_get_sync(ms_dev(host));
+ mutex_lock(&ucr->dev_mutex);
+
+ err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
+ if (err)
+ goto out;
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ if (value == host->power_mode)
+ break;
+
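+		/* Keep a runtime PM reference while card power is on; drop it
+		 * again when power is switched off or powering on fails */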
+ if (value == MEMSTICK_POWER_ON) {
+ pm_runtime_get_noresume(ms_dev(host));
+ err = ms_power_on(host);
+ if (err)
+ pm_runtime_put_noidle(ms_dev(host));
+ } else if (value == MEMSTICK_POWER_OFF) {
+ err = ms_power_off(host);
+ if (!err)
+ pm_runtime_put_noidle(ms_dev(host));
+ } else
+ err = -EINVAL;
+ if (!err)
+ host->power_mode = value;
+ break;
+
+ case MEMSTICK_INTERFACE:
+ if (value == MEMSTICK_SERIAL) {
+ clock = 19000000;
+ ssc_depth = SSC_DEPTH_512K;
+ err = rtsx_usb_write_register(ucr, MS_CFG, 0x5A,
+ MS_BUS_WIDTH_1 | PUSH_TIME_DEFAULT);
+ if (err < 0)
+ break;
+ } else if (value == MEMSTICK_PAR4) {
+ clock = 39000000;
+ ssc_depth = SSC_DEPTH_1M;
+
+ err = rtsx_usb_write_register(ucr, MS_CFG, 0x5A,
+ MS_BUS_WIDTH_4 | PUSH_TIME_ODD |
+ MS_NO_CHECK_INT);
+ if (err < 0)
+ break;
+ } else {
+ err = -EINVAL;
+ break;
+ }
+
+ err = rtsx_usb_switch_clock(ucr, clock,
+ ssc_depth, false, true, false);
+ if (err < 0) {
+ dev_dbg(ms_dev(host), "switch clock failed\n");
+ break;
+ }
+
+ host->ssc_depth = ssc_depth;
+ host->clock = clock;
+ host->ifmode = value;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+out:
+ mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put_sync(ms_dev(host));
+
+ /* power-on delay */
+ if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) {
+ usleep_range(10000, 12000);
+
+ if (!host->eject)
+ schedule_delayed_work(&host->poll_card, 100);
+ }
+
+ dev_dbg(ms_dev(host), "%s: return = %d\n", __func__, err);
+ return err;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rtsx_usb_ms_suspend(struct device *dev)
+{
+ struct rtsx_usb_ms *host = dev_get_drvdata(dev);
+ struct memstick_host *msh = host->msh;
+
+	/* Since we use rtsx_usb's resume callback to runtime resume its
+	 * children to implement remote wakeup signaling, this causes
+	 * rtsx_usb_ms' runtime resume callback to run after its suspend
+	 * callback:
+ * rtsx_usb_ms_suspend()
+ * rtsx_usb_resume()
+ * -> rtsx_usb_ms_runtime_resume()
+ * -> memstick_detect_change()
+ *
+ * rtsx_usb_suspend()
+ *
+ * To avoid this, skip runtime resume/suspend if system suspend is
+ * underway.
+ */
+
+ host->system_suspending = true;
+ memstick_suspend_host(msh);
+
+ return 0;
+}
+
+static int rtsx_usb_ms_resume(struct device *dev)
+{
+ struct rtsx_usb_ms *host = dev_get_drvdata(dev);
+ struct memstick_host *msh = host->msh;
+
+ memstick_resume_host(msh);
+ host->system_suspending = false;
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int rtsx_usb_ms_runtime_suspend(struct device *dev)
+{
+ struct rtsx_usb_ms *host = dev_get_drvdata(dev);
+
+ if (host->system_suspending)
+ return 0;
+
+ if (host->msh->card || host->power_mode != MEMSTICK_POWER_OFF)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rtsx_usb_ms_runtime_resume(struct device *dev)
+{
+ struct rtsx_usb_ms *host = dev_get_drvdata(dev);
+
+
+ if (host->system_suspending)
+ return 0;
+
+ memstick_detect_change(host->msh);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops rtsx_usb_ms_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rtsx_usb_ms_suspend, rtsx_usb_ms_resume)
+ SET_RUNTIME_PM_OPS(rtsx_usb_ms_runtime_suspend, rtsx_usb_ms_runtime_resume, NULL)
+};
+
+
+static void rtsx_usb_ms_poll_card(struct work_struct *work)
+{
+ struct rtsx_usb_ms *host = container_of(work, struct rtsx_usb_ms,
+ poll_card.work);
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ u8 val;
+
+ if (host->eject || host->power_mode != MEMSTICK_POWER_ON)
+ return;
+
+ pm_runtime_get_sync(ms_dev(host));
+ mutex_lock(&ucr->dev_mutex);
+
+ /* Check pending MS card changes */
+ err = rtsx_usb_read_register(ucr, CARD_INT_PEND, &val);
+ if (err) {
+ mutex_unlock(&ucr->dev_mutex);
+ goto poll_again;
+ }
+
+ /* Clear the pending */
+ rtsx_usb_write_register(ucr, CARD_INT_PEND,
+ XD_INT | MS_INT | SD_INT,
+ XD_INT | MS_INT | SD_INT);
+
+ mutex_unlock(&ucr->dev_mutex);
+
+ if (val & MS_INT) {
+ dev_dbg(ms_dev(host), "MS slot change detected\n");
+ memstick_detect_change(host->msh);
+ }
+
+poll_again:
+ pm_runtime_put_sync(ms_dev(host));
+
+ if (!host->eject && host->power_mode == MEMSTICK_POWER_ON)
+ schedule_delayed_work(&host->poll_card, 100);
+}
+
+static int rtsx_usb_ms_drv_probe(struct platform_device *pdev)
+{
+ struct memstick_host *msh;
+ struct rtsx_usb_ms *host;
+ struct rtsx_ucr *ucr;
+ int err;
+
+ ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
+ if (!ucr)
+ return -ENXIO;
+
+ dev_dbg(&(pdev->dev),
+ "Realtek USB Memstick controller found\n");
+
+ msh = memstick_alloc_host(sizeof(*host), &pdev->dev);
+ if (!msh)
+ return -ENOMEM;
+
+ host = memstick_priv(msh);
+ host->ucr = ucr;
+ host->msh = msh;
+ host->pdev = pdev;
+ host->power_mode = MEMSTICK_POWER_OFF;
+ platform_set_drvdata(pdev, host);
+
+ mutex_init(&host->host_mutex);
+ INIT_WORK(&host->handle_req, rtsx_usb_ms_handle_req);
+
+ INIT_DELAYED_WORK(&host->poll_card, rtsx_usb_ms_poll_card);
+
+ msh->request = rtsx_usb_ms_request;
+ msh->set_param = rtsx_usb_ms_set_param;
+ msh->caps = MEMSTICK_CAP_PAR4;
+
+ pm_runtime_get_noresume(ms_dev(host));
+ pm_runtime_set_active(ms_dev(host));
+ pm_runtime_enable(ms_dev(host));
+
+ err = memstick_add_host(msh);
+ if (err)
+ goto err_out;
+
+ pm_runtime_put(ms_dev(host));
+
+ return 0;
+err_out:
+ pm_runtime_disable(ms_dev(host));
+ pm_runtime_put_noidle(ms_dev(host));
+ memstick_free_host(msh);
+ return err;
+}
+
+static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
+{
+ struct rtsx_usb_ms *host = platform_get_drvdata(pdev);
+ struct memstick_host *msh = host->msh;
+ int err;
+
+ host->eject = true;
+ cancel_work_sync(&host->handle_req);
+
+ mutex_lock(&host->host_mutex);
+ if (host->req) {
+ dev_dbg(ms_dev(host),
+ "%s: Controller removed during transfer\n",
+ dev_name(&msh->dev));
+ host->req->error = -ENOMEDIUM;
+ do {
+ err = memstick_next_req(msh, &host->req);
+ if (!err)
+ host->req->error = -ENOMEDIUM;
+ } while (!err);
+ }
+ mutex_unlock(&host->host_mutex);
+
+	/* Balance a possibly unbalanced usage count,
+	 * e.g. on unconditional module removal
+	 */
+ if (pm_runtime_active(ms_dev(host)))
+ pm_runtime_put(ms_dev(host));
+
+ pm_runtime_disable(ms_dev(host));
+ memstick_remove_host(msh);
+ dev_dbg(ms_dev(host),
+ ": Realtek USB Memstick controller has been removed\n");
+ memstick_free_host(msh);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id rtsx_usb_ms_ids[] = {
+ {
+ .name = "rtsx_usb_ms",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rtsx_usb_ms_ids);
+
+static struct platform_driver rtsx_usb_ms_driver = {
+ .probe = rtsx_usb_ms_drv_probe,
+ .remove = rtsx_usb_ms_drv_remove,
+ .id_table = rtsx_usb_ms_ids,
+ .driver = {
+ .name = "rtsx_usb_ms",
+ .pm = &rtsx_usb_ms_pm_ops,
+ },
+};
+module_platform_driver(rtsx_usb_ms_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>");
+MODULE_DESCRIPTION("Realtek USB Memstick Card Host Driver");
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
new file mode 100644
index 000000000..c27245367
--- /dev/null
+++ b/drivers/memstick/host/tifm_ms.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI FlashMedia driver
+ *
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * Special thanks to Carlos Corbacho for providing various MemoryStick cards
+ * that made this driver possible.
+ */
+
+#include <linux/tifm.h>
+#include <linux/memstick.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define DRIVER_NAME "tifm_ms"
+
+static bool no_dma;
+module_param(no_dma, bool, 0644);
+
+/*
+ * Some control bits of TIFM appear to conform to Sony's reference design,
+ * so I'm just assuming they all do.
+ */
+
+#define TIFM_MS_STAT_DRQ 0x04000
+#define TIFM_MS_STAT_MSINT 0x02000
+#define TIFM_MS_STAT_RDY 0x01000
+#define TIFM_MS_STAT_CRC 0x00200
+#define TIFM_MS_STAT_TOE 0x00100
+#define TIFM_MS_STAT_EMP 0x00020
+#define TIFM_MS_STAT_FUL 0x00010
+#define TIFM_MS_STAT_CED 0x00008
+#define TIFM_MS_STAT_ERR 0x00004
+#define TIFM_MS_STAT_BRQ 0x00002
+#define TIFM_MS_STAT_CNK 0x00001
+
+#define TIFM_MS_SYS_DMA 0x10000
+#define TIFM_MS_SYS_RESET 0x08000
+#define TIFM_MS_SYS_SRAC 0x04000
+#define TIFM_MS_SYS_INTEN 0x02000
+#define TIFM_MS_SYS_NOCRC 0x01000
+#define TIFM_MS_SYS_INTCLR 0x00800
+#define TIFM_MS_SYS_MSIEN 0x00400
+#define TIFM_MS_SYS_FCLR 0x00200
+#define TIFM_MS_SYS_FDIR 0x00100
+#define TIFM_MS_SYS_DAM 0x00080
+#define TIFM_MS_SYS_DRM 0x00040
+#define TIFM_MS_SYS_DRQSL 0x00020
+#define TIFM_MS_SYS_REI 0x00010
+#define TIFM_MS_SYS_REO 0x00008
+#define TIFM_MS_SYS_BSY_MASK 0x00007
+
+#define TIFM_MS_SYS_FIFO (TIFM_MS_SYS_INTEN | TIFM_MS_SYS_MSIEN \
+ | TIFM_MS_SYS_FCLR | TIFM_MS_SYS_BSY_MASK)
+
+/* Hardware flags */
+enum {
+ CMD_READY = 0x01,
+ FIFO_READY = 0x02,
+ CARD_INT = 0x04
+};
+
+struct tifm_ms {
+ struct tifm_dev *dev;
+ struct timer_list timer;
+ struct memstick_request *req;
+ struct tasklet_struct notify;
+ unsigned int mode_mask;
+ unsigned int block_pos;
+ unsigned long timeout_jiffies;
+ unsigned char eject:1,
+ use_dma:1;
+ unsigned char cmd_flags;
+ unsigned char io_pos;
+ unsigned int io_word;
+};
+
+static unsigned int tifm_ms_read_data(struct tifm_ms *host,
+ unsigned char *buf, unsigned int length)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int off = 0;
+
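+	/* First drain any bytes left in io_word from the previous 32-bit FIFO read */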
+ while (host->io_pos && length) {
+ buf[off++] = host->io_word & 0xff;
+ host->io_word >>= 8;
+ length--;
+ host->io_pos--;
+ }
+
+ if (!length)
+ return off;
+
+ while (!(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) {
+ if (length < 4)
+ break;
+ *(unsigned int *)(buf + off) = __raw_readl(sock->addr
+ + SOCK_MS_DATA);
+ length -= 4;
+ off += 4;
+ }
+
+ if (length
+ && !(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) {
+ host->io_word = readl(sock->addr + SOCK_MS_DATA);
+ for (host->io_pos = 4; host->io_pos; --host->io_pos) {
+ buf[off++] = host->io_word & 0xff;
+ host->io_word >>= 8;
+ length--;
+ if (!length)
+ break;
+ }
+ }
+
+ return off;
+}
+
+static unsigned int tifm_ms_write_data(struct tifm_ms *host,
+ unsigned char *buf, unsigned int length)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int off = 0;
+
+ if (host->io_pos) {
+ while (host->io_pos < 4 && length) {
+ host->io_word |= buf[off++] << (host->io_pos * 8);
+ host->io_pos++;
+ length--;
+ }
+ }
+
+ if (host->io_pos == 4
+ && !(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) {
+ writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM),
+ sock->addr + SOCK_MS_SYSTEM);
+ writel(host->io_word, sock->addr + SOCK_MS_DATA);
+ host->io_pos = 0;
+ host->io_word = 0;
+ } else if (host->io_pos) {
+ return off;
+ }
+
+ if (!length)
+ return off;
+
+ while (!(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) {
+ if (length < 4)
+ break;
+ writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM),
+ sock->addr + SOCK_MS_SYSTEM);
+ __raw_writel(*(unsigned int *)(buf + off),
+ sock->addr + SOCK_MS_DATA);
+ length -= 4;
+ off += 4;
+ }
+
+ switch (length) {
+ case 3:
+ host->io_word |= buf[off + 2] << 16;
+ host->io_pos++;
+ fallthrough;
+ case 2:
+ host->io_word |= buf[off + 1] << 8;
+ host->io_pos++;
+ fallthrough;
+ case 1:
+ host->io_word |= buf[off];
+ host->io_pos++;
+ }
+
+ off += host->io_pos;
+
+ return off;
+}
+
+static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int length;
+ unsigned int off;
+ unsigned int t_size, p_cnt;
+ unsigned char *buf;
+ struct page *pg;
+ unsigned long flags = 0;
+
+ if (host->req->long_data) {
+ length = host->req->sg.length - host->block_pos;
+ off = host->req->sg.offset + host->block_pos;
+ } else {
+ length = host->req->data_len - host->block_pos;
+ off = 0;
+ }
+ dev_dbg(&sock->dev, "fifo data transfer, %d, %d\n", length,
+ host->block_pos);
+
+ while (length) {
+ unsigned int p_off;
+
+ if (host->req->long_data) {
+ pg = nth_page(sg_page(&host->req->sg),
+ off >> PAGE_SHIFT);
+ p_off = offset_in_page(off);
+ p_cnt = PAGE_SIZE - p_off;
+ p_cnt = min(p_cnt, length);
+
+ local_irq_save(flags);
+ buf = kmap_atomic(pg) + p_off;
+ } else {
+ buf = host->req->data + host->block_pos;
+ p_cnt = host->req->data_len - host->block_pos;
+ }
+
+ t_size = host->req->data_dir == WRITE
+ ? tifm_ms_write_data(host, buf, p_cnt)
+ : tifm_ms_read_data(host, buf, p_cnt);
+
+ if (host->req->long_data) {
+ kunmap_atomic(buf - p_off);
+ local_irq_restore(flags);
+ }
+
+ if (!t_size)
+ break;
+ host->block_pos += t_size;
+ length -= t_size;
+ off += t_size;
+ }
+
+ dev_dbg(&sock->dev, "fifo data transfer, %d remaining\n", length);
+ if (!length && (host->req->data_dir == WRITE)) {
+ if (host->io_pos) {
+ writel(TIFM_MS_SYS_FDIR
+ | readl(sock->addr + SOCK_MS_SYSTEM),
+ sock->addr + SOCK_MS_SYSTEM);
+ writel(host->io_word, sock->addr + SOCK_MS_DATA);
+ }
+ writel(TIFM_MS_SYS_FDIR
+ | readl(sock->addr + SOCK_MS_SYSTEM),
+ sock->addr + SOCK_MS_SYSTEM);
+ writel(0, sock->addr + SOCK_MS_DATA);
+ } else {
+ readl(sock->addr + SOCK_MS_DATA);
+ }
+
+ return length;
+}
+
+static int tifm_ms_issue_cmd(struct tifm_ms *host)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int data_len, cmd, sys_param;
+
+ host->cmd_flags = 0;
+ host->block_pos = 0;
+ host->io_pos = 0;
+ host->io_word = 0;
+ host->cmd_flags = 0;
+
+ host->use_dma = !no_dma;
+
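+	/* DMA is only used for long data transfers whose length is a power of two;
+	   everything else goes through the PIO FIFO */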
+ if (host->req->long_data) {
+ data_len = host->req->sg.length;
+ if (!is_power_of_2(data_len))
+ host->use_dma = 0;
+ } else {
+ data_len = host->req->data_len;
+ host->use_dma = 0;
+ }
+
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(TIFM_FIFO_ENABLE,
+ sock->addr + SOCK_FIFO_CONTROL);
+
+ if (host->use_dma) {
+ if (1 != tifm_map_sg(sock, &host->req->sg, 1,
+ host->req->data_dir == READ
+ ? DMA_FROM_DEVICE
+ : DMA_TO_DEVICE)) {
+ host->req->error = -ENOMEM;
+ return host->req->error;
+ }
+ data_len = sg_dma_len(&host->req->sg);
+
+ writel(ilog2(data_len) - 2,
+ sock->addr + SOCK_FIFO_PAGE_SIZE);
+ writel(TIFM_FIFO_INTMASK,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
+ sys_param = TIFM_DMA_EN | (1 << 8);
+ if (host->req->data_dir == WRITE)
+ sys_param |= TIFM_DMA_TX;
+
+ writel(sg_dma_address(&host->req->sg),
+ sock->addr + SOCK_DMA_ADDRESS);
+ writel(sys_param, sock->addr + SOCK_DMA_CONTROL);
+ } else {
+ writel(host->mode_mask | TIFM_MS_SYS_FIFO,
+ sock->addr + SOCK_MS_SYSTEM);
+
+ writel(TIFM_FIFO_MORE,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
+ }
+
+ mod_timer(&host->timer, jiffies + host->timeout_jiffies);
+ writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+ host->req->error = 0;
+
+ sys_param = readl(sock->addr + SOCK_MS_SYSTEM);
+ sys_param |= TIFM_MS_SYS_INTCLR;
+
+ if (host->use_dma)
+ sys_param |= TIFM_MS_SYS_DMA;
+ else
+ sys_param &= ~TIFM_MS_SYS_DMA;
+
+ writel(sys_param, sock->addr + SOCK_MS_SYSTEM);
+
+ cmd = (host->req->tpc & 0xf) << 12;
+ cmd |= data_len;
+ writel(cmd, sock->addr + SOCK_MS_COMMAND);
+
+ dev_dbg(&sock->dev, "executing TPC %x, %x\n", cmd, sys_param);
+ return 0;
+}
+
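+/*
+ * Complete the current TPC: capture the card interrupt bits into
+ * req->int_reg, shut down the FIFO/DMA, and hand the request back to
+ * the memstick core, immediately issuing the next one if available.
+ * Called with sock->lock held.
+ */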
+static void tifm_ms_complete_cmd(struct tifm_ms *host)
+{
+ struct tifm_dev *sock = host->dev;
+ struct memstick_host *msh = tifm_get_drvdata(sock);
+ int rc;
+
+ del_timer(&host->timer);
+
+ host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff;
+ host->req->int_reg = (host->req->int_reg & 1)
+ | ((host->req->int_reg << 4) & 0xe0);
+
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
+
+ if (host->use_dma) {
+ tifm_unmap_sg(sock, &host->req->sg, 1,
+ host->req->data_dir == READ
+ ? DMA_FROM_DEVICE
+ : DMA_TO_DEVICE);
+ }
+
+ writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+
+ dev_dbg(&sock->dev, "TPC complete\n");
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ } while (!rc && tifm_ms_issue_cmd(host));
+}
+
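+/*
+ * Returns 0 when the request can be completed: either it has already
+ * failed, or the command, FIFO and (if requested) card interrupt flags
+ * are all set.
+ */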
+static int tifm_ms_check_status(struct tifm_ms *host)
+{
+ if (!host->req->error) {
+ if (!(host->cmd_flags & CMD_READY))
+ return 1;
+ if (!(host->cmd_flags & FIFO_READY))
+ return 1;
+ if (host->req->need_card_int
+ && !(host->cmd_flags & CARD_INT))
+ return 1;
+ }
+ return 0;
+}
+
+/* Called from interrupt handler */
+static void tifm_ms_data_event(struct tifm_dev *sock)
+{
+ struct tifm_ms *host;
+ unsigned int fifo_status = 0, host_status = 0;
+ int rc = 1;
+
+ spin_lock(&sock->lock);
+ host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock));
+ fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
+ host_status = readl(sock->addr + SOCK_MS_STATUS);
+ dev_dbg(&sock->dev,
+ "data event: fifo_status %x, host_status %x, flags %x\n",
+ fifo_status, host_status, host->cmd_flags);
+
+ if (host->req) {
+ if (host->use_dma && (fifo_status & 1)) {
+ host->cmd_flags |= FIFO_READY;
+ rc = tifm_ms_check_status(host);
+ }
+ if (!host->use_dma && (fifo_status & TIFM_FIFO_MORE)) {
+ if (!tifm_ms_transfer_data(host)) {
+ host->cmd_flags |= FIFO_READY;
+ rc = tifm_ms_check_status(host);
+ }
+ }
+ }
+
+ writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
+ if (!rc)
+ tifm_ms_complete_cmd(host);
+
+ spin_unlock(&sock->lock);
+}
+
+/* Called from interrupt handler */
+static void tifm_ms_card_event(struct tifm_dev *sock)
+{
+ struct tifm_ms *host;
+ unsigned int host_status = 0;
+ int rc = 1;
+
+ spin_lock(&sock->lock);
+ host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock));
+ host_status = readl(sock->addr + SOCK_MS_STATUS);
+ dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
+ host_status, host->cmd_flags);
+
+ if (host->req) {
+ if (host_status & TIFM_MS_STAT_TOE)
+ host->req->error = -ETIME;
+ else if (host_status & TIFM_MS_STAT_CRC)
+ host->req->error = -EILSEQ;
+
+ if (host_status & TIFM_MS_STAT_RDY)
+ host->cmd_flags |= CMD_READY;
+
+ if (host_status & TIFM_MS_STAT_MSINT)
+ host->cmd_flags |= CARD_INT;
+
+ rc = tifm_ms_check_status(host);
+
+ }
+
+ writel(TIFM_MS_SYS_INTCLR | readl(sock->addr + SOCK_MS_SYSTEM),
+ sock->addr + SOCK_MS_SYSTEM);
+
+ if (!rc)
+ tifm_ms_complete_cmd(host);
+
+ spin_unlock(&sock->lock);
+}
+
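+/*
+ * Request bottom half: pull requests from the memstick core and issue
+ * them. If the socket is being ejected, fail every queued request with
+ * -ETIME instead.
+ */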
+static void tifm_ms_req_tasklet(unsigned long data)
+{
+ struct memstick_host *msh = (struct memstick_host *)data;
+ struct tifm_ms *host = memstick_priv(msh);
+ struct tifm_dev *sock = host->dev;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&sock->lock, flags);
+ if (!host->req) {
+ if (host->eject) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ if (!rc)
+ host->req->error = -ETIME;
+ } while (!rc);
+ spin_unlock_irqrestore(&sock->lock, flags);
+ return;
+ }
+
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ } while (!rc && tifm_ms_issue_cmd(host));
+ }
+ spin_unlock_irqrestore(&sock->lock, flags);
+}
+
+static void tifm_ms_dummy_submit(struct memstick_host *msh)
+{
+ return;
+}
+
+static void tifm_ms_submit_req(struct memstick_host *msh)
+{
+ struct tifm_ms *host = memstick_priv(msh);
+
+ tasklet_schedule(&host->notify);
+}
+
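+/*
+ * set_param callback for the memstick core: handles power on/off and
+ * switching between serial and 4-bit parallel interface modes.
+ */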
+static int tifm_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param,
+ int value)
+{
+ struct tifm_ms *host = memstick_priv(msh);
+ struct tifm_dev *sock = host->dev;
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ /* also affected by media detection mechanism */
+ if (value == MEMSTICK_POWER_ON) {
+ host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI;
+ writel(TIFM_MS_SYS_RESET, sock->addr + SOCK_MS_SYSTEM);
+ writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR,
+ sock->addr + SOCK_MS_SYSTEM);
+ writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
+ } else if (value == MEMSTICK_POWER_OFF) {
+ writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR,
+ sock->addr + SOCK_MS_SYSTEM);
+ writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
+ } else
+ return -EINVAL;
+ break;
+ case MEMSTICK_INTERFACE:
+ if (value == MEMSTICK_SERIAL) {
+ host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI;
+ writel((~TIFM_CTRL_FAST_CLK)
+ & readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+ } else if (value == MEMSTICK_PAR4) {
+ host->mode_mask = 0;
+ writel(TIFM_CTRL_FAST_CLK
+ | readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+ } else
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
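+/*
+ * Abort timer: fires if the card has not completed the current TPC
+ * within timeout_jiffies and forces ejection of the socket.
+ */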
+static void tifm_ms_abort(struct timer_list *t)
+{
+ struct tifm_ms *host = from_timer(host, t, timer);
+
+ dev_dbg(&host->dev->dev, "status %x\n",
+ readl(host->dev->addr + SOCK_MS_STATUS));
+ printk(KERN_ERR
+ "%s : card failed to respond for a long period of time "
+ "(%x, %x)\n",
+ dev_name(&host->dev->dev), host->req ? host->req->tpc : 0,
+ host->cmd_flags);
+
+ tifm_eject(host->dev);
+}
+
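+/*
+ * Probe an MS-capable socket: allocate and register a memstick host and
+ * wire up the request tasklet, abort timer and socket event callbacks.
+ */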
+static int tifm_ms_probe(struct tifm_dev *sock)
+{
+ struct memstick_host *msh;
+ struct tifm_ms *host;
+ int rc = -EIO;
+
+ if (!(TIFM_SOCK_STATE_OCCUPIED
+ & readl(sock->addr + SOCK_PRESENT_STATE))) {
+ printk(KERN_WARNING "%s : card gone, unexpectedly\n",
+ dev_name(&sock->dev));
+ return rc;
+ }
+
+ msh = memstick_alloc_host(sizeof(struct tifm_ms), &sock->dev);
+ if (!msh)
+ return -ENOMEM;
+
+ host = memstick_priv(msh);
+ tifm_set_drvdata(sock, msh);
+ host->dev = sock;
+ host->timeout_jiffies = msecs_to_jiffies(1000);
+
+ timer_setup(&host->timer, tifm_ms_abort, 0);
+ tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh);
+
+ msh->request = tifm_ms_submit_req;
+ msh->set_param = tifm_ms_set_param;
+ sock->card_event = tifm_ms_card_event;
+ sock->data_event = tifm_ms_data_event;
+ if (tifm_has_ms_pif(sock))
+ msh->caps |= MEMSTICK_CAP_PAR4;
+
+ rc = memstick_add_host(msh);
+ if (!rc)
+ return 0;
+
+ memstick_free_host(msh);
+ return rc;
+}
+
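+/*
+ * Socket removal: stop accepting new requests, fail anything still in
+ * flight with -ETIME, then unregister and free the memstick host.
+ */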
+static void tifm_ms_remove(struct tifm_dev *sock)
+{
+ struct memstick_host *msh = tifm_get_drvdata(sock);
+ struct tifm_ms *host = memstick_priv(msh);
+ int rc = 0;
+ unsigned long flags;
+
+ msh->request = tifm_ms_dummy_submit;
+ tasklet_kill(&host->notify);
+ spin_lock_irqsave(&sock->lock, flags);
+ host->eject = 1;
+ if (host->req) {
+ del_timer(&host->timer);
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
+		if (host->use_dma)
+			tifm_unmap_sg(sock, &host->req->sg, 1,
+				      host->req->data_dir == READ
+				      ? DMA_FROM_DEVICE
+				      : DMA_TO_DEVICE);
+ host->req->error = -ETIME;
+
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ if (!rc)
+ host->req->error = -ETIME;
+ } while (!rc);
+ }
+ spin_unlock_irqrestore(&sock->lock, flags);
+
+ memstick_remove_host(msh);
+ memstick_free_host(msh);
+}
+
+#ifdef CONFIG_PM
+
+static int tifm_ms_suspend(struct tifm_dev *sock, pm_message_t state)
+{
+ struct memstick_host *msh = tifm_get_drvdata(sock);
+
+ memstick_suspend_host(msh);
+ return 0;
+}
+
+static int tifm_ms_resume(struct tifm_dev *sock)
+{
+ struct memstick_host *msh = tifm_get_drvdata(sock);
+
+ memstick_resume_host(msh);
+ return 0;
+}
+
+#else
+
+#define tifm_ms_suspend NULL
+#define tifm_ms_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct tifm_device_id tifm_ms_id_tbl[] = {
+ { TIFM_TYPE_MS }, { 0 }
+};
+
+static struct tifm_driver tifm_ms_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = tifm_ms_id_tbl,
+ .probe = tifm_ms_probe,
+ .remove = tifm_ms_remove,
+ .suspend = tifm_ms_suspend,
+ .resume = tifm_ms_resume
+};
+
+static int __init tifm_ms_init(void)
+{
+ return tifm_register_driver(&tifm_ms_driver);
+}
+
+static void __exit tifm_ms_exit(void)
+{
+ tifm_unregister_driver(&tifm_ms_driver);
+}
+
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("TI FlashMedia MemoryStick driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(tifm, tifm_ms_id_tbl);
+
+module_init(tifm_ms_init);
+module_exit(tifm_ms_exit);