summaryrefslogtreecommitdiffstats
path: root/drivers/parport
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/parport')
-rw-r--r--drivers/parport/BUGS-parport9
-rw-r--r--drivers/parport/Kconfig166
-rw-r--r--drivers/parport/Makefile22
-rw-r--r--drivers/parport/TODO-parport20
-rw-r--r--drivers/parport/daisy.c507
-rw-r--r--drivers/parport/ieee1284.c789
-rw-r--r--drivers/parport/ieee1284_ops.c893
-rw-r--r--drivers/parport/multiface.h21
-rw-r--r--drivers/parport/parport_amiga.c246
-rw-r--r--drivers/parport/parport_atari.c225
-rw-r--r--drivers/parport/parport_ax88796.c418
-rw-r--r--drivers/parport/parport_cs.c195
-rw-r--r--drivers/parport/parport_gsc.c429
-rw-r--r--drivers/parport/parport_gsc.h207
-rw-r--r--drivers/parport/parport_ip32.c2238
-rw-r--r--drivers/parport/parport_mfc3.c366
-rw-r--r--drivers/parport/parport_pc.c3340
-rw-r--r--drivers/parport/parport_serial.c783
-rw-r--r--drivers/parport/parport_sunbpp.c382
-rw-r--r--drivers/parport/probe.c279
-rw-r--r--drivers/parport/procfs.c623
-rw-r--r--drivers/parport/share.c1234
22 files changed, 13392 insertions, 0 deletions
diff --git a/drivers/parport/BUGS-parport b/drivers/parport/BUGS-parport
new file mode 100644
index 000000000..622ca3cc7
--- /dev/null
+++ b/drivers/parport/BUGS-parport
@@ -0,0 +1,9 @@
+Currently known (or at least suspected) bugs in parport:
+
+o lp doesn't allow you to read status while printing is in progress (is
+ this still true?).
+
+o parport_pc_ecp_read_block_pio() is broken. parport will revert to the
+ software-driven mode in ieee1284_ops.c
+
+See <URL:http://people.redhat.com/twaugh/parport/>.
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
new file mode 100644
index 000000000..e78a9f030
--- /dev/null
+++ b/drivers/parport/Kconfig
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.rst.
+#
+# Parport configuration.
+#
+
+config ARCH_MIGHT_HAVE_PC_PARPORT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might have PC parallel port hardware.
+
+menuconfig PARPORT
+ tristate "Parallel port support"
+ depends on HAS_IOMEM
+ help
+ If you want to use devices connected to your machine's parallel port
+ (the connector at the computer with 25 holes), e.g. printer, ZIP
+ drive, PLIP link (Parallel Line Internet Protocol is mainly used to
+ create a mini network by connecting the parallel ports of two local
+ machines) etc., then you need to say Y here; please read
+ <file:Documentation/admin-guide/parport.rst> and
+ <file:drivers/parport/BUGS-parport>.
+
+ For extensive information about drivers for many devices attaching
+ to the parallel port see <http://www.torque.net/linux-pp.html> on
+ the WWW.
+
+ It is possible to share a single parallel port among several devices
+ and it is safe to compile all the corresponding drivers into the
+ kernel. To compile parallel port support as a module, choose M here:
+ the module will be called parport.
+ If you have more than one parallel port and want to specify which
+ port and IRQ to be used by this driver at module load time, take a
+ look at <file:Documentation/admin-guide/parport.rst>.
+
+ If unsure, say Y.
+
+if PARPORT
+
+config PARPORT_PC
+ tristate "PC-style hardware"
+ depends on ARCH_MIGHT_HAVE_PC_PARPORT
+ help
+ You should say Y here if you have a PC-style parallel port. All
+ IBM PC compatible computers and some Alphas have PC-style
+ parallel ports. PA-RISC owners should only say Y here if they
+ have a SuperIO parallel port.
+
+ To compile this driver as a module, choose M here: the
+ module will be called parport_pc.
+
+ If unsure, say Y.
+
+config PARPORT_SERIAL
+ tristate "Multi-IO cards (parallel and serial)"
+ depends on SERIAL_8250_PCI && PARPORT_PC && PCI
+ help
+ This adds support for multi-IO PCI cards that have parallel and
+ serial ports. You should say Y or M here. If you say M, the module
+ will be called parport_serial.
+
+config PARPORT_PC_FIFO
+ bool "Use FIFO/DMA if available"
+ depends on PARPORT_PC
+ help
+ Many parallel port chipsets provide hardware that can speed up
+ printing. Say Y here if you want to take advantage of that.
+
+ As well as actually having a FIFO, or DMA capability, the kernel
+ will need to know which IRQ the parallel port has. By default,
+ parallel port interrupts will not be used, and so neither will the
+ FIFO. See <file:Documentation/admin-guide/parport.rst> to find out how to
+ specify which IRQ/DMA to use.
+
+config PARPORT_PC_SUPERIO
+ bool "SuperIO chipset support"
+ depends on PARPORT_PC && !PARISC
+ help
+ Saying Y here enables some probes for Super-IO chipsets in order to
+ find out things like base addresses, IRQ lines and DMA channels. It
+ is safe to say N.
+
+config PARPORT_PC_PCMCIA
+ tristate "Support for PCMCIA management for PC-style ports"
+ depends on PCMCIA && PARPORT_PC
+ help
+ Say Y here if you need PCMCIA support for your PC-style parallel
+ ports. If unsure, say N.
+
+config PARPORT_IP32
+ tristate "SGI IP32 builtin port"
+ depends on SGI_IP32
+ select PARPORT_NOT_PC
+ help
+ Say Y here if you need support for the parallel port on
+ SGI O2 machines. This code is also available as a module (say M),
+ called parport_ip32. If in doubt, saying N is the safe plan.
+
+config PARPORT_AMIGA
+ tristate "Amiga builtin port"
+ depends on AMIGA
+ select PARPORT_NOT_PC
+ help
+ Say Y here if you need support for the parallel port hardware on
+ Amiga machines. This code is also available as a module (say M),
+ called parport_amiga. If in doubt, saying N is the safe plan.
+
+config PARPORT_MFC3
+ tristate "Multiface III parallel port"
+ depends on ZORRO
+ select PARPORT_NOT_PC
+ help
+ Say Y here if you need parallel port support for the MFC3 card.
+ This code is also available as a module (say M), called
+ parport_mfc3. If in doubt, saying N is the safe plan.
+
+config PARPORT_ATARI
+ tristate "Atari hardware"
+ depends on ATARI
+ select PARPORT_NOT_PC
+ help
+ Say Y here if you need support for the parallel port hardware on
+ Atari machines. This code is also available as a module (say M),
+ called parport_atari. If in doubt, saying N is the safe plan.
+
+config PARPORT_GSC
+ tristate
+ default GSC
+ select PARPORT_NOT_PC
+
+config PARPORT_SUNBPP
+ tristate "Sparc hardware"
+ depends on SBUS
+ select PARPORT_NOT_PC
+ help
+	  This driver provides support for the bidirectional parallel port
+	  found on many Sun machines. Note that many of the newer Ultras
+	  actually have PC-style hardware instead.
+
+config PARPORT_AX88796
+ tristate "AX88796 Parallel Port"
+ select PARPORT_NOT_PC
+ help
+ Say Y here if you need support for the parallel port hardware on
+ the AX88796 network controller chip. This code is also available
+ as a module (say M), called parport_ax88796.
+
+ The driver is not dependent on the AX88796 network driver, and
+ should not interfere with the networking functions of the chip.
+
+config PARPORT_1284
+ bool "IEEE 1284 transfer modes"
+ help
+ If you have a printer that supports status readback or device ID, or
+ want to use a device that uses enhanced parallel port transfer modes
+ such as EPP and ECP, say Y here to enable advanced IEEE 1284
+ transfer modes. Also say Y if you want device ID information to
+ appear in /proc/sys/dev/parport/*/autoprobe*. It is safe to say N.
+
+config PARPORT_NOT_PC
+ bool
+
+endif # PARPORT
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
new file mode 100644
index 000000000..022c566c0
--- /dev/null
+++ b/drivers/parport/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel Parallel port device drivers.
+#
+
+parport-objs := share.o ieee1284.o ieee1284_ops.o procfs.o
+
+ifeq ($(CONFIG_PARPORT_1284),y)
+ parport-objs += daisy.o probe.o
+endif
+
+obj-$(CONFIG_PARPORT) += parport.o
+obj-$(CONFIG_PARPORT_PC) += parport_pc.o
+obj-$(CONFIG_PARPORT_SERIAL) += parport_serial.o
+obj-$(CONFIG_PARPORT_PC_PCMCIA) += parport_cs.o
+obj-$(CONFIG_PARPORT_AMIGA) += parport_amiga.o
+obj-$(CONFIG_PARPORT_MFC3) += parport_mfc3.o
+obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
+obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
+obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
+obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o
+obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
diff --git a/drivers/parport/TODO-parport b/drivers/parport/TODO-parport
new file mode 100644
index 000000000..089b14ee4
--- /dev/null
+++ b/drivers/parport/TODO-parport
@@ -0,0 +1,20 @@
+Things to be done.
+
+0. Fix the bugs (see BUGS-parport).
+
+1. Proper documentation.
+
+2. A better lp.c:
+
+ a) ECP support would be nice. This can only work if both the port and
+ the printer support it.
+
+ b) Handle status readback automatically. IEEE1284 printers can post status
+ bits when they have something to say. We should read out and deal
+ with (maybe just log) whatever the printer wants to tell the world.
+
+3. Support more hardware (eg m68k, Sun bpp).
+
+4. A better PLIP (make use of bidirectional/ECP/EPP ports).
+
+See <URL:http://people.redhat.com/twaugh/parport/>.
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
new file mode 100644
index 000000000..6d78ec3a7
--- /dev/null
+++ b/drivers/parport/daisy.c
@@ -0,0 +1,507 @@
+/*
+ * IEEE 1284.3 Parallel port daisy chain and multiplexor code
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * ??-12-1998: Initial implementation.
+ * 31-01-1999: Make port-cloning transparent.
+ * 13-02-1999: Move DeviceID technique from parport_probe.
+ * 13-03-1999: Get DeviceID from non-IEEE 1284.3 devices too.
+ * 22-02-2000: Count devices that are actually detected.
+ *
+ * Any part of this program may be used in documents licensed under
+ * the GNU Free Documentation License, Version 1.1 or any later version
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/parport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+
+#include <asm/current.h>
+#include <linux/uaccess.h>
+
+#undef DEBUG
+
+static struct daisydev {
+ struct daisydev *next;
+ struct parport *port;
+ int daisy;
+ int devnum;
+} *topology = NULL;
+static DEFINE_SPINLOCK(topology_lock);
+
+static int numdevs;
+static bool daisy_init_done;
+
+/* Forward-declaration of lower-level functions. */
+static int mux_present(struct parport *port);
+static int num_mux_ports(struct parport *port);
+static int select_port(struct parport *port);
+static int assign_addrs(struct parport *port);
+
+/* Add a device to the discovered topology. */
+static void add_dev(int devnum, struct parport *port, int daisy)
+{
+ struct daisydev *newdev, **p;
+ newdev = kmalloc(sizeof(struct daisydev), GFP_KERNEL);
+ if (newdev) {
+ newdev->port = port;
+ newdev->daisy = daisy;
+ newdev->devnum = devnum;
+ spin_lock(&topology_lock);
+ for (p = &topology; *p && (*p)->devnum<devnum; p = &(*p)->next)
+ ;
+ newdev->next = *p;
+ *p = newdev;
+ spin_unlock(&topology_lock);
+ }
+}
+
+/* Clone a parport (actually, make an alias). */
+static struct parport *clone_parport(struct parport *real, int muxport)
+{
+ struct parport *extra = parport_register_port(real->base,
+ real->irq,
+ real->dma,
+ real->ops);
+ if (extra) {
+ extra->portnum = real->portnum;
+ extra->physport = real;
+ extra->muxport = muxport;
+ real->slaves[muxport-1] = extra;
+ }
+
+ return extra;
+}
+
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
+/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
+ * Return value is number of devices actually detected. */
+int parport_daisy_init(struct parport *port)
+{
+ int detected = 0;
+ char *deviceid;
+ static const char *th[] = { /*0*/"th", "st", "nd", "rd", "th" };
+ int num_ports;
+ int i;
+ int last_try = 0;
+
+ if (!daisy_init_done) {
+ /*
+ * flag should be marked true first as
+ * parport_register_driver() might try to load the low
+ * level driver which will lead to announcing new ports
+ * and which will again come back here at
+ * parport_daisy_init()
+ */
+ daisy_init_done = true;
+ i = parport_register_driver(&daisy_driver);
+ if (i) {
+ pr_err("daisy registration failed\n");
+ daisy_init_done = false;
+ return i;
+ }
+ }
+
+again:
+ /* Because this is called before any other devices exist,
+ * we don't have to claim exclusive access. */
+
+ /* If mux present on normal port, need to create new
+ * parports for each extra port. */
+ if (port->muxport < 0 && mux_present(port) &&
+ /* don't be fooled: a mux must have 2 or 4 ports. */
+ ((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
+ /* Leave original as port zero. */
+ port->muxport = 0;
+ pr_info("%s: 1st (default) port of %d-way multiplexor\n",
+ port->name, num_ports);
+ for (i = 1; i < num_ports; i++) {
+ /* Clone the port. */
+ struct parport *extra = clone_parport(port, i);
+ if (!extra) {
+ if (signal_pending(current))
+ break;
+
+ schedule();
+ continue;
+ }
+
+ pr_info("%s: %d%s port of %d-way multiplexor on %s\n",
+ extra->name, i + 1, th[i + 1], num_ports,
+ port->name);
+
+ /* Analyse that port too. We won't recurse
+ forever because of the 'port->muxport < 0'
+ test above. */
+ parport_daisy_init(extra);
+ }
+ }
+
+ if (port->muxport >= 0)
+ select_port(port);
+
+ parport_daisy_deselect_all(port);
+ detected += assign_addrs(port);
+
+ /* Count the potential legacy device at the end. */
+ add_dev(numdevs++, port, -1);
+
+ /* Find out the legacy device's IEEE 1284 device ID. */
+ deviceid = kmalloc(1024, GFP_KERNEL);
+ if (deviceid) {
+ if (parport_device_id(numdevs - 1, deviceid, 1024) > 2)
+ detected++;
+
+ kfree(deviceid);
+ }
+
+ if (!detected && !last_try) {
+ /* No devices were detected. Perhaps they are in some
+ funny state; let's try to reset them and see if
+ they wake up. */
+ parport_daisy_fini(port);
+ parport_write_control(port, PARPORT_CONTROL_SELECT);
+ udelay(50);
+ parport_write_control(port,
+ PARPORT_CONTROL_SELECT |
+ PARPORT_CONTROL_INIT);
+ udelay(50);
+ last_try = 1;
+ goto again;
+ }
+
+ return detected;
+}
+
+/* Forget about devices on a physical port. */
+void parport_daisy_fini(struct parport *port)
+{
+ struct daisydev **p;
+
+ spin_lock(&topology_lock);
+ p = &topology;
+ while (*p) {
+ struct daisydev *dev = *p;
+ if (dev->port != port) {
+ p = &dev->next;
+ continue;
+ }
+ *p = dev->next;
+ kfree(dev);
+ }
+
+	/* Gaps in the numbering could be handled better.  How should
+	   someone enumerate through all IEEE1284.3 devices in the
+	   topology? */
+ if (!topology) numdevs = 0;
+ spin_unlock(&topology_lock);
+ return;
+}
+
+/**
+ * parport_open - find a device by canonical device number
+ * @devnum: canonical device number
+ * @name: name to associate with the device
+ *
+ * This function is similar to parport_register_device(), except
+ * that it locates a device by its number rather than by the port
+ * it is attached to.
+ *
+ * All parameters except for @devnum are the same as for
+ * parport_register_device(). The return value is the same as
+ * for parport_register_device().
+ **/
+
+struct pardevice *parport_open(int devnum, const char *name)
+{
+ struct daisydev *p = topology;
+ struct pardev_cb par_cb;
+ struct parport *port;
+ struct pardevice *dev;
+ int daisy;
+
+ memset(&par_cb, 0, sizeof(par_cb));
+ spin_lock(&topology_lock);
+ while (p && p->devnum != devnum)
+ p = p->next;
+
+ if (!p) {
+ spin_unlock(&topology_lock);
+ return NULL;
+ }
+
+ daisy = p->daisy;
+ port = parport_get_port(p->port);
+ spin_unlock(&topology_lock);
+
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
+ parport_put_port(port);
+ if (!dev)
+ return NULL;
+
+ dev->daisy = daisy;
+
+ /* Check that there really is a device to select. */
+ if (daisy >= 0) {
+ int selected;
+ parport_claim_or_block(dev);
+ selected = port->daisy;
+ parport_release(dev);
+
+ if (selected != daisy) {
+ /* No corresponding device. */
+ parport_unregister_device(dev);
+ return NULL;
+ }
+ }
+
+ return dev;
+}
+
+/**
+ * parport_close - close a device opened with parport_open()
+ * @dev: device to close
+ *
+ * This is to parport_open() as parport_unregister_device() is to
+ * parport_register_device().
+ **/
+
+void parport_close(struct pardevice *dev)
+{
+ parport_unregister_device(dev);
+}
+
+/* Send a daisy-chain-style CPP command packet. */
+static int cpp_daisy(struct parport *port, int cmd)
+{
+ unsigned char s;
+
+ parport_data_forward(port);
+ parport_write_data(port, 0xaa); udelay(2);
+ parport_write_data(port, 0x55); udelay(2);
+ parport_write_data(port, 0x00); udelay(2);
+ parport_write_data(port, 0xff); udelay(2);
+ s = parport_read_status(port) & (PARPORT_STATUS_BUSY
+ | PARPORT_STATUS_PAPEROUT
+ | PARPORT_STATUS_SELECT
+ | PARPORT_STATUS_ERROR);
+ if (s != (PARPORT_STATUS_BUSY
+ | PARPORT_STATUS_PAPEROUT
+ | PARPORT_STATUS_SELECT
+ | PARPORT_STATUS_ERROR)) {
+ pr_debug("%s: cpp_daisy: aa5500ff(%02x)\n", port->name, s);
+ return -ENXIO;
+ }
+
+ parport_write_data(port, 0x87); udelay(2);
+ s = parport_read_status(port) & (PARPORT_STATUS_BUSY
+ | PARPORT_STATUS_PAPEROUT
+ | PARPORT_STATUS_SELECT
+ | PARPORT_STATUS_ERROR);
+ if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
+ pr_debug("%s: cpp_daisy: aa5500ff87(%02x)\n", port->name, s);
+ return -ENXIO;
+ }
+
+ parport_write_data(port, 0x78); udelay(2);
+ parport_write_data(port, cmd); udelay(2);
+ parport_frob_control(port,
+ PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE);
+ udelay(1);
+ s = parport_read_status(port);
+ parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
+ udelay(1);
+ parport_write_data(port, 0xff); udelay(2);
+
+ return s;
+}
+
+/* Send a mux-style CPP command packet. */
+static int cpp_mux(struct parport *port, int cmd)
+{
+ unsigned char s;
+ int rc;
+
+ parport_data_forward(port);
+ parport_write_data(port, 0xaa); udelay(2);
+ parport_write_data(port, 0x55); udelay(2);
+ parport_write_data(port, 0xf0); udelay(2);
+ parport_write_data(port, 0x0f); udelay(2);
+ parport_write_data(port, 0x52); udelay(2);
+ parport_write_data(port, 0xad); udelay(2);
+ parport_write_data(port, cmd); udelay(2);
+
+ s = parport_read_status(port);
+ if (!(s & PARPORT_STATUS_ACK)) {
+ pr_debug("%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
+ port->name, cmd, s);
+ return -EIO;
+ }
+
+ rc = (((s & PARPORT_STATUS_SELECT ? 1 : 0) << 0) |
+ ((s & PARPORT_STATUS_PAPEROUT ? 1 : 0) << 1) |
+ ((s & PARPORT_STATUS_BUSY ? 0 : 1) << 2) |
+ ((s & PARPORT_STATUS_ERROR ? 0 : 1) << 3));
+
+ return rc;
+}
+
+void parport_daisy_deselect_all(struct parport *port)
+{
+ cpp_daisy(port, 0x30);
+}
+
+int parport_daisy_select(struct parport *port, int daisy, int mode)
+{
+ switch (mode)
+ {
+ // For these modes we should switch to EPP mode:
+ case IEEE1284_MODE_EPP:
+ case IEEE1284_MODE_EPPSL:
+ case IEEE1284_MODE_EPPSWE:
+ return !(cpp_daisy(port, 0x20 + daisy) &
+ PARPORT_STATUS_ERROR);
+
+ // For these modes we should switch to ECP mode:
+ case IEEE1284_MODE_ECP:
+ case IEEE1284_MODE_ECPRLE:
+ case IEEE1284_MODE_ECPSWE:
+ return !(cpp_daisy(port, 0xd0 + daisy) &
+ PARPORT_STATUS_ERROR);
+
+	// The daisy chain specification says nothing about BECP.
+	// Maybe it's wise to use ECP?
+ case IEEE1284_MODE_BECP:
+ // Others use compat mode
+ case IEEE1284_MODE_NIBBLE:
+ case IEEE1284_MODE_BYTE:
+ case IEEE1284_MODE_COMPAT:
+ default:
+ return !(cpp_daisy(port, 0xe0 + daisy) &
+ PARPORT_STATUS_ERROR);
+ }
+}
+
+static int mux_present(struct parport *port)
+{
+ return cpp_mux(port, 0x51) == 3;
+}
+
+static int num_mux_ports(struct parport *port)
+{
+ return cpp_mux(port, 0x58);
+}
+
+static int select_port(struct parport *port)
+{
+ int muxport = port->muxport;
+ return cpp_mux(port, 0x60 + muxport) == muxport;
+}
+
+static int assign_addrs(struct parport *port)
+{
+ unsigned char s;
+ unsigned char daisy;
+ int thisdev = numdevs;
+ int detected;
+ char *deviceid;
+
+ parport_data_forward(port);
+ parport_write_data(port, 0xaa); udelay(2);
+ parport_write_data(port, 0x55); udelay(2);
+ parport_write_data(port, 0x00); udelay(2);
+ parport_write_data(port, 0xff); udelay(2);
+ s = parport_read_status(port) & (PARPORT_STATUS_BUSY
+ | PARPORT_STATUS_PAPEROUT
+ | PARPORT_STATUS_SELECT
+ | PARPORT_STATUS_ERROR);
+ if (s != (PARPORT_STATUS_BUSY
+ | PARPORT_STATUS_PAPEROUT
+ | PARPORT_STATUS_SELECT
+ | PARPORT_STATUS_ERROR)) {
+ pr_debug("%s: assign_addrs: aa5500ff(%02x)\n", port->name, s);
+ return 0;
+ }
+
+ parport_write_data(port, 0x87); udelay(2);
+ s = parport_read_status(port) & (PARPORT_STATUS_BUSY
+ | PARPORT_STATUS_PAPEROUT
+ | PARPORT_STATUS_SELECT
+ | PARPORT_STATUS_ERROR);
+ if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
+ pr_debug("%s: assign_addrs: aa5500ff87(%02x)\n", port->name, s);
+ return 0;
+ }
+
+ parport_write_data(port, 0x78); udelay(2);
+ s = parport_read_status(port);
+
+ for (daisy = 0;
+ (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
+ == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
+ && daisy < 4;
+ ++daisy) {
+ parport_write_data(port, daisy);
+ udelay(2);
+ parport_frob_control(port,
+ PARPORT_CONTROL_STROBE,
+ PARPORT_CONTROL_STROBE);
+ udelay(1);
+ parport_frob_control(port, PARPORT_CONTROL_STROBE, 0);
+ udelay(1);
+
+ add_dev(numdevs++, port, daisy);
+
+ /* See if this device thought it was the last in the
+ * chain. */
+ if (!(s & PARPORT_STATUS_BUSY))
+ break;
+
+		/* We are now seeing pass-through status: either last_dev
+		   from the next device or, if last_dev does not work,
+		   status lines from some device that is not part of the
+		   daisy chain. */
+ s = parport_read_status(port);
+ }
+
+ parport_write_data(port, 0xff); udelay(2);
+ detected = numdevs - thisdev;
+ pr_debug("%s: Found %d daisy-chained devices\n", port->name, detected);
+
+ /* Ask the new devices to introduce themselves. */
+ deviceid = kmalloc(1024, GFP_KERNEL);
+ if (!deviceid) return 0;
+
+ for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
+ parport_device_id(thisdev, deviceid, 1024);
+
+ kfree(deviceid);
+ return detected;
+}
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
new file mode 100644
index 000000000..4547ac44c
--- /dev/null
+++ b/drivers/parport/ieee1284.c
@@ -0,0 +1,789 @@
+/*
+ * IEEE-1284 implementation for parport.
+ *
+ * Authors: Phil Blundell <philb@gnu.org>
+ * Carsten Gross <carsten@sol.wohnheim.uni-ulm.de>
+ * Jose Renau <renau@acm.org>
+ * Tim Waugh <tim@cyberelk.demon.co.uk> (largely rewritten)
+ *
+ * This file is responsible for IEEE 1284 negotiation, and for handing
+ * read/write requests to low-level drivers.
+ *
+ * Any part of this program may be used in documents licensed under
+ * the GNU Free Documentation License, Version 1.1 or any later version
+ * published by the Free Software Foundation.
+ *
+ * Various hacks, Fred Barnes <frmb2@ukc.ac.uk>, 04/2000
+ */
+
+#include <linux/module.h>
+#include <linux/threads.h>
+#include <linux/parport.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched/signal.h>
+
+#undef DEBUG /* undef me for production */
+
+#ifdef CONFIG_LP_CONSOLE
+#undef DEBUG /* Don't want a garbled console */
+#endif
+
+/* Make parport_wait_peripheral wake up.
+ * It will be useful to call this from an interrupt handler. */
+static void parport_ieee1284_wakeup (struct parport *port)
+{
+ up (&port->physport->ieee1284.irq);
+}
+
+static void timeout_waiting_on_port (struct timer_list *t)
+{
+ struct parport *port = from_timer(port, t, timer);
+
+ parport_ieee1284_wakeup (port);
+}
+
+/**
+ * parport_wait_event - wait for an event on a parallel port
+ * @port: port to wait on
+ * @timeout: time to wait (in jiffies)
+ *
+ * This function waits for up to @timeout jiffies for an
+ * interrupt to occur on a parallel port. If the port timeout is
+ * set to zero, it returns immediately.
+ *
+ * If an interrupt occurs before the timeout period elapses, this
+ * function returns zero immediately. If it times out, it returns
+ * one. An error code less than zero indicates an error (most
+ * likely a pending signal), and the calling code should finish
+ * what it's doing as soon as it can.
+ */
+
+int parport_wait_event (struct parport *port, signed long timeout)
+{
+ int ret;
+
+ if (!port->physport->cad->timeout)
+ /* Zero timeout is special, and we can't down() the
+ semaphore. */
+ return 1;
+
+ timer_setup(&port->timer, timeout_waiting_on_port, 0);
+ mod_timer(&port->timer, jiffies + timeout);
+ ret = down_interruptible (&port->physport->ieee1284.irq);
+ if (!del_timer_sync(&port->timer) && !ret)
+ /* Timed out. */
+ ret = 1;
+
+ return ret;
+}
+
+/**
+ * parport_poll_peripheral - poll status lines
+ * @port: port to watch
+ * @mask: status lines to watch
+ * @result: desired values of chosen status lines
+ * @usec: timeout
+ *
+ * This function busy-waits until the masked status lines have
+ * the desired values, or until the timeout period elapses. The
+ * @mask and @result parameters are bitmasks, with the bits
+ * defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
+ * and so on.
+ *
+ * This function does not call schedule(); instead it busy-waits
+ * using udelay(). It currently has a resolution of 5usec.
+ *
+ * If the status lines take on the desired values before the
+ * timeout period elapses, parport_poll_peripheral() returns zero
+ * immediately. A return value greater than zero indicates
+ * a timeout. An error code (less than zero) indicates an error,
+ * most likely a signal that arrived, and the caller should
+ * finish what it is doing as soon as possible.
+*/
+
+int parport_poll_peripheral(struct parport *port,
+ unsigned char mask,
+ unsigned char result,
+ int usec)
+{
+ /* Zero return code is success, >0 is timeout. */
+ int count = usec / 5 + 2;
+ int i;
+ unsigned char status;
+ for (i = 0; i < count; i++) {
+ status = parport_read_status (port);
+ if ((status & mask) == result)
+ return 0;
+ if (signal_pending (current))
+ return -EINTR;
+ if (need_resched())
+ break;
+ if (i >= 2)
+ udelay (5);
+ }
+
+ return 1;
+}
+
+/**
+ * parport_wait_peripheral - wait for status lines to change in 35ms
+ * @port: port to watch
+ * @mask: status lines to watch
+ * @result: desired values of chosen status lines
+ *
+ * This function waits until the masked status lines have the
+ * desired values, or until 35ms have elapsed (see IEEE 1284-1994
+ * page 24 to 25 for why this value in particular is hardcoded).
+ * The @mask and @result parameters are bitmasks, with the bits
+ * defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
+ * and so on.
+ *
+ * The port is polled quickly to start off with, in anticipation
+ * of a fast response from the peripheral. This fast polling
+ * time is configurable (using /proc), and defaults to 500usec.
+ * If the timeout for this port (see parport_set_timeout()) is
+ * zero, the fast polling time is 35ms, and this function does
+ * not call schedule().
+ *
+ * If the timeout for this port is non-zero, after the fast
+ * polling fails it uses parport_wait_event() to wait for up to
+ * 10ms, waking up if an interrupt occurs.
+ */
+
+int parport_wait_peripheral(struct parport *port,
+ unsigned char mask,
+ unsigned char result)
+{
+ int ret;
+ int usec;
+ unsigned long deadline;
+ unsigned char status;
+
+ usec = port->physport->spintime; /* usecs of fast polling */
+ if (!port->physport->cad->timeout)
+ /* A zero timeout is "special": busy wait for the
+ entire 35ms. */
+ usec = 35000;
+
+ /* Fast polling.
+ *
+ * This should be adjustable.
+ * How about making a note (in the device structure) of how long
+ * it takes, so we know for next time?
+ */
+ ret = parport_poll_peripheral (port, mask, result, usec);
+ if (ret != 1)
+ return ret;
+
+ if (!port->physport->cad->timeout)
+ /* We may be in an interrupt handler, so we can't poll
+ * slowly anyway. */
+ return 1;
+
+ /* 40ms of slow polling. */
+ deadline = jiffies + msecs_to_jiffies(40);
+ while (time_before (jiffies, deadline)) {
+ if (signal_pending (current))
+ return -EINTR;
+
+ /* Wait for 10ms (or until an interrupt occurs if
+ * the handler is set) */
+ if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0)
+ return ret;
+
+ status = parport_read_status (port);
+ if ((status & mask) == result)
+ return 0;
+
+ if (!ret) {
+ /* parport_wait_event didn't time out, but the
+ * peripheral wasn't actually ready either.
+ * Wait for another 10ms. */
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
+ }
+ }
+
+ return 1;
+}
+
+#ifdef CONFIG_PARPORT_1284
+/* Terminate a negotiated mode. */
+static void parport_ieee1284_terminate (struct parport *port)
+{
+ int r;
+ port = port->physport;
+
+ /* EPP terminates differently. */
+ switch (port->ieee1284.mode) {
+ case IEEE1284_MODE_EPP:
+ case IEEE1284_MODE_EPPSL:
+ case IEEE1284_MODE_EPPSWE:
+ /* Terminate from EPP mode. */
+
+ /* Event 68: Set nInit low */
+ parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
+ udelay (50);
+
+ /* Event 69: Set nInit high, nSelectIn low */
+ parport_frob_control (port,
+ PARPORT_CONTROL_SELECT
+ | PARPORT_CONTROL_INIT,
+ PARPORT_CONTROL_SELECT
+ | PARPORT_CONTROL_INIT);
+ break;
+
+ case IEEE1284_MODE_ECP:
+ case IEEE1284_MODE_ECPRLE:
+ case IEEE1284_MODE_ECPSWE:
+ /* In ECP we can only terminate from fwd idle phase. */
+ if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
+ /* Event 47: Set nInit high */
+ parport_frob_control (port,
+ PARPORT_CONTROL_INIT
+ | PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_INIT
+ | PARPORT_CONTROL_AUTOFD);
+
+ /* Event 49: PError goes high */
+ r = parport_wait_peripheral (port,
+ PARPORT_STATUS_PAPEROUT,
+ PARPORT_STATUS_PAPEROUT);
+ if (r)
+ pr_debug("%s: Timeout at event 49\n",
+ port->name);
+
+ parport_data_forward (port);
+ pr_debug("%s: ECP direction: forward\n", port->name);
+ port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+ }
+
+ fallthrough;
+
+ default:
+ /* Terminate from all other modes. */
+
+ /* Event 22: Set nSelectIn low, nAutoFd high */
+ parport_frob_control (port,
+ PARPORT_CONTROL_SELECT
+ | PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_SELECT);
+
+ /* Event 24: nAck goes low */
+ r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
+ if (r)
+ pr_debug("%s: Timeout at event 24\n", port->name);
+
+ /* Event 25: Set nAutoFd low */
+ parport_frob_control (port,
+ PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_AUTOFD);
+
+ /* Event 27: nAck goes high */
+ r = parport_wait_peripheral (port,
+ PARPORT_STATUS_ACK,
+ PARPORT_STATUS_ACK);
+ if (r)
+ pr_debug("%s: Timeout at event 27\n", port->name);
+
+ /* Event 29: Set nAutoFd high */
+ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+ }
+
+ port->ieee1284.mode = IEEE1284_MODE_COMPAT;
+ port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+
+ pr_debug("%s: In compatibility (forward idle) mode\n", port->name);
+}
+#endif /* IEEE1284 support */
+
+/**
+ * parport_negotiate - negotiate an IEEE 1284 mode
+ * @port: port to use
+ * @mode: mode to negotiate to
+ *
+ * Use this to negotiate to a particular IEEE 1284 transfer mode.
+ * The @mode parameter should be one of the constants in
+ * parport.h starting %IEEE1284_MODE_xxx.
+ *
+ * The return value is 0 if the peripheral has accepted the
+ * negotiation to the mode specified, -1 if the peripheral is not
+ * IEEE 1284 compliant (or not present), or 1 if the peripheral
+ * has rejected the negotiation.
+ */
+
+int parport_negotiate (struct parport *port, int mode)
+{
+#ifndef CONFIG_PARPORT_1284
+	/* Without 1284 support only compatibility mode can succeed. */
+	if (mode == IEEE1284_MODE_COMPAT)
+		return 0;
+	pr_err("parport: IEEE1284 not supported in this kernel\n");
+	return -1;
+#else
+	int m = mode & ~IEEE1284_ADDR;	/* extensibility byte to present */
+	int r;
+	unsigned char xflag;
+
+	/* Always negotiate on the physical port, not a mux line. */
+	port = port->physport;
+
+	/* Is there anything to do? */
+	if (port->ieee1284.mode == mode)
+		return 0;
+
+	/* Is the difference just an address-or-not bit? */
+	if ((port->ieee1284.mode & ~IEEE1284_ADDR) == (mode & ~IEEE1284_ADDR)){
+		port->ieee1284.mode = mode;
+		return 0;
+	}
+
+	/* Go to compatibility forward idle mode */
+	if (port->ieee1284.mode != IEEE1284_MODE_COMPAT)
+		parport_ieee1284_terminate (port);
+
+	if (mode == IEEE1284_MODE_COMPAT)
+		/* Compatibility mode: no negotiation. */
+		return 0;
+
+	/* Map software-emulated / stripped-down modes onto the
+	 * extensibility byte of the mode they emulate; the peripheral
+	 * only ever sees the real mode. */
+	switch (mode) {
+	case IEEE1284_MODE_ECPSWE:
+		m = IEEE1284_MODE_ECP;
+		break;
+	case IEEE1284_MODE_EPPSL:
+	case IEEE1284_MODE_EPPSWE:
+		m = IEEE1284_MODE_EPP;
+		break;
+	case IEEE1284_MODE_BECP:
+		/* NOTE(review): -ENOSYS differs from the documented
+		 * 0 / -1 / 1 return convention of this function. */
+		return -ENOSYS; /* FIXME (implement BECP) */
+	}
+
+	if (mode & IEEE1284_EXT_LINK)
+		m = 1<<7; /* request extensibility link */
+
+	port->ieee1284.phase = IEEE1284_PH_NEGOTIATION;
+
+	/* Start off with nStrobe and nAutoFd high, and nSelectIn low */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE
+			      | PARPORT_CONTROL_AUTOFD
+			      | PARPORT_CONTROL_SELECT,
+			      PARPORT_CONTROL_SELECT);
+	udelay(1);
+
+	/* Event 0: Set data */
+	parport_data_forward (port);
+	parport_write_data (port, m);
+	udelay (400); /* Shouldn't need to wait this long. */
+
+	/* Event 1: Set nSelectIn high, nAutoFd low */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_SELECT
+			      | PARPORT_CONTROL_AUTOFD,
+			      PARPORT_CONTROL_AUTOFD);
+
+	/* Event 2: PError, Select, nFault go high, nAck goes low */
+	if (parport_wait_peripheral (port,
+				     PARPORT_STATUS_ERROR
+				     | PARPORT_STATUS_SELECT
+				     | PARPORT_STATUS_PAPEROUT
+				     | PARPORT_STATUS_ACK,
+				     PARPORT_STATUS_ERROR
+				     | PARPORT_STATUS_SELECT
+				     | PARPORT_STATUS_PAPEROUT)) {
+		/* Timeout */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_SELECT
+				      | PARPORT_CONTROL_AUTOFD,
+				      PARPORT_CONTROL_SELECT);
+		pr_debug("%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
+			 port->name, parport_read_status (port));
+		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+		return -1; /* Not IEEE1284 compliant */
+	}
+
+	/* Event 3: Set nStrobe low */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE,
+			      PARPORT_CONTROL_STROBE);
+
+	/* Event 4: Set nStrobe and nAutoFd high */
+	udelay (5);
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE
+			      | PARPORT_CONTROL_AUTOFD,
+			      0);
+
+	/* Event 6: nAck goes high */
+	if (parport_wait_peripheral (port,
+				     PARPORT_STATUS_ACK,
+				     PARPORT_STATUS_ACK)) {
+		/* This shouldn't really happen with a compliant device. */
+		pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
+			 port->name, mode, port->ops->read_status (port));
+		parport_ieee1284_terminate (port);
+		return 1;
+	}
+
+	/* The peripheral reports mode support on the Select (XFlag) line. */
+	xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
+
+	/* xflag should be high for all modes other than nibble (0). */
+	if (mode && !xflag) {
+		/* Mode not supported. */
+		pr_debug("%s: Mode 0x%02x rejected by peripheral\n",
+			 port->name, mode);
+		parport_ieee1284_terminate (port);
+		return 1;
+	}
+
+	/* More to do if we've requested extensibility link. */
+	if (mode & IEEE1284_EXT_LINK) {
+		m = mode & 0x7f;
+		udelay (1);
+		parport_write_data (port, m);
+		udelay (1);
+
+		/* Event 51: Set nStrobe low */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_STROBE,
+				      PARPORT_CONTROL_STROBE);
+
+		/* Event 52: nAck goes low */
+		if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
+			/* This peripheral is _very_ slow. */
+			pr_debug("%s: Event 52 didn't happen\n", port->name);
+			parport_ieee1284_terminate (port);
+			return 1;
+		}
+
+		/* Event 53: Set nStrobe high */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_STROBE,
+				      0);
+
+		/* Event 55: nAck goes high */
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_ACK,
+					     PARPORT_STATUS_ACK)) {
+			/* This shouldn't really happen with a compliant
+			 * device. */
+			pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
+				 port->name, mode,
+				 port->ops->read_status(port));
+			parport_ieee1284_terminate (port);
+			return 1;
+		}
+
+		/* Event 54: Peripheral sets XFlag to reflect support */
+		xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
+
+		/* xflag should be high. */
+		if (!xflag) {
+			/* Extended mode not supported. */
+			pr_debug("%s: Extended mode 0x%02x not supported\n",
+				 port->name, mode);
+			parport_ieee1284_terminate (port);
+			return 1;
+		}
+
+		/* Any further setup is left to the caller. */
+	}
+
+	/* Mode is supported */
+	pr_debug("%s: In mode 0x%02x\n", port->name, mode);
+	port->ieee1284.mode = mode;
+
+	/* But ECP is special: it needs events 30/31 to reach forward idle. */
+	if (!(mode & IEEE1284_EXT_LINK) && (m & IEEE1284_MODE_ECP)) {
+		port->ieee1284.phase = IEEE1284_PH_ECP_SETUP;
+
+		/* Event 30: Set nAutoFd low */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_AUTOFD,
+				      PARPORT_CONTROL_AUTOFD);
+
+		/* Event 31: PError goes high. */
+		r = parport_wait_peripheral (port,
+					     PARPORT_STATUS_PAPEROUT,
+					     PARPORT_STATUS_PAPEROUT);
+		if (r) {
+			pr_debug("%s: Timeout at event 31\n", port->name);
+		}
+
+		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+		pr_debug("%s: ECP direction: forward\n", port->name);
+	} else switch (mode) {
+	case IEEE1284_MODE_NIBBLE:
+	case IEEE1284_MODE_BYTE:
+		/* Reverse-channel modes idle in the reverse phase. */
+		port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+		break;
+	default:
+		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+	}
+
+
+	return 0;
+#endif /* IEEE1284 support */
+}
+
+/* Acknowledge that the peripheral has data available.
+ * Events 18-20, in order to get from Reverse Idle phase
+ * to Host Busy Data Available.
+ * This will most likely be called from an interrupt.
+ * Returns zero if data was available.
+ */
+#ifdef CONFIG_PARPORT_1284
+static int parport_ieee1284_ack_data_avail (struct parport *port)
+{
+	if (parport_read_status (port) & PARPORT_STATUS_ERROR)
+		/* Event 18 didn't happen. */
+		return -1;
+
+	/* Event 20: nAutoFd goes high.  Calls the ops hook directly
+	 * rather than parport_frob_control() — presumably because we
+	 * may be in interrupt context here; confirm. */
+	port->ops->frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+	port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+	return 0;
+}
+#endif /* IEEE1284 support */
+
+/* Handle an interrupt. */
+void parport_ieee1284_interrupt (void *handle)
+{
+	struct parport *port = handle;
+	/* Wake anyone sleeping in parport_wait_event() on this port. */
+	parport_ieee1284_wakeup (port);
+
+#ifdef CONFIG_PARPORT_1284
+	if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
+		/* An interrupt in this phase means that data
+		 * is now available. */
+		pr_debug("%s: Data available\n", port->name);
+		parport_ieee1284_ack_data_avail (port);
+	}
+#endif /* IEEE1284 support */
+}
+
+/**
+ * parport_write - write a block of data to a parallel port
+ * @port: port to write to
+ * @buffer: data buffer (in kernel space)
+ * @len: number of bytes of data to transfer
+ *
+ * This will write up to @len bytes of @buffer to the port
+ * specified, using the IEEE 1284 transfer mode most recently
+ * negotiated to (using parport_negotiate()), as long as that
+ * mode supports forward transfers (host to peripheral).
+ *
+ * It is the caller's responsibility to ensure that the first
+ * @len bytes of @buffer are valid.
+ *
+ * This function returns the number of bytes transferred (if zero
+ * or positive), or else an error code.
+ */
+
+ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
+{
+#ifndef CONFIG_PARPORT_1284
+	return port->ops->compat_write_data (port, buffer, len, 0);
+#else
+	ssize_t retval;
+	/* NOTE(review): this reads port->ieee1284.mode while
+	 * parport_read() below reads port->physport->ieee1284.mode;
+	 * confirm whether the difference matters on muxed ports. */
+	int mode = port->ieee1284.mode;
+	int addr = mode & IEEE1284_ADDR;
+	size_t (*fn) (struct parport *, const void *, size_t, int);
+
+	/* Ignore the device-ID-request bit and the address bit. */
+	mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+	/* Use the mode we're in. */
+	switch (mode) {
+	case IEEE1284_MODE_NIBBLE:
+	case IEEE1284_MODE_BYTE:
+		/* Reverse-only modes: drop back to compatibility mode
+		 * for a forward transfer. */
+		parport_negotiate (port, IEEE1284_MODE_COMPAT);
+		fallthrough;
+	case IEEE1284_MODE_COMPAT:
+		pr_debug("%s: Using compatibility mode\n", port->name);
+		fn = port->ops->compat_write_data;
+		break;
+
+	case IEEE1284_MODE_EPP:
+		pr_debug("%s: Using EPP mode\n", port->name);
+		if (addr) {
+			fn = port->ops->epp_write_addr;
+		} else {
+			fn = port->ops->epp_write_data;
+		}
+		break;
+	case IEEE1284_MODE_EPPSWE:
+		pr_debug("%s: Using software-emulated EPP mode\n", port->name);
+		if (addr) {
+			fn = parport_ieee1284_epp_write_addr;
+		} else {
+			fn = parport_ieee1284_epp_write_data;
+		}
+		break;
+	case IEEE1284_MODE_ECP:
+	case IEEE1284_MODE_ECPRLE:
+		pr_debug("%s: Using ECP mode\n", port->name);
+		if (addr) {
+			fn = port->ops->ecp_write_addr;
+		} else {
+			fn = port->ops->ecp_write_data;
+		}
+		break;
+
+	case IEEE1284_MODE_ECPSWE:
+		pr_debug("%s: Using software-emulated ECP mode\n", port->name);
+		/* The caller has specified that it must be emulated,
+		 * even if we have ECP hardware! */
+		if (addr) {
+			fn = parport_ieee1284_ecp_write_addr;
+		} else {
+			fn = parport_ieee1284_ecp_write_data;
+		}
+		break;
+
+	default:
+		pr_debug("%s: Unknown mode 0x%02x\n",
+			 port->name, port->ieee1284.mode);
+		return -ENOSYS;
+	}
+
+	retval = (*fn) (port, buffer, len, 0);
+	pr_debug("%s: wrote %zd/%zu bytes\n", port->name, retval, len);
+	return retval;
+#endif /* IEEE1284 support */
+}
+
+/**
+ * parport_read - read a block of data from a parallel port
+ * @port: port to read from
+ * @buffer: data buffer (in kernel space)
+ * @len: number of bytes of data to transfer
+ *
+ * This will read up to @len bytes of @buffer to the port
+ * specified, using the IEEE 1284 transfer mode most recently
+ * negotiated to (using parport_negotiate()), as long as that
+ * mode supports reverse transfers (peripheral to host).
+ *
+ * It is the caller's responsibility to ensure that the first
+ * @len bytes of @buffer are available to write to.
+ *
+ * This function returns the number of bytes transferred (if zero
+ * or positive), or else an error code.
+ */
+
+ssize_t parport_read (struct parport *port, void *buffer, size_t len)
+{
+#ifndef CONFIG_PARPORT_1284
+	pr_err("parport: IEEE1284 not supported in this kernel\n");
+	return -ENODEV;
+#else
+	int mode = port->physport->ieee1284.mode;
+	int addr = mode & IEEE1284_ADDR;
+	size_t (*fn) (struct parport *, void *, size_t, int);
+
+	/* Ignore the device-ID-request bit and the address bit. */
+	mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+	/* Use the mode we're in. */
+	switch (mode) {
+	case IEEE1284_MODE_COMPAT:
+		/* if we can tri-state use BYTE mode instead of NIBBLE mode,
+		 * if that fails, revert to NIBBLE mode -- ought to store somewhere
+		 * the device's ability to do BYTE mode reverse transfers, so we don't
+		 * end up needlessly calling negotiate(BYTE) repeatedly.. (fb)
+		 */
+		if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
+		    !parport_negotiate (port, IEEE1284_MODE_BYTE)) {
+			/* got into BYTE mode OK */
+			pr_debug("%s: Using byte mode\n", port->name);
+			fn = port->ops->byte_read_data;
+			break;
+		}
+		if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
+			return -EIO;
+		}
+		fallthrough;	/* to NIBBLE */
+	case IEEE1284_MODE_NIBBLE:
+		pr_debug("%s: Using nibble mode\n", port->name);
+		fn = port->ops->nibble_read_data;
+		break;
+
+	case IEEE1284_MODE_BYTE:
+		pr_debug("%s: Using byte mode\n", port->name);
+		fn = port->ops->byte_read_data;
+		break;
+
+	case IEEE1284_MODE_EPP:
+		pr_debug("%s: Using EPP mode\n", port->name);
+		if (addr) {
+			fn = port->ops->epp_read_addr;
+		} else {
+			fn = port->ops->epp_read_data;
+		}
+		break;
+	case IEEE1284_MODE_EPPSWE:
+		pr_debug("%s: Using software-emulated EPP mode\n", port->name);
+		if (addr) {
+			fn = parport_ieee1284_epp_read_addr;
+		} else {
+			fn = parport_ieee1284_epp_read_data;
+		}
+		break;
+	case IEEE1284_MODE_ECP:
+	case IEEE1284_MODE_ECPRLE:
+		pr_debug("%s: Using ECP mode\n", port->name);
+		fn = port->ops->ecp_read_data;
+		break;
+
+	case IEEE1284_MODE_ECPSWE:
+		pr_debug("%s: Using software-emulated ECP mode\n", port->name);
+		fn = parport_ieee1284_ecp_read_data;
+		break;
+
+	default:
+		pr_debug("%s: Unknown mode 0x%02x\n",
+			 port->name, port->physport->ieee1284.mode);
+		return -ENOSYS;
+	}
+
+	return (*fn) (port, buffer, len, 0);
+#endif /* IEEE1284 support */
+}
+
+/**
+ * parport_set_timeout - set the inactivity timeout for a device
+ * @dev: device on a port
+ * @inactivity: inactivity timeout (in jiffies)
+ *
+ * This sets the inactivity timeout for a particular device on a
+ * port. This affects functions like parport_wait_peripheral().
+ * The special value 0 means not to call schedule() while dealing
+ * with this device.
+ *
+ * The return value is the previous inactivity timeout.
+ *
+ * Any callers of parport_wait_event() for this device are woken
+ * up.
+ */
+
+long parport_set_timeout (struct pardevice *dev, long inactivity)
+{
+	/* Remember the previous setting so we can hand it back. */
+	long previous = dev->timeout;
+
+	dev->timeout = inactivity;
+
+	/* If this device currently owns the port, wake any sleeper in
+	 * parport_wait_event() so the new timeout takes effect now. */
+	if (dev->port->physport->cad == dev)
+		parport_ieee1284_wakeup (dev->port);
+
+	return previous;
+}
+
+/* Exported symbols for modules. */
+
+EXPORT_SYMBOL(parport_negotiate);
+EXPORT_SYMBOL(parport_write);
+EXPORT_SYMBOL(parport_read);
+EXPORT_SYMBOL(parport_wait_peripheral);
+EXPORT_SYMBOL(parport_wait_event);
+EXPORT_SYMBOL(parport_set_timeout);
+EXPORT_SYMBOL(parport_ieee1284_interrupt);
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
new file mode 100644
index 000000000..17061f1df
--- /dev/null
+++ b/drivers/parport/ieee1284_ops.c
@@ -0,0 +1,893 @@
+// SPDX-License-Identifier: GPL-2.0
+/* IEEE-1284 operations for parport.
+ *
+ * This file is for generic IEEE 1284 operations. The idea is that
+ * they are used by the low-level drivers. If they have a special way
+ * of doing something, they can provide their own routines (and put
+ * the function pointers in port->ops); if not, they can just use these
+ * as a fallback.
+ *
+ * Note: Make no assumptions about hardware or architecture in this file!
+ *
+ * Author: Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Fixed AUTOFD polarity in ecp_forward_to_reverse(). Fred Barnes, 1999
+ * Software emulated EPP fixes, Fred Barnes, 04/2001.
+ */
+
+
+#include <linux/module.h>
+#include <linux/parport.h>
+#include <linux/delay.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#undef DEBUG /* undef me for production */
+
+#ifdef CONFIG_LP_CONSOLE
+#undef DEBUG /* Don't want a garbled console */
+#endif
+
+/*** *
+ * One-way data transfer functions. *
+ * ***/
+
+/* Compatibility mode. */
+size_t parport_ieee1284_write_compat (struct parport *port,
+				      const void *buffer, size_t len,
+				      int flags)
+{
+	int no_irq = 1;
+	ssize_t count = 0;
+	const unsigned char *addr = buffer;
+	unsigned char byte;
+	struct pardevice *dev = port->physport->cad;
+	/* Idle control value: nSelectIn and nInit asserted, strobe off. */
+	unsigned char ctl = (PARPORT_CONTROL_SELECT
+			     | PARPORT_CONTROL_INIT);
+
+	/* If the port has an interrupt line, enable it so the wait
+	 * below can be interrupt-driven instead of yield-and-retry. */
+	if (port->irq != PARPORT_IRQ_NONE) {
+		parport_enable_irq (port);
+		no_irq = 0;
+	}
+
+	port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
+	parport_write_control (port, ctl);
+	parport_data_forward (port);
+	while (count < len) {
+		unsigned long expire = jiffies + dev->timeout;
+		long wait = msecs_to_jiffies(10);
+		unsigned char mask = (PARPORT_STATUS_ERROR
+				      | PARPORT_STATUS_BUSY);
+		unsigned char val = (PARPORT_STATUS_ERROR
+				     | PARPORT_STATUS_BUSY);
+
+		/* Wait until the peripheral's ready */
+		do {
+			/* Is the peripheral ready yet? */
+			if (!parport_wait_peripheral (port, mask, val))
+				/* Skip the loop */
+				goto ready;
+
+			/* Is the peripheral upset? */
+			if ((parport_read_status (port) &
+			     (PARPORT_STATUS_PAPEROUT |
+			      PARPORT_STATUS_SELECT |
+			      PARPORT_STATUS_ERROR))
+			    != (PARPORT_STATUS_SELECT |
+				PARPORT_STATUS_ERROR))
+				/* If nFault is asserted (i.e. no
+				 * error) and PAPEROUT and SELECT are
+				 * just red herrings, give the driver
+				 * a chance to check it's happy with
+				 * that before continuing. */
+				goto stop;
+
+			/* Have we run out of time? */
+			if (!time_before (jiffies, expire))
+				break;
+
+			/* Yield the port for a while.  If this is the
+                           first time around the loop, don't let go of
+                           the port.  This way, we find out if we have
+                           our interrupt handler called. */
+			if (count && no_irq) {
+				parport_release (dev);
+				schedule_timeout_interruptible(wait);
+				parport_claim_or_block (dev);
+			}
+			else
+				/* We must have the device claimed here */
+				parport_wait_event (port, wait);
+
+			/* Is there a signal pending? */
+			if (signal_pending (current))
+				break;
+
+			/* Wait longer next time. */
+			wait *= 2;
+		} while (time_before (jiffies, expire));
+
+		if (signal_pending (current))
+			break;
+
+		pr_debug("%s: Timed out\n", port->name);
+		break;
+
+	ready:
+		/* Write the character to the data lines. */
+		byte = *addr++;
+		parport_write_data (port, byte);
+		udelay (1);
+
+		/* Pulse strobe. */
+		parport_write_control (port, ctl | PARPORT_CONTROL_STROBE);
+		udelay (1); /* strobe */
+
+		parport_write_control (port, ctl);
+		udelay (1); /* hold */
+
+		/* Assume the peripheral received it. */
+		count++;
+
+		/* Let another process run if it needs to. */
+		if (time_before (jiffies, expire))
+			if (!parport_yield_blocking (dev)
+			    && need_resched())
+				schedule ();
+	}
+ stop:
+	port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+
+	return count;
+}
+
+/* Nibble mode. */
+size_t parport_ieee1284_read_nibble (struct parport *port,
+				     void *buffer, size_t len,
+				     int flags)
+{
+#ifndef CONFIG_PARPORT_1284
+	return 0;
+#else
+	unsigned char *buf = buffer;
+	int i;
+	unsigned char byte = 0;
+
+	len *= 2; /* in nibbles */
+	for (i=0; i < len; i++) {
+		unsigned char nibble;
+
+		/* Does the error line indicate end of data? */
+		if (((i & 1) == 0) &&
+		    (parport_read_status(port) & PARPORT_STATUS_ERROR)) {
+			goto end_of_data;
+		}
+
+		/* Event 7: Set nAutoFd low. */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_AUTOFD,
+				      PARPORT_CONTROL_AUTOFD);
+
+		/* Event 9: nAck goes low. */
+		port->ieee1284.phase = IEEE1284_PH_REV_DATA;
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_ACK, 0)) {
+			/* Timeout -- no more data? */
+			pr_debug("%s: Nibble timeout at event 9 (%d bytes)\n",
+				 port->name, i / 2);
+			parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+			break;
+		}
+
+
+		/* Read a nibble.  The nibble arrives on status bits 3-7;
+		 * the top status bit is complemented to rebuild bit 3
+		 * (BUSY is electrically inverted on PC-style ports --
+		 * NOTE(review): confirm for other low-level drivers). */
+		nibble = parport_read_status (port) >> 3;
+		nibble &= ~8;
+		if ((nibble & 0x10) == 0)
+			nibble |= 8;
+		nibble &= 0xf;
+
+		/* Event 10: Set nAutoFd high. */
+		parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+
+		/* Event 11: nAck goes high. */
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_ACK,
+					     PARPORT_STATUS_ACK)) {
+			/* Timeout -- no more data? */
+			pr_debug("%s: Nibble timeout at event 11\n",
+				 port->name);
+			break;
+		}
+
+		if (i & 1) {
+			/* Second nibble */
+			byte |= nibble << 4;
+			*buf++ = byte;
+		} else
+			byte = nibble;
+	}
+
+	if (i == len) {
+		/* Read the last nibble without checking data avail. */
+		if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+		end_of_data:
+			pr_debug("%s: No more nibble data (%d bytes)\n",
+				 port->name, i / 2);
+
+			/* Go to reverse idle phase. */
+			parport_frob_control (port,
+					      PARPORT_CONTROL_AUTOFD,
+					      PARPORT_CONTROL_AUTOFD);
+			port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+		}
+		else
+			port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+	}
+
+	return i/2;
+#endif /* IEEE1284 support */
+}
+
+/* Byte mode. */
+size_t parport_ieee1284_read_byte (struct parport *port,
+				   void *buffer, size_t len,
+				   int flags)
+{
+#ifndef CONFIG_PARPORT_1284
+	return 0;
+#else
+	unsigned char *buf = buffer;
+	ssize_t count = 0;
+
+	for (count = 0; count < len; count++) {
+		unsigned char byte;
+
+		/* Data available?  nFault high means no more data. */
+		if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+			goto end_of_data;
+		}
+
+		/* Event 14: Place data bus in high impedance state. */
+		parport_data_reverse (port);
+
+		/* Event 7: Set nAutoFd low. */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_AUTOFD,
+				      PARPORT_CONTROL_AUTOFD);
+
+		/* Event 9: nAck goes low. */
+		port->physport->ieee1284.phase = IEEE1284_PH_REV_DATA;
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_ACK,
+					     0)) {
+			/* Timeout -- no more data? */
+			parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
+						 0);
+			pr_debug("%s: Byte timeout at event 9\n", port->name);
+			break;
+		}
+
+		/* A whole byte arrives on the (reversed) data lines. */
+		byte = parport_read_data (port);
+		*buf++ = byte;
+
+		/* Event 10: Set nAutoFd high */
+		parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+
+		/* Event 11: nAck goes high. */
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_ACK,
+					     PARPORT_STATUS_ACK)) {
+			/* Timeout -- no more data? */
+			pr_debug("%s: Byte timeout at event 11\n", port->name);
+			break;
+		}
+
+		/* Event 16: Set nStrobe low. */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_STROBE,
+				      PARPORT_CONTROL_STROBE);
+		udelay (5);
+
+		/* Event 17: Set nStrobe high. */
+		parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
+	}
+
+	if (count == len) {
+		/* Read the last byte without checking data avail. */
+		if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+		end_of_data:
+			pr_debug("%s: No more byte data (%zd bytes)\n",
+				 port->name, count);
+
+			/* Go to reverse idle phase. */
+			parport_frob_control (port,
+					      PARPORT_CONTROL_AUTOFD,
+					      PARPORT_CONTROL_AUTOFD);
+			port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+		}
+		else
+			port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+	}
+
+	return count;
+#endif /* IEEE1284 support */
+}
+
+/*** *
+ * ECP Functions. *
+ * ***/
+
+#ifdef CONFIG_PARPORT_1284
+
+static inline
+int ecp_forward_to_reverse (struct parport *port)
+{
+	int retval;
+
+	/* Event 38: Set nAutoFd low */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_AUTOFD,
+			      PARPORT_CONTROL_AUTOFD);
+	parport_data_reverse (port);
+	udelay (5);
+
+	/* Event 39: Set nInit low to initiate bus reversal */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_INIT,
+			      0);
+
+	/* Event 40: PError goes low */
+	retval = parport_wait_peripheral (port,
+					  PARPORT_STATUS_PAPEROUT, 0);
+
+	if (!retval) {
+		pr_debug("%s: ECP direction: reverse\n", port->name);
+		port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+	} else {
+		/* Timed out: direction is now indeterminate. */
+		pr_debug("%s: ECP direction: failed to reverse\n", port->name);
+		port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
+	}
+
+	return retval;
+}
+
+static inline
+int ecp_reverse_to_forward (struct parport *port)
+{
+	int retval;
+
+	/* Event 47: Set nInit high */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_INIT
+			      | PARPORT_CONTROL_AUTOFD,
+			      PARPORT_CONTROL_INIT
+			      | PARPORT_CONTROL_AUTOFD);
+
+	/* Event 49: PError goes high */
+	retval = parport_wait_peripheral (port,
+					  PARPORT_STATUS_PAPEROUT,
+					  PARPORT_STATUS_PAPEROUT);
+
+	if (!retval) {
+		/* Only re-drive the data lines once the peripheral has
+		 * acknowledged the turn-around. */
+		parport_data_forward (port);
+		pr_debug("%s: ECP direction: forward\n", port->name);
+		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+	} else {
+		pr_debug("%s: ECP direction: failed to switch forward\n",
+			 port->name);
+		port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
+	}
+
+
+	return retval;
+}
+
+#endif /* IEEE1284 support */
+
+/* ECP mode, forward channel, data. */
+size_t parport_ieee1284_ecp_write_data (struct parport *port,
+					const void *buffer, size_t len,
+					int flags)
+{
+#ifndef CONFIG_PARPORT_1284
+	return 0;
+#else
+	const unsigned char *buf = buffer;
+	size_t written;
+	int retry;
+
+	port = port->physport;
+
+	/* The bus must be in the forward direction before we write. */
+	if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE)
+		if (ecp_reverse_to_forward (port))
+			return 0;
+
+	port->ieee1284.phase = IEEE1284_PH_FWD_DATA;
+
+	/* HostAck high (data, not command) */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_AUTOFD
+			      | PARPORT_CONTROL_STROBE
+			      | PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_INIT);
+	for (written = 0; written < len; written++, buf++) {
+		unsigned long expire = jiffies + port->cad->timeout;
+		unsigned char byte;
+
+		byte = *buf;
+	try_again:
+		parport_write_data (port, byte);
+		parport_frob_control (port, PARPORT_CONTROL_STROBE,
+				      PARPORT_CONTROL_STROBE);
+		udelay (5);
+		/* Wait (bounded) for the peripheral to drop Busy. */
+		for (retry = 0; retry < 100; retry++) {
+			if (!parport_wait_peripheral (port,
+						      PARPORT_STATUS_BUSY, 0))
+				goto success;
+
+			if (signal_pending (current)) {
+				parport_frob_control (port,
+						      PARPORT_CONTROL_STROBE,
+						      0);
+				break;
+			}
+		}
+
+		/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
+		pr_debug("%s: ECP transfer stalled!\n", port->name);
+
+		parport_frob_control (port, PARPORT_CONTROL_INIT,
+				      PARPORT_CONTROL_INIT);
+		udelay (50);
+		if (parport_read_status (port) & PARPORT_STATUS_PAPEROUT) {
+			/* It's buggered. */
+			parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
+			break;
+		}
+
+		parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
+		udelay (50);
+		if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
+			break;
+
+		pr_debug("%s: Host transfer recovered\n", port->name);
+
+		if (time_after_eq (jiffies, expire)) break;
+		goto try_again;
+	success:
+		parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
+		udelay (5);
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_BUSY,
+					     PARPORT_STATUS_BUSY))
+			/* Peripheral hasn't accepted the data. */
+			break;
+	}
+
+	port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+
+	return written;
+#endif /* IEEE1284 support */
+}
+
+/* ECP mode, reverse channel, data. */
+size_t parport_ieee1284_ecp_read_data (struct parport *port,
+				       void *buffer, size_t len, int flags)
+{
+#ifndef CONFIG_PARPORT_1284
+	return 0;
+#else
+	struct pardevice *dev = port->cad;
+	unsigned char *buf = buffer;
+	int rle_count = 0; /* shut gcc up */
+	unsigned char ctl;
+	int rle = 0;	/* set while the next byte completes an RLE pair */
+	ssize_t count = 0;
+
+	port = port->physport;
+
+	/* The bus must be in the reverse direction before we read. */
+	if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE)
+		if (ecp_forward_to_reverse (port))
+			return 0;
+
+	port->ieee1284.phase = IEEE1284_PH_REV_DATA;
+
+	/* Set HostAck low to start accepting data. */
+	ctl = parport_read_control (port);
+	ctl &= ~(PARPORT_CONTROL_STROBE | PARPORT_CONTROL_INIT |
+		 PARPORT_CONTROL_AUTOFD);
+	parport_write_control (port,
+			       ctl | PARPORT_CONTROL_AUTOFD);
+	while (count < len) {
+		unsigned long expire = jiffies + dev->timeout;
+		unsigned char byte;
+		int command;
+
+		/* Event 43: Peripheral sets nAck low. It can take as
+                   long as it wants. */
+		while (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
+			/* The peripheral hasn't given us data in
+			   35ms.  If we have data to give back to the
+			   caller, do it now. */
+			if (count)
+				goto out;
+
+			/* If we've used up all the time we were allowed,
+			   give up altogether. */
+			if (!time_before (jiffies, expire))
+				goto out;
+
+			/* Yield the port for a while. */
+			if (dev->port->irq != PARPORT_IRQ_NONE) {
+				parport_release (dev);
+				schedule_timeout_interruptible(msecs_to_jiffies(40));
+				parport_claim_or_block (dev);
+			}
+			else
+				/* We must have the device claimed here. */
+				parport_wait_event (port, msecs_to_jiffies(40));
+
+			/* Is there a signal pending? */
+			if (signal_pending (current))
+				goto out;
+		}
+
+		/* Is this a command?  Busy distinguishes command bytes
+		   from data bytes in the reverse channel. */
+		if (rle)
+			/* The last byte was a run-length count, so
+                           this can't be as well. */
+			command = 0;
+		else
+			command = (parport_read_status (port) &
+				   PARPORT_STATUS_BUSY) ? 1 : 0;
+
+		/* Read the data. */
+		byte = parport_read_data (port);
+
+		/* If this is a channel command, rather than an RLE
+		   command or a normal data byte, don't accept it. */
+		if (command) {
+			if (byte & 0x80) {
+				pr_debug("%s: stopping short at channel command (%02x)\n",
+					 port->name, byte);
+				goto out;
+			}
+			else if (port->ieee1284.mode != IEEE1284_MODE_ECPRLE)
+				pr_debug("%s: device illegally using RLE; accepting anyway\n",
+					 port->name);
+
+			/* RLE count byte: the next data byte repeats
+			   byte+1 times. */
+			rle_count = byte + 1;
+
+			/* Are we allowed to read that many bytes? */
+			if (rle_count > (len - count)) {
+				pr_debug("%s: leaving %d RLE bytes for next time\n",
+					 port->name, rle_count);
+				break;
+			}
+
+			rle = 1;
+		}
+
+		/* Event 44: Set HostAck high, acknowledging handshake. */
+		parport_write_control (port, ctl);
+
+		/* Event 45: The peripheral has 35ms to set nAck high. */
+		if (parport_wait_peripheral (port, PARPORT_STATUS_ACK,
+					     PARPORT_STATUS_ACK)) {
+			/* It's gone wrong.  Return what data we have
+                           to the caller. */
+			pr_debug("ECP read timed out at 45\n");
+
+			if (command)
+				pr_warn("%s: command ignored (%02x)\n",
+					port->name, byte);
+
+			break;
+		}
+
+		/* Event 46: Set HostAck low and accept the data. */
+		parport_write_control (port,
+				       ctl | PARPORT_CONTROL_AUTOFD);
+
+		/* If we just read a run-length count, fetch the data. */
+		if (command)
+			continue;
+
+		/* If this is the byte after a run-length count, decompress. */
+		if (rle) {
+			rle = 0;
+			memset (buf, byte, rle_count);
+			buf += rle_count;
+			count += rle_count;
+			pr_debug("%s: decompressed to %d bytes\n",
+				 port->name, rle_count);
+		} else {
+			/* Normal data byte. */
+			*buf = byte;
+			buf++, count++;
+		}
+	}
+
+ out:
+	port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+	return count;
+#endif /* IEEE1284 support */
+}
+
+/* ECP mode, forward channel, commands. */
+size_t parport_ieee1284_ecp_write_addr (struct parport *port,
+					const void *buffer, size_t len,
+					int flags)
+{
+#ifndef CONFIG_PARPORT_1284
+	return 0;
+#else
+	const unsigned char *buf = buffer;
+	size_t written;
+	int retry;
+
+	port = port->physport;
+
+	/* The bus must be in the forward direction before we write.
+	 * NOTE: identical to parport_ieee1284_ecp_write_data() above
+	 * except that HostAck (nAutoFd) is held low, marking every
+	 * byte as a command/address rather than data. */
+	if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE)
+		if (ecp_reverse_to_forward (port))
+			return 0;
+
+	port->ieee1284.phase = IEEE1284_PH_FWD_DATA;
+
+	/* HostAck low (command, not data) */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_AUTOFD
+			      | PARPORT_CONTROL_STROBE
+			      | PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_AUTOFD
+			      | PARPORT_CONTROL_INIT);
+	for (written = 0; written < len; written++, buf++) {
+		unsigned long expire = jiffies + port->cad->timeout;
+		unsigned char byte;
+
+		byte = *buf;
+	try_again:
+		parport_write_data (port, byte);
+		parport_frob_control (port, PARPORT_CONTROL_STROBE,
+				      PARPORT_CONTROL_STROBE);
+		udelay (5);
+		for (retry = 0; retry < 100; retry++) {
+			if (!parport_wait_peripheral (port,
+						      PARPORT_STATUS_BUSY, 0))
+				goto success;
+
+			if (signal_pending (current)) {
+				parport_frob_control (port,
+						      PARPORT_CONTROL_STROBE,
+						      0);
+				break;
+			}
+		}
+
+		/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
+		pr_debug("%s: ECP transfer stalled!\n", port->name);
+
+		parport_frob_control (port, PARPORT_CONTROL_INIT,
+				      PARPORT_CONTROL_INIT);
+		udelay (50);
+		if (parport_read_status (port) & PARPORT_STATUS_PAPEROUT) {
+			/* It's buggered. */
+			parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
+			break;
+		}
+
+		parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
+		udelay (50);
+		if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
+			break;
+
+		pr_debug("%s: Host transfer recovered\n", port->name);
+
+		if (time_after_eq (jiffies, expire)) break;
+		goto try_again;
+	success:
+		parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
+		udelay (5);
+		if (parport_wait_peripheral (port,
+					     PARPORT_STATUS_BUSY,
+					     PARPORT_STATUS_BUSY))
+			/* Peripheral hasn't accepted the data. */
+			break;
+	}
+
+	port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+
+	return written;
+#endif /* IEEE1284 support */
+}
+
+/*** *
+ * EPP functions. *
+ * ***/
+
+/* EPP mode, forward channel, data. */
+size_t parport_ieee1284_epp_write_data (struct parport *port,
+					const void *buffer, size_t len,
+					int flags)
+{
+	/* Keep the const qualifier instead of casting it away; we only
+	 * ever read from the buffer. */
+	const unsigned char *bp = buffer;
+	size_t ret = 0;
+
+	/* set EPP idle state (just to make sure) with strobe low */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE |
+			      PARPORT_CONTROL_AUTOFD |
+			      PARPORT_CONTROL_SELECT |
+			      PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_STROBE |
+			      PARPORT_CONTROL_INIT);
+	port->ops->data_forward (port);
+	for (; len > 0; len--, bp++) {
+		/* Event 62: Write data and set autofd low */
+		parport_write_data (port, *bp);
+		parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
+				      PARPORT_CONTROL_AUTOFD);
+
+		/* Event 58: wait for busy (nWait) to go high */
+		if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 10))
+			break;
+
+		/* Event 63: set nAutoFd (nDStrb) high */
+		parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+
+		/* Event 60: wait for busy (nWait) to go low */
+		if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
+					     PARPORT_STATUS_BUSY, 5))
+			break;
+
+		ret++;
+	}
+
+	/* Event 61: set strobe (nWrite) high */
+	parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
+
+	/* Returns the number of bytes the peripheral handshook for. */
+	return ret;
+}
+
+/* EPP mode, reverse channel, data. */
+size_t parport_ieee1284_epp_read_data (struct parport *port,
+				       void *buffer, size_t len,
+				       int flags)
+{
+	/* void * converts implicitly in C -- no cast needed. */
+	unsigned char *bp = buffer;
+	/* Use size_t for the count, matching the return type and the
+	 * other transfer functions in this file. */
+	size_t ret = 0;
+
+	/* set EPP idle state (just to make sure) with strobe high */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE |
+			      PARPORT_CONTROL_AUTOFD |
+			      PARPORT_CONTROL_SELECT |
+			      PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_INIT);
+	port->ops->data_reverse (port);
+	for (; len > 0; len--, bp++) {
+		/* Event 67: set nAutoFd (nDStrb) low */
+		parport_frob_control (port,
+				      PARPORT_CONTROL_AUTOFD,
+				      PARPORT_CONTROL_AUTOFD);
+		/* Event 58: wait for Busy to go high */
+		if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) {
+			break;
+		}
+
+		*bp = parport_read_data (port);
+
+		/* Event 63: set nAutoFd (nDStrb) high */
+		parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
+
+		/* Event 60: wait for Busy to go low */
+		if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
+					     PARPORT_STATUS_BUSY, 5)) {
+			break;
+		}
+
+		ret++;
+	}
+	port->ops->data_forward (port);
+
+	/* Returns the number of bytes actually read. */
+	return ret;
+}
+
+/* EPP mode, forward channel, addresses. */
+size_t parport_ieee1284_epp_write_addr (struct parport *port,
+					const void *buffer, size_t len,
+					int flags)
+{
+	/* Keep the const qualifier instead of casting it away; we only
+	 * ever read from the buffer. */
+	const unsigned char *bp = buffer;
+	size_t ret = 0;
+
+	/* set EPP idle state (just to make sure) with strobe low */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE |
+			      PARPORT_CONTROL_AUTOFD |
+			      PARPORT_CONTROL_SELECT |
+			      PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_STROBE |
+			      PARPORT_CONTROL_INIT);
+	port->ops->data_forward (port);
+	for (; len > 0; len--, bp++) {
+		/* Event 56: Write data and set nAStrb low. */
+		parport_write_data (port, *bp);
+		parport_frob_control (port, PARPORT_CONTROL_SELECT,
+				      PARPORT_CONTROL_SELECT);
+
+		/* Event 58: wait for busy (nWait) to go high */
+		if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 10))
+			break;
+
+		/* Event 59: set nAStrb high */
+		parport_frob_control (port, PARPORT_CONTROL_SELECT, 0);
+
+		/* Event 60: wait for busy (nWait) to go low */
+		if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
+					     PARPORT_STATUS_BUSY, 5))
+			break;
+
+		ret++;
+	}
+
+	/* Event 61: set strobe (nWrite) high */
+	parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
+
+	/* Returns the number of address bytes the peripheral accepted. */
+	return ret;
+}
+
+/* EPP mode, reverse channel, addresses. */
+size_t parport_ieee1284_epp_read_addr (struct parport *port,
+				       void *buffer, size_t len,
+				       int flags)
+{
+	/* void * converts implicitly in C -- no cast needed. */
+	unsigned char *bp = buffer;
+	/* Use size_t for the count, matching the return type and the
+	 * other transfer functions in this file. */
+	size_t ret = 0;
+
+	/* Set EPP idle state (just to make sure) with strobe high */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_STROBE |
+			      PARPORT_CONTROL_AUTOFD |
+			      PARPORT_CONTROL_SELECT |
+			      PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_INIT);
+	port->ops->data_reverse (port);
+	for (; len > 0; len--, bp++) {
+		/* Event 64: set nSelectIn (nAStrb) low */
+		parport_frob_control (port, PARPORT_CONTROL_SELECT,
+				      PARPORT_CONTROL_SELECT);
+
+		/* Event 58: wait for Busy to go high */
+		if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) {
+			break;
+		}
+
+		*bp = parport_read_data (port);
+
+		/* Event 59: set nSelectIn (nAStrb) high */
+		parport_frob_control (port, PARPORT_CONTROL_SELECT,
+				      0);
+
+		/* Event 60: wait for Busy to go low */
+		if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY,
+					     PARPORT_STATUS_BUSY, 5))
+			break;
+
+		ret++;
+	}
+	port->ops->data_forward (port);
+
+	/* Returns the number of address bytes actually read. */
+	return ret;
+}
+
+EXPORT_SYMBOL(parport_ieee1284_ecp_write_data);
+EXPORT_SYMBOL(parport_ieee1284_ecp_read_data);
+EXPORT_SYMBOL(parport_ieee1284_ecp_write_addr);
+EXPORT_SYMBOL(parport_ieee1284_write_compat);
+EXPORT_SYMBOL(parport_ieee1284_read_nibble);
+EXPORT_SYMBOL(parport_ieee1284_read_byte);
+EXPORT_SYMBOL(parport_ieee1284_epp_write_data);
+EXPORT_SYMBOL(parport_ieee1284_epp_read_data);
+EXPORT_SYMBOL(parport_ieee1284_epp_write_addr);
+EXPORT_SYMBOL(parport_ieee1284_epp_read_addr);
diff --git a/drivers/parport/multiface.h b/drivers/parport/multiface.h
new file mode 100644
index 000000000..6513a44b9
--- /dev/null
+++ b/drivers/parport/multiface.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MULTIFACE_H_
+#define _MULTIFACE_H_
+
+/*
+ * Defines for SerialMaster, Multiface Card II and Multiface Card III
+ * The addresses given below are offsets to the board base address
+ *
+ * 6.11.95 Joerg Dorchain (dorchain@mpi-sb.mpg.de)
+ *
+ */
+
+#define PIA_REG_PADWIDTH 255
+
+#define DUARTBASE 0x0000
+#define PITBASE 0x0100
+#define ROMBASE 0x0200
+#define PIABASE 0x4000
+
+#endif
+
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
new file mode 100644
index 000000000..1e88bcfe0
--- /dev/null
+++ b/drivers/parport/parport_amiga.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Low-level parallel port routines for the Amiga built-in port
+ *
+ * Author: Joerg Dorchain <joerg@dorchain.net>
+ *
+ * This is a complete rewrite of the code, but based heavily upon the old
+ * lp_intern code.
+ *
+ * The built-in Amiga parallel port provides one port at a fixed address
+ * with 8 bidirectional data lines (D0 - D7) and 3 bidirectional status
+ * lines (BUSY, POUT, SEL), 1 output control line /STROBE (raised automatically
+ * in hardware when the data register is accessed), and 1 input control line
+ * /ACK, able to cause an interrupt, but both not directly settable by
+ * software.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/parport.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+#include <asm/amigahw.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/amigaints.h>
+
+#undef DEBUG
+
+/* Write a data byte to CIA-A port B (the parallel data lines). */
+static void amiga_write_data(struct parport *p, unsigned char data)
+{
+ pr_debug("write_data %c\n", data);
+ /* Triggers also /STROBE. This behavior cannot be changed */
+ ciaa.prb = data;
+ mb();
+}
+
+/* Read the data lines back from CIA-A port B. */
+static unsigned char amiga_read_data(struct parport *p)
+{
+ /* Triggers also /STROBE. This behavior cannot be changed */
+ return ciaa.prb;
+}
+
+/* Map (non-existent) Amiga control state to a plausible PC control byte. */
+static unsigned char control_amiga_to_pc(unsigned char control)
+{
+ return PARPORT_CONTROL_SELECT |
+ PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_STROBE;
+ /* fake value: interrupt enable, select in, no reset,
+ no autolf, no strobe - seems to be closest to the wiring diagram */
+}
+
+/* The Amiga port has no software-settable control lines; this is a no-op. */
+static void amiga_write_control(struct parport *p, unsigned char control)
+{
+ pr_debug("write_control %02x\n", control);
+ /* No implementation possible */
+}
+
+/* Always reports the fixed fake control value (see control_amiga_to_pc). */
+static unsigned char amiga_read_control( struct parport *p)
+{
+ pr_debug("read_control\n");
+ return control_amiga_to_pc(0);
+}
+
+/* Standard read-modify-write frob; effectively a no-op on this hardware. */
+static unsigned char amiga_frob_control( struct parport *p, unsigned char mask, unsigned char val)
+{
+ unsigned char old;
+
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
+ old = amiga_read_control(p);
+ amiga_write_control(p, (old & ~mask) ^ val);
+ return old;
+}
+
+/* Translate the three CIA-B status bits into PC-style status flags.
+ * BUSY is active-low on the PC side, hence the inversion. */
+static unsigned char status_amiga_to_pc(unsigned char status)
+{
+ unsigned char ret = PARPORT_STATUS_BUSY | PARPORT_STATUS_ACK | PARPORT_STATUS_ERROR;
+
+ if (status & 1) /* Busy */
+ ret &= ~PARPORT_STATUS_BUSY;
+ if (status & 2) /* PaperOut */
+ ret |= PARPORT_STATUS_PAPEROUT;
+ if (status & 4) /* Selected */
+ ret |= PARPORT_STATUS_SELECT;
+ /* the rest is not connected or handled autonomously in hardware */
+
+ return ret;
+}
+
+/* Read BUSY/POUT/SEL from the low three bits of CIA-B port A. */
+static unsigned char amiga_read_status(struct parport *p)
+{
+ unsigned char status;
+
+ status = status_amiga_to_pc(ciab.pra & 7);
+ pr_debug("read_status %02x\n", status);
+ return status;
+}
+
+/* /ACK interrupts arrive on the CIA-A FLG line. */
+static void amiga_enable_irq(struct parport *p)
+{
+ enable_irq(IRQ_AMIGA_CIAA_FLG);
+}
+
+static void amiga_disable_irq(struct parport *p)
+{
+ disable_irq(IRQ_AMIGA_CIAA_FLG);
+}
+
+/* Make the eight data lines outputs. */
+static void amiga_data_forward(struct parport *p)
+{
+ pr_debug("forward\n");
+ ciaa.ddrb = 0xff; /* all pins output */
+ mb();
+}
+
+/* Make the eight data lines inputs. */
+static void amiga_data_reverse(struct parport *p)
+{
+ pr_debug("reverse\n");
+ ciaa.ddrb = 0; /* all pins input */
+ mb();
+}
+
+/* Initialise a saved-state record to the power-on defaults. */
+static void amiga_init_state(struct pardevice *dev, struct parport_state *s)
+{
+ s->u.amiga.data = 0;
+ s->u.amiga.datadir = 255;
+ s->u.amiga.status = 0;
+ s->u.amiga.statusdir = 0;
+}
+
+/* Snapshot the CIA data/direction registers (status uses only bits 0-2). */
+static void amiga_save_state(struct parport *p, struct parport_state *s)
+{
+ mb();
+ s->u.amiga.data = ciaa.prb;
+ s->u.amiga.datadir = ciaa.ddrb;
+ s->u.amiga.status = ciab.pra & 7;
+ s->u.amiga.statusdir = ciab.ddra & 7;
+ mb();
+}
+
+/* Write a previously saved state back to the hardware. */
+static void amiga_restore_state(struct parport *p, struct parport_state *s)
+{
+ mb();
+ ciaa.prb = s->u.amiga.data;
+ ciaa.ddrb = s->u.amiga.datadir;
+ /* NOTE(review): "|=" looks suspicious on the next two lines -- ORing
+ * the old register value back in means bits can never be cleared, so
+ * "(reg & 0xf8) | saved" never takes full effect; plain "=" was
+ * probably intended.  Left untouched pending testing on hardware. */
+ ciab.pra |= (ciab.pra & 0xf8) | s->u.amiga.status;
+ ciab.ddra |= (ciab.ddra & 0xf8) | s->u.amiga.statusdir;
+ mb();
+}
+
+static struct parport_operations pp_amiga_ops = {
+ .write_data = amiga_write_data,
+ .read_data = amiga_read_data,
+
+ .write_control = amiga_write_control,
+ .read_control = amiga_read_control,
+ .frob_control = amiga_frob_control,
+
+ .read_status = amiga_read_status,
+
+ .enable_irq = amiga_enable_irq,
+ .disable_irq = amiga_disable_irq,
+
+ .data_forward = amiga_data_forward,
+ .data_reverse = amiga_data_reverse,
+
+ .init_state = amiga_init_state,
+ .save_state = amiga_save_state,
+ .restore_state = amiga_restore_state,
+
+ .epp_write_data = parport_ieee1284_epp_write_data,
+ .epp_read_data = parport_ieee1284_epp_read_data,
+ .epp_write_addr = parport_ieee1284_epp_write_addr,
+ .epp_read_addr = parport_ieee1284_epp_read_addr,
+
+ .ecp_write_data = parport_ieee1284_ecp_write_data,
+ .ecp_read_data = parport_ieee1284_ecp_read_data,
+ .ecp_write_addr = parport_ieee1284_ecp_write_addr,
+
+ .compat_write_data = parport_ieee1284_write_compat,
+ .nibble_read_data = parport_ieee1284_read_nibble,
+ .byte_read_data = parport_ieee1284_read_byte,
+
+ .owner = THIS_MODULE,
+};
+
+/* ----------- Initialisation code --------------------------------- */
+
+/* Probe the (single, fixed-address) built-in port: set data lines as
+ * outputs, status lines as inputs, then register and announce the port. */
+static int __init amiga_parallel_probe(struct platform_device *pdev)
+{
+ struct parport *p;
+ int err;
+
+ ciaa.ddrb = 0xff;
+ ciab.ddra &= 0xf8;
+ mb();
+
+ p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
+ PARPORT_DMA_NONE, &pp_amiga_ops);
+ if (!p)
+ return -EBUSY;
+
+ /* /ACK interrupt; must be in place before the port is announced. */
+ err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
+ p);
+ if (err)
+ goto out_irq;
+
+ pr_info("%s: Amiga built-in port using irq\n", p->name);
+ /* XXX: set operating mode */
+ parport_announce_port(p);
+
+ platform_set_drvdata(pdev, p);
+
+ return 0;
+
+out_irq:
+ parport_put_port(p);
+ return err;
+}
+
+/* Tear down in reverse order of probe; drops the last port reference. */
+static int __exit amiga_parallel_remove(struct platform_device *pdev)
+{
+ struct parport *port = platform_get_drvdata(pdev);
+
+ parport_remove_port(port);
+ if (port->irq != PARPORT_IRQ_NONE)
+ free_irq(IRQ_AMIGA_CIAA_FLG, port);
+ parport_put_port(port);
+ return 0;
+}
+
+/* No .probe here: module_platform_driver_probe() binds the __init probe
+ * exactly once, so the probe code may live in the init section. */
+static struct platform_driver amiga_parallel_driver = {
+ .remove = __exit_p(amiga_parallel_remove),
+ .driver = {
+ .name = "amiga-parallel",
+ },
+};
+
+module_platform_driver_probe(amiga_parallel_driver, amiga_parallel_probe);
+
+MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
+MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
+/* NOTE(review): MODULE_SUPPORTED_DEVICE has always been a no-op macro and
+ * was later removed from the kernel entirely; harmless but dead weight. */
+MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-parallel");
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
new file mode 100644
index 000000000..2ff0fe053
--- /dev/null
+++ b/drivers/parport/parport_atari.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Low-level parallel port routines for the Atari builtin port
+ *
+ * Author: Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>
+ *
+ * Based on parport_amiga.c.
+ *
+ * The built-in Atari parallel port provides one port at a fixed address
+ * with 8 output data lines (D0 - D7), 1 output control line (STROBE)
+ * and 1 input status line (BUSY) able to cause an interrupt.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/parport.h>
+#include <linux/interrupt.h>
+#include <asm/setup.h>
+#include <asm/atarihw.h>
+#include <asm/irq.h>
+#include <asm/atariints.h>
+
+static struct parport *this_port;
+
+/* Read the data lines: YM2149 register 15 is soundchip port B, which is
+ * wired to the printer data lines.  Register select and read must not be
+ * interleaved with other soundchip users, hence the irq disable. */
+static unsigned char
+parport_atari_read_data(struct parport *p)
+{
+ unsigned long flags;
+ unsigned char data;
+
+ local_irq_save(flags);
+ sound_ym.rd_data_reg_sel = 15;
+ data = sound_ym.rd_data_reg_sel;
+ local_irq_restore(flags);
+ return data;
+}
+
+/* Write the data lines via YM2149 register 15 (port B). */
+static void
+parport_atari_write_data(struct parport *p, unsigned char data)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sound_ym.rd_data_reg_sel = 15;
+ sound_ym.wd_data = data;
+ local_irq_restore(flags);
+}
+
+/* STROBE is bit 5 of YM2149 register 14 (port A), active low. */
+static unsigned char
+parport_atari_read_control(struct parport *p)
+{
+ unsigned long flags;
+ unsigned char control = 0;
+
+ local_irq_save(flags);
+ sound_ym.rd_data_reg_sel = 14;
+ if (!(sound_ym.rd_data_reg_sel & (1 << 5)))
+ control = PARPORT_CONTROL_STROBE;
+ local_irq_restore(flags);
+ return control;
+}
+
+/* Only STROBE is controllable; read-modify-write register 14 bit 5. */
+static void
+parport_atari_write_control(struct parport *p, unsigned char control)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sound_ym.rd_data_reg_sel = 14;
+ if (control & PARPORT_CONTROL_STROBE)
+ sound_ym.wd_data = sound_ym.rd_data_reg_sel & ~(1 << 5);
+ else
+ sound_ym.wd_data = sound_ym.rd_data_reg_sel | (1 << 5);
+ local_irq_restore(flags);
+}
+
+static unsigned char
+parport_atari_frob_control(struct parport *p, unsigned char mask,
+ unsigned char val)
+{
+ unsigned char old = parport_atari_read_control(p);
+ parport_atari_write_control(p, (old & ~mask) ^ val);
+ return old;
+}
+
+/* BUSY comes from MFP port I0 (active low on the PC side); SELECT and
+ * ERROR are not wired, so report them permanently inactive-high. */
+static unsigned char
+parport_atari_read_status(struct parport *p)
+{
+ return ((st_mfp.par_dt_reg & 1 ? 0 : PARPORT_STATUS_BUSY) |
+ PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR);
+}
+
+/* Nothing worth saving on this hardware: all three are no-ops. */
+static void
+parport_atari_init_state(struct pardevice *d, struct parport_state *s)
+{
+}
+
+static void
+parport_atari_save_state(struct parport *p, struct parport_state *s)
+{
+}
+
+static void
+parport_atari_restore_state(struct parport *p, struct parport_state *s)
+{
+}
+
+/* BUSY edges arrive on the MFP "busy" interrupt. */
+static void
+parport_atari_enable_irq(struct parport *p)
+{
+ enable_irq(IRQ_MFP_BUSY);
+}
+
+static void
+parport_atari_disable_irq(struct parport *p)
+{
+ disable_irq(IRQ_MFP_BUSY);
+}
+
+static void
+parport_atari_data_forward(struct parport *p)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Soundchip port B as output. */
+ sound_ym.rd_data_reg_sel = 7;
+ sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x40;
+ local_irq_restore(flags);
+}
+
+/* The port is output-only; reverse mode is not supported. */
+static void
+parport_atari_data_reverse(struct parport *p)
+{
+}
+
+static struct parport_operations parport_atari_ops = {
+ .write_data = parport_atari_write_data,
+ .read_data = parport_atari_read_data,
+
+ .write_control = parport_atari_write_control,
+ .read_control = parport_atari_read_control,
+ .frob_control = parport_atari_frob_control,
+
+ .read_status = parport_atari_read_status,
+
+ .enable_irq = parport_atari_enable_irq,
+ .disable_irq = parport_atari_disable_irq,
+
+ .data_forward = parport_atari_data_forward,
+ .data_reverse = parport_atari_data_reverse,
+
+ .init_state = parport_atari_init_state,
+ .save_state = parport_atari_save_state,
+ .restore_state = parport_atari_restore_state,
+
+ .epp_write_data = parport_ieee1284_epp_write_data,
+ .epp_read_data = parport_ieee1284_epp_read_data,
+ .epp_write_addr = parport_ieee1284_epp_write_addr,
+ .epp_read_addr = parport_ieee1284_epp_read_addr,
+
+ .ecp_write_data = parport_ieee1284_ecp_write_data,
+ .ecp_read_data = parport_ieee1284_ecp_read_data,
+ .ecp_write_addr = parport_ieee1284_ecp_write_addr,
+
+ .compat_write_data = parport_ieee1284_write_compat,
+ .nibble_read_data = parport_ieee1284_read_nibble,
+ .byte_read_data = parport_ieee1284_read_byte,
+
+ .owner = THIS_MODULE,
+};
+
+
+/* Detect an Atari machine, put the soundchip/MFP pins into a sane state
+ * and register the single built-in port. */
+static int __init parport_atari_init(void)
+{
+ struct parport *p;
+ unsigned long flags;
+
+ if (MACH_IS_ATARI) {
+ local_irq_save(flags);
+ /* Soundchip port A/B as output. */
+ sound_ym.rd_data_reg_sel = 7;
+ sound_ym.wd_data = (sound_ym.rd_data_reg_sel & 0x3f) | 0xc0;
+ /* STROBE high. */
+ sound_ym.rd_data_reg_sel = 14;
+ sound_ym.wd_data = sound_ym.rd_data_reg_sel | (1 << 5);
+ local_irq_restore(flags);
+ /* MFP port I0 as input. */
+ st_mfp.data_dir &= ~1;
+ /* MFP port I0 interrupt on high->low edge. */
+ st_mfp.active_edge &= ~1;
+ p = parport_register_port((unsigned long)&sound_ym.wd_data,
+ IRQ_MFP_BUSY, PARPORT_DMA_NONE,
+ &parport_atari_ops);
+ if (!p)
+ return -ENODEV;
+ if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
+ p)) {
+ parport_put_port (p);
+ return -ENODEV;
+ }
+
+ /* Remember the port for parport_atari_exit(). */
+ this_port = p;
+ pr_info("%s: Atari built-in port using irq\n", p->name);
+ parport_announce_port (p);
+
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/* Undo parport_atari_init() in reverse order. */
+static void __exit parport_atari_exit(void)
+{
+ parport_remove_port(this_port);
+ if (this_port->irq != PARPORT_IRQ_NONE)
+ free_irq(IRQ_MFP_BUSY, this_port);
+ parport_put_port(this_port);
+}
+
+MODULE_AUTHOR("Andreas Schwab");
+MODULE_DESCRIPTION("Parport Driver for Atari builtin Port");
+/* NOTE(review): MODULE_SUPPORTED_DEVICE is a no-op macro (later removed
+ * from the kernel); harmless here. */
+MODULE_SUPPORTED_DEVICE("Atari builtin Parallel Port");
+MODULE_LICENSE("GPL");
+
+module_init(parport_atari_init)
+module_exit(parport_atari_exit)
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
new file mode 100644
index 000000000..54b539809
--- /dev/null
+++ b/drivers/parport/parport_ax88796.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/drivers/parport/parport_ax88796.c
+ *
+ * (c) 2005,2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/parport.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define AX_SPR_BUSY (1<<7)
+#define AX_SPR_ACK (1<<6)
+#define AX_SPR_PE (1<<5)
+#define AX_SPR_SLCT (1<<4)
+#define AX_SPR_ERR (1<<3)
+
+#define AX_CPR_nDOE (1<<5)
+#define AX_CPR_SLCTIN (1<<3)
+#define AX_CPR_nINIT (1<<2)
+#define AX_CPR_ATFD (1<<1)
+#define AX_CPR_STRB (1<<0)
+
+struct ax_drvdata {
+ struct parport *parport;
+ struct parport_state suspend;
+
+ struct device *dev;
+ struct resource *io;
+
+ unsigned char irq_enabled;
+
+ void __iomem *base;
+ void __iomem *spp_data;
+ void __iomem *spp_spr;
+ void __iomem *spp_cpr;
+};
+
+/* Per-port driver data lives in parport->private_data. */
+static inline struct ax_drvdata *pp_to_drv(struct parport *p)
+{
+ return p->private_data;
+}
+
+/* Data register is the first of the three MMIO registers. */
+static unsigned char
+parport_ax88796_read_data(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ return readb(dd->spp_data);
+}
+
+static void
+parport_ax88796_write_data(struct parport *p, unsigned char data)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ writeb(data, dd->spp_data);
+}
+
+/* Translate the hardware CPR bits (mostly active-low) into the
+ * PARPORT_CONTROL_* convention. */
+static unsigned char
+parport_ax88796_read_control(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ unsigned int cpr = readb(dd->spp_cpr);
+ unsigned int ret = 0;
+
+ if (!(cpr & AX_CPR_STRB))
+ ret |= PARPORT_CONTROL_STROBE;
+
+ if (!(cpr & AX_CPR_ATFD))
+ ret |= PARPORT_CONTROL_AUTOFD;
+
+ if (cpr & AX_CPR_nINIT)
+ ret |= PARPORT_CONTROL_INIT;
+
+ if (!(cpr & AX_CPR_SLCTIN))
+ ret |= PARPORT_CONTROL_SELECT;
+
+ return ret;
+}
+
+/* Inverse of read_control; nDOE (data direction) is preserved, everything
+ * else is rebuilt from the requested control byte.  A read-back check
+ * reports any mismatch. */
+static void
+parport_ax88796_write_control(struct parport *p, unsigned char control)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ unsigned int cpr = readb(dd->spp_cpr);
+
+ cpr &= AX_CPR_nDOE;
+
+ if (!(control & PARPORT_CONTROL_STROBE))
+ cpr |= AX_CPR_STRB;
+
+ if (!(control & PARPORT_CONTROL_AUTOFD))
+ cpr |= AX_CPR_ATFD;
+
+ if (control & PARPORT_CONTROL_INIT)
+ cpr |= AX_CPR_nINIT;
+
+ if (!(control & PARPORT_CONTROL_SELECT))
+ cpr |= AX_CPR_SLCTIN;
+
+ dev_dbg(dd->dev, "write_control: ctrl=%02x, cpr=%02x\n", control, cpr);
+ writeb(cpr, dd->spp_cpr);
+
+ if (parport_ax88796_read_control(p) != control) {
+ dev_err(dd->dev, "write_control: read != set (%02x, %02x)\n",
+ parport_ax88796_read_control(p), control);
+ }
+}
+
+/* Map the SPR status bits straight onto PARPORT_STATUS_* flags. */
+static unsigned char
+parport_ax88796_read_status(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ unsigned int status = readb(dd->spp_spr);
+ unsigned int ret = 0;
+
+ if (status & AX_SPR_BUSY)
+ ret |= PARPORT_STATUS_BUSY;
+
+ if (status & AX_SPR_ACK)
+ ret |= PARPORT_STATUS_ACK;
+
+ if (status & AX_SPR_ERR)
+ ret |= PARPORT_STATUS_ERROR;
+
+ if (status & AX_SPR_SLCT)
+ ret |= PARPORT_STATUS_SELECT;
+
+ if (status & AX_SPR_PE)
+ ret |= PARPORT_STATUS_PAPEROUT;
+
+ return ret;
+}
+
+/* NOTE(review): this uses "(old & ~mask) | val" where the other parport
+ * drivers use "... ^ val".  Equivalent as long as callers pass val as a
+ * subset of mask (the documented contract), but worth confirming. */
+static unsigned char
+parport_ax88796_frob_control(struct parport *p, unsigned char mask,
+ unsigned char val)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ unsigned char old = parport_ax88796_read_control(p);
+
+ dev_dbg(dd->dev, "frob: mask=%02x, val=%02x, old=%02x\n",
+ mask, val, old);
+
+ parport_ax88796_write_control(p, (old & ~mask) | val);
+ return old;
+}
+
+/* enable_irq/disable_irq must be balanced, so irq_enabled tracks the
+ * current state; irqs are masked locally to keep check-and-toggle atomic. */
+static void
+parport_ax88796_enable_irq(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (!dd->irq_enabled) {
+ enable_irq(p->irq);
+ dd->irq_enabled = 1;
+ }
+ local_irq_restore(flags);
+}
+
+static void
+parport_ax88796_disable_irq(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (dd->irq_enabled) {
+ disable_irq(p->irq);
+ dd->irq_enabled = 0;
+ }
+ local_irq_restore(flags);
+}
+
+/* Data direction is the nDOE bit in the CPR: clear = output. */
+static void
+parport_ax88796_data_forward(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ void __iomem *cpr = dd->spp_cpr;
+
+ writeb((readb(cpr) & ~AX_CPR_nDOE), cpr);
+}
+
+static void
+parport_ax88796_data_reverse(struct parport *p)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+ void __iomem *cpr = dd->spp_cpr;
+
+ writeb(readb(cpr) | AX_CPR_nDOE, cpr);
+}
+
+/* State save/restore only needs the control register. */
+static void
+parport_ax88796_init_state(struct pardevice *d, struct parport_state *s)
+{
+ struct ax_drvdata *dd = pp_to_drv(d->port);
+
+ memset(s, 0, sizeof(struct parport_state));
+
+ dev_dbg(dd->dev, "init_state: %p: state=%p\n", d, s);
+ s->u.ax88796.cpr = readb(dd->spp_cpr);
+}
+
+static void
+parport_ax88796_save_state(struct parport *p, struct parport_state *s)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s);
+ s->u.ax88796.cpr = readb(dd->spp_cpr);
+}
+
+static void
+parport_ax88796_restore_state(struct parport *p, struct parport_state *s)
+{
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s);
+ writeb(s->u.ax88796.cpr, dd->spp_cpr);
+}
+
+static struct parport_operations parport_ax88796_ops = {
+ .write_data = parport_ax88796_write_data,
+ .read_data = parport_ax88796_read_data,
+
+ .write_control = parport_ax88796_write_control,
+ .read_control = parport_ax88796_read_control,
+ .frob_control = parport_ax88796_frob_control,
+
+ .read_status = parport_ax88796_read_status,
+
+ .enable_irq = parport_ax88796_enable_irq,
+ .disable_irq = parport_ax88796_disable_irq,
+
+ .data_forward = parport_ax88796_data_forward,
+ .data_reverse = parport_ax88796_data_reverse,
+
+ .init_state = parport_ax88796_init_state,
+ .save_state = parport_ax88796_save_state,
+ .restore_state = parport_ax88796_restore_state,
+
+ .epp_write_data = parport_ieee1284_epp_write_data,
+ .epp_read_data = parport_ieee1284_epp_read_data,
+ .epp_write_addr = parport_ieee1284_epp_write_addr,
+ .epp_read_addr = parport_ieee1284_epp_read_addr,
+
+ .ecp_write_data = parport_ieee1284_ecp_write_data,
+ .ecp_read_data = parport_ieee1284_ecp_read_data,
+ .ecp_write_addr = parport_ieee1284_ecp_write_addr,
+
+ .compat_write_data = parport_ieee1284_write_compat,
+ .nibble_read_data = parport_ieee1284_read_nibble,
+ .byte_read_data = parport_ieee1284_read_byte,
+
+ .owner = THIS_MODULE,
+};
+
+/* Probe: map one MEM resource split evenly into three registers
+ * (data, status, control), register the port and optionally hook the
+ * falling-edge interrupt.  Unwinds with the usual goto-cleanup chain. */
+static int parport_ax88796_probe(struct platform_device *pdev)
+{
+ struct device *_dev = &pdev->dev;
+ struct ax_drvdata *dd;
+ struct parport *pp;
+ struct resource *res;
+ unsigned long size;
+ int spacing;
+ int irq;
+ int ret;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(_dev, "no MEM specified\n");
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ /* The three registers sit at equal offsets within the window. */
+ size = resource_size(res);
+ spacing = size / 3;
+
+ dd->io = request_mem_region(res->start, size, pdev->name);
+ if (dd->io == NULL) {
+ dev_err(_dev, "cannot reserve memory\n");
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ dd->base = ioremap(res->start, size);
+ if (dd->base == NULL) {
+ dev_err(_dev, "cannot ioremap region\n");
+ ret = -ENXIO;
+ goto exit_res;
+ }
+
+ /* An IRQ is optional; fall back to polled operation without one. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ irq = PARPORT_IRQ_NONE;
+
+ pp = parport_register_port((unsigned long)dd->base, irq,
+ PARPORT_DMA_NONE,
+ &parport_ax88796_ops);
+
+ if (pp == NULL) {
+ dev_err(_dev, "failed to register parallel port\n");
+ ret = -ENOMEM;
+ goto exit_unmap;
+ }
+
+ pp->private_data = dd;
+ dd->parport = pp;
+ dd->dev = _dev;
+
+ dd->spp_data = dd->base;
+ dd->spp_spr = dd->base + (spacing * 1);
+ dd->spp_cpr = dd->base + (spacing * 2);
+
+ /* initialise the port controls */
+ writeb(AX_CPR_STRB, dd->spp_cpr);
+
+ if (irq >= 0) {
+ /* request irq */
+ ret = request_irq(irq, parport_irq_handler,
+ IRQF_TRIGGER_FALLING, pdev->name, pp);
+
+ if (ret < 0)
+ goto exit_port;
+
+ dd->irq_enabled = 1;
+ }
+
+ platform_set_drvdata(pdev, pp);
+
+ dev_info(_dev, "attached parallel port driver\n");
+ parport_announce_port(pp);
+
+ return 0;
+
+ exit_port:
+ parport_remove_port(pp);
+ exit_unmap:
+ iounmap(dd->base);
+ exit_res:
+ release_mem_region(dd->io->start, size);
+ exit_mem:
+ kfree(dd);
+ return ret;
+}
+
+/* Remove: release everything probe acquired, in reverse order.
+ * NOTE(review): free_irq() is called unconditionally, but probe only
+ * requests an IRQ when one was found (irq != PARPORT_IRQ_NONE); on an
+ * IRQ-less board this frees an IRQ that was never requested -- verify. */
+static int parport_ax88796_remove(struct platform_device *pdev)
+{
+ struct parport *p = platform_get_drvdata(pdev);
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ free_irq(p->irq, p);
+ parport_remove_port(p);
+ iounmap(dd->base);
+ release_mem_region(dd->io->start, resource_size(dd->io));
+ kfree(dd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+/* Suspend: stash the control register, then park the port with the data
+ * lines tristated (nDOE set) and STROBE inactive. */
+static int parport_ax88796_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ struct parport *p = platform_get_drvdata(dev);
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ parport_ax88796_save_state(p, &dd->suspend);
+ writeb(AX_CPR_nDOE | AX_CPR_STRB, dd->spp_cpr);
+ return 0;
+}
+
+/* Resume: restore the control register saved at suspend time. */
+static int parport_ax88796_resume(struct platform_device *dev)
+{
+ struct parport *p = platform_get_drvdata(dev);
+ struct ax_drvdata *dd = pp_to_drv(p);
+
+ parport_ax88796_restore_state(p, &dd->suspend);
+ return 0;
+}
+
+#else
+#define parport_ax88796_suspend NULL
+#define parport_ax88796_resume NULL
+#endif
+
+MODULE_ALIAS("platform:ax88796-pp");
+
+static struct platform_driver axdrv = {
+ .driver = {
+ .name = "ax88796-pp",
+ },
+ .probe = parport_ax88796_probe,
+ .remove = parport_ax88796_remove,
+ .suspend = parport_ax88796_suspend,
+ .resume = parport_ax88796_resume,
+};
+
+module_platform_driver(axdrv);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
new file mode 100644
index 000000000..8e7e3ac4b
--- /dev/null
+++ b/drivers/parport/parport_cs.c
@@ -0,0 +1,195 @@
+/*======================================================================
+
+ A driver for PCMCIA parallel port adapters
+
+ (specifically, for the Quatech SPP-100 EPP card: other cards will
+ probably require driver tweaks)
+
+ parport_cs.c 1.29 2002/10/11 06:57:41
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/interrupt.h>
+
+#include <linux/parport.h>
+#include <linux/parport_pc.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("PCMCIA parallel port card driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+INT_MODULE_PARM(epp_mode, 1);
+
+
+/*====================================================================*/
+
+#define FORCE_EPP_MODE 0x08
+
+typedef struct parport_info_t {
+ struct pcmcia_device *p_dev;
+ int ndev;
+ struct parport *port;
+} parport_info_t;
+
+static void parport_detach(struct pcmcia_device *p_dev);
+static int parport_config(struct pcmcia_device *link);
+static void parport_cs_release(struct pcmcia_device *);
+
+/* Attach: allocate per-socket state and hand off to parport_config().
+ * Frees nothing itself on failure -- parport_config() cleans up. */
+static int parport_probe(struct pcmcia_device *link)
+{
+ parport_info_t *info;
+
+ dev_dbg(&link->dev, "parport_attach()\n");
+
+ /* Create new parport device */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return -ENOMEM;
+ link->priv = info;
+ info->p_dev = link;
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ return parport_config(link);
+} /* parport_attach */
+
+/* Detach: release the port/socket and free the private state allocated
+ * in parport_probe(). */
+static void parport_detach(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "parport_detach\n");
+
+ parport_cs_release(link);
+
+ kfree(link->priv);
+} /* parport_detach */
+
+/* pcmcia_loop_config() callback: force both I/O windows to 8-bit width
+ * and try to claim them. */
+static int parport_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+
+ return pcmcia_request_io(p_dev);
+}
+
+/* Configure the socket: pick an I/O configuration, enable the device and
+ * probe a PC-style port on the claimed windows.  On failure releases the
+ * socket AND frees link->priv (so parport_detach() is not needed). */
+static int parport_config(struct pcmcia_device *link)
+{
+ parport_info_t *info = link->priv;
+ struct parport *p;
+ int ret;
+
+ dev_dbg(&link->dev, "parport_config\n");
+
+ if (epp_mode)
+ link->config_index |= FORCE_EPP_MODE;
+
+ ret = pcmcia_loop_config(link, parport_config_check, NULL);
+ if (ret)
+ goto failed;
+
+ if (!link->irq)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ /* resource[0] = SPP base, resource[1] = ECR ("hi") base. */
+ p = parport_pc_probe_port(link->resource[0]->start,
+ link->resource[1]->start,
+ link->irq, PARPORT_DMA_NONE,
+ &link->dev, IRQF_SHARED);
+ if (p == NULL) {
+ pr_notice("parport_cs: parport_pc_probe_port() at 0x%3x, irq %u failed\n",
+ (unsigned int)link->resource[0]->start, link->irq);
+ goto failed;
+ }
+
+ p->modes |= PARPORT_MODE_PCSPP;
+ if (epp_mode)
+ p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
+ info->ndev = 1;
+ info->port = p;
+
+ return 0;
+
+failed:
+ parport_cs_release(link);
+ kfree(link->priv);
+ return -ENODEV;
+} /* parport_config */
+
+/* Unregister the port (if one was probed) and power down the socket. */
+static void parport_cs_release(struct pcmcia_device *link)
+{
+ parport_info_t *info = link->priv;
+
+ dev_dbg(&link->dev, "parport_release\n");
+
+ if (info->ndev) {
+ struct parport *p = info->port;
+ parport_pc_unregister_port(p);
+ }
+ info->ndev = 0;
+
+ pcmcia_disable_device(link);
+} /* parport_cs_release */
+
+
+static const struct pcmcia_device_id parport_ids[] = {
+ PCMCIA_DEVICE_FUNC_ID(3),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, parport_ids);
+
+static struct pcmcia_driver parport_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "parport_cs",
+ .probe = parport_probe,
+ .remove = parport_detach,
+ .id_table = parport_ids,
+};
+module_pcmcia_driver(parport_cs_driver);
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
new file mode 100644
index 000000000..9228e8f90
--- /dev/null
+++ b/drivers/parport/parport_gsc.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Low-level parallel-support for PC-style hardware integrated in the
+ * LASI-Controller (on GSC-Bus) for HP-PARISC Workstations
+ *
+ * (C) 1999-2001 by Helge Deller <deller@gmx.de>
+ *
+ * based on parport_pc.c by
+ * Grant Guenther <grant@torque.net>
+ * Phil Blundell <philb@gnu.org>
+ * Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Jose Renau <renau@acm.org>
+ * David Campbell
+ * Andrea Arcangeli
+ */
+
+#undef DEBUG /* undef for production */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/sysctl.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/uaccess.h>
+#include <asm/superio.h>
+
+#include <linux/parport.h>
+#include <asm/pdc.h>
+#include <asm/parisc-device.h>
+#include <asm/hardware.h>
+#include "parport_gsc.h"
+
+
+MODULE_AUTHOR("Helge Deller <deller@gmx.de>");
+MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver");
+/* NOTE(review): MODULE_SUPPORTED_DEVICE was removed from the kernel in
+ * v5.12 — verify the target kernel version still provides this macro. */
+MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Clear TIMEOUT BIT in EPP MODE
+ *
+ * This is also used in SPP detection.
+ */
+static int clear_epp_timeout(struct parport *pb)
+{
+ unsigned char r;
+
+ /* Status bit 0 is the EPP timeout flag; nothing to do if clear. */
+ if (!(parport_gsc_read_status(pb) & 0x01))
+ return 1;
+
+ /* To clear timeout some chips require double read */
+ parport_gsc_read_status(pb);
+ r = parport_gsc_read_status(pb);
+ parport_writeb (r | 0x01, STATUS (pb)); /* Some reset by writing 1 */
+ parport_writeb (r & 0xfe, STATUS (pb)); /* Others by writing 0 */
+ r = parport_gsc_read_status(pb);
+
+ /* Non-zero iff the timeout flag is now clear. */
+ return !(r & 0x01);
+}
+
+/*
+ * Access functions.
+ *
+ * Most of these aren't static because they may be used by the
+ * parport_xxx_yyy macros. extern __inline__ versions of several
+ * of these are in parport_gsc.h.
+ */
+
+/* Build the initial control-register soft state for a device:
+ * 0xc = PARPORT_CONTROL_SELECT | PARPORT_CONTROL_INIT, plus bit 0x10
+ * (interrupt enable, see parport_gsc_enable_irq()) when the device
+ * registered an interrupt handler. */
+void parport_gsc_init_state(struct pardevice *dev, struct parport_state *s)
+{
+ s->u.pc.ctr = 0xc | (dev->irq_func ? 0x10 : 0x0);
+}
+
+/* Snapshot the hardware CONTROL register (not the priv->ctr soft copy). */
+void parport_gsc_save_state(struct parport *p, struct parport_state *s)
+{
+ s->u.pc.ctr = parport_readb (CONTROL (p));
+}
+
+/* Write a saved control value back to the hardware.
+ * NOTE(review): this bypasses __parport_gsc_frob_control() and does not
+ * refresh the priv->ctr soft copy — confirm callers do not depend on the
+ * cache staying in sync across save/restore. */
+void parport_gsc_restore_state(struct parport *p, struct parport_state *s)
+{
+ parport_writeb (s->u.pc.ctr, CONTROL (p));
+}
+
+/* Operations vector for GSC ports: SPP/PS2 register access is done in
+ * hardware via the inline helpers in parport_gsc.h; EPP/ECP and the
+ * IEEE 1284 transfer modes fall back to the generic software
+ * implementations in ieee1284_ops.c. */
+struct parport_operations parport_gsc_ops =
+{
+ .write_data = parport_gsc_write_data,
+ .read_data = parport_gsc_read_data,
+
+ .write_control = parport_gsc_write_control,
+ .read_control = parport_gsc_read_control,
+ .frob_control = parport_gsc_frob_control,
+
+ .read_status = parport_gsc_read_status,
+
+ .enable_irq = parport_gsc_enable_irq,
+ .disable_irq = parport_gsc_disable_irq,
+
+ .data_forward = parport_gsc_data_forward,
+ .data_reverse = parport_gsc_data_reverse,
+
+ .init_state = parport_gsc_init_state,
+ .save_state = parport_gsc_save_state,
+ .restore_state = parport_gsc_restore_state,
+
+ .epp_write_data = parport_ieee1284_epp_write_data,
+ .epp_read_data = parport_ieee1284_epp_read_data,
+ .epp_write_addr = parport_ieee1284_epp_write_addr,
+ .epp_read_addr = parport_ieee1284_epp_read_addr,
+
+ .ecp_write_data = parport_ieee1284_ecp_write_data,
+ .ecp_read_data = parport_ieee1284_ecp_read_data,
+ .ecp_write_addr = parport_ieee1284_ecp_write_addr,
+
+ .compat_write_data = parport_ieee1284_write_compat,
+ .nibble_read_data = parport_ieee1284_read_nibble,
+ .byte_read_data = parport_ieee1284_read_byte,
+
+ .owner = THIS_MODULE,
+};
+
+/* --- Mode detection ------------------------------------- */
+
+/*
+ * Checks for port existence, all ports support SPP MODE
+ */
+/* Probe for port existence via control- and data-register read-back.
+ * Returns PARPORT_MODE_PCSPP if either test succeeds, 0 if no port
+ * responds at this address. */
+static int parport_SPP_supported(struct parport *pb)
+{
+ unsigned char r, w;
+
+ /*
+ * first clear an eventually pending EPP timeout
+ * I (sailer@ife.ee.ethz.ch) have an SMSC chipset
+ * that does not even respond to SPP cycles if an EPP
+ * timeout is pending
+ */
+ clear_epp_timeout(pb);
+
+ /* Do a simple read-write test to make sure the port exists. */
+ w = 0xc;
+ parport_writeb (w, CONTROL (pb));
+
+ /* Is there a control register that we can read from? Some
+ * ports don't allow reads, so read_control just returns a
+ * software copy. Some ports _do_ allow reads, so bypass the
+ * software copy here. In addition, some bits aren't
+ * writable. */
+ r = parport_readb (CONTROL (pb));
+ if ((r & 0xf) == w) {
+ w = 0xe;
+ parport_writeb (w, CONTROL (pb));
+ r = parport_readb (CONTROL (pb));
+ /* Leave the control register in a sane state (SELECT|INIT). */
+ parport_writeb (0xc, CONTROL (pb));
+ if ((r & 0xf) == w)
+ return PARPORT_MODE_PCSPP;
+ }
+
+ /* Try the data register. The data lines aren't tri-stated at
+ * this stage, so we expect back what we wrote. */
+ w = 0xaa;
+ parport_gsc_write_data (pb, w);
+ r = parport_gsc_read_data (pb);
+ if (r == w) {
+ w = 0x55;
+ parport_gsc_write_data (pb, w);
+ r = parport_gsc_read_data (pb);
+ if (r == w)
+ return PARPORT_MODE_PCSPP;
+ }
+
+ return 0;
+}
+
+/* Detect PS/2 support.
+ *
+ * Bit 5 (0x20) sets the PS/2 data direction; setting this high
+ * allows us to read data from the data lines. In theory we would get back
+ * 0xff but any peripheral attached to the port may drag some or all of the
+ * lines down to zero. So if we get back anything that isn't the contents
+ * of the data register we deem PS/2 support to be present.
+ *
+ * Some SPP ports have "half PS/2" ability - you can't turn off the line
+ * drivers, but an external peripheral with sufficiently beefy drivers of
+ * its own can overpower them and assert its own levels onto the bus, from
+ * where they can then be read back as normal. Ports with this property
+ * and the right type of device attached are likely to fail the SPP test,
+ * (as they will appear to have stuck bits) and so the fact that they might
+ * be misdetected here is rather academic.
+ */
+
+/* Returns the number of read-back mismatches (non-zero means the data
+ * buffer can be tri-stated, i.e. PS/2 reverse mode works).  On success
+ * PARPORT_MODE_TRISTATE is added to pb->modes; on failure the direction
+ * bit (0x20) is marked non-writable in the soft-copy mask. */
+static int parport_PS2_supported(struct parport *pb)
+{
+ int ok = 0;
+
+ clear_epp_timeout(pb);
+
+ /* try to tri-state the buffer */
+ parport_gsc_data_reverse (pb);
+
+ parport_gsc_write_data(pb, 0x55);
+ if (parport_gsc_read_data(pb) != 0x55) ok++;
+
+ parport_gsc_write_data(pb, 0xaa);
+ if (parport_gsc_read_data(pb) != 0xaa) ok++;
+
+ /* cancel input mode */
+ parport_gsc_data_forward (pb);
+
+ if (ok) {
+ pb->modes |= PARPORT_MODE_TRISTATE;
+ } else {
+ struct parport_gsc_private *priv = pb->private_data;
+ priv->ctr_writable &= ~0x20;
+ }
+
+ return ok;
+}
+
+
+/* --- Initialisation code -------------------------------- */
+
+/*
+ * parport_gsc_probe_port - detect, register and announce one GSC port
+ * @base: base address of the port's register block
+ * @base_hi: high (ECP) register base, 0 if none
+ * @irq: interrupt number, or PARPORT_IRQ_NONE/PARPORT_IRQ_AUTO
+ * @dma: DMA channel, or PARPORT_DMA_NONE/PARPORT_DMA_AUTO
+ * @padev: the underlying PA-RISC device
+ *
+ * Probes the hardware using a temporary struct parport on the stack so
+ * that nothing is registered with the parport core unless basic SPP
+ * access works.  Returns the registered port, or NULL on failure.
+ */
+struct parport *parport_gsc_probe_port(unsigned long base,
+ unsigned long base_hi, int irq,
+ int dma, struct parisc_device *padev)
+{
+ struct parport_gsc_private *priv;
+ struct parport_operations *ops;
+ struct parport tmp;
+ struct parport *p = &tmp;
+
+ priv = kzalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
+ if (!priv) {
+ printk(KERN_DEBUG "parport (0x%lx): no memory!\n", base);
+ return NULL;
+ }
+ /* Private copy of the ops table so per-port patching is possible. */
+ ops = kmemdup(&parport_gsc_ops, sizeof(struct parport_operations),
+ GFP_KERNEL);
+ if (!ops) {
+ printk(KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
+ base);
+ kfree (priv);
+ return NULL;
+ }
+ /* Initial soft state: SELECT|INIT asserted, all control bits writable. */
+ priv->ctr = 0xc;
+ priv->ctr_writable = 0xff;
+ priv->dma_buf = NULL;
+ priv->dma_handle = 0;
+ p->base = base;
+ p->base_hi = base_hi;
+ p->irq = irq;
+ p->dma = dma;
+ p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
+ p->ops = ops;
+ p->private_data = priv;
+ p->physport = p;
+ if (!parport_SPP_supported (p)) {
+ /* No port. */
+ kfree (priv);
+ kfree(ops);
+ return NULL;
+ }
+ parport_PS2_supported (p);
+
+ /* Hardware responds: register the real port (IRQ is requested by
+  * hand below, so the core is told IRQ/DMA NONE here). */
+ if (!(p = parport_register_port(base, PARPORT_IRQ_NONE,
+ PARPORT_DMA_NONE, ops))) {
+ kfree (priv);
+ kfree (ops);
+ return NULL;
+ }
+
+ /* Carry the probe results from the stack temporary over. */
+ p->dev = &padev->dev;
+ p->base_hi = base_hi;
+ p->modes = tmp.modes;
+ p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
+ p->private_data = priv;
+
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
+ p->irq = irq;
+ if (p->irq == PARPORT_IRQ_AUTO) {
+ p->irq = PARPORT_IRQ_NONE;
+ }
+ if (p->irq != PARPORT_IRQ_NONE) {
+ pr_cont(", irq %d", p->irq);
+
+ /* NOTE(review): duplicates the DMA_AUTO check just below;
+  * harmless, but one of the two is redundant. */
+ if (p->dma == PARPORT_DMA_AUTO) {
+ p->dma = PARPORT_DMA_NONE;
+ }
+ }
+ if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
+ is mandatory (see above) */
+ p->dma = PARPORT_DMA_NONE;
+
+ pr_cont(" [");
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
+ {
+ int f = 0;
+ printmode(PCSPP);
+ printmode(TRISTATE);
+ printmode(COMPAT);
+ printmode(EPP);
+// printmode(ECP);
+// printmode(DMA);
+ }
+#undef printmode
+ pr_cont("]\n");
+
+ /* IRQ is optional: fall back to polling if the line is busy. */
+ if (p->irq != PARPORT_IRQ_NONE) {
+ if (request_irq (p->irq, parport_irq_handler,
+ 0, p->name, p)) {
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
+ p->name, p->irq);
+ p->irq = PARPORT_IRQ_NONE;
+ p->dma = PARPORT_DMA_NONE;
+ }
+ }
+
+ /* Done probing. Now put the port into a sensible start-up state. */
+
+ parport_gsc_write_data(p, 0);
+ parport_gsc_data_forward (p);
+
+ /* Now that we've told the sharing engine about the port, and
+ found out its characteristics, let the high-level drivers
+ know about it. */
+ parport_announce_port (p);
+
+ return p;
+}
+
+
+/* Offset of the parallel-port register block within the device's
+ * hard physical address (hpa) region — TODO confirm against LASI docs. */
+#define PARPORT_GSC_OFFSET 0x800
+
+/* Number of ports successfully probed by parport_init_chip(). */
+static int parport_count;
+
+/*
+ * parport_init_chip - probe callback for a matching PA-RISC device
+ * @dev: the discovered LASI/GSC device
+ *
+ * Requires an IRQ; optionally enables the chip's enhanced (EPP/ECP)
+ * modes when the firmware says the extra registers exist, then probes
+ * and registers the port.  Returns 0 even if probing finds no port
+ * (only a missing IRQ is treated as an error).
+ */
+static int __init parport_init_chip(struct parisc_device *dev)
+{
+ struct parport *p;
+ unsigned long port;
+
+ if (!dev->irq) {
+ pr_warn("IRQ not found for parallel device at 0x%llx\n",
+ (unsigned long long)dev->hpa.start;
+ return -ENODEV;
+ }
+
+ port = dev->hpa.start + PARPORT_GSC_OFFSET;
+
+ /* some older machines with ASP-chip don't support
+ * the enhanced parport modes.
+ */
+ if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
+
+ /* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */
+ pr_info("%s: initialize bidirectional-mode\n", __func__);
+ parport_writeb ( (0x10 + 0x20), port + 4);
+
+ } else {
+ pr_info("%s: enhanced parport-modes not supported\n", __func__);
+ }
+
+ p = parport_gsc_probe_port(port, 0, dev->irq,
+    /* dma: */ PARPORT_DMA_NONE, dev);
+ if (p)
+ parport_count++;
+ dev_set_drvdata(&dev->dev, p);
+
+ return 0;
+}
+
+/*
+ * parport_remove_chip - tear down the port registered by parport_init_chip
+ * @dev: the PA-RISC device being removed
+ *
+ * Releases resources in the reverse order of acquisition: remove the
+ * port from the parport core, free DMA channel and IRQ if claimed,
+ * free the (never-allocated here, see probe) DMA buffer, then drop the
+ * port reference and the private ops copy.
+ */
+static int __exit parport_remove_chip(struct parisc_device *dev)
+{
+ struct parport *p = dev_get_drvdata(&dev->dev);
+ if (p) {
+ struct parport_gsc_private *priv = p->private_data;
+ struct parport_operations *ops = p->ops;
+ parport_remove_port(p);
+ if (p->dma != PARPORT_DMA_NONE)
+ free_dma(p->dma);
+ if (p->irq != PARPORT_IRQ_NONE)
+ free_irq(p->irq, p);
+ /* NOTE(review): pci_free_consistent is the legacy PCI DMA API
+  * (removed upstream in v5.18) and dma_buf is always NULL in the
+  * visible probe path — verify before relying on this branch. */
+ if (priv->dma_buf)
+ pci_free_consistent(priv->dev, PAGE_SIZE,
+ priv->dma_buf,
+ priv->dma_handle);
+ kfree (p->private_data);
+ parport_put_port(p);
+ kfree (ops); /* hope no-one cached it */
+ }
+ return 0;
+}
+
+/* Match table: FIO device with sversion 0x74 — presumably the LASI
+ * built-in parallel port; verify against the PA-RISC device lists. */
+static const struct parisc_device_id parport_tbl[] __initconst = {
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x74 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, parport_tbl);
+
+/* __refdata: the struct legitimately references __init/__exit code, so
+ * suppress section-mismatch warnings. */
+static struct parisc_driver parport_driver __refdata = {
+ .name = "Parallel",
+ .id_table = parport_tbl,
+ .probe = parport_init_chip,
+ .remove = __exit_p(parport_remove_chip),
+};
+
+/* Module entry point: register the PA-RISC bus driver.
+ * NOTE(review): non-static although only referenced via module_init()
+ * below — could be made static unless an external caller exists. */
+int parport_gsc_init(void)
+{
+ return register_parisc_driver(&parport_driver);
+}
+
+/* Module exit point: unregistering the driver triggers
+ * parport_remove_chip() for every bound device. */
+static void parport_gsc_exit(void)
+{
+ unregister_parisc_driver(&parport_driver);
+}
+
+module_init(parport_gsc_init);
+module_exit(parport_gsc_exit);
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
new file mode 100644
index 000000000..9301217ed
--- /dev/null
+++ b/drivers/parport/parport_gsc.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Low-level parallel-support for PC-style hardware integrated in the
+ * LASI-Controller (on GSC-Bus) for HP-PARISC Workstations
+ *
+ * (C) 1999-2001 by Helge Deller <deller@gmx.de>
+ *
+ * based on parport_pc.c by
+ * Grant Guenther <grant@torque.net>
+ * Phil Blundell <Philip.Blundell@pobox.com>
+ * Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Jose Renau <renau@acm.org>
+ * David Campbell
+ * Andrea Arcangeli
+ */
+
+#ifndef __DRIVERS_PARPORT_PARPORT_GSC_H
+#define __DRIVERS_PARPORT_PARPORT_GSC_H
+
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#undef DEBUG_PARPORT /* undefine for production */
+#define DELAY_TIME 0
+
+/* Register accessors: with DELAY_TIME == 0 (production) they are plain
+ * GSC bus reads/writes; a non-zero DELAY_TIME wraps each access in a
+ * udelay() as a debugging aid for flaky hardware. */
+#if DELAY_TIME == 0
+#define parport_readb gsc_readb
+#define parport_writeb gsc_writeb
+#else
+static __inline__ unsigned char parport_readb( unsigned long port )
+{
+ udelay(DELAY_TIME);
+ return gsc_readb(port);
+}
+
+static __inline__ void parport_writeb( unsigned char value, unsigned long port )
+{
+ gsc_writeb(value,port);
+ udelay(DELAY_TIME);
+}
+#endif
+
+/* --- register definitions ------------------------------- */
+
+#define EPPDATA(p) ((p)->base + 0x4)
+#define EPPADDR(p) ((p)->base + 0x3)
+#define CONTROL(p) ((p)->base + 0x2)
+#define STATUS(p) ((p)->base + 0x1)
+#define DATA(p) ((p)->base + 0x0)
+
+/* Per-port private state, kept in parport->private_data. */
+struct parport_gsc_private {
+ /* Contents of CTR (soft copy, maintained by
+  * __parport_gsc_frob_control() since the hardware control
+  * register may not be reliably readable). */
+ unsigned char ctr;
+
+ /* Bitmask of writable CTR bits. */
+ unsigned char ctr_writable;
+
+ /* Number of bytes per portword. */
+ int pword;
+
+ /* Not used yet. */
+ int readIntrThreshold;
+ int writeIntrThreshold;
+
+ /* buffer suitable for DMA, if DMA enabled */
+ char *dma_buf;
+ dma_addr_t dma_handle;
+ struct pci_dev *dev;
+};
+
+/* Write @d to the DATA register. */
+static inline void parport_gsc_write_data(struct parport *p, unsigned char d)
+{
+#ifdef DEBUG_PARPORT
+ printk(KERN_DEBUG "%s(%p,0x%02x)\n", __func__, p, d);
+#endif
+ parport_writeb(d, DATA(p));
+}
+
+/* Read the current value of the DATA register. */
+static inline unsigned char parport_gsc_read_data(struct parport *p)
+{
+ unsigned char val = parport_readb (DATA (p));
+#ifdef DEBUG_PARPORT
+ printk(KERN_DEBUG "%s(%p) = 0x%02x\n", __func__, p, val);
+#endif
+ return val;
+}
+
+/* __parport_gsc_frob_control differs from parport_gsc_frob_control in that
+ * it doesn't do any extra masking. */
+/* Clears @mask bits and XORs in @val on the soft copy, writes only the
+ * bits marked writable to the hardware, then updates the cache.
+ * Returns the new soft-copy value. */
+static inline unsigned char __parport_gsc_frob_control(struct parport *p,
+ unsigned char mask,
+ unsigned char val)
+{
+ struct parport_gsc_private *priv = p->physport->private_data;
+ unsigned char ctr = priv->ctr;
+#ifdef DEBUG_PARPORT
+ printk(KERN_DEBUG "%s(%02x,%02x): %02x -> %02x\n",
+ __func__, mask, val,
+ ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
+#endif
+ ctr = (ctr & ~mask) ^ val;
+ ctr &= priv->ctr_writable; /* only write writable bits. */
+ parport_writeb (ctr, CONTROL (p));
+ priv->ctr = ctr; /* Update soft copy */
+ return ctr;
+}
+
+/* Set control bit 0x20: tri-state the data drivers (input direction). */
+static inline void parport_gsc_data_reverse(struct parport *p)
+{
+ __parport_gsc_frob_control (p, 0x20, 0x20);
+}
+
+/* Clear control bit 0x20: drive the data lines (output direction). */
+static inline void parport_gsc_data_forward(struct parport *p)
+{
+ __parport_gsc_frob_control (p, 0x20, 0x00);
+}
+
+/* Write the four printer-control lines; the direction bit (0x20) is
+ * accepted only for legacy callers and redirected to data_reverse(). */
+static inline void parport_gsc_write_control(struct parport *p,
+ unsigned char d)
+{
+ /* Writable mask: only the four control lines, never direction/IRQ. */
+ const unsigned char wm = (PARPORT_CONTROL_STROBE |
+ PARPORT_CONTROL_AUTOFD |
+ PARPORT_CONTROL_INIT |
+ PARPORT_CONTROL_SELECT);
+
+ /* Take this out when drivers have adapted to newer interface. */
+ if (d & 0x20) {
+ printk(KERN_DEBUG "%s (%s): use data_reverse for this!\n",
+ p->name, p->cad->name);
+ parport_gsc_data_reverse (p);
+ }
+
+ __parport_gsc_frob_control (p, wm, d & wm);
+}
+
+/* Return the four control lines from the soft copy (the hardware
+ * register is never read here). */
+static inline unsigned char parport_gsc_read_control(struct parport *p)
+{
+ const unsigned char rm = (PARPORT_CONTROL_STROBE |
+ PARPORT_CONTROL_AUTOFD |
+ PARPORT_CONTROL_INIT |
+ PARPORT_CONTROL_SELECT);
+ const struct parport_gsc_private *priv = p->physport->private_data;
+ return priv->ctr & rm; /* Use soft copy */
+}
+
+/* Public frob: like __parport_gsc_frob_control() but restricted to the
+ * four control lines; a direction-bit (0x20) request from legacy
+ * callers is translated into data_reverse()/data_forward(). */
+static inline unsigned char parport_gsc_frob_control(struct parport *p,
+ unsigned char mask,
+ unsigned char val)
+{
+ const unsigned char wm = (PARPORT_CONTROL_STROBE |
+ PARPORT_CONTROL_AUTOFD |
+ PARPORT_CONTROL_INIT |
+ PARPORT_CONTROL_SELECT);
+
+ /* Take this out when drivers have adapted to newer interface. */
+ if (mask & 0x20) {
+ printk(KERN_DEBUG "%s (%s): use data_%s for this!\n",
+ p->name, p->cad->name,
+ (val & 0x20) ? "reverse" : "forward");
+ if (val & 0x20)
+ parport_gsc_data_reverse (p);
+ else
+ parport_gsc_data_forward (p);
+ }
+
+ /* Restrict mask and val to control lines. */
+ mask &= wm;
+ val &= wm;
+
+ return __parport_gsc_frob_control (p, mask, val);
+}
+
+/* Read the STATUS register directly from hardware. */
+static inline unsigned char parport_gsc_read_status(struct parport *p)
+{
+ return parport_readb (STATUS(p));
+}
+
+/* Clear control bit 0x10: disable the port's interrupt generation. */
+static inline void parport_gsc_disable_irq(struct parport *p)
+{
+ __parport_gsc_frob_control (p, 0x10, 0x00);
+}
+
+/* Set control bit 0x10: enable the port's interrupt generation. */
+static inline void parport_gsc_enable_irq(struct parport *p)
+{
+ __parport_gsc_frob_control (p, 0x10, 0x10);
+}
+
+extern void parport_gsc_release_resources(struct parport *p);
+
+extern int parport_gsc_claim_resources(struct parport *p);
+
+extern void parport_gsc_init_state(struct pardevice *, struct parport_state *s);
+
+extern void parport_gsc_save_state(struct parport *p, struct parport_state *s);
+
+extern void parport_gsc_restore_state(struct parport *p, struct parport_state *s);
+
+extern void parport_gsc_inc_use_count(void);
+
+extern void parport_gsc_dec_use_count(void);
+
+extern struct parport *parport_gsc_probe_port(unsigned long base,
+ unsigned long base_hi,
+ int irq, int dma,
+ struct parisc_device *padev);
+
+#endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
new file mode 100644
index 000000000..48b084e86
--- /dev/null
+++ b/drivers/parport/parport_ip32.c
@@ -0,0 +1,2238 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Low-level parallel port routines for built-in port on SGI IP32
+ *
+ * Author: Arnaud Giersch <arnaud.giersch@free.fr>
+ *
+ * Based on parport_pc.c by
+ * Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
+ * Andrea Arcangeli, et al.
+ *
+ * Thanks to Ilya A. Volynets-Evenbakh for his help.
+ *
+ * Copyright (C) 2005, 2006 Arnaud Giersch.
+ */
+
+/* Current status:
+ *
+ * Basic SPP and PS2 modes are supported.
+ * Support for parallel port IRQ is present.
+ * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
+ * supported.
+ * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with
+ * or without interrupt support.
+ *
+ * Hardware ECP mode is not fully implemented (ecp_read_data and
+ * ecp_write_addr are actually missing).
+ *
+ * To do:
+ *
+ * Fully implement ECP mode.
+ * EPP and ECP mode need to be tested. I currently do not own any
+ * peripheral supporting these extended modes, and cannot test them.
+ * If DMA mode works well, decide if support for PIO FIFO modes should be
+ * dropped.
+ * Use the io{read,write} family functions when they become available in
+ * the linux-mips.org tree. Note: the MIPS specific functions readsb()
+ * and writesb() are to be translated by ioread8_rep() and iowrite8_rep()
+ * respectively.
+ */
+
+/* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
+ * IEEE 1284 parallel port driven by a Texas Instrument TL16PIR552PH chip[1].
+ * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte
+ * FIFO buffer and supports DMA transfers.
+ *
+ * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
+ *
+ * Theoretically, we could simply use the parport_pc module. It is however
+ * not so simple. The parport_pc code assumes that the parallel port
+ * registers are port-mapped. On the O2, they are memory-mapped.
+ * Furthermore, each register is replicated on 256 consecutive addresses (as
+ * it is for the built-in serial ports on the same chip).
+ */
+
+/*--- Some configuration defines ---------------------------------------*/
+
+/* DEBUG_PARPORT_IP32
+ * 0 disable debug
+ * 1 standard level: pr_debug1 is enabled
+ * 2 parport_ip32_dump_state is enabled
+ * >=3 verbose level: pr_debug is enabled
+ */
+#if !defined(DEBUG_PARPORT_IP32)
+# define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */
+#endif
+
+/*----------------------------------------------------------------------*/
+
+/* Setup DEBUG macros. This is done before any includes, just in case we
+ * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
+ */
+#if DEBUG_PARPORT_IP32 == 1
+# warning DEBUG_PARPORT_IP32 == 1
+#elif DEBUG_PARPORT_IP32 == 2
+# warning DEBUG_PARPORT_IP32 == 2
+#elif DEBUG_PARPORT_IP32 >= 3
+# warning DEBUG_PARPORT_IP32 >= 3
+# if !defined(DEBUG)
+# define DEBUG /* enable pr_debug() in kernel.h */
+# endif
+#endif
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/parport.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/ip32/ip32_ints.h>
+#include <asm/ip32/mace.h>
+
+/*--- Global variables -------------------------------------------------*/
+
+/* Verbose probing on by default for debugging. */
+#if DEBUG_PARPORT_IP32 >= 1
+# define DEFAULT_VERBOSE_PROBING 1
+#else
+# define DEFAULT_VERBOSE_PROBING 0
+#endif
+
+/* Default prefix for printk */
+#define PPIP32 "parport_ip32: "
+
+/*
+ * These are the module parameters:
+ * @features: bit mask of features to enable/disable
+ * (all enabled by default)
+ * @verbose_probing: log chit-chat during initialization
+ */
+#define PARPORT_IP32_ENABLE_IRQ (1U << 0)
+#define PARPORT_IP32_ENABLE_DMA (1U << 1)
+#define PARPORT_IP32_ENABLE_SPP (1U << 2)
+#define PARPORT_IP32_ENABLE_EPP (1U << 3)
+#define PARPORT_IP32_ENABLE_ECP (1U << 4)
+static unsigned int features = ~0U;
+static bool verbose_probing = DEFAULT_VERBOSE_PROBING;
+
+/* We do not support more than one port. */
+static struct parport *this_port;
+
+/* Timing constants for FIFO modes. */
+#define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
+#define FIFO_POLLING_INTERVAL 50 /* microseconds */
+
+/*--- I/O register definitions -----------------------------------------*/
+
+/**
+ * struct parport_ip32_regs - virtual addresses of parallel port registers
+ * @data: Data Register
+ * @dsr: Device Status Register
+ * @dcr: Device Control Register
+ * @eppAddr: EPP Address Register
+ * @eppData0: EPP Data Register 0
+ * @eppData1: EPP Data Register 1
+ * @eppData2: EPP Data Register 2
+ * @eppData3: EPP Data Register 3
+ * @ecpAFifo: ECP Address FIFO
+ * @fifo: General FIFO register. The same address is used for:
+ * - cFifo, the Parallel Port DATA FIFO
+ * - ecpDFifo, the ECP Data FIFO
+ * - tFifo, the ECP Test FIFO
+ * @cnfgA: Configuration Register A
+ * @cnfgB: Configuration Register B
+ * @ecr: Extended Control Register
+ */
+/* On IP32 each hardware register is mirrored across 256 consecutive
+ * addresses (see the file-header comment); these fields hold the one
+ * virtual address chosen for each register. */
+struct parport_ip32_regs {
+ void __iomem *data;
+ void __iomem *dsr;
+ void __iomem *dcr;
+ void __iomem *eppAddr;
+ void __iomem *eppData0;
+ void __iomem *eppData1;
+ void __iomem *eppData2;
+ void __iomem *eppData3;
+ void __iomem *ecpAFifo;
+ void __iomem *fifo;
+ void __iomem *cnfgA;
+ void __iomem *cnfgB;
+ void __iomem *ecr;
+};
+
+/* Device Status Register */
+#define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */
+#define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */
+#define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */
+#define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */
+#define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */
+#define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */
+/* #define DSR_reserved (1U << 1) */
+#define DSR_TIMEOUT (1U << 0) /* EPP timeout */
+
+/* Device Control Register */
+/* #define DCR_reserved (1U << 7) | (1U << 6) */
+#define DCR_DIR (1U << 5) /* direction */
+#define DCR_IRQ (1U << 4) /* interrupt on nAck */
+#define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */
+#define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */
+#define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */
+#define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */
+
+/* ECP Configuration Register A */
+#define CNFGA_IRQ (1U << 7)
+#define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
+#define CNFGA_ID_SHIFT 4
+#define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
+#define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
+#define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
+/* #define CNFGA_reserved (1U << 3) */
+#define CNFGA_nBYTEINTRANS (1U << 2)
+#define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))
+
+/* ECP Configuration Register B */
+#define CNFGB_COMPRESS (1U << 7)
+#define CNFGB_INTRVAL (1U << 6)
+#define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
+#define CNFGB_IRQ_SHIFT 3
+#define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
+#define CNFGB_DMA_SHIFT 0
+
+/* Extended Control Register */
+#define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
+#define ECR_MODE_SHIFT 5
+#define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
+#define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
+#define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
+#define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
+#define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
+/* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */
+#define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
+#define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
+#define ECR_nERRINTR (1U << 4)
+#define ECR_DMAEN (1U << 3)
+#define ECR_SERVINTR (1U << 2)
+#define ECR_F_FULL (1U << 1)
+#define ECR_F_EMPTY (1U << 0)
+
+/*--- Private data -----------------------------------------------------*/
+
+/**
+ * enum parport_ip32_irq_mode - operation mode of interrupt handler
+ * @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer
+ * @PARPORT_IP32_IRQ_HERE: interrupt is handled locally
+ */
+enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
+
+/**
+ * struct parport_ip32_private - private stuff for &struct parport
+ * @regs: register addresses
+ * @dcr_cache: cached contents of DCR
+ * @dcr_writable: bit mask of writable DCR bits
+ * @pword: number of bytes per PWord
+ * @fifo_depth: number of PWords that FIFO will hold
+ * @readIntrThreshold: minimum number of PWords we can read
+ * if we get an interrupt
+ * @writeIntrThreshold: minimum number of PWords we can write
+ * if we get an interrupt
+ * @irq_mode: operation mode of interrupt handler for this port
+ * @irq_complete: mutex used to wait for an interrupt to occur
+ */
+/* Field-by-field documentation is in the kernel-doc block above. */
+struct parport_ip32_private {
+ struct parport_ip32_regs regs;
+ unsigned int dcr_cache; /* soft copy of DCR */
+ unsigned int dcr_writable;
+ unsigned int pword;
+ unsigned int fifo_depth;
+ unsigned int readIntrThreshold;
+ unsigned int writeIntrThreshold;
+ enum parport_ip32_irq_mode irq_mode;
+ struct completion irq_complete;
+};
+
+/*--- Debug code -------------------------------------------------------*/
+
+/*
+ * pr_debug1 - print debug messages
+ *
+ * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
+ */
+#if DEBUG_PARPORT_IP32 >= 1
+# define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__)
+#else /* DEBUG_PARPORT_IP32 < 1 */
+# define pr_debug1(...) do { } while (0)
+#endif
+
+/*
+ * pr_trace, pr_trace1 - trace function calls
+ * @p: pointer to &struct parport
+ * @fmt: printk format string
+ * @...: parameters for format string
+ *
+ * Macros used to trace function calls. The given string is formatted after
+ * function name. pr_trace() uses pr_debug(), and pr_trace1() uses
+ * pr_debug1(). __pr_trace() is the low-level macro and is not to be used
+ * directly.
+ */
+#define __pr_trace(pr, p, fmt, ...) \
+ pr("%s: %s" fmt "\n", \
+ ({ const struct parport *__p = (p); \
+ __p ? __p->name : "parport_ip32"; }), \
+ __func__ , ##__VA_ARGS__)
+#define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
+#define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
+
+/*
+ * __pr_probe, pr_probe - print message if @verbose_probing is true
+ * @p: pointer to &struct parport
+ * @fmt: printk format string
+ * @...: parameters for format string
+ *
+ * For new lines, use pr_probe(). Use __pr_probe() for continued lines.
+ */
+#define __pr_probe(...) \
+ do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
+#define pr_probe(p, fmt, ...) \
+ __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
+
+/*
+ * parport_ip32_dump_state - print register status of parport
+ * @p: pointer to &struct parport
+ * @str: string to add in message
+ * @show_ecp_config: shall we dump ECP configuration registers too?
+ *
+ * This function is only here for debugging purpose, and should be used with
+ * care. Reading the parallel port registers may have undesired side effects.
+ * Especially if @show_ecp_config is true, the parallel port is reset.
+ * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
+ */
+#if DEBUG_PARPORT_IP32 >= 2
+/* Debug-only (DEBUG_PARPORT_IP32 >= 2): decode and print ECR, optional
+ * ECP config registers, DCR (hardware and cached) and DSR. */
+static void parport_ip32_dump_state(struct parport *p, char *str,
+ unsigned int show_ecp_config)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ unsigned int i;
+
+ printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
+ {
+ static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
+ "ECP", "EPP", "???",
+ "TST", "CFG"};
+ unsigned int ecr = readb(priv->regs.ecr);
+ printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
+ pr_cont(" %s",
+ ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
+ if (ecr & ECR_nERRINTR)
+ pr_cont(",nErrIntrEn");
+ if (ecr & ECR_DMAEN)
+ pr_cont(",dmaEn");
+ if (ecr & ECR_SERVINTR)
+ pr_cont(",serviceIntr");
+ if (ecr & ECR_F_FULL)
+ pr_cont(",f_full");
+ if (ecr & ECR_F_EMPTY)
+ pr_cont(",f_empty");
+ pr_cont("\n");
+ }
+ if (show_ecp_config) {
+ /* Reading cnfgA/cnfgB requires switching into CFG mode;
+  * the previous ECR value is saved and restored afterwards. */
+ unsigned int oecr, cnfgA, cnfgB;
+ oecr = readb(priv->regs.ecr);
+ writeb(ECR_MODE_PS2, priv->regs.ecr);
+ writeb(ECR_MODE_CFG, priv->regs.ecr);
+ cnfgA = readb(priv->regs.cnfgA);
+ cnfgB = readb(priv->regs.cnfgB);
+ writeb(ECR_MODE_PS2, priv->regs.ecr);
+ writeb(oecr, priv->regs.ecr);
+ printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
+ pr_cont(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
+ switch (cnfgA & CNFGA_ID_MASK) {
+ case CNFGA_ID_8:
+ pr_cont(",8 bits");
+ break;
+ case CNFGA_ID_16:
+ pr_cont(",16 bits");
+ break;
+ case CNFGA_ID_32:
+ pr_cont(",32 bits");
+ break;
+ default:
+ pr_cont(",unknown ID");
+ break;
+ }
+ if (!(cnfgA & CNFGA_nBYTEINTRANS))
+ pr_cont(",ByteInTrans");
+ if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
+ pr_cont(",%d byte%s left",
+ cnfgA & CNFGA_PWORDLEFT,
+ ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
+ pr_cont("\n");
+ printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
+ pr_cont(" irq=%u,dma=%u",
+ (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
+ (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
+ pr_cont(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
+ if (cnfgB & CNFGB_COMPRESS)
+ pr_cont(",compress");
+ pr_cont("\n");
+ }
+ /* Pass 0 dumps the hardware DCR, pass 1 the cached soft copy. */
+ for (i = 0; i < 2; i++) {
+ unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
+ printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
+ i ? "soft" : "hard", dcr);
+ pr_cont(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
+ if (dcr & DCR_IRQ)
+ pr_cont(",ackIntEn");
+ if (!(dcr & DCR_SELECT))
+ pr_cont(",nSelectIn");
+ if (dcr & DCR_nINIT)
+ pr_cont(",nInit");
+ if (!(dcr & DCR_AUTOFD))
+ pr_cont(",nAutoFD");
+ if (!(dcr & DCR_STROBE))
+ pr_cont(",nStrobe");
+ pr_cont("\n");
+ }
+#define sep (f++ ? ',' : ' ')
+ {
+ unsigned int f = 0;
+ unsigned int dsr = readb(priv->regs.dsr);
+ printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
+ if (!(dsr & DSR_nBUSY))
+ pr_cont("%cBusy", sep);
+ if (dsr & DSR_nACK)
+ pr_cont("%cnAck", sep);
+ if (dsr & DSR_PERROR)
+ pr_cont("%cPError", sep);
+ if (dsr & DSR_SELECT)
+ pr_cont("%cSelect", sep);
+ if (dsr & DSR_nFAULT)
+ pr_cont("%cnFault", sep);
+ if (!(dsr & DSR_nPRINT))
+ pr_cont("%c(Print)", sep);
+ if (dsr & DSR_TIMEOUT)
+ pr_cont("%cTimeout", sep);
+ pr_cont("\n");
+ }
+#undef sep
+}
+#else /* DEBUG_PARPORT_IP32 < 2 */
+#define parport_ip32_dump_state(...) do { } while (0)
+#endif
+
+/*
+ * CHECK_EXTRA_BITS - track and log extra bits
+ * @p: pointer to &struct parport
+ * @b: byte to inspect
+ * @m: bit mask of authorized bits
+ *
+ * This is used to track and log extra bits that should not be there in
+ * parport_ip32_write_control() and parport_ip32_frob_control(). It is only
+ * defined if %DEBUG_PARPORT_IP32 >= 1.
+ */
+#if DEBUG_PARPORT_IP32 >= 1
+#define CHECK_EXTRA_BITS(p, b, m) \
+ do { \
+ unsigned int __b = (b), __m = (m); \
+ if (__b & ~__m) \
+ pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
+ "0x%02x/0x%02x\n", \
+ (p)->name, __func__, #b, __b, __m); \
+ } while (0)
+#else /* DEBUG_PARPORT_IP32 < 1 */
+/* When debugging is off, expand to a no-op that does not evaluate its arguments. */
+#define CHECK_EXTRA_BITS(...) do { } while (0)
+#endif
+
+/*--- IP32 parallel port DMA operations --------------------------------*/
+
+/**
+ * struct parport_ip32_dma_data - private data needed for DMA operation
+ * @dir: DMA direction (from or to device)
+ * @buf: buffer physical address
+ * @len: buffer length
+ * @next: address of next bytes to DMA transfer
+ * @left: number of bytes remaining
+ * @ctx: next context to write (0: context_a; 1: context_b)
+ * @irq_on: are the DMA IRQs currently enabled?
+ * @lock: spinlock to protect access to the structure
+ */
+struct parport_ip32_dma_data {
+ enum dma_data_direction dir;
+ dma_addr_t buf;
+ dma_addr_t next;
+ size_t len;
+ size_t left;
+ unsigned int ctx;
+ unsigned int irq_on;
+ spinlock_t lock;
+};
+/* NOTE(review): single shared instance — assumes at most one port does DMA at a time. */
+static struct parport_ip32_dma_data parport_ip32_dma;
+
+/**
+ * parport_ip32_dma_setup_context - setup next DMA context
+ * @limit: maximum data size for the context
+ *
+ * The alignment constraints must be verified in caller function, and the
+ * parameter @limit must be set accordingly.
+ */
+static void parport_ip32_dma_setup_context(unsigned int limit)
+{
+ unsigned long flags;
+
+ /* Called from process context and from the DMA context interrupt handler. */
+ spin_lock_irqsave(&parport_ip32_dma.lock, flags);
+ if (parport_ip32_dma.left > 0) {
+ /* Note: ctxreg is "volatile" here only because
+ * mace->perif.ctrl.parport.context_a and context_b are
+ * "volatile". */
+ volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
+ &mace->perif.ctrl.parport.context_a :
+ &mace->perif.ctrl.parport.context_b;
+ u64 count;
+ u64 ctxval;
+ if (parport_ip32_dma.left <= limit) {
+ count = parport_ip32_dma.left;
+ ctxval = MACEPAR_CONTEXT_LASTFLAG;
+ } else {
+ count = limit;
+ ctxval = 0;
+ }
+
+ pr_trace(NULL,
+ "(%u): 0x%04x:0x%04x, %u -> %u%s",
+ limit,
+ (unsigned int)parport_ip32_dma.buf,
+ (unsigned int)parport_ip32_dma.next,
+ (unsigned int)count,
+ parport_ip32_dma.ctx, ctxval ? "*" : "");
+
+ ctxval |= parport_ip32_dma.next &
+ MACEPAR_CONTEXT_BASEADDR_MASK;
+ ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
+ MACEPAR_CONTEXT_DATALEN_MASK;
+ writeq(ctxval, ctxreg);
+ parport_ip32_dma.next += count;
+ parport_ip32_dma.left -= count;
+ parport_ip32_dma.ctx ^= 1U;
+ }
+ /* If there is nothing more to send, disable IRQs to avoid to
+ * face an IRQ storm which can lock the machine. Disable them
+ * only once. */
+ if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
+ pr_debug(PPIP32 "IRQ off (ctx)\n");
+ disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
+ disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
+ parport_ip32_dma.irq_on = 0;
+ }
+ spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
+}
+
+/**
+ * parport_ip32_dma_interrupt - DMA interrupt handler
+ * @irq: interrupt number
+ * @dev_id: unused
+ */
+static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
+{
+ if (parport_ip32_dma.left)
+ pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
+ /* A context completed: refill the free context with the next chunk. */
+ parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
+ return IRQ_HANDLED;
+}
+
+#if DEBUG_PARPORT_IP32
+static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
+{
+ /* Purpose of this IRQ is unclear (see FIXME at request time); just trace it. */
+ pr_trace1(NULL, "(%d)", irq);
+ return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * parport_ip32_dma_start - begins a DMA transfer
+ * @p: partport to work on
+ * @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @addr: pointer to data buffer
+ * @count: buffer size
+ *
+ * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
+ * correctly balanced.
+ */
+static int parport_ip32_dma_start(struct parport *p,
+ enum dma_data_direction dir, void *addr, size_t count)
+{
+ unsigned int limit;
+ u64 ctrl;
+
+ pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);
+
+ /* FIXME - add support for DMA_FROM_DEVICE. In this case, buffer must
+ * be 64 bytes aligned. */
+ BUG_ON(dir != DMA_TO_DEVICE);
+
+ /* Reset DMA controller */
+ ctrl = MACEPAR_CTLSTAT_RESET;
+ writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
+
+ /* DMA IRQs should normally be enabled */
+ if (!parport_ip32_dma.irq_on) {
+ WARN_ON(1);
+ enable_irq(MACEISA_PAR_CTXA_IRQ);
+ enable_irq(MACEISA_PAR_CTXB_IRQ);
+ parport_ip32_dma.irq_on = 1;
+ }
+
+ /* Prepare DMA pointers */
+ parport_ip32_dma.dir = dir;
+ /* NOTE(review): dma_map_single() result is not checked with dma_mapping_error(). */
+ parport_ip32_dma.buf = dma_map_single(&p->bus_dev, addr, count, dir);
+ parport_ip32_dma.len = count;
+ parport_ip32_dma.next = parport_ip32_dma.buf;
+ parport_ip32_dma.left = parport_ip32_dma.len;
+ parport_ip32_dma.ctx = 0;
+
+ /* Setup DMA direction and first two contexts */
+ ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
+ writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
+ /* Single transfer should not cross a 4K page boundary */
+ limit = MACEPAR_CONTEXT_DATA_BOUND -
+ (parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
+ parport_ip32_dma_setup_context(limit);
+ parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
+
+ /* Real start of DMA transfer */
+ ctrl |= MACEPAR_CTLSTAT_ENABLE;
+ writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
+
+ return 0;
+}
+
+/**
+ * parport_ip32_dma_stop - ends a running DMA transfer
+ * @p: partport to work on
+ *
+ * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
+ * correctly balanced.
+ */
+static void parport_ip32_dma_stop(struct parport *p)
+{
+ u64 ctx_a;
+ u64 ctx_b;
+ u64 ctrl;
+ u64 diag;
+ size_t res[2]; /* {[0] = res_a, [1] = res_b} */
+
+ pr_trace(NULL, "()");
+
+ /* Disable IRQs */
+ spin_lock_irq(&parport_ip32_dma.lock);
+ if (parport_ip32_dma.irq_on) {
+ pr_debug(PPIP32 "IRQ off (stop)\n");
+ disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
+ disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
+ parport_ip32_dma.irq_on = 0;
+ }
+ spin_unlock_irq(&parport_ip32_dma.lock);
+ /* Force IRQ synchronization, even if the IRQs were disabled
+ * elsewhere. */
+ synchronize_irq(MACEISA_PAR_CTXA_IRQ);
+ synchronize_irq(MACEISA_PAR_CTXB_IRQ);
+
+ /* Stop DMA transfer */
+ ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
+ ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
+ writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
+
+ /* Adjust residue (parport_ip32_dma.left) */
+ ctx_a = readq(&mace->perif.ctrl.parport.context_a);
+ ctx_b = readq(&mace->perif.ctrl.parport.context_b);
+ ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
+ diag = readq(&mace->perif.ctrl.parport.diagnostic);
+ /* Hardware length fields hold "count - 1" (see setup_context), hence the 1 +. */
+ res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
+ 1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
+ MACEPAR_CONTEXT_DATALEN_SHIFT) :
+ 0;
+ res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
+ 1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
+ MACEPAR_CONTEXT_DATALEN_SHIFT) :
+ 0;
+ if (diag & MACEPAR_DIAG_DMACTIVE)
+ res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
+ 1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
+ MACEPAR_DIAG_CTRSHIFT);
+ parport_ip32_dma.left += res[0] + res[1];
+
+ /* Reset DMA controller, and re-enable IRQs */
+ ctrl = MACEPAR_CTLSTAT_RESET;
+ writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
+ pr_debug(PPIP32 "IRQ on (stop)\n");
+ enable_irq(MACEISA_PAR_CTXA_IRQ);
+ enable_irq(MACEISA_PAR_CTXB_IRQ);
+ parport_ip32_dma.irq_on = 1;
+
+ dma_unmap_single(&p->bus_dev, parport_ip32_dma.buf,
+ parport_ip32_dma.len, parport_ip32_dma.dir);
+}
+
+/**
+ * parport_ip32_dma_get_residue - get residue from last DMA transfer
+ *
+ * Returns the number of bytes remaining from last DMA transfer.
+ */
+static inline size_t parport_ip32_dma_get_residue(void)
+{
+ return parport_ip32_dma.left;
+}
+
+/**
+ * parport_ip32_dma_register - initialize DMA engine
+ *
+ * Returns zero for success.
+ */
+static int parport_ip32_dma_register(void)
+{
+ int err;
+
+ spin_lock_init(&parport_ip32_dma.lock);
+ /* Context IRQs start out enabled: request_irq() leaves them unmasked. */
+ parport_ip32_dma.irq_on = 1;
+
+ /* Reset DMA controller */
+ writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);
+
+ /* Request IRQs */
+ err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
+ 0, "parport_ip32", NULL);
+ if (err)
+ goto fail_a;
+ err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
+ 0, "parport_ip32", NULL);
+ if (err)
+ goto fail_b;
+#if DEBUG_PARPORT_IP32
+ /* FIXME - what is this IRQ for? */
+ err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
+ 0, "parport_ip32", NULL);
+ if (err)
+ goto fail_merr;
+#endif
+ return 0;
+
+#if DEBUG_PARPORT_IP32
+fail_merr:
+ free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
+#endif
+fail_b:
+ free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
+fail_a:
+ return err;
+}
+
+/**
+ * parport_ip32_dma_unregister - release and free resources for DMA engine
+ */
+static void parport_ip32_dma_unregister(void)
+{
+#if DEBUG_PARPORT_IP32
+ free_irq(MACEISA_PAR_MERR_IRQ, NULL);
+#endif
+ free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
+ free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
+}
+
+/*--- Interrupt handlers and associates --------------------------------*/
+
+/**
+ * parport_ip32_wakeup - wakes up code waiting for an interrupt
+ * @p: pointer to &struct parport
+ */
+static inline void parport_ip32_wakeup(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ complete(&priv->irq_complete);
+}
+
+/**
+ * parport_ip32_interrupt - interrupt handler
+ * @irq: interrupt number
+ * @dev_id: pointer to &struct parport
+ *
+ * Caught interrupts are forwarded to the upper parport layer if IRQ_mode is
+ * %PARPORT_IP32_IRQ_FWD.
+ */
+static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
+{
+ struct parport * const p = dev_id;
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
+
+ /* In HERE mode the IRQ wakes this driver's own wait loops via irq_complete. */
+ switch (irq_mode) {
+ case PARPORT_IP32_IRQ_FWD:
+ return parport_irq_handler(irq, dev_id);
+
+ case PARPORT_IP32_IRQ_HERE:
+ parport_ip32_wakeup(p);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*--- Some utility function to manipulate ECR register -----------------*/
+
+/**
+ * parport_ip32_read_econtrol - read contents of the ECR register
+ * @p: pointer to &struct parport
+ */
+static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ return readb(priv->regs.ecr);
+}
+
+/**
+ * parport_ip32_write_econtrol - write new contents to the ECR register
+ * @p: pointer to &struct parport
+ * @c: new value to write
+ */
+static inline void parport_ip32_write_econtrol(struct parport *p,
+ unsigned int c)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ writeb(c, priv->regs.ecr);
+}
+
+/**
+ * parport_ip32_frob_econtrol - change bits from the ECR register
+ * @p: pointer to &struct parport
+ * @mask: bit mask of bits to change
+ * @val: new value for changed bits
+ *
+ * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
+ * in @val, and write the result to the ECR.
+ */
+static inline void parport_ip32_frob_econtrol(struct parport *p,
+ unsigned int mask,
+ unsigned int val)
+{
+ unsigned int c;
+ c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
+ parport_ip32_write_econtrol(p, c);
+}
+
+/**
+ * parport_ip32_set_mode - change mode of ECP port
+ * @p: pointer to &struct parport
+ * @mode: new mode to write in ECR
+ *
+ * ECR is reset in a sane state (interrupts and DMA disabled), and placed in
+ * mode @mode. Go through PS2 mode if needed.
+ */
+static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
+{
+ unsigned int omode;
+
+ mode &= ECR_MODE_MASK;
+ omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
+
+ if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
+ || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
+ /* We have to go through PS2 mode */
+ unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
+ parport_ip32_write_econtrol(p, ecr);
+ }
+ /* nErrIntrEn and serviceIntr are set: interrupts stay masked (see kernel-doc). */
+ parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
+}
+
+/*--- Basic functions needed for parport -------------------------------*/
+
+/**
+ * parport_ip32_read_data - return current contents of the DATA register
+ * @p: pointer to &struct parport
+ */
+static inline unsigned char parport_ip32_read_data(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ return readb(priv->regs.data);
+}
+
+/**
+ * parport_ip32_write_data - set new contents for the DATA register
+ * @p: pointer to &struct parport
+ * @d: new value to write
+ */
+static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ writeb(d, priv->regs.data);
+}
+
+/**
+ * parport_ip32_read_status - return current contents of the DSR register
+ * @p: pointer to &struct parport
+ */
+static inline unsigned char parport_ip32_read_status(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ return readb(priv->regs.dsr);
+}
+
+/**
+ * __parport_ip32_read_control - return cached contents of the DCR register
+ * @p: pointer to &struct parport
+ */
+static inline unsigned int __parport_ip32_read_control(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ /* NOTE(review): DCR reads are served from the cache; the hardware DCR
+ * is only read back for debugging (see parport_ip32_dump_state). */
+ return priv->dcr_cache; /* use soft copy */
+}
+
+/**
+ * __parport_ip32_write_control - set new contents for the DCR register
+ * @p: pointer to &struct parport
+ * @c: new value to write
+ */
+static inline void __parport_ip32_write_control(struct parport *p,
+ unsigned int c)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
+ c &= priv->dcr_writable; /* only writable bits */
+ writeb(c, priv->regs.dcr);
+ priv->dcr_cache = c; /* update soft copy */
+}
+
+/**
+ * __parport_ip32_frob_control - change bits from the DCR register
+ * @p: pointer to &struct parport
+ * @mask: bit mask of bits to change
+ * @val: new value for changed bits
+ *
+ * This is equivalent to read from the DCR, mask out the bits in @mask,
+ * exclusive-or with the bits in @val, and write the result to the DCR.
+ * Actually, the cached contents of the DCR is used.
+ */
+static inline void __parport_ip32_frob_control(struct parport *p,
+ unsigned int mask,
+ unsigned int val)
+{
+ unsigned int c;
+ c = (__parport_ip32_read_control(p) & ~mask) ^ val;
+ __parport_ip32_write_control(p, c);
+}
+
+/**
+ * parport_ip32_read_control - return cached contents of the DCR register
+ * @p: pointer to &struct parport
+ *
+ * The return value is masked so as to only return the value of %DCR_STROBE,
+ * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
+ */
+static inline unsigned char parport_ip32_read_control(struct parport *p)
+{
+ const unsigned int rm =
+ DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
+ return __parport_ip32_read_control(p) & rm;
+}
+
+/**
+ * parport_ip32_write_control - set new contents for the DCR register
+ * @p: pointer to &struct parport
+ * @c: new value to write
+ *
+ * The value is masked so as to only change the value of %DCR_STROBE,
+ * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
+ */
+static inline void parport_ip32_write_control(struct parport *p,
+ unsigned char c)
+{
+ const unsigned int wm =
+ DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
+ CHECK_EXTRA_BITS(p, c, wm);
+ __parport_ip32_frob_control(p, wm, c & wm);
+}
+
+/**
+ * parport_ip32_frob_control - change bits from the DCR register
+ * @p: pointer to &struct parport
+ * @mask: bit mask of bits to change
+ * @val: new value for changed bits
+ *
+ * This differs from __parport_ip32_frob_control() in that it only allows to
+ * change the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
+ */
+static inline unsigned char parport_ip32_frob_control(struct parport *p,
+ unsigned char mask,
+ unsigned char val)
+{
+ const unsigned int wm =
+ DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
+ CHECK_EXTRA_BITS(p, mask, wm);
+ CHECK_EXTRA_BITS(p, val, wm);
+ __parport_ip32_frob_control(p, mask & wm, val & wm);
+ /* Return the updated (masked) soft copy of the DCR. */
+ return parport_ip32_read_control(p);
+}
+
+/**
+ * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
+ * @p: pointer to &struct parport
+ */
+static inline void parport_ip32_disable_irq(struct parport *p)
+{
+ __parport_ip32_frob_control(p, DCR_IRQ, 0);
+}
+
+/**
+ * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
+ * @p: pointer to &struct parport
+ */
+static inline void parport_ip32_enable_irq(struct parport *p)
+{
+ __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
+}
+
+/**
+ * parport_ip32_data_forward - enable host-to-peripheral communications
+ * @p: pointer to &struct parport
+ *
+ * Enable the data line drivers, for 8-bit host-to-peripheral communications.
+ */
+static inline void parport_ip32_data_forward(struct parport *p)
+{
+ __parport_ip32_frob_control(p, DCR_DIR, 0);
+}
+
+/**
+ * parport_ip32_data_reverse - enable peripheral-to-host communications
+ * @p: pointer to &struct parport
+ *
+ * Place the data bus in a high impedance state, if @p->modes has the
+ * PARPORT_MODE_TRISTATE bit set.
+ */
+static inline void parport_ip32_data_reverse(struct parport *p)
+{
+ __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
+}
+
+/**
+ * parport_ip32_init_state - for core parport code
+ * @dev: pointer to &struct pardevice
+ * @s: pointer to &struct parport_state to initialize
+ */
+static void parport_ip32_init_state(struct pardevice *dev,
+ struct parport_state *s)
+{
+ s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
+ s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
+}
+
+/**
+ * parport_ip32_save_state - for core parport code
+ * @p: pointer to &struct parport
+ * @s: pointer to &struct parport_state to save state to
+ */
+static void parport_ip32_save_state(struct parport *p,
+ struct parport_state *s)
+{
+ s->u.ip32.dcr = __parport_ip32_read_control(p);
+ s->u.ip32.ecr = parport_ip32_read_econtrol(p);
+}
+
+/**
+ * parport_ip32_restore_state - for core parport code
+ * @p: pointer to &struct parport
+ * @s: pointer to &struct parport_state to restore state from
+ */
+static void parport_ip32_restore_state(struct parport *p,
+ struct parport_state *s)
+{
+ /* Switch mode first (going through PS2 if needed), then restore the
+ * exact ECR and DCR values that were saved. */
+ parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
+ parport_ip32_write_econtrol(p, s->u.ip32.ecr);
+ __parport_ip32_write_control(p, s->u.ip32.dcr);
+}
+
+/*--- EPP mode functions -----------------------------------------------*/
+
+/**
+ * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
+ * @p: pointer to &struct parport
+ *
+ * Returns 1 if the Timeout bit is clear, and 0 otherwise.
+ */
+static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ unsigned int cleared;
+
+ /* Fast path: nothing to do if the Timeout bit is already clear. */
+ if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
+ cleared = 1;
+ else {
+ unsigned int r;
+ /* To clear timeout some chips require double read */
+ parport_ip32_read_status(p);
+ r = parport_ip32_read_status(p);
+ /* Some reset by writing 1 */
+ writeb(r | DSR_TIMEOUT, priv->regs.dsr);
+ /* Others by writing 0 */
+ writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
+
+ r = parport_ip32_read_status(p);
+ cleared = !(r & DSR_TIMEOUT);
+ }
+
+ pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
+ return cleared;
+}
+
+/**
+ * parport_ip32_epp_read - generic EPP read function
+ * @eppreg: I/O register to read from
+ * @p: pointer to &struct parport
+ * @buf: buffer to store read data
+ * @len: length of buffer @buf
+ * @flags: may be PARPORT_EPP_FAST
+ */
+static size_t parport_ip32_epp_read(void __iomem *eppreg,
+ struct parport *p, void *buf,
+ size_t len, int flags)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ size_t got;
+ parport_ip32_set_mode(p, ECR_MODE_EPP);
+ parport_ip32_data_reverse(p);
+ parport_ip32_write_control(p, DCR_nINIT);
+ if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
+ readsb(eppreg, buf, len);
+ if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
+ parport_ip32_clear_epp_timeout(p);
+ /* NOTE(review): -EIO is returned through a size_t return type. */
+ return -EIO;
+ }
+ got = len;
+ } else {
+ u8 *bufp = buf;
+ for (got = 0; got < len; got++) {
+ *bufp++ = readb(eppreg);
+ if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
+ parport_ip32_clear_epp_timeout(p);
+ break;
+ }
+ }
+ }
+ parport_ip32_data_forward(p);
+ parport_ip32_set_mode(p, ECR_MODE_PS2);
+ return got;
+}
+
+/**
+ * parport_ip32_epp_write - generic EPP write function
+ * @eppreg: I/O register to write to
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ * @flags: may be PARPORT_EPP_FAST
+ */
+static size_t parport_ip32_epp_write(void __iomem *eppreg,
+ struct parport *p, const void *buf,
+ size_t len, int flags)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ size_t written;
+ parport_ip32_set_mode(p, ECR_MODE_EPP);
+ parport_ip32_data_forward(p);
+ parport_ip32_write_control(p, DCR_nINIT);
+ if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
+ writesb(eppreg, buf, len);
+ if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
+ parport_ip32_clear_epp_timeout(p);
+ /* NOTE(review): -EIO is returned through a size_t return type. */
+ return -EIO;
+ }
+ written = len;
+ } else {
+ const u8 *bufp = buf;
+ for (written = 0; written < len; written++) {
+ writeb(*bufp++, eppreg);
+ if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
+ parport_ip32_clear_epp_timeout(p);
+ break;
+ }
+ }
+ }
+ parport_ip32_set_mode(p, ECR_MODE_PS2);
+ return written;
+}
+
+/**
+ * parport_ip32_epp_read_data - read a block of data in EPP mode
+ * @p: pointer to &struct parport
+ * @buf: buffer to store read data
+ * @len: length of buffer @buf
+ * @flags: may be PARPORT_EPP_FAST
+ */
+static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
+ size_t len, int flags)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ /* Data cycles go through the eppData0 register. */
+ return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
+}
+
+/**
+ * parport_ip32_epp_write_data - write a block of data in EPP mode
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ * @flags: may be PARPORT_EPP_FAST
+ */
+static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
+ size_t len, int flags)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
+}
+
+/**
+ * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
+ * @p: pointer to &struct parport
+ * @buf: buffer to store read data
+ * @len: length of buffer @buf
+ * @flags: may be PARPORT_EPP_FAST
+ */
+static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
+ size_t len, int flags)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ /* Address cycles go through the eppAddr register. */
+ return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
+}
+
+/**
+ * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ * @flags: may be PARPORT_EPP_FAST
+ */
+static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
+ size_t len, int flags)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
+}
+
+/*--- ECP mode functions (FIFO) ----------------------------------------*/
+
+/**
+ * parport_ip32_fifo_wait_break - check if the waiting function should return
+ * @p: pointer to &struct parport
+ * @expire: timeout expiring date, in jiffies
+ *
+ * parport_ip32_fifo_wait_break() checks if the waiting function should return
+ * immediately or not. The break conditions are:
+ * - expired timeout;
+ * - a pending signal;
+ * - nFault asserted low.
+ * This function also calls cond_resched().
+ */
+static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
+ unsigned long expire)
+{
+ /* Give other tasks a chance to run while the caller busy-waits. */
+ cond_resched();
+ if (time_after(jiffies, expire)) {
+ pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
+ return 1;
+ }
+ if (signal_pending(current)) {
+ pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
+ return 1;
+ }
+ if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
+ pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
+ * @p: pointer to &struct parport
+ *
+ * Returns the number of bytes that can safely be written in the FIFO. A
+ * return value of zero means that the calling function should terminate as
+ * fast as possible.
+ */
+static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ struct parport * const physport = p->physport;
+ unsigned long expire;
+ unsigned int count;
+ unsigned int ecr;
+
+ /* cad is presumably the currently-active device; use its timeout — verify. */
+ expire = jiffies + physport->cad->timeout;
+ count = 0;
+ while (1) {
+ if (parport_ip32_fifo_wait_break(p, expire))
+ break;
+
+ /* Check FIFO state. We do nothing when the FIFO is nor full,
+ * nor empty. It appears that the FIFO full bit is not always
+ * reliable, the FIFO state is sometimes wrongly reported, and
+ * the chip gets confused if we give it another byte. */
+ ecr = parport_ip32_read_econtrol(p);
+ if (ecr & ECR_F_EMPTY) {
+ /* FIFO is empty, fill it up */
+ count = priv->fifo_depth;
+ break;
+ }
+
+ /* Wait a moment... */
+ udelay(FIFO_POLLING_INTERVAL);
+ } /* while (1) */
+
+ return count;
+}
+
+/**
+ * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
+ * @p: pointer to &struct parport
+ *
+ * Returns the number of bytes that can safely be written in the FIFO. A
+ * return value of zero means that the calling function should terminate as
+ * fast as possible.
+ */
+static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
+{
+ /* Warn about a lost interrupt only once per module lifetime. */
+ static unsigned int lost_interrupt = 0;
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ struct parport * const physport = p->physport;
+ unsigned long nfault_timeout;
+ unsigned long expire;
+ unsigned int count;
+ unsigned int ecr;
+
+ nfault_timeout = min((unsigned long)physport->cad->timeout,
+ msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
+ expire = jiffies + physport->cad->timeout;
+ count = 0;
+ while (1) {
+ if (parport_ip32_fifo_wait_break(p, expire))
+ break;
+
+ /* Initialize mutex used to take interrupts into account */
+ reinit_completion(&priv->irq_complete);
+
+ /* Enable serviceIntr */
+ parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
+
+ /* Enabling serviceIntr while the FIFO is empty does not
+ * always generate an interrupt, so check for emptiness
+ * now. */
+ ecr = parport_ip32_read_econtrol(p);
+ if (!(ecr & ECR_F_EMPTY)) {
+ /* FIFO is not empty: wait for an interrupt or a
+ * timeout to occur */
+ wait_for_completion_interruptible_timeout(
+ &priv->irq_complete, nfault_timeout);
+ ecr = parport_ip32_read_econtrol(p);
+ if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
+ && !lost_interrupt) {
+ pr_warn(PPIP32 "%s: lost interrupt in %s\n",
+ p->name, __func__);
+ lost_interrupt = 1;
+ }
+ }
+
+ /* Disable serviceIntr */
+ parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
+
+ /* Check FIFO state */
+ if (ecr & ECR_F_EMPTY) {
+ /* FIFO is empty, fill it up */
+ count = priv->fifo_depth;
+ break;
+ } else if (ecr & ECR_SERVINTR) {
+ /* FIFO is not empty, but we know that can safely push
+ * writeIntrThreshold bytes into it */
+ count = priv->writeIntrThreshold;
+ break;
+ }
+ /* FIFO is not empty, and we did not get any interrupt.
+ * Either it's time to check for nFault, or a signal is
+ * pending. This is verified in
+ * parport_ip32_fifo_wait_break(), so we continue the loop. */
+ } /* while (1) */
+
+ return count;
+}
+
+/**
+ * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ *
+ * Uses PIO to write the contents of the buffer @buf into the parallel port
+ * FIFO. Returns the number of bytes that were actually written. It can work
+ * with or without the help of interrupts. The parallel port must be
+ * correctly initialized before calling parport_ip32_fifo_write_block_pio().
+ */
+static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
+ const void *buf, size_t len)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ const u8 *bufp = buf;
+ size_t left = len;
+
+ /* Route port IRQs to our completion instead of the parport core
+ * while we drive the FIFO ourselves. */
+ priv->irq_mode = PARPORT_IP32_IRQ_HERE;
+
+ while (left > 0) {
+ unsigned int count;
+
+ count = (p->irq == PARPORT_IRQ_NONE) ?
+ parport_ip32_fwp_wait_polling(p) :
+ parport_ip32_fwp_wait_interrupt(p);
+ if (count == 0)
+ break; /* Transmission should be stopped */
+ if (count > left)
+ count = left;
+ if (count == 1) {
+ writeb(*bufp, priv->regs.fifo);
+ bufp++, left--;
+ } else {
+ writesb(priv->regs.fifo, bufp, count);
+ bufp += count, left -= count;
+ }
+ }
+
+ priv->irq_mode = PARPORT_IP32_IRQ_FWD;
+
+ return len - left;
+}
+
+/**
+ * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ *
+ * Uses DMA to write the contents of the buffer @buf into the parallel port
+ * FIFO. Returns the number of bytes that were actually written. The
+ * parallel port must be correctly initialized before calling
+ * parport_ip32_fifo_write_block_dma().
+ */
+static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
+ const void *buf, size_t len)
+{
+ struct parport_ip32_private * const priv = p->physport->private_data;
+ struct parport * const physport = p->physport;
+ unsigned long nfault_timeout;
+ unsigned long expire;
+ size_t written;
+ unsigned int ecr;
+
+ priv->irq_mode = PARPORT_IP32_IRQ_HERE;
+
+ parport_ip32_dma_start(p, DMA_TO_DEVICE, (void *)buf, len);
+ reinit_completion(&priv->irq_complete);
+ parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
+
+ nfault_timeout = min((unsigned long)physport->cad->timeout,
+ msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
+ expire = jiffies + physport->cad->timeout;
+ while (1) {
+ if (parport_ip32_fifo_wait_break(p, expire))
+ break;
+ wait_for_completion_interruptible_timeout(&priv->irq_complete,
+ nfault_timeout);
+ ecr = parport_ip32_read_econtrol(p);
+ if (ecr & ECR_SERVINTR)
+ break; /* DMA transfer just finished */
+ }
+ parport_ip32_dma_stop(p);
+ /* The residue is only valid after parport_ip32_dma_stop() adjusted it. */
+ written = len - parport_ip32_dma_get_residue();
+
+ priv->irq_mode = PARPORT_IP32_IRQ_FWD;
+
+ return written;
+}
+
+/**
+ * parport_ip32_fifo_write_block - write a block of data
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ *
+ * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
+ * p FIFO. Returns the number of bytes that were actually written.
+ */
+static size_t parport_ip32_fifo_write_block(struct parport *p,
+ const void *buf, size_t len)
+{
+ size_t written = 0;
+ /* Nothing to do for zero-length writes. */
+ if (len)
+ /* FIXME - Maybe some threshold value should be set for @len
+ * under which we revert to PIO mode? */
+ written = (p->modes & PARPORT_MODE_DMA) ?
+ parport_ip32_fifo_write_block_dma(p, buf, len) :
+ parport_ip32_fifo_write_block_pio(p, buf, len);
+ return written;
+}
+
+/**
+ * parport_ip32_drain_fifo - wait for FIFO to empty
+ * @p: pointer to &struct parport
+ * @timeout: timeout, in jiffies
+ *
+ * This function waits for FIFO to empty. It returns 1 when FIFO is empty, or
+ * 0 if the timeout @timeout is reached before, or if a signal is pending.
+ */
+static unsigned int parport_ip32_drain_fifo(struct parport *p,
+ unsigned long timeout)
+{
+ unsigned long expire = jiffies + timeout;
+ unsigned int polling_interval;
+ unsigned int counter;
+
+ /* Busy wait for approx. 200us */
+ for (counter = 0; counter < 40; counter++) {
+ if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ if (signal_pending(current))
+ break;
+ udelay(5);
+ }
+ /* Poll slowly. Polling interval starts with 1 millisecond, and is
+ * increased exponentially until 128. */
+ /* Exponential backoff: 1 ms doubling up to a 128 ms cap. */
+ polling_interval = 1; /* msecs */
+ while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
+ if (time_after_eq(jiffies, expire))
+ break;
+ msleep_interruptible(polling_interval);
+ if (signal_pending(current))
+ break;
+ if (polling_interval < 128)
+ polling_interval *= 2;
+ }
+
+ return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
+}
+
+/**
+ * parport_ip32_get_fifo_residue - reset FIFO
+ * @p: pointer to &struct parport
+ * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
+ *
+ * This function resets FIFO, and returns the number of bytes remaining in it
+ * (including, possibly, one extra byte held in the output shift register).
+ * On return the port is left in forward PS2 mode.
+ */
+static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
+						  unsigned int mode)
+{
+	struct parport_ip32_private * const priv = p->physport->private_data;
+	unsigned int residue;
+	unsigned int cnfga;
+
+	/* FIXME - We are missing one byte if the printer is off-line. I
+	 * don't know how to detect this. It looks that the full bit is not
+	 * always reliable. For the moment, the problem is avoided in most
+	 * cases by testing for BUSY in parport_ip32_compat_write_data().
+	 */
+	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
+		residue = 0;
+	else {
+		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);
+
+		/* Stop all transfers.
+		 *
+		 * Microsoft's document instructs to drive DCR_STROBE to 0,
+		 * but it doesn't work (at least in Compatibility mode, not
+		 * tested in ECP mode). Switching directly to Test mode (as
+		 * in parport_pc) is not an option: it does confuse the port,
+		 * ECP service interrupts are no more working after that. A
+		 * hard reset is then needed to revert to a sane state.
+		 *
+		 * Let's hope that the FIFO is really stuck and that the
+		 * peripheral doesn't wake up now.
+		 */
+		parport_ip32_frob_control(p, DCR_STROBE, 0);
+
+		/* Fill up FIFO.  Counting down from fifo_depth while padding
+		 * with zero bytes until ECR reports FULL: when the loop
+		 * stops, @residue equals the number of PWords that were
+		 * already stuck in the FIFO. */
+		for (residue = priv->fifo_depth; residue > 0; residue--) {
+			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
+				break;
+			writeb(0x00, priv->regs.fifo);
+		}
+	}
+	if (residue)
+		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
+			  p->name, residue,
+			  (residue == 1) ? " was" : "s were");
+
+	/* Now reset the FIFO */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+
+	/* Host recovery for ECP mode: pulse nInit low, wait for PError to
+	 * drop, then raise STROBE and nInit again and wait for PError to
+	 * come back.  Timeouts are only logged; recovery continues. */
+	if (mode == ECR_MODE_ECP) {
+		parport_ip32_data_reverse(p);
+		parport_ip32_frob_control(p, DCR_nINIT, 0);
+		if (parport_wait_peripheral(p, DSR_PERROR, 0))
+			pr_debug1(PPIP32 "%s: PEerror timeout 1 in %s\n",
+				  p->name, __func__);
+		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
+		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
+		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
+			pr_debug1(PPIP32 "%s: PEerror timeout 2 in %s\n",
+				  p->name, __func__);
+	}
+
+	/* Adjust residue if needed: a clear CNFGA_nBYTEINTRANS indicates a
+	 * byte still in transit, which is counted as an extra residue. */
+	parport_ip32_set_mode(p, ECR_MODE_CFG);
+	cnfga = readb(priv->regs.cnfgA);
+	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
+		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
+			  p->name, cnfga);
+		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
+			  p->name);
+		residue++;
+	}
+
+	/* Don't care about partial PWords since we do not support
+	 * PWord != 1 byte. */
+
+	/* Back to forward PS2 mode. */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	parport_ip32_data_forward(p);
+
+	return residue;
+}
+
+/**
+ * parport_ip32_compat_write_data - write a block of data in SPP mode
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ * @flags: ignored
+ *
+ * Hardware-assisted compatibility-mode write: fills the FIFO (PIO or DMA)
+ * in ECR_MODE_PPF and lets the chip clock the data out.  Falls back to the
+ * generic software implementation when the caller cannot sleep.  Returns
+ * the number of bytes actually accepted (FIFO residue is subtracted).
+ */
+static size_t parport_ip32_compat_write_data(struct parport *p,
+					     const void *buf, size_t len,
+					     int flags)
+{
+	/* Persists across calls to avoid flooding the log with repeated
+	 * "not ready" messages.  NOTE(review): being function-static it is
+	 * shared by all ports, but the IP32 has a single built-in port. */
+	static unsigned int ready_before = 1;
+	struct parport_ip32_private * const priv = p->physport->private_data;
+	struct parport * const physport = p->physport;
+	size_t written = 0;
+
+	/* Special case: a timeout of zero means we cannot call schedule().
+	 * Also if O_NONBLOCK is set then use the default implementation. */
+	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
+		return parport_ieee1284_write_compat(p, buf, len, flags);
+
+	/* Reset FIFO, go in forward mode, and disable ackIntEn */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
+	parport_ip32_data_forward(p);
+	parport_ip32_disable_irq(p);
+	parport_ip32_set_mode(p, ECR_MODE_PPF);
+	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
+
+	/* Wait for peripheral to become ready */
+	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
+				       DSR_nBUSY | DSR_nFAULT)) {
+		/* Avoid to flood the logs */
+		if (ready_before)
+			pr_info(PPIP32 "%s: not ready in %s\n",
+				p->name, __func__);
+		ready_before = 0;
+		goto stop;
+	}
+	ready_before = 1;
+
+	written = parport_ip32_fifo_write_block(p, buf, len);
+
+	/* Wait FIFO to empty. Timeout is proportional to FIFO_depth. */
+	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
+
+	/* Check for a potential residue */
+	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);
+
+	/* Then, wait for BUSY to get low. */
+	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
+		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
+		       p->name, __func__);
+
+stop:
+	/* Reset FIFO */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+
+	return written;
+}
+
+/*
+ * FIXME - Insert here parport_ip32_ecp_read_data().
+ */
+
+/**
+ * parport_ip32_ecp_write_data - write a block of data in ECP mode
+ * @p: pointer to &struct parport
+ * @buf: buffer of data to write
+ * @len: length of buffer @buf
+ * @flags: ignored
+ *
+ * Hardware-assisted ECP forward transfer.  Negotiates back to forward
+ * phase if needed, then fills the FIFO in ECR_MODE_ECP.  Falls back to the
+ * generic software implementation when the caller cannot sleep.  Returns
+ * the number of bytes actually accepted (FIFO residue is subtracted).
+ */
+static size_t parport_ip32_ecp_write_data(struct parport *p,
+					  const void *buf, size_t len,
+					  int flags)
+{
+	/* Persists across calls to avoid flooding the log with repeated
+	 * "not ready" messages (same pattern as in
+	 * parport_ip32_compat_write_data()). */
+	static unsigned int ready_before = 1;
+	struct parport_ip32_private * const priv = p->physport->private_data;
+	struct parport * const physport = p->physport;
+	size_t written = 0;
+
+	/* Special case: a timeout of zero means we cannot call schedule().
+	 * Also if O_NONBLOCK is set then use the default implementation. */
+	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
+		return parport_ieee1284_ecp_write_data(p, buf, len, flags);
+
+	/* Negotiate to forward mode if necessary. */
+	if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
+		/* Event 47: Set nInit high. */
+		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
+					     DCR_nINIT | DCR_AUTOFD);
+
+		/* Event 49: PError goes high. */
+		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
+			printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
+			       p->name, __func__);
+			physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
+			return 0;
+		}
+	}
+
+	/* Reset FIFO, go in forward mode, and disable ackIntEn */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
+	parport_ip32_data_forward(p);
+	parport_ip32_disable_irq(p);
+	parport_ip32_set_mode(p, ECR_MODE_ECP);
+	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
+
+	/* Wait for peripheral to become ready */
+	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
+				       DSR_nBUSY | DSR_nFAULT)) {
+		/* Avoid to flood the logs */
+		if (ready_before)
+			pr_info(PPIP32 "%s: not ready in %s\n",
+				p->name, __func__);
+		ready_before = 0;
+		goto stop;
+	}
+	ready_before = 1;
+
+	written = parport_ip32_fifo_write_block(p, buf, len);
+
+	/* Wait FIFO to empty. Timeout is proportional to FIFO_depth. */
+	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
+
+	/* Check for a potential residue */
+	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
+
+	/* Then, wait for BUSY to get low. */
+	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
+		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
+		       p->name, __func__);
+
+stop:
+	/* Reset FIFO */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+
+	return written;
+}
+
+/*
+ * FIXME - Insert here parport_ip32_ecp_write_addr().
+ */
+
+/*--- Default parport operations ---------------------------------------*/
+
+/* Template of default operations, copied into a writable struct by
+ * parport_ip32_probe_port().  All transfer modes start as the generic
+ * IEEE 1284 software routines; hardware-accelerated variants (SPP, EPP,
+ * ECP) are patched into the copy at probe time when the corresponding
+ * feature bit is enabled. */
+static const struct parport_operations parport_ip32_ops __initconst = {
+	.write_data = parport_ip32_write_data,
+	.read_data = parport_ip32_read_data,
+
+	.write_control = parport_ip32_write_control,
+	.read_control = parport_ip32_read_control,
+	.frob_control = parport_ip32_frob_control,
+
+	.read_status = parport_ip32_read_status,
+
+	.enable_irq = parport_ip32_enable_irq,
+	.disable_irq = parport_ip32_disable_irq,
+
+	.data_forward = parport_ip32_data_forward,
+	.data_reverse = parport_ip32_data_reverse,
+
+	.init_state = parport_ip32_init_state,
+	.save_state = parport_ip32_save_state,
+	.restore_state = parport_ip32_restore_state,
+
+	.epp_write_data = parport_ieee1284_epp_write_data,
+	.epp_read_data = parport_ieee1284_epp_read_data,
+	.epp_write_addr = parport_ieee1284_epp_write_addr,
+	.epp_read_addr = parport_ieee1284_epp_read_addr,
+
+	.ecp_write_data = parport_ieee1284_ecp_write_data,
+	.ecp_read_data = parport_ieee1284_ecp_read_data,
+	.ecp_write_addr = parport_ieee1284_ecp_write_addr,
+
+	.compat_write_data = parport_ieee1284_write_compat,
+	.nibble_read_data = parport_ieee1284_read_nibble,
+	.byte_read_data = parport_ieee1284_read_byte,
+
+	.owner = THIS_MODULE,
+};
+
+/*--- Device detection -------------------------------------------------*/
+
+/**
+ * parport_ip32_ecp_supported - check for an ECP port
+ * @p: pointer to the &parport structure
+ *
+ * Returns 1 if an ECP port is found, and 0 otherwise. This function actually
+ * checks if an Extended Control Register seems to be present. On successful
+ * return, the port is placed in SPP mode.
+ */
+static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
+{
+	struct parport_ip32_private * const priv = p->physport->private_data;
+	unsigned int ecr;
+
+	/* Write a known pattern and read it back; a real ECR must echo it
+	 * with the (read-only) FIFO-empty bit set. */
+	ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
+	writeb(ecr, priv->regs.ecr);
+	if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
+		goto fail;
+
+	pr_probe(p, "Found working ECR register\n");
+	parport_ip32_set_mode(p, ECR_MODE_SPP);
+	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
+	return 1;
+
+fail:
+	pr_probe(p, "ECR register not found\n");
+	return 0;
+}
+
+/**
+ * parport_ip32_fifo_supported - check for FIFO parameters
+ * @p: pointer to the &parport structure
+ *
+ * Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on
+ * success, and 0 otherwise. Adjust FIFO parameters (PWord size, FIFO depth,
+ * read/write interrupt thresholds) in the private parport structure. On
+ * return, the port is placed in SPP mode.
+ */
+static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
+{
+	struct parport_ip32_private * const priv = p->physport->private_data;
+	unsigned int configa, configb;
+	unsigned int pword;
+	unsigned int i;
+
+	/* Configuration mode */
+	parport_ip32_set_mode(p, ECR_MODE_CFG);
+	configa = readb(priv->regs.cnfgA);
+	configb = readb(priv->regs.cnfgB);
+
+	/* Find out PWord size */
+	switch (configa & CNFGA_ID_MASK) {
+	case CNFGA_ID_8:
+		pword = 1;
+		break;
+	case CNFGA_ID_16:
+		pword = 2;
+		break;
+	case CNFGA_ID_32:
+		pword = 4;
+		break;
+	default:
+		pr_probe(p, "Unknown implementation ID: 0x%0x\n",
+			 (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
+		goto fail;
+	}
+	if (pword != 1) {
+		pr_probe(p, "Unsupported PWord size: %u\n", pword);
+		goto fail;
+	}
+	priv->pword = pword;
+	pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);
+
+	/* Check for compression support */
+	writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
+	if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
+		pr_probe(p, "Hardware compression detected (unsupported)\n");
+	writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);
+
+	/* Reset FIFO and go in test mode (no interrupt, no DMA) */
+	parport_ip32_set_mode(p, ECR_MODE_TST);
+
+	/* FIFO must be empty now */
+	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
+		pr_probe(p, "FIFO not reset\n");
+		goto fail;
+	}
+
+	/* Find out FIFO depth by writing until ECR reports FULL.  The
+	 * 1024 cap guards against a FULL bit that never rises. */
+	priv->fifo_depth = 0;
+	for (i = 0; i < 1024; i++) {
+		if (readb(priv->regs.ecr) & ECR_F_FULL) {
+			/* FIFO full */
+			priv->fifo_depth = i;
+			break;
+		}
+		writeb((u8)i, priv->regs.fifo);
+	}
+	if (i >= 1024) {
+		pr_probe(p, "Can't fill FIFO\n");
+		goto fail;
+	}
+	if (!priv->fifo_depth) {
+		pr_probe(p, "Can't get FIFO depth\n");
+		goto fail;
+	}
+	pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);
+
+	/* Enable interrupts */
+	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
+
+	/* Find out writeIntrThreshold: number of PWords we know we can write
+	 * if we get an interrupt.  Drain the FIFO byte by byte, verifying
+	 * the pattern written above, and note when SERVINTR first rises. */
+	priv->writeIntrThreshold = 0;
+	for (i = 0; i < priv->fifo_depth; i++) {
+		if (readb(priv->regs.fifo) != (u8)i) {
+			pr_probe(p, "Invalid data in FIFO\n");
+			goto fail;
+		}
+		if (!priv->writeIntrThreshold
+		    && readb(priv->regs.ecr) & ECR_SERVINTR)
+			/* writeIntrThreshold reached */
+			priv->writeIntrThreshold = i + 1;
+		if (i + 1 < priv->fifo_depth
+		    && readb(priv->regs.ecr) & ECR_F_EMPTY) {
+			/* FIFO empty before the last byte? */
+			pr_probe(p, "Data lost in FIFO\n");
+			goto fail;
+		}
+	}
+	if (!priv->writeIntrThreshold) {
+		pr_probe(p, "Can't get writeIntrThreshold\n");
+		goto fail;
+	}
+	pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);
+
+	/* FIFO must be empty now */
+	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
+		pr_probe(p, "Can't empty FIFO\n");
+		goto fail;
+	}
+
+	/* Reset FIFO */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	/* Set reverse direction (must be in PS2 mode) */
+	parport_ip32_data_reverse(p);
+	/* Test FIFO, no interrupt, no DMA */
+	parport_ip32_set_mode(p, ECR_MODE_TST);
+	/* Enable interrupts */
+	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
+
+	/* Find out readIntrThreshold: number of PWords we can read if we get
+	 * an interrupt. */
+	priv->readIntrThreshold = 0;
+	for (i = 0; i < priv->fifo_depth; i++) {
+		writeb(0xaa, priv->regs.fifo);
+		if (readb(priv->regs.ecr) & ECR_SERVINTR) {
+			/* readIntrThreshold reached */
+			priv->readIntrThreshold = i + 1;
+			break;
+		}
+	}
+	if (!priv->readIntrThreshold) {
+		pr_probe(p, "Can't get readIntrThreshold\n");
+		goto fail;
+	}
+	pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);
+
+	/* Reset ECR */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	parport_ip32_data_forward(p);
+	parport_ip32_set_mode(p, ECR_MODE_SPP);
+	return 1;
+
+fail:
+	priv->fifo_depth = 0;
+	parport_ip32_set_mode(p, ECR_MODE_SPP);
+	return 0;
+}
+
+/*--- Initialization code ----------------------------------------------*/
+
+/**
+ * parport_ip32_make_isa_registers - compute (ISA) register addresses
+ * @regs: pointer to &struct parport_ip32_regs to fill
+ * @base: base address of standard and EPP registers
+ * @base_hi: base address of ECP registers
+ * @regshift: how much to shift register offset by
+ *
+ * Compute register addresses, according to the ISA standard. The addresses
+ * of the standard and EPP registers are computed from address @base. The
+ * addresses of the ECP registers are computed from address @base_hi.
+ *
+ * Note that some registers legitimately share an address: ecpAFifo
+ * overlays the data register at base+0, and cnfgA overlays the FIFO at
+ * base_hi+0 (which register responds depends on the current ECR mode).
+ */
+static void __init
+parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
+				void __iomem *base, void __iomem *base_hi,
+				unsigned int regshift)
+{
+#define r_base(offset)    ((u8 __iomem *)base + ((offset) << regshift))
+#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
+	*regs = (struct parport_ip32_regs){
+		.data = r_base(0),
+		.dsr = r_base(1),
+		.dcr = r_base(2),
+		.eppAddr = r_base(3),
+		.eppData0 = r_base(4),
+		.eppData1 = r_base(5),
+		.eppData2 = r_base(6),
+		.eppData3 = r_base(7),
+		.ecpAFifo = r_base(0),
+		.fifo = r_base_hi(0),
+		.cnfgA = r_base_hi(0),
+		.cnfgB = r_base_hi(1),
+		.ecr = r_base_hi(2)
+	};
+#undef r_base_hi
+#undef r_base
+}
+
+/**
+ * parport_ip32_probe_port - probe and register IP32 built-in parallel port
+ *
+ * Returns the new allocated &parport structure. On error, an error code is
+ * encoded in return value with the ERR_PTR function.
+ */
+static __init struct parport *parport_ip32_probe_port(void)
+{
+	struct parport_ip32_regs regs;
+	struct parport_ip32_private *priv = NULL;
+	struct parport_operations *ops = NULL;
+	struct parport *p = NULL;
+	int err;
+
+	parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
+					&mace->isa.ecp1284, 8 /* regshift */);
+
+	/* Allocate a writable copy of the operations and the private data,
+	 * then register the port; a single NULL check covers all three.
+	 * NOTE(review): if the ops kmalloc failed, parport_register_port()
+	 * is called with a NULL ops pointer before the check fires —
+	 * confirm it tolerates this on the registration path. */
+	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
+	priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
+	p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
+	if (ops == NULL || priv == NULL || p == NULL) {
+		err = -ENOMEM;
+		goto fail;
+	}
+	p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
+	p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
+	p->private_data = priv;
+
+	/* Fill the writable copies from the __initconst templates. */
+	*ops = parport_ip32_ops;
+	*priv = (struct parport_ip32_private){
+		.regs = regs,
+		.dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
+				DCR_AUTOFD | DCR_STROBE,
+		.irq_mode = PARPORT_IP32_IRQ_FWD,
+	};
+	init_completion(&priv->irq_complete);
+
+	/* Probe port. */
+	if (!parport_ip32_ecp_supported(p)) {
+		err = -ENODEV;
+		goto fail;
+	}
+	parport_ip32_dump_state(p, "begin init", 0);
+
+	/* We found what looks like a working ECR register. Simply assume
+	 * that all modes are correctly supported. Enable basic modes. */
+	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
+	p->modes |= PARPORT_MODE_TRISTATE;
+
+	if (!parport_ip32_fifo_supported(p)) {
+		pr_warn(PPIP32 "%s: error: FIFO disabled\n", p->name);
+		/* Disable hardware modes depending on a working FIFO. */
+		features &= ~PARPORT_IP32_ENABLE_SPP;
+		features &= ~PARPORT_IP32_ENABLE_ECP;
+		/* DMA is not needed if FIFO is not supported.  */
+		features &= ~PARPORT_IP32_ENABLE_DMA;
+	}
+
+	/* Request IRQ */
+	if (features & PARPORT_IP32_ENABLE_IRQ) {
+		int irq = MACEISA_PARALLEL_IRQ;
+		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
+			pr_warn(PPIP32 "%s: error: IRQ disabled\n", p->name);
+			/* DMA cannot work without interrupts. */
+			features &= ~PARPORT_IP32_ENABLE_DMA;
+		} else {
+			pr_probe(p, "Interrupt support enabled\n");
+			p->irq = irq;
+			priv->dcr_writable |= DCR_IRQ;
+		}
+	}
+
+	/* Allocate DMA resources */
+	if (features & PARPORT_IP32_ENABLE_DMA) {
+		if (parport_ip32_dma_register())
+			pr_warn(PPIP32 "%s: error: DMA disabled\n", p->name);
+		else {
+			pr_probe(p, "DMA support enabled\n");
+			p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
+			p->modes |= PARPORT_MODE_DMA;
+		}
+	}
+
+	/* Patch hardware-accelerated transfer routines into the ops copy,
+	 * replacing the generic IEEE 1284 software defaults, for each
+	 * feature that survived the probes above. */
+	if (features & PARPORT_IP32_ENABLE_SPP) {
+		/* Enable compatibility FIFO mode */
+		p->ops->compat_write_data = parport_ip32_compat_write_data;
+		p->modes |= PARPORT_MODE_COMPAT;
+		pr_probe(p, "Hardware support for SPP mode enabled\n");
+	}
+	if (features & PARPORT_IP32_ENABLE_EPP) {
+		/* Set up access functions to use EPP hardware. */
+		p->ops->epp_read_data = parport_ip32_epp_read_data;
+		p->ops->epp_write_data = parport_ip32_epp_write_data;
+		p->ops->epp_read_addr = parport_ip32_epp_read_addr;
+		p->ops->epp_write_addr = parport_ip32_epp_write_addr;
+		p->modes |= PARPORT_MODE_EPP;
+		pr_probe(p, "Hardware support for EPP mode enabled\n");
+	}
+	if (features & PARPORT_IP32_ENABLE_ECP) {
+		/* Enable ECP FIFO mode */
+		p->ops->ecp_write_data = parport_ip32_ecp_write_data;
+		/* FIXME - not implemented */
+/*		p->ops->ecp_read_data  = parport_ip32_ecp_read_data; */
+/*		p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
+		p->modes |= PARPORT_MODE_ECP;
+		pr_probe(p, "Hardware support for ECP mode enabled\n");
+	}
+
+	/* Initialize the port with sensible values */
+	parport_ip32_set_mode(p, ECR_MODE_PS2);
+	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
+	parport_ip32_data_forward(p);
+	parport_ip32_disable_irq(p);
+	parport_ip32_write_data(p, 0x00);
+	parport_ip32_dump_state(p, "end init", 0);
+
+	/* Print out what we found */
+	pr_info("%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi);
+	if (p->irq != PARPORT_IRQ_NONE)
+		pr_cont(", irq %d", p->irq);
+	pr_cont(" [");
+#define printmode(x)							\
+do {									\
+	if (p->modes & PARPORT_MODE_##x)				\
+		pr_cont("%s%s", f++ ? "," : "", #x);			\
+} while (0)
+	{
+		unsigned int f = 0;
+		printmode(PCSPP);
+		printmode(TRISTATE);
+		printmode(COMPAT);
+		printmode(EPP);
+		printmode(ECP);
+		printmode(DMA);
+	}
+#undef printmode
+	pr_cont("]\n");
+
+	parport_announce_port(p);
+	return p;
+
+fail:
+	/* kfree(NULL) is a no-op, so partial allocations are safe here. */
+	if (p)
+		parport_put_port(p);
+	kfree(priv);
+	kfree(ops);
+	return ERR_PTR(err);
+}
+
+/**
+ * parport_ip32_unregister_port - unregister a parallel port
+ * @p: pointer to the &struct parport
+ *
+ * Unregisters a parallel port and free previously allocated resources
+ * (memory, IRQ, ...).  Teardown mirrors parport_ip32_probe_port() in
+ * reverse: remove the port first so no new users appear, then release
+ * DMA and IRQ, and finally drop the port reference and free the private
+ * data and operations copies.
+ */
+static __exit void parport_ip32_unregister_port(struct parport *p)
+{
+	struct parport_ip32_private * const priv = p->physport->private_data;
+	struct parport_operations *ops = p->ops;
+
+	parport_remove_port(p);
+	if (p->modes & PARPORT_MODE_DMA)
+		parport_ip32_dma_unregister();
+	if (p->irq != PARPORT_IRQ_NONE)
+		free_irq(p->irq, p);
+	parport_put_port(p);
+	kfree(priv);
+	kfree(ops);
+}
+
+/**
+ * parport_ip32_init - module initialization function
+ *
+ * Probes and registers the single built-in port, keeping the result in
+ * the module-global this_port.  Returns 0 on success or the negative
+ * error code encoded in the probe result.
+ */
+static int __init parport_ip32_init(void)
+{
+	struct parport *port;
+
+	pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
+	port = parport_ip32_probe_port();
+	this_port = port;
+	if (IS_ERR(port))
+		return PTR_ERR(port);
+	return 0;
+}
+
+/**
+ * parport_ip32_exit - module termination function
+ *
+ * Unregisters the single port probed by parport_ip32_init(); the module
+ * only loads successfully when this_port is valid, so no check is needed.
+ */
+static void __exit parport_ip32_exit(void)
+{
+	parport_ip32_unregister_port(this_port);
+}
+
+/*--- Module stuff -----------------------------------------------------*/
+
+MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
+MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.6"); /* update in parport_ip32_init() too */
+
+module_init(parport_ip32_init);
+module_exit(parport_ip32_exit);
+
+module_param(verbose_probing, bool, S_IRUGO);
+MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");
+
+module_param(features, uint, S_IRUGO);
+MODULE_PARM_DESC(features,
+ "Bit mask of features to enable"
+ ", bit 0: IRQ support"
+ ", bit 1: DMA support"
+ ", bit 2: hardware SPP mode"
+ ", bit 3: hardware EPP mode"
+ ", bit 4: hardware ECP mode");
+
+/*--- Inform (X)Emacs about preferred coding style ---------------------*/
+/*
+ * Local Variables:
+ * mode: c
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * fill-column: 78
+ * ispell-local-dictionary: "american"
+ * End:
+ */
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
new file mode 100644
index 000000000..d6bbe8446
--- /dev/null
+++ b/drivers/parport/parport_mfc3.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Low-level parallel port routines for the Multiface 3 card
+ *
+ * Author: Joerg Dorchain <joerg@dorchain.net>
+ *
+ * (C) The elitist m68k Users(TM)
+ *
+ * based on the existing parport_amiga and lp_mfc
+ *
+ *
+ * From the MFC3 documentation:
+ *
+ * Miscellaneous PIA Details
+ * -------------------------
+ *
+ * The two open-drain interrupt outputs /IRQA and /IRQB are routed to
+ * /INT2 of the Z2 bus.
+ *
+ * The CPU data bus of the PIA (D0-D7) is connected to D8-D15 on the Z2
+ * bus. This means that any PIA registers are accessed at even addresses.
+ *
+ * Centronics Pin Connections for the PIA
+ * --------------------------------------
+ *
+ * The following table shows the connections between the PIA and the
+ * Centronics interface connector. These connections implement a single, but
+ * very complete, Centronics type interface. The Pin column gives the pin
+ * numbers of the PIA. The Centronics pin numbers can be found in the section
+ * "Parallel Connectors".
+ *
+ *
+ * Pin | PIA | Dir | Centronics Names
+ * -------+-----+-----+---------------------------------------------------------
+ * 19 | CB2 | --> | /STROBE (aka /DRDY)
+ * 10-17 | PBx | <-> | DATA0 - DATA7
+ * 18 | CB1 | <-- | /ACK
+ * 40 | CA1 | <-- | BUSY
+ * 3 | PA1 | <-- | PAPER-OUT (aka POUT)
+ * 4 | PA2 | <-- | SELECTED (aka SEL)
+ * 9 | PA7 | --> | /INIT (aka /RESET or /INPUT-PRIME)
+ * 6 | PA4 | <-- | /ERROR (aka /FAULT)
+ * 7 | PA5 | --> | DIR (aka /SELECT-IN)
+ * 8 | PA6 | --> | /AUTO-FEED-XT
+ * 39 | CA2 | --> | open
+ * 5 | PA3 | <-- | /ACK (same as CB1!)
+ * 2 | PA0 | <-- | BUSY (same as CA1!)
+ * -------+-----+-----+---------------------------------------------------------
+ *
+ * Should be enough to understand some of the driver.
+ *
+ * Per convention for normal use the port registers are visible.
+ * If you need the data direction registers, restore the value in the
+ * control register.
+ */
+
+#include "multiface.h"
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/parport.h>
+#include <linux/delay.h>
+#include <linux/mc6821.h>
+#include <linux/zorro.h>
+#include <linux/interrupt.h>
+#include <asm/setup.h>
+#include <asm/amigahw.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+
+/* Maximum Number of Cards supported */
+#define MAX_MFC 5
+
+#undef DEBUG
+
+static struct parport *this_port[MAX_MFC] = {NULL, };
+static volatile int dummy; /* for triggering reads */
+
+#define pia(dev) ((struct pia *)(dev->base))
+static struct parport_operations pp_mfc3_ops;
+
+/* Write one byte to the data port (PIA port B).  The preceding dummy
+ * read acknowledges a pending interrupt; per the MFC3 wiring, accessing
+ * port B also pulses /STROBE in hardware. */
+static void mfc3_write_data(struct parport *p, unsigned char data)
+{
+	pr_debug("write_data %c\n", data);
+
+	dummy = pia(p)->pprb; /* clears irq bit */
+	/* Triggers also /STROBE.*/
+	pia(p)->pprb = data;
+}
+
+/* Read the data port (PIA port B); the access itself clears the pending
+ * interrupt bit and pulses /STROBE in hardware. */
+static unsigned char mfc3_read_data(struct parport *p)
+{
+	/* clears interrupt bit. Triggers also /STROBE. */
+	return pia(p)->pprb;
+}
+
+/* Translate generic parport control bits into the MFC3 PIA port A
+ * layout.  /SELECT_IN (bit 5) and /AUTO-FEED-XT (bit 6) are active low
+ * on the wire, so they start asserted-off (set) and are cleared when the
+ * corresponding control bit is on.  STROBE is driven by hardware on data
+ * port access and therefore has no image here. */
+static unsigned char control_pc_to_mfc3(unsigned char control)
+{
+	unsigned char mfc3 = 32 | 64;	/* /SELECT_IN, /AUTO-FEED-XT idle */
+
+	if (control & PARPORT_CONTROL_SELECT)	/* SELECP */
+		mfc3 &= ~32;			/* assert /SELECT_IN */
+	if (control & PARPORT_CONTROL_AUTOFD)	/* AUTOLF */
+		mfc3 &= ~64;			/* assert /AUTO-FEED-XT */
+	if (control & PARPORT_CONTROL_INIT)	/* INITP */
+		mfc3 |= 128;
+	return mfc3;
+}
+
+/* Inverse of control_pc_to_mfc3(): translate PIA port A control bits back
+ * into the generic parport control byte.  STROBE and the active-low
+ * SELECT/AUTOFD lines default to "on" and are cleared when the wire bit
+ * says the line is idle. */
+static unsigned char control_mfc3_to_pc(unsigned char control)
+{
+	unsigned char pc = PARPORT_CONTROL_STROBE
+		| PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_SELECT;
+
+	if (control & 32)	/* /SELECT_IN idle */
+		pc &= ~PARPORT_CONTROL_SELECT;
+	if (control & 64)	/* /AUTOLF idle */
+		pc &= ~PARPORT_CONTROL_AUTOFD;
+	if (control & 128)	/* /INITP */
+		pc |= PARPORT_CONTROL_INIT;
+	return pc;
+}
+
+/* Write the control lines: keep the low 5 (status/input) bits of PIA
+ * port A and replace the top 3 control bits with the translated value. */
+static void mfc3_write_control(struct parport *p, unsigned char control)
+{
+	pr_debug("write_control %02x\n", control);
+	pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
+}
+
+/* Read back the control lines from the top 3 bits of PIA port A. */
+static unsigned char mfc3_read_control( struct parport *p)
+{
+	pr_debug("read_control\n");
+	return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
+}
+
+/* Standard parport frob: read-modify-write the control register, clearing
+ * the bits in @mask and toggling with @val; returns the previous value. */
+static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, unsigned char val)
+{
+	unsigned char old;
+
+	pr_debug("frob_control mask %02x, value %02x\n", mask, val);
+	old = mfc3_read_control(p);
+	mfc3_write_control(p, (old & ~mask) ^ val);
+	return old;
+}
+
+/* Translate the low 5 bits of PIA port A into the generic parport status
+ * byte.  BUSY is inverted on the wire: the accumulator starts with
+ * PARPORT_STATUS_BUSY set and drops it when the hardware bit is on. */
+static unsigned char status_mfc3_to_pc(unsigned char status)
+{
+	unsigned char pc = PARPORT_STATUS_BUSY;
+
+	if (status & 16)	/* /ERROR */
+		pc |= PARPORT_STATUS_ERROR;
+	if (status & 8)		/* Ack */
+		pc |= PARPORT_STATUS_ACK;
+	if (status & 4)		/* Selected */
+		pc |= PARPORT_STATUS_SELECT;
+	if (status & 2)		/* PaperOut */
+		pc |= PARPORT_STATUS_PAPEROUT;
+	if (status & 1)		/* Busy */
+		pc &= ~PARPORT_STATUS_BUSY;
+
+	return pc;
+}
+
+/* Read the peripheral status lines from the low 5 bits of PIA port A. */
+static unsigned char mfc3_read_status(struct parport *p)
+{
+	unsigned char status;
+
+	status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
+	pr_debug("read_status %02x\n", status);
+	return status;
+}
+
+static int use_cnt;
+
+/* Shared interrupt handler for all MFC3 boards: poll every registered
+ * port, and for each whose PIA flags an interrupt (crb bit 7) acknowledge
+ * it with a dummy data-port read and dispatch to the parport core. */
+static irqreturn_t mfc3_interrupt(int irq, void *dev_id)
+{
+	int i;
+
+	for( i = 0; i < MAX_MFC; i++)
+		if (this_port[i] != NULL)
+			if (pia(this_port[i])->crb & 128) { /* Board caused interrupt */
+				dummy = pia(this_port[i])->pprb; /* clear irq bit */
+				parport_generic_irq(this_port[i]);
+			}
+	/* NOTE(review): IRQ_HANDLED is returned even when no board raised
+	 * the interrupt; on this shared line IRQ_NONE would be more
+	 * accurate in that case — confirm before changing. */
+	return IRQ_HANDLED;
+}
+
+/* Enable the /ACK (CB1) interrupt in the PIA control register B. */
+static void mfc3_enable_irq(struct parport *p)
+{
+	pia(p)->crb |= PIA_C1_ENABLE_IRQ;
+}
+
+/* Disable the /ACK (CB1) interrupt in the PIA control register B. */
+static void mfc3_disable_irq(struct parport *p)
+{
+	pia(p)->crb &= ~PIA_C1_ENABLE_IRQ;
+}
+
+/* Switch the data port to output.  The PIA multiplexes the data and
+ * direction registers through the DDR bit of the control register, so
+ * the DDR must be exposed, written, and hidden again. */
+static void mfc3_data_forward(struct parport *p)
+{
+	pr_debug("forward\n");
+	pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
+	pia(p)->pddrb = 255; /* all pins output */
+	pia(p)->crb |= PIA_DDR; /* make data register visible - default */
+}
+
+/* Switch the data port to input; counterpart of mfc3_data_forward(). */
+static void mfc3_data_reverse(struct parport *p)
+{
+	pr_debug("reverse\n");
+	pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
+	pia(p)->pddrb = 0; /* all pins input */
+	pia(p)->crb |= PIA_DDR; /* make data register visible - default */
+}
+
+/* Fill a fresh parport_state with the power-on defaults used by
+ * parport_mfc3_init(): data port all outputs (0xff), status/control port
+ * direction 0xe0 (/RESET, /DIR, /AUTO-FEED as outputs). */
+static void mfc3_init_state(struct pardevice *dev, struct parport_state *s)
+{
+	s->u.amiga.data = 0;
+	s->u.amiga.datadir = 255;
+	s->u.amiga.status = 0;
+	s->u.amiga.statusdir = 0xe0;
+}
+
+/* Save the PIA state (both port values and both direction registers) so
+ * mfc3_restore_state() can put it back.  Each direction register shares
+ * its address with the corresponding port register and is selected via
+ * the DDR bit of that port's control register. */
+static void mfc3_save_state(struct parport *p, struct parport_state *s)
+{
+	s->u.amiga.data = pia(p)->pprb;
+	pia(p)->crb &= ~PIA_DDR;	/* expose port B direction register */
+	s->u.amiga.datadir = pia(p)->pddrb;
+	pia(p)->crb |= PIA_DDR;		/* back to data register - default */
+	s->u.amiga.status = pia(p)->ppra;
+	pia(p)->cra &= ~PIA_DDR;	/* expose port A direction register */
+	/* Bug fix: read pddra here, not pddrb.  Toggling cra selects port
+	 * A's direction register; the old pddrb access (with crb still in
+	 * data mode) read the port B data register instead. */
+	s->u.amiga.statusdir = pia(p)->pddra;
+	pia(p)->cra |= PIA_DDR;		/* back to status register - default */
+}
+
+/* Restore the PIA state saved by mfc3_save_state(), re-selecting each
+ * direction register through the DDR bit before writing it. */
+static void mfc3_restore_state(struct parport *p, struct parport_state *s)
+{
+	pia(p)->pprb = s->u.amiga.data;
+	pia(p)->crb &= ~PIA_DDR;	/* expose port B direction register */
+	pia(p)->pddrb = s->u.amiga.datadir;
+	pia(p)->crb |= PIA_DDR;		/* back to data register - default */
+	pia(p)->ppra = s->u.amiga.status;
+	pia(p)->cra &= ~PIA_DDR;	/* expose port A direction register */
+	/* Bug fix: write pddra here, not pddrb.  The old code wrote the
+	 * saved status direction into the port B data register (crb was in
+	 * data mode), clobbering the data lines instead of restoring the
+	 * port A direction register. */
+	pia(p)->pddra = s->u.amiga.statusdir;
+	pia(p)->cra |= PIA_DDR;		/* back to status register - default */
+}
+
+/* Operations table shared by all MFC3 ports.  Register access is
+ * hardware-specific; all IEEE 1284 transfer modes use the generic
+ * software implementations from ieee1284_ops.c. */
+static struct parport_operations pp_mfc3_ops = {
+	.write_data	= mfc3_write_data,
+	.read_data	= mfc3_read_data,
+
+	.write_control	= mfc3_write_control,
+	.read_control	= mfc3_read_control,
+	.frob_control	= mfc3_frob_control,
+
+	.read_status	= mfc3_read_status,
+
+	.enable_irq	= mfc3_enable_irq,
+	.disable_irq	= mfc3_disable_irq,
+
+	.data_forward	= mfc3_data_forward,
+	.data_reverse	= mfc3_data_reverse,
+
+	.init_state	= mfc3_init_state,
+	.save_state	= mfc3_save_state,
+	.restore_state	= mfc3_restore_state,
+
+	.epp_write_data	= parport_ieee1284_epp_write_data,
+	.epp_read_data	= parport_ieee1284_epp_read_data,
+	.epp_write_addr	= parport_ieee1284_epp_write_addr,
+	.epp_read_addr	= parport_ieee1284_epp_read_addr,
+
+	.ecp_write_data	= parport_ieee1284_ecp_write_data,
+	.ecp_read_data	= parport_ieee1284_ecp_read_data,
+	.ecp_write_addr	= parport_ieee1284_ecp_write_addr,
+
+	.compat_write_data	= parport_ieee1284_write_compat,
+	.nibble_read_data	= parport_ieee1284_read_nibble,
+	.byte_read_data		= parport_ieee1284_read_byte,
+
+	.owner = THIS_MODULE,
+};
+
+/* ----------- Initialisation code --------------------------------- */
+
+/* Probe the Zorro bus for Multiface III cards (up to MAX_MFC), reset and
+ * register a parallel port for each.  A single shared interrupt handler
+ * serves all boards; it is requested once, for the first port, and
+ * reference-counted in use_cnt.  Returns 0 if at least one port was
+ * registered, -ENODEV otherwise. */
+static int __init parport_mfc3_init(void)
+{
+	struct parport *p;
+	int pias = 0;
+	struct pia *pp;
+	struct zorro_dev *z = NULL;
+
+	if (!MACH_IS_AMIGA)
+		return -ENODEV;
+
+	while ((z = zorro_find_device(ZORRO_PROD_BSC_MULTIFACE_III, z))) {
+		unsigned long piabase = z->resource.start+PIABASE;
+		if (!request_mem_region(piabase, sizeof(struct pia), "PIA"))
+			continue;
+
+		/* Reset the PIA: data port all outputs, status/control
+		 * port direction 0xe0, then pulse the printer reset. */
+		pp = ZTWO_VADDR(piabase);
+		pp->crb = 0;
+		pp->pddrb = 255; /* all data pins output */
+		pp->crb = PIA_DDR|32|8;
+		dummy = pp->pddrb; /* reading clears interrupt */
+		pp->cra = 0;
+		pp->pddra = 0xe0; /* /RESET,  /DIR ,/AUTO-FEED output */
+		pp->cra = PIA_DDR;
+		pp->ppra = 0; /* reset printer */
+		udelay(10);
+		pp->ppra = 128;
+		p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS,
+					  PARPORT_DMA_NONE, &pp_mfc3_ops);
+		if (!p)
+			goto out_port;
+
+		if (p->irq != PARPORT_IRQ_NONE) {
+			/* Bug fix: undo the increment when request_irq()
+			 * fails, otherwise a later board would see
+			 * use_cnt != 0, skip request_irq(), and be
+			 * announced with a handler that was never
+			 * installed. */
+			if (use_cnt++ == 0)
+				if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, IRQF_SHARED, p->name, &pp_mfc3_ops)) {
+					use_cnt--;
+					goto out_irq;
+				}
+		}
+		p->dev = &z->dev;
+
+		this_port[pias++] = p;
+		pr_info("%s: Multiface III port using irq\n", p->name);
+		/* XXX: set operating mode */
+
+		p->private_data = (void *)piabase;
+		parport_announce_port (p);
+
+		if (pias >= MAX_MFC)
+			break;
+		continue;
+
+	out_irq:
+		parport_put_port(p);
+	out_port:
+		release_mem_region(piabase, sizeof(struct pia));
+	}
+
+	return pias ? 0 : -ENODEV;
+}
+
+/* Tear down every port registered by parport_mfc3_init(): remove it from
+ * the parport core, release the shared interrupt when the last user goes
+ * away, then free the I/O region and drop the port reference. */
+static void __exit parport_mfc3_exit(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_MFC; i++) {
+		if (!this_port[i])
+			continue;
+		parport_remove_port(this_port[i]);
+		if (this_port[i]->irq != PARPORT_IRQ_NONE) {
+			if (--use_cnt == 0)
+				free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops);
+		}
+		/* private_data holds the physical PIA base stashed at init */
+		release_mem_region(ZTWO_PADDR(this_port[i]->private_data), sizeof(struct pia));
+		parport_put_port(this_port[i]);
+	}
+}
+
+
+MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
+MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
+MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
+MODULE_LICENSE("GPL");
+
+module_init(parport_mfc3_init)
+module_exit(parport_mfc3_exit)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
new file mode 100644
index 000000000..de5a823f3
--- /dev/null
+++ b/drivers/parport/parport_pc.c
@@ -0,0 +1,3340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Low-level parallel-port routines for 8255-based PC-style hardware.
+ *
+ * Authors: Phil Blundell <philb@gnu.org>
+ * Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Jose Renau <renau@acm.org>
+ * David Campbell
+ * Andrea Arcangeli
+ *
+ * based on work by Grant Guenther <grant@torque.net> and Phil Blundell.
+ *
+ * Cleaned up include files - Russell King <linux@arm.uk.linux.org>
+ * DMA support - Bert De Jonghe <bert@sophis.be>
+ * Many ECP bugs fixed. Fred Barnes & Jamie Lokier, 1999
+ * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
+ * Various hacks, Fred Barnes, 04/2001
+ * Updated probing logic - Adam Belay <ambx1@neo.rr.com>
+ */
+
+/* This driver should work with any hardware that is broadly compatible
+ * with that in the IBM PC. This applies to the majority of integrated
+ * I/O chipsets that are commonly available. The expected register
+ * layout is:
+ *
+ * base+0 data
+ * base+1 status
+ * base+2 control
+ *
+ * In addition, there are some optional registers:
+ *
+ * base+3 EPP address
+ * base+4 EPP data
+ * base+0x400 ECP config A
+ * base+0x401 ECP config B
+ * base+0x402 ECP control
+ *
+ * All registers are 8 bits wide and read/write. If your hardware differs
+ * only in register addresses (eg because your registers are on 32-bit
+ * word boundaries) then you can alter the constants in parport_pc.h to
+ * accommodate this.
+ *
+ * Note that the ECP registers may not start at offset 0x400 for PCI cards,
+ * but rather will start at port->base_hi.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched/signal.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
+#include <linux/sysctl.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/dma.h>
+
+#include <linux/parport.h>
+#include <linux/parport_pc.h>
+#include <linux/via.h>
+#include <asm/parport.h>
+
+#define PARPORT_PC_MAX_PORTS PARPORT_MAX
+
+#ifdef CONFIG_ISA_DMA_API
+#define HAS_DMA
+#endif
+
+/* ECR modes */
+#define ECR_SPP 00
+#define ECR_PS2 01
+#define ECR_PPF 02
+#define ECR_ECP 03
+#define ECR_EPP 04
+#define ECR_VND 05
+#define ECR_TST 06
+#define ECR_CNF 07
+#define ECR_MODE_MASK 0xe0
+#define ECR_WRITE(p, v) frob_econtrol((p), 0xff, (v))
+
+#undef DEBUG
+
+#define NR_SUPERIOS 3
+static struct superio_struct { /* For Super-IO chips autodetection */
+ int io;
+ int irq;
+ int dma;
+} superios[NR_SUPERIOS] = { {0,},};
+
+static int user_specified;
+#if defined(CONFIG_PARPORT_PC_SUPERIO) || \
+ (defined(CONFIG_PARPORT_1284) && defined(CONFIG_PARPORT_PC_FIFO))
+static int verbose_probing;
+#endif
+static int pci_registered_parport;
+static int pnp_registered_parport;
+
+/* frob_control, but for ECR */
/*
 * Read-modify-write the extended control register (ECR): the bits
 * selected by mask @m are replaced/toggled according to @v, all other
 * bits are preserved.  A full mask (0xff) overwrites every bit, so the
 * initial read is skipped in that case.
 */
static void frob_econtrol(struct parport *pb, unsigned char m,
			  unsigned char v)
{
	unsigned char cur = 0;
	unsigned char next;

	if (m != 0xff)
		cur = inb(ECONTROL(pb));
	next = (cur & ~m) ^ v;

	pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n",
		 m, v, cur, next);

	outb(next, ECONTROL(pb));
}
+
/* Select an ECR mode (one of the ECR_* values) by rewriting only the
 * mode field, ECR bits <7:5>; the remaining ECR bits are preserved. */
static inline void frob_set_mode(struct parport *p, int mode)
{
	frob_econtrol(p, ECR_MODE_MASK, mode << 5);
}
+
+#ifdef CONFIG_PARPORT_PC_FIFO
+/* Safely change the mode bits in the ECR
+ Returns:
+ 0 : Success
+ -EBUSY: Could not drain FIFO in some finite amount of time,
+ mode not changed!
+ */
static int change_mode(struct parport *p, int m)
{
	const struct parport_pc_private *priv = p->physport->private_data;
	unsigned char oecr;
	int mode;

	pr_debug("parport change_mode ECP-ISA to mode 0x%02x\n", m);

	/* No ECR means no mode bits to change; treat as success. */
	if (!priv->ecr) {
		printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
		return 0;
	}

	/* Bits <7:5> contain the mode. */
	oecr = inb(ECONTROL(p));
	mode = (oecr >> 5) & 0x7;
	if (mode == m)
		return 0;

	/* 0x20 is the direction bit in the soft control copy; only a
	 * forward-mode FIFO needs draining — TODO confirm for all chips. */
	if (mode >= 2 && !(priv->ctr & 0x20)) {
		/* This mode resets the FIFO, so we may
		 * have to wait for it to drain first. */
		unsigned long expire = jiffies + p->physport->cad->timeout;
		int counter;
		switch (mode) {
		case ECR_PPF: /* Parallel Port FIFO mode */
		case ECR_ECP: /* ECP Parallel Port mode */
			/* Busy wait for 200us */
			for (counter = 0; counter < 40; counter++) {
				/* ECR bit 0 reports FIFO empty. */
				if (inb(ECONTROL(p)) & 0x01)
					break;
				if (signal_pending(current))
					break;
				udelay(5);
			}

			/* Poll slowly. */
			while (!(inb(ECONTROL(p)) & 0x01)) {
				if (time_after_eq(jiffies, expire))
					/* The FIFO is stuck. */
					return -EBUSY;
				schedule_timeout_interruptible(
							msecs_to_jiffies(10));
				if (signal_pending(current))
					break;
			}
		}
	}

	/* Direct FIFO-mode to FIFO-mode transitions are not allowed;
	 * step through PS2 (mode 001) first. */
	if (mode >= 2 && m >= 2) {
		/* We have to go through mode 001 */
		oecr &= ~(7 << 5);
		oecr |= ECR_PS2 << 5;
		ECR_WRITE(p, oecr);
	}

	/* Set the mode. */
	oecr &= ~(7 << 5);
	oecr |= m << 5;
	ECR_WRITE(p, oecr);
	return 0;
}
+#endif /* FIFO support */
+
+/*
+ * Clear TIMEOUT BIT in EPP MODE
+ *
+ * This is also used in SPP detection.
+ */
static int clear_epp_timeout(struct parport *pb)
{
	unsigned char r;

	/* Status bit 0 latched means an EPP timeout is pending; if it
	 * is already clear there is nothing to do. */
	if (!(parport_pc_read_status(pb) & 0x01))
		return 1;

	/* To clear timeout some chips require double read */
	parport_pc_read_status(pb);
	r = parport_pc_read_status(pb);
	outb(r | 0x01, STATUS(pb)); /* Some reset by writing 1 */
	outb(r & 0xfe, STATUS(pb)); /* Others by writing 0 */
	r = parport_pc_read_status(pb);

	/* Nonzero return means the timeout bit is now clear. */
	return !(r & 0x01);
}
+
+/*
+ * Access functions.
+ *
+ * Most of these aren't static because they may be used by the
+ * parport_xxx_yyy macros. extern __inline__ versions of several
+ * of these are in parport_pc.h.
+ */
+
/*
 * Build the initial saved register state for a newly registered
 * device: control = 0xc, plus the ackIntEn bit (0x10) when the device
 * registered an interrupt handler and the port actually has an IRQ.
 */
static void parport_pc_init_state(struct pardevice *dev,
				  struct parport_state *s)
{
	s->u.pc.ctr = 0xc;
	if (dev->irq_func &&
	    dev->port->irq != PARPORT_IRQ_NONE)
		/* Set ackIntEn */
		s->u.pc.ctr |= 0x10;

	s->u.pc.ecr = 0x34; /* NetMos chip can cause problems 0x24;
			     * D.Gruszka VScom */
}
+
/* Snapshot the soft copy of the control register and, when the port
 * has an ECR, the live ECR contents, into @s for later restore. */
static void parport_pc_save_state(struct parport *p, struct parport_state *s)
{
	const struct parport_pc_private *priv = p->physport->private_data;
	s->u.pc.ctr = priv->ctr;
	if (priv->ecr)
		s->u.pc.ecr = inb(ECONTROL(p));
}
+
+static void parport_pc_restore_state(struct parport *p,
+ struct parport_state *s)
+{
+ struct parport_pc_private *priv = p->physport->private_data;
+ register unsigned char c = s->u.pc.ctr & priv->ctr_writable;
+ outb(c, CONTROL(p));
+ priv->ctr = c;
+ if (priv->ecr)
+ ECR_WRITE(p, s->u.pc.ecr);
+}
+
+#ifdef CONFIG_PARPORT_1284
/*
 * Read up to @length bytes from the EPP data register into @buf.
 *
 * Three paths:
 *  - PARPORT_W91284PIC: use the Warp9 PIC FIFO state encoded on the
 *    status lines to batch up to 16 bytes per burst (see inline notes).
 *  - PARPORT_EPP_FAST: one string-I/O burst for the whole buffer;
 *    returns -EIO if the chip latched an EPP timeout afterwards.
 *  - default: one inb() per byte, stopping early on EPP timeout.
 *
 * Returns the number of bytes read (or -EIO in the FAST path).
 */
static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
				       size_t length, int flags)
{
	size_t got = 0;

	if (flags & PARPORT_W91284PIC) {
		unsigned char status;
		size_t left = length;

		/* use knowledge about data lines..:
		 * nFault is 0 if there is at least 1 byte in the Warp's FIFO
		 * pError is 1 if there are 16 bytes in the Warp's FIFO
		 */
		status = inb(STATUS(port));

		while (!(status & 0x08) && got < length) {
			if (left >= 16 && (status & 0x20) && !(status & 0x08)) {
				/* can grab 16 bytes from warp fifo */
				if (!((long)buf & 0x03))
					/* dword-aligned: 4 x 32-bit reads */
					insl(EPPDATA(port), buf, 4);
				else
					insb(EPPDATA(port), buf, 16);
				buf += 16;
				got += 16;
				left -= 16;
			} else {
				/* grab single byte from the warp fifo */
				*((char *)buf) = inb(EPPDATA(port));
				buf++;
				got++;
				left--;
			}
			status = inb(STATUS(port));
			if (status & 0x01) {
				/* EPP timeout should never occur... */
				printk(KERN_DEBUG "%s: EPP timeout occurred while talking to w91284pic (should not have done)\n",
				       port->name);
				clear_epp_timeout(port);
			}
		}
		return got;
	}
	if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
		/* 32-bit bursts only when both the pointer and the
		 * length are dword-aligned. */
		if (!(((long)buf | length) & 0x03))
			insl(EPPDATA(port), buf, (length >> 2));
		else
			insb(EPPDATA(port), buf, length);
		if (inb(STATUS(port)) & 0x01) {
			clear_epp_timeout(port);
			return -EIO;
		}
		return length;
	}
	for (; got < length; got++) {
		*((char *)buf) = inb(EPPDATA(port));
		buf++;
		if (inb(STATUS(port)) & 0x01) {
			/* EPP timeout */
			clear_epp_timeout(port);
			break;
		}
	}

	return got;
}
+
/*
 * Write @length bytes from @buf through the EPP data register.
 *
 * PARPORT_EPP_FAST sends the whole buffer with string I/O (32-bit when
 * aligned) and returns -EIO on a latched EPP timeout; the default path
 * goes byte by byte, stopping early if a timeout is seen.
 */
static size_t parport_pc_epp_write_data(struct parport *port, const void *buf,
					size_t length, int flags)
{
	size_t written = 0;

	if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
		/* 32-bit bursts only when pointer and length allow. */
		if (!(((long)buf | length) & 0x03))
			outsl(EPPDATA(port), buf, (length >> 2));
		else
			outsb(EPPDATA(port), buf, length);
		if (inb(STATUS(port)) & 0x01) {
			clear_epp_timeout(port);
			return -EIO;
		}
		return length;
	}
	for (; written < length; written++) {
		outb(*((char *)buf), EPPDATA(port));
		buf++;
		if (inb(STATUS(port)) & 0x01) {
			/* EPP timeout: give up after what we managed. */
			clear_epp_timeout(port);
			break;
		}
	}

	return written;
}
+
/*
 * Read @length bytes from the EPP address register into @buf.
 * PARPORT_EPP_FAST uses a single byte-string burst (-EIO on EPP
 * timeout); otherwise one inb() per byte with early stop on timeout.
 */
static size_t parport_pc_epp_read_addr(struct parport *port, void *buf,
				       size_t length, int flags)
{
	size_t got = 0;

	if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
		insb(EPPADDR(port), buf, length);
		if (inb(STATUS(port)) & 0x01) {
			clear_epp_timeout(port);
			return -EIO;
		}
		return length;
	}
	for (; got < length; got++) {
		*((char *)buf) = inb(EPPADDR(port));
		buf++;
		if (inb(STATUS(port)) & 0x01) {
			/* EPP timeout: stop and report the partial count. */
			clear_epp_timeout(port);
			break;
		}
	}

	return got;
}
+
/*
 * Write @length bytes from @buf to the EPP address register.
 * PARPORT_EPP_FAST uses a single byte-string burst (-EIO on EPP
 * timeout); otherwise one outb() per byte with early stop on timeout.
 */
static size_t parport_pc_epp_write_addr(struct parport *port,
					const void *buf, size_t length,
					int flags)
{
	size_t written = 0;

	if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
		outsb(EPPADDR(port), buf, length);
		if (inb(STATUS(port)) & 0x01) {
			clear_epp_timeout(port);
			return -EIO;
		}
		return length;
	}
	for (; written < length; written++) {
		outb(*((char *)buf), EPPADDR(port));
		buf++;
		if (inb(STATUS(port)) & 0x01) {
			/* EPP timeout: stop and report the partial count. */
			clear_epp_timeout(port);
			break;
		}
	}

	return written;
}
+
/*
 * EPP data read on an ECP-capable chip: switch the ECR to EPP mode,
 * set reverse direction and control = 0x4, do the transfer, then drop
 * back to PS2 mode.  The statement order is deliberate.
 */
static size_t parport_pc_ecpepp_read_data(struct parport *port, void *buf,
					  size_t length, int flags)
{
	size_t got;

	frob_set_mode(port, ECR_EPP);
	parport_pc_data_reverse(port);
	parport_pc_write_control(port, 0x4);
	got = parport_pc_epp_read_data(port, buf, length, flags);
	frob_set_mode(port, ECR_PS2);

	return got;
}
+
/*
 * EPP data write on an ECP-capable chip: ECR to EPP mode, control =
 * 0x4, forward direction, transfer, then back to PS2 mode.
 */
static size_t parport_pc_ecpepp_write_data(struct parport *port,
					   const void *buf, size_t length,
					   int flags)
{
	size_t written;

	frob_set_mode(port, ECR_EPP);
	parport_pc_write_control(port, 0x4);
	parport_pc_data_forward(port);
	written = parport_pc_epp_write_data(port, buf, length, flags);
	frob_set_mode(port, ECR_PS2);

	return written;
}
+
/*
 * EPP address read on an ECP-capable chip: ECR to EPP mode, reverse
 * direction, control = 0x4, transfer, then back to PS2 mode.
 */
static size_t parport_pc_ecpepp_read_addr(struct parport *port, void *buf,
					  size_t length, int flags)
{
	size_t got;

	frob_set_mode(port, ECR_EPP);
	parport_pc_data_reverse(port);
	parport_pc_write_control(port, 0x4);
	got = parport_pc_epp_read_addr(port, buf, length, flags);
	frob_set_mode(port, ECR_PS2);

	return got;
}
+
/*
 * EPP address write on an ECP-capable chip: ECR to EPP mode, control =
 * 0x4, forward direction, transfer, then back to PS2 mode.
 */
static size_t parport_pc_ecpepp_write_addr(struct parport *port,
					   const void *buf, size_t length,
					   int flags)
{
	size_t written;

	frob_set_mode(port, ECR_EPP);
	parport_pc_write_control(port, 0x4);
	parport_pc_data_forward(port);
	written = parport_pc_epp_write_addr(port, buf, length, flags);
	frob_set_mode(port, ECR_PS2);

	return written;
}
+#endif /* IEEE 1284 support */
+
+#ifdef CONFIG_PARPORT_PC_FIFO
/*
 * Push @length bytes into the chip FIFO using programmed I/O.
 *
 * The port interrupt is disabled; we poll the ECR instead, waiting for
 * serviceIntr when the FIFO reports full and blasting fifo_depth-sized
 * bursts when it reports empty.  The loop gives up on a pending
 * signal, on timeout, or when another driver is waiting for the port.
 *
 * Returns the number of bytes handed to the FIFO (some may still be
 * buffered in hardware when we return; the caller drains it).
 */
static size_t parport_pc_fifo_write_block_pio(struct parport *port,
					      const void *buf, size_t length)
{
	int ret = 0;
	const unsigned char *bufp = buf;
	size_t left = length;
	unsigned long expire = jiffies + port->physport->cad->timeout;
	const unsigned long fifo = FIFO(port);
	int poll_for = 8;	/* 80 usecs */
	const struct parport_pc_private *priv = port->physport->private_data;
	const int fifo_depth = priv->fifo_depth;

	port = port->physport;

	/* We don't want to be interrupted every character. */
	parport_pc_disable_irq(port);
	/* set nErrIntrEn and serviceIntr */
	frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));

	/* Forward mode. */
	parport_pc_data_forward(port); /* Must be in PS2 mode */

	while (left) {
		unsigned char byte;
		unsigned char ecrval = inb(ECONTROL(port));
		int i = 0;

		if (need_resched() && time_before(jiffies, expire))
			/* Can't yield the port. */
			schedule();

		/* Anyone else waiting for the port? */
		if (port->waithead) {
			printk(KERN_DEBUG "Somebody wants the port\n");
			break;
		}

		/* ECR bit 1: FIFO full. */
		if (ecrval & 0x02) {
			/* FIFO is full. Wait for interrupt. */

			/* Clear serviceIntr */
			ECR_WRITE(port, ecrval & ~(1<<2));
false_alarm:
			ret = parport_wait_event(port, HZ);
			if (ret < 0)
				break;
			ret = 0;
			if (!time_before(jiffies, expire)) {
				/* Timed out. */
				printk(KERN_DEBUG "FIFO write timed out\n");
				break;
			}
			ecrval = inb(ECONTROL(port));
			/* serviceIntr still clear: the wakeup was
			 * spurious, go back to waiting. */
			if (!(ecrval & (1<<2))) {
				if (need_resched() &&
				    time_before(jiffies, expire))
					schedule();

				goto false_alarm;
			}

			continue;
		}

		/* Can't fail now. */
		expire = jiffies + port->cad->timeout;

poll:
		if (signal_pending(current))
			break;

		/* ECR bit 0: FIFO empty. */
		if (ecrval & 0x01) {
			/* FIFO is empty. Blast it full. */
			const int n = left < fifo_depth ? left : fifo_depth;
			outsb(fifo, bufp, n);
			bufp += n;
			left -= n;

			/* Adjust the poll time. */
			if (i < (poll_for - 2))
				poll_for--;
			continue;
		} else if (i++ < poll_for) {
			udelay(10);
			ecrval = inb(ECONTROL(port));
			goto poll;
		}

		/* Half-full(call me an optimist) */
		byte = *bufp++;
		outb(byte, fifo);
		left--;
	}
	dump_parport_state("leave fifo_write_block_pio", port);
	return length - left;
}
+
+#ifdef HAS_DMA
/*
 * Push @length bytes into the chip FIFO using ISA DMA.
 *
 * Buffers below MAX_DMA_ADDRESS are DMA-mapped directly, with chunk
 * sizes capped so a transfer never crosses a 64k DMA page boundary;
 * buffers above it are copied through the preallocated bounce buffer
 * (priv->dma_buf / priv->dma_handle) one PAGE_SIZE at a time.  After
 * each chunk we wait for serviceIntr, then read back the DMA residue
 * and rewind the byte accounting by that amount.
 *
 * Returns the number of bytes actually transferred.
 */
static size_t parport_pc_fifo_write_block_dma(struct parport *port,
					      const void *buf, size_t length)
{
	int ret = 0;
	unsigned long dmaflag;
	size_t left = length;
	const struct parport_pc_private *priv = port->physport->private_data;
	struct device *dev = port->physport->dev;
	dma_addr_t dma_addr, dma_handle;
	size_t maxlen = 0x10000; /* max 64k per DMA transfer */
	unsigned long start = (unsigned long) buf;
	unsigned long end = (unsigned long) buf + length - 1;

	dump_parport_state("enter fifo_write_block_dma", port);
	if (end < MAX_DMA_ADDRESS) {
		/* If it would cross a 64k boundary, cap it at the end. */
		if ((start ^ end) & ~0xffffUL)
			maxlen = 0x10000 - (start & 0xffff);

		dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length,
						       DMA_TO_DEVICE);
	} else {
		/* above 16 MB we use a bounce buffer as ISA-DMA
		   is not possible */
		maxlen = PAGE_SIZE;	/* sizeof(priv->dma_buf) */
		dma_addr = priv->dma_handle;
		/* dma_handle == 0 marks "bounce buffer in use" below. */
		dma_handle = 0;
	}

	port = port->physport;

	/* We don't want to be interrupted every character. */
	parport_pc_disable_irq(port);
	/* set nErrIntrEn and serviceIntr */
	frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));

	/* Forward mode. */
	parport_pc_data_forward(port); /* Must be in PS2 mode */

	while (left) {
		unsigned long expire = jiffies + port->physport->cad->timeout;

		size_t count = left;

		if (count > maxlen)
			count = maxlen;

		if (!dma_handle)   /* bounce buffer ! */
			memcpy(priv->dma_buf, buf, count);

		/* Program the DMA controller under the DMA lock. */
		dmaflag = claim_dma_lock();
		disable_dma(port->dma);
		clear_dma_ff(port->dma);
		set_dma_mode(port->dma, DMA_MODE_WRITE);
		set_dma_addr(port->dma, dma_addr);
		set_dma_count(port->dma, count);

		/* Set DMA mode */
		frob_econtrol(port, 1<<3, 1<<3);

		/* Clear serviceIntr */
		frob_econtrol(port, 1<<2, 0);

		enable_dma(port->dma);
		release_dma_lock(dmaflag);

		/* assume DMA will be successful */
		left -= count;
		buf += count;
		if (dma_handle)
			dma_addr += count;

		/* Wait for interrupt. */
false_alarm:
		ret = parport_wait_event(port, HZ);
		if (ret < 0)
			break;
		ret = 0;
		if (!time_before(jiffies, expire)) {
			/* Timed out. */
			printk(KERN_DEBUG "DMA write timed out\n");
			break;
		}
		/* Is serviceIntr set? */
		if (!(inb(ECONTROL(port)) & (1<<2))) {
			cond_resched();

			goto false_alarm;
		}

		dmaflag = claim_dma_lock();
		disable_dma(port->dma);
		clear_dma_ff(port->dma);
		count = get_dma_residue(port->dma);
		release_dma_lock(dmaflag);

		cond_resched(); /* Can't yield the port. */

		/* Anyone else waiting for the port? */
		if (port->waithead) {
			printk(KERN_DEBUG "Somebody wants the port\n");
			break;
		}

		/* update for possible DMA residue ! */
		buf -= count;
		left += count;
		if (dma_handle)
			dma_addr -= count;
	}

	/* Maybe got here through break, so adjust for DMA residue! */
	dmaflag = claim_dma_lock();
	disable_dma(port->dma);
	clear_dma_ff(port->dma);
	left += get_dma_residue(port->dma);
	release_dma_lock(dmaflag);

	/* Turn off DMA mode */
	frob_econtrol(port, 1<<3, 0);

	if (dma_handle)
		dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE);

	dump_parport_state("leave fifo_write_block_dma", port);
	return length - left;
}
+#endif
+
/* Dispatch a FIFO write: use ISA DMA when the port has a DMA channel
 * (and the kernel provides the ISA DMA API), else fall back to PIO. */
static inline size_t parport_pc_fifo_write_block(struct parport *port,
						 const void *buf, size_t length)
{
#ifdef HAS_DMA
	if (port->dma != PARPORT_DMA_NONE)
		return parport_pc_fifo_write_block_dma(port, buf, length);
#endif
	return parport_pc_fifo_write_block_pio(port, buf, length);
}
+
+/* Parallel Port FIFO mode (ECP chipsets) */
/*
 * IEEE 1284 compatibility-mode write accelerated by the chip FIFO.
 *
 * Falls back to the generic software implementation when the caller
 * cannot sleep (zero timeout or O_NONBLOCK).  Otherwise: select
 * Parallel Port FIFO mode, stream the data, then wait for the FIFO to
 * drain before switching back to PS2 mode.  If the FIFO never drains,
 * TEST mode is used to count the undelivered bytes so the returned
 * total stays honest.
 */
static size_t parport_pc_compat_write_block_pio(struct parport *port,
						const void *buf, size_t length,
						int flags)
{
	size_t written;
	int r;
	unsigned long expire;
	const struct parport_pc_private *priv = port->physport->private_data;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_write_compat(port, buf,
						     length, flags);

	/* Set up parallel port FIFO mode.*/
	parport_pc_data_forward(port); /* Must be in PS2 mode */
	parport_pc_frob_control(port, PARPORT_CONTROL_STROBE, 0);
	r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
	if (r)
		printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
		       port->name);

	port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Write the data to the FIFO. */
	written = parport_pc_fifo_write_block(port, buf, length);

	/* Finish up. */
	/* For some hardware we don't want to touch the mode until
	 * the FIFO is empty, so allow 4 seconds for each position
	 * in the fifo.
	 */
	expire = jiffies + (priv->fifo_depth * HZ * 4);
	do {
		/* Wait for the FIFO to empty */
		r = change_mode(port, ECR_PS2);
		if (r != -EBUSY)
			break;
	} while (time_before(jiffies, expire));
	if (r == -EBUSY) {

		printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);

		/* Prevent further data transfer. */
		frob_set_mode(port, ECR_TST);

		/* Adjust for the contents of the FIFO: refill until
		 * full, counting how many slots were still occupied. */
		for (written -= priv->fifo_depth; ; written++) {
			if (inb(ECONTROL(port)) & 0x2) {
				/* Full up. */
				break;
			}
			outb(0, FIFO(port));
		}

		/* Reset the FIFO and return to PS2 mode. */
		frob_set_mode(port, ECR_PS2);
	}

	/* Wait for the peripheral to deassert Busy. */
	r = parport_wait_peripheral(port,
				    PARPORT_STATUS_BUSY,
				    PARPORT_STATUS_BUSY);
	if (r)
		printk(KERN_DEBUG "%s: BUSY timeout (%d) in compat_write_block_pio\n",
		       port->name, r);

	port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}
+
+/* ECP */
+#ifdef CONFIG_PARPORT_1284
/*
 * ECP forward data write accelerated by the chip FIFO.
 *
 * Falls back to the software ECP implementation when the caller cannot
 * sleep.  Otherwise: turn the link around to forward if needed (events
 * 47/49), select ECP FIFO mode, stream the data, then drain the FIFO
 * before returning to PS2 mode.  A stuck FIFO triggers the host
 * transfer recovery sequence (nInit pulse, events on PError) after the
 * undelivered bytes are counted out through TEST mode.
 */
static size_t parport_pc_ecp_write_block_pio(struct parport *port,
					     const void *buf, size_t length,
					     int flags)
{
	size_t written;
	int r;
	unsigned long expire;
	const struct parport_pc_private *priv = port->physport->private_data;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_ecp_write_data(port, buf,
						       length, flags);

	/* Switch to forward mode if necessary. */
	if (port->physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
		/* Event 47: Set nInit high. */
		parport_frob_control(port,
				      PARPORT_CONTROL_INIT
				      | PARPORT_CONTROL_AUTOFD,
				      PARPORT_CONTROL_INIT
				      | PARPORT_CONTROL_AUTOFD);

		/* Event 49: PError goes high. */
		r = parport_wait_peripheral(port,
					    PARPORT_STATUS_PAPEROUT,
					    PARPORT_STATUS_PAPEROUT);
		if (r) {
			printk(KERN_DEBUG "%s: PError timeout (%d) in ecp_write_block_pio\n",
			       port->name, r);
		}
	}

	/* Set up ECP parallel port mode.*/
	parport_pc_data_forward(port); /* Must be in PS2 mode */
	parport_pc_frob_control(port,
				PARPORT_CONTROL_STROBE |
				PARPORT_CONTROL_AUTOFD,
				0);
	r = change_mode(port, ECR_ECP); /* ECP FIFO */
	if (r)
		printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
		       port->name);
	port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Write the data to the FIFO. */
	written = parport_pc_fifo_write_block(port, buf, length);

	/* Finish up. */
	/* For some hardware we don't want to touch the mode until
	 * the FIFO is empty, so allow 4 seconds for each position
	 * in the fifo.
	 */
	expire = jiffies + (priv->fifo_depth * (HZ * 4));
	do {
		/* Wait for the FIFO to empty */
		r = change_mode(port, ECR_PS2);
		if (r != -EBUSY)
			break;
	} while (time_before(jiffies, expire));
	if (r == -EBUSY) {

		printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);

		/* Prevent further data transfer. */
		frob_set_mode(port, ECR_TST);

		/* Adjust for the contents of the FIFO: refill until
		 * full, counting how many slots were still occupied. */
		for (written -= priv->fifo_depth; ; written++) {
			if (inb(ECONTROL(port)) & 0x2) {
				/* Full up. */
				break;
			}
			outb(0, FIFO(port));
		}

		/* Reset the FIFO and return to PS2 mode. */
		frob_set_mode(port, ECR_PS2);

		/* Host transfer recovery. */
		parport_pc_data_reverse(port); /* Must be in PS2 mode */
		udelay(5);
		parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
		r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
		if (r)
			printk(KERN_DEBUG "%s: PE,1 timeout (%d) in ecp_write_block_pio\n",
			       port->name, r);

		parport_frob_control(port,
				     PARPORT_CONTROL_INIT,
				     PARPORT_CONTROL_INIT);
		r = parport_wait_peripheral(port,
					    PARPORT_STATUS_PAPEROUT,
					    PARPORT_STATUS_PAPEROUT);
		if (r)
			printk(KERN_DEBUG "%s: PE,2 timeout (%d) in ecp_write_block_pio\n",
			       port->name, r);
	}

	/* Wait for the peripheral to deassert Busy. */
	r = parport_wait_peripheral(port,
				    PARPORT_STATUS_BUSY,
				    PARPORT_STATUS_BUSY);
	if (r)
		printk(KERN_DEBUG "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
		       port->name, r);

	port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}
+#endif /* IEEE 1284 support */
+#endif /* Allowed to use FIFO/DMA */
+
+
+/*
+ * ******************************************
+ * INITIALISATION AND MODULE STUFF BELOW HERE
+ * ******************************************
+ */
+
+/* GCC is not inlining extern inline function later overwritten to non-inline,
+ so we use outlined_ variants here. */
/* Default operations table for PC-style hardware.  The EPP/ECP/compat
 * transfer hooks point at the generic software implementations here. */
static const struct parport_operations parport_pc_ops = {
	/* Raw register access */
	.write_data	= parport_pc_write_data,
	.read_data	= parport_pc_read_data,

	.write_control	= parport_pc_write_control,
	.read_control	= parport_pc_read_control,
	.frob_control	= parport_pc_frob_control,

	.read_status	= parport_pc_read_status,

	/* Interrupt control */
	.enable_irq	= parport_pc_enable_irq,
	.disable_irq	= parport_pc_disable_irq,

	/* Data direction */
	.data_forward	= parport_pc_data_forward,
	.data_reverse	= parport_pc_data_reverse,

	/* Device state save/restore */
	.init_state	= parport_pc_init_state,
	.save_state	= parport_pc_save_state,
	.restore_state	= parport_pc_restore_state,

	/* IEEE 1284 transfers (software-driven defaults) */
	.epp_write_data	= parport_ieee1284_epp_write_data,
	.epp_read_data	= parport_ieee1284_epp_read_data,
	.epp_write_addr	= parport_ieee1284_epp_write_addr,
	.epp_read_addr	= parport_ieee1284_epp_read_addr,

	.ecp_write_data	= parport_ieee1284_ecp_write_data,
	.ecp_read_data	= parport_ieee1284_ecp_read_data,
	.ecp_write_addr	= parport_ieee1284_ecp_write_addr,

	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,

	.owner		= THIS_MODULE,
};
+
+#ifdef CONFIG_PARPORT_PC_SUPERIO
+
+static struct superio_struct *find_free_superio(void)
+{
+ int i;
+ for (i = 0; i < NR_SUPERIOS; i++)
+ if (superios[i].io == 0)
+ return &superios[i];
+ return NULL;
+}
+
+
+/* Super-IO chipset detection, Winbond, SMSC */
/* Super-IO chipset detection, Winbond, SMSC */
/*
 * Dump and record the parallel-port setup of an SMSC 37c669 whose
 * config space is at @io (unlocked by writing @key twice).  Reads
 * CR1/CR4/CRA/CR23/CR26/CR27, optionally prints their decode, and
 * stores io/irq/dma in a free superios[] slot when the port is active
 * (CR23*4 is the io base; >= 0x100 means enabled).
 */
static void show_parconfig_smsc37c669(int io, int key)
{
	int cr1, cr4, cra, cr23, cr26, cr27;
	struct superio_struct *s;

	static const char *const modes[] = {
		"SPP and Bidirectional (PS/2)",
		"EPP and SPP",
		"ECP",
		"ECP and EPP" };

	/* Unlock config space, read the registers, then seal (0xaa). */
	outb(key, io);
	outb(key, io);
	outb(1, io);
	cr1 = inb(io + 1);
	outb(4, io);
	cr4 = inb(io + 1);
	outb(0x0a, io);
	cra = inb(io + 1);
	outb(0x23, io);
	cr23 = inb(io + 1);
	outb(0x26, io);
	cr26 = inb(io + 1);
	outb(0x27, io);
	cr27 = inb(io + 1);
	outb(0xaa, io);

	if (verbose_probing) {
		pr_info("SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
			cr1, cr4, cra, cr23, cr26, cr27);

		/* The documentation calls DMA and IRQ-Lines by letters, so
		   the board maker can/will wire them
		   appropriately/randomly...  G=reserved H=IDE-irq, */
		pr_info("SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
			cr23 * 4,
			(cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
			(cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
			cra & 0x0f);
		pr_info("SMSC LPT Config: enabled=%s power=%s\n",
			(cr23 * 4 >= 0x100) ? "yes" : "no",
			(cr1 & 4) ? "yes" : "no");
		pr_info("SMSC LPT Config: Port mode=%s, EPP version =%s\n",
			(cr1 & 0x08) ? "Standard mode only (SPP)"
			: modes[cr4 & 0x03],
			(cr4 & 0x40) ? "1.7" : "1.9");
	}

	/* Heuristics !  BIOS setup for this mainboard device limits
	   the choices to standard settings, i.e. io-address and IRQ
	   are related, however DMA can be 1 or 3, assume DMA_A=DMA1,
	   DMA_C=DMA3 (this is true e.g. for TYAN 1564D Tomcat IV) */
	if (cr23 * 4 >= 0x100) { /* if active */
		s = find_free_superio();
		if (s == NULL)
			pr_info("Super-IO: too many chips!\n");
		else {
			int d;
			switch (cr23 * 4) {
			case 0x3bc:
				s->io = 0x3bc;
				s->irq = 7;
				break;
			case 0x378:
				s->io = 0x378;
				s->irq = 7;
				break;
			case 0x278:
				s->io = 0x278;
				s->irq = 5;
			}
			d = (cr26 & 0x0f);
			if (d == 1 || d == 3)
				s->dma = d;
			else
				s->dma = PARPORT_DMA_NONE;
		}
	}
}
+
+
/*
 * Dump and record the parallel-port setup of a Winbond Super-IO chip
 * with a PnP-style config interface at @io (unlocked by writing @key
 * twice).  Logical device 1 is the parallel port; CR30 bit 0 = active,
 * CR60/61 = io base, CR70 = irq, CR74 = dma, CRF0 = mode/irq type.
 */
static void show_parconfig_winbond(int io, int key)
{
	int cr30, cr60, cr61, cr70, cr74, crf0;
	struct superio_struct *s;
	static const char *const modes[] = {
		"Standard (SPP) and Bidirectional(PS/2)", /* 0 */
		"EPP-1.9 and SPP",
		"ECP",
		"ECP and EPP-1.9",
		"Standard (SPP)",
		"EPP-1.7 and SPP",		/* 5 */
		"undefined!",
		"ECP and EPP-1.7" };
	static char *const irqtypes[] = {
		"pulsed low, high-Z",
		"follows nACK" };

	/* The registers are called compatible-PnP because the
	   register layout is modelled after ISA-PnP, the access
	   method is just another ... */
	outb(key, io);
	outb(key, io);
	outb(0x07, io);   /* Register 7: Select Logical Device */
	outb(0x01, io + 1); /* LD1 is Parallel Port */
	outb(0x30, io);
	cr30 = inb(io + 1);
	outb(0x60, io);
	cr60 = inb(io + 1);
	outb(0x61, io);
	cr61 = inb(io + 1);
	outb(0x70, io);
	cr70 = inb(io + 1);
	outb(0x74, io);
	cr74 = inb(io + 1);
	outb(0xf0, io);
	crf0 = inb(io + 1);
	outb(0xaa, io);

	if (verbose_probing) {
		pr_info("Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
			cr30, cr60, cr61, cr70, cr74, crf0);
		pr_info("Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
			(cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
		if ((cr74 & 0x07) > 3)
			pr_cont("dma=none\n");
		else
			pr_cont("dma=%d\n", cr74 & 0x07);
		pr_info("Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
			irqtypes[crf0 >> 7], (crf0 >> 3) & 0x0f);
		pr_info("Winbond LPT Config: Port mode=%s\n",
			modes[crf0 & 0x07]);
	}

	if (cr30 & 0x01) { /* the settings can be interrogated later ... */
		s = find_free_superio();
		if (s == NULL)
			pr_info("Super-IO: too many chips!\n");
		else {
			s->io = (cr60 << 8) | cr61;
			s->irq = cr70 & 0x0f;
			s->dma = (((cr74 & 0x07) > 3) ?
					   PARPORT_DMA_NONE : (cr74 & 0x07));
		}
	}
}
+
+static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
+{
+ const char *type = "unknown";
+ int id, progif = 2;
+
+ if (devid == devrev)
+ /* simple heuristics, we happened to read some
+ non-winbond register */
+ return;
+
+ id = (devid << 8) | devrev;
+
+ /* Values are from public data sheets pdf files, I can just
+ confirm 83977TF is correct :-) */
+ if (id == 0x9771)
+ type = "83977F/AF";
+ else if (id == 0x9773)
+ type = "83977TF / SMSC 97w33x/97w34x";
+ else if (id == 0x9774)
+ type = "83977ATF";
+ else if ((id & ~0x0f) == 0x5270)
+ type = "83977CTF / SMSC 97w36x";
+ else if ((id & ~0x0f) == 0x52f0)
+ type = "83977EF / SMSC 97w35x";
+ else if ((id & ~0x0f) == 0x5210)
+ type = "83627";
+ else if ((id & ~0x0f) == 0x6010)
+ type = "83697HF";
+ else if ((oldid & 0x0f) == 0x0a) {
+ type = "83877F";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0b) {
+ type = "83877AF";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0c) {
+ type = "83877TF";
+ progif = 1;
+ } else if ((oldid & 0x0f) == 0x0d) {
+ type = "83877ATF";
+ progif = 1;
+ } else
+ progif = 0;
+
+ if (verbose_probing)
+ pr_info("Winbond chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x oldid=%02x type=%s\n",
+ efer, key, devid, devrev, oldid, type);
+
+ if (progif == 2)
+ show_parconfig_winbond(efer, key);
+}
+
+static void decode_smsc(int efer, int key, int devid, int devrev)
+{
+ const char *type = "unknown";
+ void (*func)(int io, int key);
+ int id;
+
+ if (devid == devrev)
+ /* simple heuristics, we happened to read some
+ non-smsc register */
+ return;
+
+ func = NULL;
+ id = (devid << 8) | devrev;
+
+ if (id == 0x0302) {
+ type = "37c669";
+ func = show_parconfig_smsc37c669;
+ } else if (id == 0x6582)
+ type = "37c665IR";
+ else if (devid == 0x65)
+ type = "37c665GT";
+ else if (devid == 0x66)
+ type = "37c666GT";
+
+ if (verbose_probing)
+ pr_info("SMSC chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x type=%s\n",
+ efer, key, devid, devrev, type);
+
+ if (func)
+ func(efer, key);
+}
+
+
/*
 * Probe @io for a Winbond Super-IO chip using unlock key @key (written
 * twice to the EFER at @io).  The id registers are read both before
 * and after unlocking; identical values mean no config space reacted,
 * which guards against false positives on unrelated hardware.  The
 * original port byte is restored before returning.
 */
static void winbond_check(int io, int key)
{
	int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;

	if (!request_region(io, 3, __func__))
		return;

	origval = inb(io);	/* Save original value */

	/* First probe without key */
	outb(0x20, io);
	x_devid = inb(io + 1);
	outb(0x21, io);
	x_devrev = inb(io + 1);
	outb(0x09, io);
	x_oldid = inb(io + 1);

	outb(key, io);
	outb(key, io);	/* Write Magic Sequence to EFER, extended
			   function enable register */
	outb(0x20, io);	/* Write EFIR, extended function index register */
	devid = inb(io + 1);	/* Read EFDR, extended function data register */
	outb(0x21, io);
	devrev = inb(io + 1);
	outb(0x09, io);
	oldid = inb(io + 1);
	outb(0xaa, io);	/* Magic Seal */

	outb(origval, io);	/* in case we poked some entirely different hardware */

	if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
		goto out;	/* protection against false positives */

	decode_winbond(io, key, devid, devrev, oldid);
out:
	release_region(io, 3);
}
+
/*
 * Variant of winbond_check() for chips (probed at 0x250) whose config
 * interface uses a single key byte at @io, the index register at io+1
 * and the data register at io+2.  Same before/after comparison guards
 * against false positives; all three original port bytes are restored.
 */
static void winbond_check2(int io, int key)
{
	int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;

	if (!request_region(io, 3, __func__))
		return;

	origval[0] = inb(io);	/* Save original values */
	origval[1] = inb(io + 1);
	origval[2] = inb(io + 2);

	/* First probe without the key */
	outb(0x20, io + 2);
	x_devid = inb(io + 2);
	outb(0x21, io + 1);
	x_devrev = inb(io + 2);
	outb(0x09, io + 1);
	x_oldid = inb(io + 2);

	outb(key, io);	/* Write Magic Byte to EFER, extended
			   function enable register */
	outb(0x20, io + 2);	/* Write EFIR, extended function index register */
	devid = inb(io + 2);	/* Read EFDR, extended function data register */
	outb(0x21, io + 1);
	devrev = inb(io + 2);
	outb(0x09, io + 1);
	oldid = inb(io + 2);
	outb(0xaa, io);	/* Magic Seal */

	outb(origval[0], io);	/* in case we poked some entirely different hardware */
	outb(origval[1], io + 1);
	outb(origval[2], io + 2);

	if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
		goto out;	/* protection against false positives */

	decode_winbond(io, key, devid, devrev, oldid);
out:
	release_region(io, 3);
}
+
+/*
+ * smsc_check - probe for an SMSC Super-IO chip at config port @io.
+ *
+ * The magic @key (written twice) unlocks the configuration space; the
+ * old-style (0x0d/0x0e) and new-style (0x20/0x21) ID/revision registers
+ * are sampled before and after unlocking.  If nothing changed, the key
+ * had no effect and no chip is reported (false-positive protection).
+ */
+static void smsc_check(int io, int key)
+{
+	int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
+
+	if (!request_region(io, 3, __func__))
+		return;
+
+	origval = inb(io);	/* Save original value */
+
+	/* First probe without the key */
+	outb(0x0d, io);
+	x_oldid = inb(io + 1);
+	outb(0x0e, io);
+	x_oldrev = inb(io + 1);
+	outb(0x20, io);
+	x_id = inb(io + 1);
+	outb(0x21, io);
+	x_rev = inb(io + 1);
+
+	outb(key, io);
+	outb(key, io);     /* Write Magic Sequence to EFER, extended
+			      function enable register */
+	outb(0x0d, io);    /* Write EFIR, extended function index register */
+	oldid = inb(io + 1);	/* Read EFDR, extended function data register */
+	outb(0x0e, io);
+	oldrev = inb(io + 1);
+	outb(0x20, io);
+	id = inb(io + 1);
+	outb(0x21, io);
+	rev = inb(io + 1);
+	outb(0xaa, io);    /* Magic Seal */
+
+	outb(origval, io);	/* in case we poked some entirely different hardware */
+
+	if (x_id == id && x_oldrev == oldrev &&
+	    x_oldid == oldid && x_rev == rev)
+		goto out;	/* protection against false positives */
+
+	decode_smsc(io, key, oldid, oldrev);
+out:
+	release_region(io, 3);
+}
+
+
+/*
+ * Try every known Winbond config-port/magic-key combination.
+ * NOTE(review): the debug message lists the ports in a different order
+ * (3F0,370,250,4E,2E) than they are actually probed below; cosmetic only.
+ */
+static void detect_and_report_winbond(void)
+{
+	if (verbose_probing)
+		printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n");
+	winbond_check(0x3f0, 0x87);
+	winbond_check(0x370, 0x87);
+	winbond_check(0x2e , 0x87);
+	winbond_check(0x4e , 0x87);
+	winbond_check(0x3f0, 0x86);
+	/* 0x250-based parts use the three-port config interface */
+	winbond_check2(0x250, 0x88);
+	winbond_check2(0x250, 0x89);
+}
+
+/*
+ * Try every known SMSC config-port/magic-key combination.
+ * NOTE(review): the debug string says "2F0" but the code probes 0x3f0
+ * and 0x370 — almost certainly a long-standing typo for "3F0"; left
+ * untouched here since it is a runtime string.
+ */
+static void detect_and_report_smsc(void)
+{
+	if (verbose_probing)
+		printk(KERN_DEBUG "SMSC Super-IO detection, now testing Ports 2F0, 370 ...\n");
+	smsc_check(0x3f0, 0x55);
+	smsc_check(0x370, 0x55);
+	smsc_check(0x3f0, 0x44);
+	smsc_check(0x370, 0x44);
+}
+
+/*
+ * Probe for ITE IT87xx Super-IO chips at config port 0x2e/0x2f.
+ *
+ * The 0x87,0x01,0x55,0x55 sequence unlocks the chip; the device ID is
+ * then read from logical registers 0x20/0x21.  On a match the parallel
+ * port logical device (LDN 3) is enabled and the chip is locked again;
+ * the saved original byte is only restored on the no-match path — on
+ * the detect path the new configuration is intentionally kept.
+ */
+static void detect_and_report_it87(void)
+{
+	u16 dev;
+	u8 origval, r;
+	if (verbose_probing)
+		printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
+	if (!request_muxed_region(0x2e, 2, __func__))
+		return;
+	origval = inb(0x2e);	/* Save original value */
+	outb(0x87, 0x2e);
+	outb(0x01, 0x2e);
+	outb(0x55, 0x2e);
+	outb(0x55, 0x2e);
+	outb(0x20, 0x2e);
+	dev = inb(0x2f) << 8;
+	outb(0x21, 0x2e);
+	dev |= inb(0x2f);
+	if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
+	    dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
+		pr_info("IT%04X SuperIO detected\n", dev);
+		outb(0x07, 0x2E);	/* Parallel Port */
+		outb(0x03, 0x2F);
+		outb(0xF0, 0x2E);	/* BOOT 0x80 off */
+		r = inb(0x2f);
+		outb(0xF0, 0x2E);
+		outb(r | 8, 0x2F);
+		outb(0x02, 0x2E);	/* Lock */
+		outb(0x02, 0x2F);
+	} else {
+		outb(origval, 0x2e);	/* Oops, sorry to disturb */
+	}
+	release_region(0x2e, 2);
+}
+#endif /* CONFIG_PARPORT_PC_SUPERIO */
+
+/*
+ * Look up the Super-IO record whose base address matches this port.
+ * Returns NULL when the port was not detected as a Super-IO chip.
+ */
+static struct superio_struct *find_superio(struct parport *p)
+{
+	int i;
+	for (i = 0; i < NR_SUPERIOS; i++)
+		if (superios[i].io == p->base)
+			return &superios[i];
+	return NULL;
+}
+
+/* DMA channel recorded for this port by Super-IO probing, or
+ * PARPORT_DMA_NONE if the port is not a known Super-IO chip. */
+static int get_superio_dma(struct parport *p)
+{
+	struct superio_struct *s = find_superio(p);
+	if (s)
+		return s->dma;
+	return PARPORT_DMA_NONE;
+}
+
+/* IRQ line recorded for this port by Super-IO probing, or
+ * PARPORT_IRQ_NONE if the port is not a known Super-IO chip. */
+static int get_superio_irq(struct parport *p)
+{
+	struct superio_struct *s = find_superio(p);
+	if (s)
+		return s->irq;
+	return PARPORT_IRQ_NONE;
+}
+
+
+/* --- Mode detection ------------------------------------- */
+
+/*
+ * Checks for port existence, all ports support SPP MODE
+ * Returns:
+ * 0 : No parallel port at this address
+ * PARPORT_MODE_PCSPP : SPP port detected
+ * (if the user specified an ioport himself,
+ * this shall always be the case!)
+ *
+ */
+static int parport_SPP_supported(struct parport *pb)
+{
+	unsigned char r, w;
+
+	/*
+	 * first clear an eventually pending EPP timeout
+	 * I (sailer@ife.ee.ethz.ch) have an SMSC chipset
+	 * that does not even respond to SPP cycles if an EPP
+	 * timeout is pending
+	 */
+	clear_epp_timeout(pb);
+
+	/* Do a simple read-write test to make sure the port exists. */
+	w = 0xc;
+	outb(w, CONTROL(pb));
+
+	/* Is there a control register that we can read from?  Some
+	 * ports don't allow reads, so read_control just returns a
+	 * software copy. Some ports _do_ allow reads, so bypass the
+	 * software copy here.  In addition, some bits aren't
+	 * writable. */
+	r = inb(CONTROL(pb));
+	if ((r & 0xf) == w) {
+		/* First pattern matched; confirm with a second one so a
+		 * floating bus can't fake it. */
+		w = 0xe;
+		outb(w, CONTROL(pb));
+		r = inb(CONTROL(pb));
+		outb(0xc, CONTROL(pb));
+		if ((r & 0xf) == w)
+			return PARPORT_MODE_PCSPP;
+	}
+
+	if (user_specified)
+		/* That didn't work, but the user thinks there's a
+		 * port here. */
+		pr_info("parport 0x%lx (WARNING): CTR: wrote 0x%02x, read 0x%02x\n",
+			pb->base, w, r);
+
+	/* Try the data register.  The data lines aren't tri-stated at
+	 * this stage, so we expect back what we wrote. */
+	w = 0xaa;
+	parport_pc_write_data(pb, w);
+	r = parport_pc_read_data(pb);
+	if (r == w) {
+		/* Complementary pattern guards against stuck-high/low lines. */
+		w = 0x55;
+		parport_pc_write_data(pb, w);
+		r = parport_pc_read_data(pb);
+		if (r == w)
+			return PARPORT_MODE_PCSPP;
+	}
+
+	if (user_specified) {
+		/* Didn't work, but the user is convinced this is the
+		 * place. */
+		pr_info("parport 0x%lx (WARNING): DATA: wrote 0x%02x, read 0x%02x\n",
+			pb->base, w, r);
+		pr_info("parport 0x%lx: You gave this address, but there is probably no parallel port there!\n",
+			pb->base);
+	}
+
+	/* It's possible that we can't read the control register or
+	 * the data register.  In that case just believe the user. */
+	if (user_specified)
+		return PARPORT_MODE_PCSPP;
+
+	return 0;
+}
+
+/* Check for ECR
+ *
+ * Old style XT ports alias io ports every 0x400, hence accessing ECR
+ * on these cards actually accesses the CTR.
+ *
+ * Modern cards don't do this but reading from ECR will return 0xff
+ * regardless of what is written here if the card does NOT support
+ * ECP.
+ *
+ * We first check to see if ECR is the same as CTR. If not, the low
+ * two bits of ECR aren't writable, so we check by writing ECR and
+ * reading it back to see if it's what we expect.
+ */
+static int parport_ECR_present(struct parport *pb)
+{
+	struct parport_pc_private *priv = pb->private_data;
+	unsigned char r = 0xc;
+
+	outb(r, CONTROL(pb));
+	if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
+		/* ECR low bits mirror CTR — could be an XT-style alias. */
+		outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
+
+		r = inb(CONTROL(pb));
+		if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
+			goto no_reg; /* Sure that no ECR register exists */
+	}
+
+	/* On a real ECR, bit 0 (FIFO empty) reads 1 and bit 1 reads 0
+	 * after the SPP-mode reset above. */
+	if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
+		goto no_reg;
+
+	/* Bit 0 is read-only: writing 0x34 must read back as 0x35. */
+	ECR_WRITE(pb, 0x34);
+	if (inb(ECONTROL(pb)) != 0x35)
+		goto no_reg;
+
+	priv->ecr = 1;
+	outb(0xc, CONTROL(pb));
+
+	/* Go to mode 000 */
+	frob_set_mode(pb, ECR_SPP);
+
+	return 1;
+
+ no_reg:
+	outb(0xc, CONTROL(pb));
+	return 0;
+}
+
+#ifdef CONFIG_PARPORT_1284
+/* Detect PS/2 support.
+ *
+ * Bit 5 (0x20) sets the PS/2 data direction; setting this high
+ * allows us to read data from the data lines. In theory we would get back
+ * 0xff but any peripheral attached to the port may drag some or all of the
+ * lines down to zero. So if we get back anything that isn't the contents
+ * of the data register we deem PS/2 support to be present.
+ *
+ * Some SPP ports have "half PS/2" ability - you can't turn off the line
+ * drivers, but an external peripheral with sufficiently beefy drivers of
+ * its own can overpower them and assert its own levels onto the bus, from
+ * where they can then be read back as normal. Ports with this property
+ * and the right type of device attached are likely to fail the SPP test,
+ * (as they will appear to have stuck bits) and so the fact that they might
+ * be misdetected here is rather academic.
+ */
+
+static int parport_PS2_supported(struct parport *pb)
+{
+	int ok = 0;
+
+	clear_epp_timeout(pb);
+
+	/* try to tri-state the buffer */
+	parport_pc_data_reverse(pb);
+
+	/* With the drivers off, reads should NOT echo what we wrote;
+	 * each mismatch counts as evidence of tri-state ability. */
+	parport_pc_write_data(pb, 0x55);
+	if (parport_pc_read_data(pb) != 0x55)
+		ok++;
+
+	parport_pc_write_data(pb, 0xaa);
+	if (parport_pc_read_data(pb) != 0xaa)
+		ok++;
+
+	/* cancel input mode */
+	parport_pc_data_forward(pb);
+
+	if (ok) {
+		pb->modes |= PARPORT_MODE_TRISTATE;
+	} else {
+		/* Direction bit (0x20) has no effect: make it unwritable
+		 * so later control writes leave it alone. */
+		struct parport_pc_private *priv = pb->private_data;
+		priv->ctr_writable &= ~0x20;
+	}
+
+	return ok;
+}
+
+#ifdef CONFIG_PARPORT_PC_FIFO
+/*
+ * Detect ECP capability and measure FIFO characteristics.
+ *
+ * Fills in priv->fifo_depth, priv->writeIntrThreshold,
+ * priv->readIntrThreshold and priv->pword; returns 1 if a usable
+ * ECP FIFO is present, 0 otherwise.  Requires an ECR (priv->ecr).
+ */
+static int parport_ECP_supported(struct parport *pb)
+{
+	int i;
+	int config, configb;
+	int pword;
+	struct parport_pc_private *priv = pb->private_data;
+	/* Translate ECP intrLine to ISA irq value */
+	static const int intrline[] = { 0, 7, 9, 10, 11, 14, 15, 5 };
+
+	/* If there is no ECR, we have no hope of supporting ECP. */
+	if (!priv->ecr)
+		return 0;
+
+	/* Find out FIFO depth: fill in TEST mode until the full bit
+	 * (ECR bit 1) is set. */
+	ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+	ECR_WRITE(pb, ECR_TST << 5); /* TEST FIFO */
+	for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02); i++)
+		outb(0xaa, FIFO(pb));
+
+	/*
+	 * Using LGS chipset it uses ECR register, but
+	 * it doesn't support ECP or FIFO MODE
+	 */
+	if (i == 1024) {
+		ECR_WRITE(pb, ECR_SPP << 5);
+		return 0;
+	}
+
+	priv->fifo_depth = i;
+	if (verbose_probing)
+		printk(KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
+
+	/* Find out writeIntrThreshold: drain the FIFO one byte at a
+	 * time until the service bit (ECR bit 2) rises. */
+	frob_econtrol(pb, 1<<2, 1<<2);
+	frob_econtrol(pb, 1<<2, 0);
+	for (i = 1; i <= priv->fifo_depth; i++) {
+		inb(FIFO(pb));
+		udelay(50);
+		if (inb(ECONTROL(pb)) & (1<<2))
+			break;
+	}
+
+	if (i <= priv->fifo_depth) {
+		if (verbose_probing)
+			printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
+			       pb->base, i);
+	} else
+		/* Number of bytes we know we can write if we get an
+		   interrupt. */
+		i = 0;
+
+	priv->writeIntrThreshold = i;
+
+	/* Find out readIntrThreshold */
+	frob_set_mode(pb, ECR_PS2); /* Reset FIFO and enable PS2 */
+	parport_pc_data_reverse(pb); /* Must be in PS2 mode */
+	frob_set_mode(pb, ECR_TST); /* Test FIFO */
+	frob_econtrol(pb, 1<<2, 1<<2);
+	frob_econtrol(pb, 1<<2, 0);
+	for (i = 1; i <= priv->fifo_depth; i++) {
+		outb(0xaa, FIFO(pb));
+		if (inb(ECONTROL(pb)) & (1<<2))
+			break;
+	}
+
+	if (i <= priv->fifo_depth) {
+		if (verbose_probing)
+			pr_info("0x%lx: readIntrThreshold is %d\n",
+				pb->base, i);
+	} else
+		/* Number of bytes we can read if we get an interrupt. */
+		i = 0;
+
+	priv->readIntrThreshold = i;
+
+	/* PWord size is encoded in configuration register A. */
+	ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+	ECR_WRITE(pb, 0xf4); /* Configuration mode */
+	config = inb(CONFIGA(pb));
+	pword = (config >> 4) & 0x7;
+	switch (pword) {
+	case 0:
+		pword = 2;
+		pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
+		break;
+	case 2:
+		pword = 4;
+		pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
+		break;
+	default:
+		pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
+		fallthrough;	/* Assume 1 */
+	case 1:
+		pword = 1;
+	}
+	priv->pword = pword;
+
+	if (verbose_probing) {
+		printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
+		       pb->base, 8 * pword);
+
+		printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n",
+		       pb->base, config & 0x80 ? "Level" : "Pulses");
+
+		configb = inb(CONFIGB(pb));
+		printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
+		       pb->base, config, configb);
+		printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
+		if ((configb >> 3) & 0x07)
+			pr_cont("%d", intrline[(configb >> 3) & 0x07]);
+		else
+			pr_cont("<none or set by other means>");
+		pr_cont(" dma=");
+		/* NOTE(review): tested mask is 0x03 but the printed value
+		 * uses 0x07 — matches historical behaviour, left as-is. */
+		if ((configb & 0x03) == 0x00)
+			pr_cont("<none or set by other means>\n");
+		else
+			pr_cont("%d\n", configb & 0x07);
+	}
+
+	/* Go back to mode 000 */
+	frob_set_mode(pb, ECR_SPP);
+
+	return 1;
+}
+#endif
+
+#ifdef CONFIG_X86_32
+/*
+ * Check for the Intel EPP-in-ECP chipset bug: cycle the ECR through
+ * modes 000-011 and see whether a phony EPP timeout appears in any of
+ * them.  Returns 1 if the bug is present, 0 otherwise.
+ */
+static int intel_bug_present_check_epp(struct parport *pb)
+{
+	const struct parport_pc_private *priv = pb->private_data;
+	int bug_present = 0;
+
+	if (priv->ecr) {
+		/* store value of ECR */
+		unsigned char ecr = inb(ECONTROL(pb));
+		unsigned char i;
+		for (i = 0x00; i < 0x80; i += 0x20) {
+			ECR_WRITE(pb, i);
+			if (clear_epp_timeout(pb)) {
+				/* Phony EPP in ECP. */
+				bug_present = 1;
+				break;
+			}
+		}
+		/* return ECR into the initial state */
+		ECR_WRITE(pb, ecr);
+	}
+
+	return bug_present;
+}
+static int intel_bug_present(struct parport *pb)
+{
+/* Check whether the device is legacy, not PCI or PCMCIA. Only legacy is known to be affected. */
+	if (pb->dev != NULL) {
+		/* PCI/PCMCIA-backed port: the bug does not apply. */
+		return 0;
+	}
+
+	return intel_bug_present_check_epp(pb);
+}
+#else
+/* The Intel EPP bug is only known on 32-bit x86; report absent elsewhere. */
+static int intel_bug_present(struct parport *pb)
+{
+	return 0;
+}
+#endif /* CONFIG_X86_32 */
+
+/*
+ * Probe PS/2 (bidirectional) capability with the ECR forced into PS/2
+ * mode first; the original ECR value is restored afterwards.
+ */
+static int parport_ECPPS2_supported(struct parport *pb)
+{
+	const struct parport_pc_private *priv = pb->private_data;
+	int result;
+	unsigned char oecr;
+
+	if (!priv->ecr)
+		return 0;
+
+	oecr = inb(ECONTROL(pb));
+	ECR_WRITE(pb, ECR_PS2 << 5);
+	result = parport_PS2_supported(pb);
+	ECR_WRITE(pb, oecr);
+	return result;
+}
+
+/* EPP mode detection */
+
+static int parport_EPP_supported(struct parport *pb)
+{
+	/*
+	 * Theory:
+	 *	Bit 0 of STR is the EPP timeout bit, this bit is 0
+	 *	when EPP is possible and is set high when an EPP timeout
+	 *	occurs (EPP uses the HALT line to stop the CPU while it does
+	 *	the byte transfer, an EPP timeout occurs if the attached
+	 *	device fails to respond after 10 micro seconds).
+	 *
+	 *	This bit is cleared by either reading it (National Semi)
+	 *	or writing a 1 to the bit (SMC, UMC, WinBond), others ???
+	 *	This bit is always high in non EPP modes.
+	 */
+
+	/* If EPP timeout bit clear then EPP available */
+	if (!clear_epp_timeout(pb))
+		return 0;  /* No way to clear timeout */
+
+	/* Check for Intel bug. */
+	if (intel_bug_present(pb))
+		return 0;
+
+	pb->modes |= PARPORT_MODE_EPP;
+
+	/* Set up access functions to use EPP hardware. */
+	pb->ops->epp_read_data = parport_pc_epp_read_data;
+	pb->ops->epp_write_data = parport_pc_epp_write_data;
+	pb->ops->epp_read_addr = parport_pc_epp_read_addr;
+	pb->ops->epp_write_addr = parport_pc_epp_write_addr;
+
+	return 1;
+}
+
+/*
+ * Probe for SMC-style combined ECP+EPP mode: switch the ECR to EPP
+ * mode (0x80), run the normal EPP test, then restore the ECR.  On
+ * success the EPP ops are pointed at the ECP+EPP-aware variants.
+ */
+static int parport_ECPEPP_supported(struct parport *pb)
+{
+	struct parport_pc_private *priv = pb->private_data;
+	int result;
+	unsigned char oecr;
+
+	if (!priv->ecr)
+		return 0;
+
+	oecr = inb(ECONTROL(pb));
+	/* Search for SMC style EPP+ECP mode */
+	ECR_WRITE(pb, 0x80);
+	outb(0x04, CONTROL(pb));
+	result = parport_EPP_supported(pb);
+
+	ECR_WRITE(pb, oecr);
+
+	if (result) {
+		/* Set up access functions to use ECP+EPP hardware. */
+		pb->ops->epp_read_data = parport_pc_ecpepp_read_data;
+		pb->ops->epp_write_data = parport_pc_ecpepp_write_data;
+		pb->ops->epp_read_addr = parport_pc_ecpepp_read_addr;
+		pb->ops->epp_write_addr = parport_pc_ecpepp_write_addr;
+	}
+
+	return result;
+}
+
+#else /* No IEEE 1284 support */
+
+/* Don't bother probing for modes we know we won't use. */
+/* Without CONFIG_PARPORT_1284 these probes always report "unsupported",
+ * so the port is driven in plain SPP mode only. */
+static int parport_PS2_supported(struct parport *pb) { return 0; }
+#ifdef CONFIG_PARPORT_PC_FIFO
+static int parport_ECP_supported(struct parport *pb)
+{
+	return 0;
+}
+#endif
+static int parport_EPP_supported(struct parport *pb)
+{
+	return 0;
+}
+
+static int parport_ECPEPP_supported(struct parport *pb)
+{
+	return 0;
+}
+
+static int parport_ECPPS2_supported(struct parport *pb)
+{
+	return 0;
+}
+
+#endif /* No IEEE 1284 support */
+
+/* --- IRQ detection -------------------------------------- */
+
+/* Only if supports ECP mode */
+/*
+ * Read the IRQ line programmed into ECP configuration register B.
+ * Only valid on ECP-capable ports; the ECR is restored on exit.
+ * Returns an ISA IRQ number or PARPORT_IRQ_NONE.
+ */
+static int programmable_irq_support(struct parport *pb)
+{
+	int irq, intrLine;
+	unsigned char oecr = inb(ECONTROL(pb));
+	static const int lookup[8] = {
+		PARPORT_IRQ_NONE, 7, 9, 10, 11, 14, 15, 5
+	};
+
+	ECR_WRITE(pb, ECR_CNF << 5); /* Configuration MODE */
+
+	intrLine = (inb(CONFIGB(pb)) >> 3) & 0x07;
+	irq = lookup[intrLine];
+
+	ECR_WRITE(pb, oecr);
+	return irq;
+}
+
+/*
+ * Autoprobe the IRQ by filling the ECP FIFO in TEST mode with the
+ * service interrupt enabled, then asking the generic IRQ autoprobe
+ * machinery which line fired.  Stores and returns pb->irq.
+ */
+static int irq_probe_ECP(struct parport *pb)
+{
+	int i;
+	unsigned long irqs;
+
+	irqs = probe_irq_on();
+
+	ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
+	ECR_WRITE(pb, (ECR_TST << 5) | 0x04);
+	ECR_WRITE(pb, ECR_TST << 5);
+
+	/* If Full FIFO sure that writeIntrThreshold is generated */
+	for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02) ; i++)
+		outb(0xaa, FIFO(pb));
+
+	pb->irq = probe_irq_off(irqs);
+	ECR_WRITE(pb, ECR_SPP << 5);
+
+	if (pb->irq <= 0)
+		pb->irq = PARPORT_IRQ_NONE;
+
+	return pb->irq;
+}
+
+/*
+ * This detection seems that only works in National Semiconductors
+ * This doesn't work in SMC, LGS, and Winbond
+ */
+/*
+ * EPP-based IRQ autoprobe: provoke an interrupt with an unexpected EPP
+ * read and see which line fires.  Compiled out (returns IRQ_NONE)
+ * unless ADVANCED_DETECT is defined, because the trick is only known
+ * to work on National Semiconductor chips.
+ */
+static int irq_probe_EPP(struct parport *pb)
+{
+#ifndef ADVANCED_DETECT
+	return PARPORT_IRQ_NONE;
+#else
+	int irqs;
+	unsigned char oecr;
+
+	if (pb->modes & PARPORT_MODE_PCECR)
+		oecr = inb(ECONTROL(pb));
+
+	irqs = probe_irq_on();
+
+	if (pb->modes & PARPORT_MODE_PCECR)
+		frob_econtrol(pb, 0x10, 0x10);
+
+	clear_epp_timeout(pb);
+	parport_pc_frob_control(pb, 0x20, 0x20);
+	parport_pc_frob_control(pb, 0x10, 0x10);
+	clear_epp_timeout(pb);
+
+	/* Device isn't expecting an EPP read
+	 * and generates an IRQ.
+	 */
+	parport_pc_read_epp(pb);
+	udelay(20);
+
+	pb->irq = probe_irq_off(irqs);
+	if (pb->modes & PARPORT_MODE_PCECR)
+		ECR_WRITE(pb, oecr);
+	parport_pc_write_control(pb, 0xc);
+
+	if (pb->irq <= 0)
+		pb->irq = PARPORT_IRQ_NONE;
+
+	return pb->irq;
+#endif /* Advanced detection */
+}
+
+/* There is no reliable way to autoprobe an IRQ on a plain SPP port. */
+static int irq_probe_SPP(struct parport *pb)
+{
+	/* Don't even try to do this. */
+	return PARPORT_IRQ_NONE;
+}
+
+/* We will attempt to share interrupt requests since other devices
+ * such as sound cards and network cards seem to like using the
+ * printer IRQs.
+ *
+ * When ECP is available we can autoprobe for IRQs.
+ * NOTE: If we can autoprobe it, we can register the IRQ.
+ */
+static int parport_irq_probe(struct parport *pb)
+{
+	struct parport_pc_private *priv = pb->private_data;
+
+	if (priv->ecr) {
+		/* Prefer the IRQ programmed into the ECP config space;
+		 * fall back to provoking one via the FIFO. */
+		pb->irq = programmable_irq_support(pb);
+
+		if (pb->irq == PARPORT_IRQ_NONE)
+			pb->irq = irq_probe_ECP(pb);
+	}
+
+	if ((pb->irq == PARPORT_IRQ_NONE) && priv->ecr &&
+	    (pb->modes & PARPORT_MODE_EPP))
+		pb->irq = irq_probe_EPP(pb);
+
+	clear_epp_timeout(pb);
+
+	/* NOTE(review): irq_probe_EPP is attempted a second time here
+	 * without the priv->ecr condition; harmless, matches historical
+	 * behaviour. */
+	if (pb->irq == PARPORT_IRQ_NONE && (pb->modes & PARPORT_MODE_EPP))
+		pb->irq = irq_probe_EPP(pb);
+
+	clear_epp_timeout(pb);
+
+	if (pb->irq == PARPORT_IRQ_NONE)
+		pb->irq = irq_probe_SPP(pb);
+
+	if (pb->irq == PARPORT_IRQ_NONE)
+		pb->irq = get_superio_irq(pb);
+
+	return pb->irq;
+}
+
+/* --- DMA detection -------------------------------------- */
+
+/* Only if chipset conforms to ECP ISA Interface Standard */
+/*
+ * Read the DMA channel programmed into ECP configuration register B.
+ * The ECR is restored on exit.  Returns a channel number or
+ * PARPORT_DMA_NONE.
+ */
+static int programmable_dma_support(struct parport *p)
+{
+	unsigned char oecr = inb(ECONTROL(p));
+	int dma;
+
+	frob_set_mode(p, ECR_CNF);
+
+	dma = inb(CONFIGB(p)) & 0x07;
+	/* 000: Indicates jumpered 8-bit DMA if read-only.
+	   100: Indicates jumpered 16-bit DMA if read-only. */
+	if ((dma & 0x03) == 0)
+		dma = PARPORT_DMA_NONE;
+
+	ECR_WRITE(p, oecr);
+	return dma;
+}
+
+/* Determine the DMA channel: ask the ECP chipset first, then fall back
+ * to the Super-IO table.  Stores and returns p->dma. */
+static int parport_dma_probe(struct parport *p)
+{
+	const struct parport_pc_private *priv = p->private_data;
+	if (priv->ecr)		/* ask ECP chipset first */
+		p->dma = programmable_dma_support(p);
+	if (p->dma == PARPORT_DMA_NONE) {
+		/* ask known Super-IO chips proper, although these
+		   claim ECP compatible, some don't report their DMA
+		   conforming to ECP standards */
+		p->dma = get_superio_dma(p);
+	}
+
+	return p->dma;
+}
+
+/* --- Initialisation code -------------------------------- */
+
+static LIST_HEAD(ports_list);
+static DEFINE_SPINLOCK(ports_lock);
+
+/*
+ * parport_pc_probe_port - probe and register a PC-style parallel port.
+ * @base:     base I/O address (SPP registers)
+ * @base_hi:  high I/O address (ECR/FIFO), or 0 if none
+ * @irq:      IRQ number, PARPORT_IRQ_NONE/AUTO/PROBEONLY
+ * @dma:      DMA channel, PARPORT_DMA_NONE/AUTO/NOFIFO
+ * @dev:      backing device; a platform device is created if NULL
+ * @irqflags: flags passed to request_irq()
+ *
+ * Detects the supported modes (SPP/PS2/EPP/ECP), probes IRQ/DMA when
+ * asked to, claims the I/O regions, and announces the port.  Returns
+ * the new parport or NULL on failure; resources are unwound on the
+ * out1..out5 error paths in reverse order of acquisition.
+ */
+struct parport *parport_pc_probe_port(unsigned long int base,
+				      unsigned long int base_hi,
+				      int irq, int dma,
+				      struct device *dev,
+				      int irqflags)
+{
+	struct parport_pc_private *priv;
+	struct parport_operations *ops;
+	struct parport *p;
+	int probedirq = PARPORT_IRQ_NONE;
+	struct resource *base_res;
+	struct resource *ECR_res = NULL;
+	struct resource *EPP_res = NULL;
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	if (!dev) {
+		/* We need a physical device to attach to, but none was
+		 * provided. Create our own. */
+		pdev = platform_device_register_simple("parport_pc",
+						       base, NULL, 0);
+		if (IS_ERR(pdev))
+			return NULL;
+		dev = &pdev->dev;
+
+		/* ISA DMA can only address the low 16 MB. */
+		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+		if (ret) {
+			dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+			dma = PARPORT_DMA_NONE;
+		}
+	}
+
+	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
+	if (!ops)
+		goto out1;
+
+	priv = kmalloc(sizeof(struct parport_pc_private), GFP_KERNEL);
+	if (!priv)
+		goto out2;
+
+	/* a misnomer, actually - it's allocate and reserve parport number */
+	p = parport_register_port(base, irq, dma, ops);
+	if (!p)
+		goto out3;
+
+	base_res = request_region(base, 3, p->name);
+	if (!base_res)
+		goto out4;
+
+	memcpy(ops, &parport_pc_ops, sizeof(struct parport_operations));
+	priv->ctr = 0xc;
+	priv->ctr_writable = ~0x10;	/* IRQ-enable bit locked until an IRQ is known */
+	priv->ecr = 0;
+	priv->fifo_depth = 0;
+	priv->dma_buf = NULL;
+	priv->dma_handle = 0;
+	INIT_LIST_HEAD(&priv->list);
+	priv->port = p;
+
+	p->dev = dev;
+	p->base_hi = base_hi;
+	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
+	p->private_data = priv;
+
+	if (base_hi) {
+		ECR_res = request_region(base_hi, 3, p->name);
+		if (ECR_res)
+			parport_ECR_present(p);
+	}
+
+	/* 0x3bc ports have no EPP register block at base+3. */
+	if (base != 0x3bc) {
+		EPP_res = request_region(base+0x3, 5, p->name);
+		if (EPP_res)
+			if (!parport_EPP_supported(p))
+				parport_ECPEPP_supported(p);
+	}
+	if (!parport_SPP_supported(p))
+		/* No port. */
+		goto out5;
+	if (priv->ecr)
+		parport_ECPPS2_supported(p);
+	else
+		parport_PS2_supported(p);
+
+	p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
+
+	pr_info("%s: PC-style at 0x%lx", p->name, p->base);
+	if (p->base_hi && priv->ecr)
+		pr_cont(" (0x%lx)", p->base_hi);
+	if (p->irq == PARPORT_IRQ_AUTO) {
+		p->irq = PARPORT_IRQ_NONE;
+		parport_irq_probe(p);
+	} else if (p->irq == PARPORT_IRQ_PROBEONLY) {
+		/* Probe and report, but do not actually use the IRQ. */
+		p->irq = PARPORT_IRQ_NONE;
+		parport_irq_probe(p);
+		probedirq = p->irq;
+		p->irq = PARPORT_IRQ_NONE;
+	}
+	if (p->irq != PARPORT_IRQ_NONE) {
+		pr_cont(", irq %d", p->irq);
+		priv->ctr_writable |= 0x10;
+
+		if (p->dma == PARPORT_DMA_AUTO) {
+			p->dma = PARPORT_DMA_NONE;
+			parport_dma_probe(p);
+		}
+	}
+	if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
+					   is mandatory (see above) */
+		p->dma = PARPORT_DMA_NONE;
+
+#ifdef CONFIG_PARPORT_PC_FIFO
+	if (parport_ECP_supported(p) &&
+	    p->dma != PARPORT_DMA_NOFIFO &&
+	    priv->fifo_depth > 0 && p->irq != PARPORT_IRQ_NONE) {
+		p->modes |= PARPORT_MODE_ECP | PARPORT_MODE_COMPAT;
+		p->ops->compat_write_data = parport_pc_compat_write_block_pio;
+#ifdef CONFIG_PARPORT_1284
+		p->ops->ecp_write_data = parport_pc_ecp_write_block_pio;
+		/* currently broken, but working on it.. (FB) */
+		/* p->ops->ecp_read_data = parport_pc_ecp_read_block_pio; */
+#endif /* IEEE 1284 support */
+		if (p->dma != PARPORT_DMA_NONE) {
+			pr_cont(", dma %d", p->dma);
+			p->modes |= PARPORT_MODE_DMA;
+		} else
+			pr_cont(", using FIFO");
+	} else
+		/* We can't use the DMA channel after all. */
+		p->dma = PARPORT_DMA_NONE;
+#endif /* Allowed to use FIFO/DMA */
+
+	pr_cont(" [");
+
+/* Emit a comma-separated mode name if the mode bit is set. */
+#define printmode(x) \
+do { \
+	if (p->modes & PARPORT_MODE_##x) \
+		pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
+
+	{
+		int f = 0;
+		printmode(PCSPP);
+		printmode(TRISTATE);
+		printmode(COMPAT);
+		printmode(EPP);
+		printmode(ECP);
+		printmode(DMA);
+	}
+#undef printmode
+#ifndef CONFIG_PARPORT_1284
+	pr_cont("(,...)");
+#endif /* CONFIG_PARPORT_1284 */
+	pr_cont("]\n");
+	if (probedirq != PARPORT_IRQ_NONE)
+		pr_info("%s: irq %d detected\n", p->name, probedirq);
+
+	/* If No ECP release the ports grabbed above. */
+	if (ECR_res && (p->modes & PARPORT_MODE_ECP) == 0) {
+		release_region(base_hi, 3);
+		ECR_res = NULL;
+	}
+	/* Likewise for EEP ports */
+	if (EPP_res && (p->modes & PARPORT_MODE_EPP) == 0) {
+		release_region(base+3, 5);
+		EPP_res = NULL;
+	}
+	if (p->irq != PARPORT_IRQ_NONE) {
+		if (request_irq(p->irq, parport_irq_handler,
+				 irqflags, p->name, p)) {
+			pr_warn("%s: irq %d in use, resorting to polled operation\n",
+				p->name, p->irq);
+			p->irq = PARPORT_IRQ_NONE;
+			p->dma = PARPORT_DMA_NONE;
+		}
+
+#ifdef CONFIG_PARPORT_PC_FIFO
+#ifdef HAS_DMA
+		if (p->dma != PARPORT_DMA_NONE) {
+			if (request_dma(p->dma, p->name)) {
+				pr_warn("%s: dma %d in use, resorting to PIO operation\n",
+					p->name, p->dma);
+				p->dma = PARPORT_DMA_NONE;
+			} else {
+				priv->dma_buf =
+				  dma_alloc_coherent(dev,
+						       PAGE_SIZE,
+						       &priv->dma_handle,
+						       GFP_KERNEL);
+				if (!priv->dma_buf) {
+					pr_warn("%s: cannot get buffer for DMA, resorting to PIO operation\n",
+						p->name);
+					free_dma(p->dma);
+					p->dma = PARPORT_DMA_NONE;
+				}
+			}
+		}
+#endif
+#endif
+	}
+
+	/* Done probing.  Now put the port into a sensible start-up state. */
+	if (priv->ecr)
+		/*
+		 * Put the ECP detected port in PS2 mode.
+		 * Do this also for ports that have ECR but don't do ECP.
+		 */
+		ECR_WRITE(p, 0x34);
+
+	parport_pc_write_data(p, 0);
+	parport_pc_data_forward(p);
+
+	/* Now that we've told the sharing engine about the port, and
+	   found out its characteristics, let the high-level drivers
+	   know about it. */
+	spin_lock(&ports_lock);
+	list_add(&priv->list, &ports_list);
+	spin_unlock(&ports_lock);
+	parport_announce_port(p);
+
+	return p;
+
+out5:
+	if (ECR_res)
+		release_region(base_hi, 3);
+	if (EPP_res)
+		release_region(base+0x3, 5);
+	release_region(base, 3);
+out4:
+	parport_del_port(p);
+out3:
+	kfree(priv);
+out2:
+	kfree(ops);
+out1:
+	if (pdev)
+		platform_device_unregister(pdev);
+	return NULL;
+}
+EXPORT_SYMBOL(parport_pc_probe_port);
+
+/*
+ * Tear down a port created by parport_pc_probe_port(): detach it from
+ * the sharing layer, release DMA/IRQ/I-O resources, free the DMA
+ * buffer and the private/ops allocations.
+ */
+void parport_pc_unregister_port(struct parport *p)
+{
+	struct parport_pc_private *priv = p->private_data;
+	struct parport_operations *ops = p->ops;
+
+	parport_remove_port(p);
+	spin_lock(&ports_lock);
+	list_del_init(&priv->list);
+	spin_unlock(&ports_lock);
+#if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA)
+	if (p->dma != PARPORT_DMA_NONE)
+		free_dma(p->dma);
+#endif
+	if (p->irq != PARPORT_IRQ_NONE)
+		free_irq(p->irq, p);
+	release_region(p->base, 3);
+	/* size > 3 means the EPP region at base+3 is still held. */
+	if (p->size > 3)
+		release_region(p->base + 3, p->size - 3);
+	if (p->modes & PARPORT_MODE_ECP)
+		release_region(p->base_hi, 3);
+#if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA)
+	if (priv->dma_buf)
+		dma_free_coherent(p->physport->dev, PAGE_SIZE,
+				    priv->dma_buf,
+				    priv->dma_handle);
+#endif
+	kfree(p->private_data);
+	parport_del_port(p);
+	kfree(ops); /* hope no-one cached it */
+}
+EXPORT_SYMBOL(parport_pc_unregister_port);
+
+#ifdef CONFIG_PCI
+
+/* ITE support maintained by Rich Liu <richliu@poorman.org> */
+/* ITE support maintained by Rich Liu <richliu@poorman.org> */
+/*
+ * Probe an ITE 887x PCI Super-IO for its parallel port: locate the
+ * chip's INTA I/O window, identify the exact variant from the type
+ * register, program the LPT decode/IRQ config registers, then hand
+ * the resulting addresses to parport_pc_probe_port().
+ * Returns 1 if a port was registered, 0 otherwise.
+ */
+static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
+			      const struct parport_pc_via_data *via)
+{
+	/* NOTE(review): declared with 6 slots but only 5 initialized and
+	 * probed; the sixth entry is unused. */
+	short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
+	u32 ite8872set;
+	u32 ite8872_lpt, ite8872_lpthi;
+	u8 ite8872_irq, type;
+	int irq;
+	int i;
+
+	pr_debug("sio_ite_8872_probe()\n");
+
+	/* make sure which one chip */
+	for (i = 0; i < 5; i++) {
+		if (request_region(inta_addr[i], 32, "it887x")) {
+			int test;
+			pci_write_config_dword(pdev, 0x60,
+						0xe5000000 | inta_addr[i]);
+			pci_write_config_dword(pdev, 0x78,
+						0x00000000 | inta_addr[i]);
+			test = inb(inta_addr[i]);
+			if (test != 0xff)
+				break;
+			release_region(inta_addr[i], 32);
+		}
+	}
+	if (i >= 5) {
+		pr_info("parport_pc: cannot find ITE8872 INTA\n");
+		return 0;
+	}
+
+	type = inb(inta_addr[i] + 0x18);
+	type &= 0x0f;
+
+	switch (type) {
+	case 0x2:
+		pr_info("parport_pc: ITE8871 found (1P)\n");
+		ite8872set = 0x64200000;
+		break;
+	case 0xa:
+		pr_info("parport_pc: ITE8875 found (1P)\n");
+		ite8872set = 0x64200000;
+		break;
+	case 0xe:
+		pr_info("parport_pc: ITE8872 found (2S1P)\n");
+		ite8872set = 0x64e00000;
+		break;
+	case 0x6:
+		/* Serial-only variants: nothing for us to register. */
+		pr_info("parport_pc: ITE8873 found (1S)\n");
+		release_region(inta_addr[i], 32);
+		return 0;
+	case 0x8:
+		pr_info("parport_pc: ITE8874 found (2S)\n");
+		release_region(inta_addr[i], 32);
+		return 0;
+	default:
+		pr_info("parport_pc: unknown ITE887x\n");
+		pr_info("parport_pc: please mail 'lspci -nvv' output to Rich.Liu@ite.com.tw\n");
+		release_region(inta_addr[i], 32);
+		return 0;
+	}
+
+	pci_read_config_byte(pdev, 0x3c, &ite8872_irq);
+	pci_read_config_dword(pdev, 0x1c, &ite8872_lpt);
+	ite8872_lpt &= 0x0000ff00;
+	pci_read_config_dword(pdev, 0x20, &ite8872_lpthi);
+	ite8872_lpthi &= 0x0000ff00;
+	pci_write_config_dword(pdev, 0x6c, 0xe3000000 | ite8872_lpt);
+	pci_write_config_dword(pdev, 0x70, 0xe3000000 | ite8872_lpthi);
+	pci_write_config_dword(pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
+	/* SET SPP&EPP , Parallel Port NO DMA , Enable All Function */
+	/* SET Parallel IRQ */
+	pci_write_config_dword(pdev, 0x9c,
+				ite8872set | (ite8872_irq * 0x11111));
+
+	pr_debug("ITE887x: The IRQ is %d\n", ite8872_irq);
+	pr_debug("ITE887x: The PARALLEL I/O port is 0x%x\n", ite8872_lpt);
+	pr_debug("ITE887x: The PARALLEL I/O porthi is 0x%x\n", ite8872_lpthi);
+
+	/* Let the user (or defaults) steer us away from interrupts */
+	irq = ite8872_irq;
+	if (autoirq != PARPORT_IRQ_AUTO)
+		irq = PARPORT_IRQ_NONE;
+
+	/*
+	 * Release the resource so that parport_pc_probe_port can get it.
+	 */
+	release_region(inta_addr[i], 32);
+	if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
+				   irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
+		pr_info("parport_pc: ITE 8872 parallel port: io=0x%X",
+			ite8872_lpt);
+		if (irq != PARPORT_IRQ_NONE)
+			pr_cont(", irq=%d", irq);
+		pr_cont("\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+/* VIA 8231 support by Pavel Fedin <sonic_amiga@rambler.ru>
+ based on VIA 686a support code by Jeff Garzik <jgarzik@pobox.com> */
+static int parport_init_mode;
+
+/* Data for two known VIA chips */
+/* Positional initializers for struct parport_pc_via_data (defined
+ * elsewhere in this file); the fields are the per-chip config-register
+ * addresses used by sio_via_probe().  Presumed order: superio config
+ * reg/data, function, parport control, parport base, irq reg, dma reg
+ * — confirm against the struct definition. */
+static struct parport_pc_via_data via_686a_data = {
+	0x51,
+	0x50,
+	0x85,
+	0x02,
+	0xE2,
+	0xF0,
+	0xE6
+};
+static struct parport_pc_via_data via_8231_data = {
+	0x45,
+	0x44,
+	0x50,
+	0x04,
+	0xF2,
+	0xFA,
+	0xF6
+};
+
+/*
+ * Probe the parallel port of a VIA 686A/8231 south bridge.
+ *
+ * Depending on the parport_init_mode module parameter the port is
+ * forced into SPP/PS2/EPP/ECP/EPP+ECP mode, or the BIOS configuration
+ * is probed as-is.  The Super-IO config space is unlocked through PCI
+ * config registers (per-chip offsets come from @via), the port base,
+ * IRQ and DMA are read out, sanity-filtered, and passed to
+ * parport_pc_probe_port().  Returns 1 on success, 0 otherwise.
+ */
+static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
+			 const struct parport_pc_via_data *via)
+{
+	u8 tmp, tmp2, siofunc;
+	u8 ppcontrol = 0;
+	int dma, irq;
+	unsigned port1, port2;
+	unsigned have_epp = 0;
+
+	printk(KERN_DEBUG "parport_pc: VIA 686A/8231 detected\n");
+
+	switch (parport_init_mode) {
+	case 1:
+		printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
+		siofunc = VIA_FUNCTION_PARPORT_SPP;
+		break;
+	case 2:
+		printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
+		siofunc = VIA_FUNCTION_PARPORT_SPP;
+		ppcontrol = VIA_PARPORT_BIDIR;
+		break;
+	case 3:
+		printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
+		siofunc = VIA_FUNCTION_PARPORT_EPP;
+		ppcontrol = VIA_PARPORT_BIDIR;
+		have_epp = 1;
+		break;
+	case 4:
+		printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
+		siofunc = VIA_FUNCTION_PARPORT_ECP;
+		ppcontrol = VIA_PARPORT_BIDIR;
+		break;
+	case 5:
+		printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
+		siofunc = VIA_FUNCTION_PARPORT_ECP;
+		ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
+		have_epp = 1;
+		break;
+	default:
+		printk(KERN_DEBUG "parport_pc: probing current configuration\n");
+		siofunc = VIA_FUNCTION_PROBE;
+		break;
+	}
+	/*
+	 * unlock super i/o configuration
+	 */
+	pci_read_config_byte(pdev, via->via_pci_superio_config_reg, &tmp);
+	tmp |= via->via_pci_superio_config_data;
+	pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
+
+	/* Bits 1-0: Parallel Port Mode / Enable */
+	outb(via->viacfg_function, VIA_CONFIG_INDEX);
+	tmp = inb(VIA_CONFIG_DATA);
+	/* Bit 5: EPP+ECP enable; bit 7: PS/2 bidirectional port enable */
+	outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
+	tmp2 = inb(VIA_CONFIG_DATA);
+	if (siofunc == VIA_FUNCTION_PROBE) {
+		/* Keep whatever the BIOS configured. */
+		siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
+		ppcontrol = tmp2;
+	} else {
+		tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
+		tmp |= siofunc;
+		outb(via->viacfg_function, VIA_CONFIG_INDEX);
+		outb(tmp, VIA_CONFIG_DATA);
+		tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
+		tmp2 |= ppcontrol;
+		outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
+		outb(tmp2, VIA_CONFIG_DATA);
+	}
+
+	/* Parallel Port I/O Base Address, bits 9-2 */
+	outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
+	port1 = inb(VIA_CONFIG_DATA) << 2;
+
+	printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
+	       port1);
+	/* 0x3bc has no room for the EPP registers; relocate to 0x378. */
+	if (port1 == 0x3BC && have_epp) {
+		outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
+		outb((0x378 >> 2), VIA_CONFIG_DATA);
+		printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
+		port1 = 0x378;
+	}
+
+	/*
+	 * lock super i/o configuration
+	 */
+	pci_read_config_byte(pdev, via->via_pci_superio_config_reg, &tmp);
+	tmp &= ~via->via_pci_superio_config_data;
+	pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
+
+	if (siofunc == VIA_FUNCTION_PARPORT_DISABLE) {
+		pr_info("parport_pc: VIA parallel port disabled in BIOS\n");
+		return 0;
+	}
+
+	/* Bits 7-4: PnP Routing for Parallel Port IRQ */
+	pci_read_config_byte(pdev, via->via_pci_parport_irq_reg, &tmp);
+	irq = ((tmp & VIA_IRQCONTROL_PARALLEL) >> 4);
+
+	if (siofunc == VIA_FUNCTION_PARPORT_ECP) {
+		/* Bits 3-2: PnP Routing for Parallel Port DMA */
+		pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
+		dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
+	} else
+		/* if ECP not enabled, DMA is not enabled, assumed
+		   bogus 'dma' value */
+		dma = PARPORT_DMA_NONE;
+
+	/* Let the user (or defaults) steer us away from interrupts and DMA */
+	if (autoirq == PARPORT_IRQ_NONE) {
+		irq = PARPORT_IRQ_NONE;
+		dma = PARPORT_DMA_NONE;
+	}
+	if (autodma == PARPORT_DMA_NONE)
+		dma = PARPORT_DMA_NONE;
+
+	/* Derive the high (ECP) base from the standard base. */
+	switch (port1) {
+	case 0x3bc:
+		port2 = 0x7bc; break;
+	case 0x378:
+		port2 = 0x778; break;
+	case 0x278:
+		port2 = 0x678; break;
+	default:
+		pr_info("parport_pc: Weird VIA parport base 0x%X, ignoring\n",
+			port1);
+		return 0;
+	}
+
+	/* filter bogus IRQs */
+	switch (irq) {
+	case 0:
+	case 2:
+	case 8:
+	case 13:
+		irq = PARPORT_IRQ_NONE;
+		break;
+
+	default: /* do nothing */
+		break;
+	}
+
+	/* finally, do the probe with values obtained */
+	if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
+		pr_info("parport_pc: VIA parallel port: io=0x%X", port1);
+		if (irq != PARPORT_IRQ_NONE)
+			pr_cont(", irq=%d", irq);
+		if (dma != PARPORT_DMA_NONE)
+			pr_cont(", dma=%d", dma);
+		pr_cont("\n");
+		return 1;
+	}
+
+	pr_warn("parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
+		port1, irq, dma);
+	return 0;
+}
+
+
+/* Identifiers for PCI-visible onboard Super-I/O chips; each value
+ * directly indexes parport_pc_superio_info[] below. */
+enum parport_pc_sio_types {
+ sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
+ sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
+ sio_ite_8872,
+ last_sio /* count of Super-I/O entries; PCI card IDs continue from here */
+};
+
+/* each element directly indexed from enum list, above */
+static struct parport_pc_superio {
+ /* chip-specific detect/configure routine; returns nonzero on success */
+ int (*probe) (struct pci_dev *pdev, int autoirq, int autodma,
+ const struct parport_pc_via_data *via);
+ /* VIA register-layout data, or NULL for non-VIA probes */
+ const struct parport_pc_via_data *via;
+} parport_pc_superio_info[] = {
+ { sio_via_probe, &via_686a_data, },
+ { sio_via_probe, &via_8231_data, },
+ { sio_ite_8872_probe, NULL, },
+};
+
+/* Add-in PCI parallel-port cards.  Numbering starts at last_sio so the
+ * same driver_data space covers both the Super-I/O table and cards[]
+ * (which is indexed by this value minus last_sio). */
+enum parport_pc_pci_cards {
+ siig_1p_10x = last_sio,
+ siig_2p_10x,
+ siig_1p_20x,
+ siig_2p_20x,
+ lava_parallel,
+ lava_parallel_dual_a,
+ lava_parallel_dual_b,
+ boca_ioppar,
+ plx_9050,
+ timedia_4006a,
+ timedia_4014,
+ timedia_4008a,
+ timedia_4018,
+ timedia_9018a,
+ syba_2p_epp,
+ syba_1p_ecp,
+ titan_010l,
+ avlab_1p,
+ avlab_2p,
+ oxsemi_952,
+ oxsemi_954,
+ oxsemi_840,
+ oxsemi_pcie_pport,
+ aks_0100,
+ mobility_pp,
+ netmos_9705,
+ netmos_9715,
+ netmos_9755,
+ netmos_9805,
+ netmos_9815,
+ netmos_9901,
+ netmos_9865,
+ quatech_sppxp100,
+ wch_ch382l,
+ brainboxes_uc146,
+ brainboxes_px203,
+};
+
+
+/* each element directly indexed from enum list, above
+ * (but offset by last_sio) */
+static struct parport_pc_pci {
+ int numports; /* at most ARRAY_SIZE(addr) ports per card */
+ struct { /* BAR (base address registers) numbers in the config
+ space header */
+ int lo;
+ int hi;
+ /* -1 if not there, >6 for offset-method (max BAR is 6) */
+ } addr[4];
+
+ /* If set, this is called immediately after pci_enable_device.
+ * If it returns non-zero, no probing will take place and the
+ * ports will not be used. */
+ int (*preinit_hook) (struct pci_dev *pdev, int autoirq, int autodma);
+
+ /* If set, this is called after probing for ports. If 'failed'
+ * is non-zero we couldn't use any of the ports. */
+ void (*postinit_hook) (struct pci_dev *pdev, int failed);
+} cards[] = {
+ /* siig_1p_10x */ { 1, { { 2, 3 }, } },
+ /* siig_2p_10x */ { 2, { { 2, 3 }, { 4, 5 }, } },
+ /* siig_1p_20x */ { 1, { { 0, 1 }, } },
+ /* siig_2p_20x */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* lava_parallel */ { 1, { { 0, -1 }, } },
+ /* lava_parallel_dual_a */ { 1, { { 0, -1 }, } },
+ /* lava_parallel_dual_b */ { 1, { { 0, -1 }, } },
+ /* boca_ioppar */ { 1, { { 0, -1 }, } },
+ /* plx_9050 */ { 2, { { 4, -1 }, { 5, -1 }, } },
+ /* timedia_4006a */ { 1, { { 0, -1 }, } },
+ /* timedia_4014 */ { 2, { { 0, -1 }, { 2, -1 }, } },
+ /* timedia_4008a */ { 1, { { 0, 1 }, } },
+ /* timedia_4018 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* timedia_9018a */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* SYBA uses fixed offsets in
+ a 1K io window */
+ /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
+ /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
+ /* titan_010l */ { 1, { { 3, -1 }, } },
+ /* avlab_1p */ { 1, { { 0, 1}, } },
+ /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
+ /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
+ * and 840 locks up if you write 1 to bit 2! */
+ /* oxsemi_952 */ { 1, { { 0, 1 }, } },
+ /* oxsemi_954 */ { 1, { { 0, -1 }, } },
+ /* oxsemi_840 */ { 1, { { 0, 1 }, } },
+ /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
+ /* aks_0100 */ { 1, { { 0, -1 }, } },
+ /* mobility_pp */ { 1, { { 0, 1 }, } },
+
+ /* The netmos entries below are untested */
+ /* netmos_9705 */ { 1, { { 0, -1 }, } },
+ /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9805 */ { 1, { { 0, 1 }, } },
+ /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+ /* wch_ch382l */ { 1, { { 2, -1 }, } },
+ /* brainboxes_uc146 */ { 1, { { 3, -1 }, } },
+ /* brainboxes_px203 */ { 1, { { 0, -1 }, } },
+};
+
+/* PCI match table: Super-I/O bridges first (driver_data < last_sio,
+ * handled by parport_pc_init_superio()), then add-in cards whose
+ * driver_data indexes cards[] after subtracting last_sio. */
+static const struct pci_device_id parport_pc_pci_tbl[] = {
+ /* Super-IO onboard chips */
+ { 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a },
+ { 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 },
+ { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8872,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_ite_8872 },
+
+ /* PCI cards */
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_10x,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1p_10x },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_10x,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p_10x },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_20x,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1p_20x },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_20x,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p_20x },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PARALLEL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, lava_parallel },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, lava_parallel_dual_a },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, lava_parallel_dual_b },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_BOCA_IOPPAR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0, 0, plx_9050 },
+ /* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
+ { 0x1409, 0x7268, 0x1409, 0x0101, 0, 0, timedia_4006a },
+ { 0x1409, 0x7268, 0x1409, 0x0102, 0, 0, timedia_4014 },
+ { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
+ { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 },
+ { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a },
+ { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp },
+ { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
+ /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
+ /* AFAVLAB_TK9902 */
+ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
+ { 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
+ { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
+ /* NetMos communication controllers */
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9715,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9715 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9755,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9755 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9805,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x2000, 0, 0, netmos_9901 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000, 0, 0, netmos_9865 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x2000, 0, 0, netmos_9865 },
+ /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+ /* WCH CH382L PCI-E single parallel port card */
+ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
+ /* Brainboxes IX-500/550 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x402a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ /* Brainboxes UC-146/UC-157 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+ /* Brainboxes PX-146/PX-257 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401c,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ /* Brainboxes PX-203 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4007,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
+ /* Brainboxes PX-475 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401f,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { 0, } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
+
+/* Per-PCI-device driver data: the parports successfully registered
+ * for that device (freed and unregistered in parport_pc_pci_remove). */
+struct pci_parport_data {
+ int num; /* number of valid entries in ports[] */
+ struct parport *ports[2];
+};
+
+/*
+ * Probe one PCI device matched by parport_pc_pci_tbl: register a
+ * parallel port for each BAR pair listed in cards[] for this card.
+ * Returns 0 on success (including Super-I/O entries, which were
+ * already probed at init time), -ENOMEM/-ENODEV otherwise.
+ */
+static int parport_pc_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int err, count, n, i = id->driver_data;
+ struct pci_parport_data *data;
+
+ if (i < last_sio)
+ /* This is an onboard Super-IO and has already been probed */
+ return 0;
+
+ /* This is a PCI card */
+ i -= last_sio;
+ count = 0;
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+
+ /* NOTE(review): pci_disable_device() is not called on the error
+ * paths below, nor in parport_pc_pci_remove() -- confirm intended. */
+ data = kmalloc(sizeof(struct pci_parport_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Card-specific setup may veto the whole device. */
+ if (cards[i].preinit_hook &&
+ cards[i].preinit_hook(dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
+ kfree(data);
+ return -ENODEV;
+ }
+
+ for (n = 0; n < cards[i].numports; n++) {
+ int lo = cards[i].addr[n].lo;
+ int hi = cards[i].addr[n].hi;
+ int irq;
+ unsigned long io_lo, io_hi;
+ io_lo = pci_resource_start(dev, lo);
+ io_hi = 0;
+ if ((hi >= 0) && (hi <= 6))
+ io_hi = pci_resource_start(dev, hi);
+ else if (hi > 6)
+ io_lo += hi; /* Reinterpret the meaning of
+ "hi" as an offset (see SYBA
+ def.) */
+ /* TODO: test if sharing interrupts works */
+ irq = dev->irq;
+ /* NOTE(review): IRQ_NONE here is the irqreturn_t constant (0),
+ * used as "no IRQ assigned" -- confirm this comparison is what
+ * is meant rather than PARPORT_IRQ_NONE. */
+ if (irq == IRQ_NONE) {
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
+ id->vendor, id->device, io_lo, io_hi);
+ irq = PARPORT_IRQ_NONE;
+ } else {
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
+ id->vendor, id->device, io_lo, io_hi, irq);
+ }
+ data->ports[count] =
+ parport_pc_probe_port(io_lo, io_hi, irq,
+ PARPORT_DMA_NONE, &dev->dev,
+ IRQF_SHARED);
+ if (data->ports[count])
+ count++;
+ }
+
+ data->num = count;
+
+ if (cards[i].postinit_hook)
+ cards[i].postinit_hook(dev, count == 0);
+
+ /* Succeed only if at least one port came up. */
+ if (count) {
+ pci_set_drvdata(dev, data);
+ return 0;
+ }
+
+ kfree(data);
+
+ return -ENODEV;
+}
+
+/* Undo parport_pc_pci_probe(): tear down every registered port for
+ * this device, newest first, then release the driver data. */
+static void parport_pc_pci_remove(struct pci_dev *dev)
+{
+	struct pci_parport_data *data = pci_get_drvdata(dev);
+	int n;
+
+	if (!data)
+		return;
+
+	for (n = data->num - 1; n >= 0; n--)
+		parport_pc_unregister_port(data->ports[n]);
+
+	kfree(data);
+}
+
+/* PCI driver for add-in parallel-port cards; onboard Super-I/O chips
+ * in the same match table are handled by parport_pc_init_superio(). */
+static struct pci_driver parport_pc_pci_driver = {
+ .name = "parport_pc",
+ .id_table = parport_pc_pci_tbl,
+ .probe = parport_pc_pci_probe,
+ .remove = parport_pc_pci_remove,
+};
+
+/*
+ * Walk all PCI devices looking for Super-I/O bridges listed at the top
+ * of parport_pc_pci_tbl (driver_data < last_sio) and run the matching
+ * chip probe.  Returns the number of ports found.
+ */
+static int __init parport_pc_init_superio(int autoirq, int autodma)
+{
+ const struct pci_device_id *id;
+ struct pci_dev *pdev = NULL;
+ int ret = 0;
+
+ for_each_pci_dev(pdev) {
+ id = pci_match_id(parport_pc_pci_tbl, pdev);
+ if (id == NULL || id->driver_data >= last_sio)
+ continue;
+
+ if (parport_pc_superio_info[id->driver_data].probe(
+ pdev, autoirq, autodma,
+ parport_pc_superio_info[id->driver_data].via)) {
+ ret++;
+ }
+ }
+
+ return ret; /* number of devices found */
+}
+#else
+/* CONFIG_PCI disabled: empty driver object and a no-op Super-I/O scan
+ * so the callers need no #ifdefs of their own. */
+static struct pci_driver parport_pc_pci_driver;
+static int __init parport_pc_init_superio(int autoirq, int autodma)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_PNP
+
+/* PnP IDs for BIOS-reported LPT ports; driver_data is unused (0). */
+static const struct pnp_device_id parport_pc_pnp_tbl[] = {
+ /* Standard LPT Printer Port */
+ {.id = "PNP0400", .driver_data = 0},
+ /* ECP Printer Port */
+ {.id = "PNP0401", .driver_data = 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(pnp, parport_pc_pnp_tbl);
+
+/*
+ * Probe a PnP-reported LPT port.  The first I/O resource (required) is
+ * the SPP base; the second (optional) the ECR base; IRQ and DMA are
+ * used if present and enabled.  Returns 0 on success, -EINVAL if no
+ * usable base address, -ENODEV if the port probe fails.
+ */
+static int parport_pc_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *id)
+{
+ struct parport *pdata;
+ unsigned long io_lo, io_hi;
+ int dma, irq;
+
+ /* A usable primary I/O window is mandatory. */
+ if (pnp_port_valid(dev, 0) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ io_lo = pnp_port_start(dev, 0);
+ } else
+ return -EINVAL;
+
+ if (pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 1) & IORESOURCE_DISABLED)) {
+ io_hi = pnp_port_start(dev, 1);
+ } else
+ io_hi = 0;
+
+ if (pnp_irq_valid(dev, 0) &&
+ !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ irq = pnp_irq(dev, 0);
+ } else
+ irq = PARPORT_IRQ_NONE;
+
+ if (pnp_dma_valid(dev, 0) &&
+ !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ dma = pnp_dma(dev, 0);
+ } else
+ dma = PARPORT_DMA_NONE;
+
+ dev_info(&dev->dev, "reported by %s\n", dev->protocol->name);
+ pdata = parport_pc_probe_port(io_lo, io_hi, irq, dma, &dev->dev, 0);
+ if (pdata == NULL)
+ return -ENODEV;
+
+ pnp_set_drvdata(dev, pdata);
+ return 0;
+}
+
+/* Undo parport_pc_pnp_probe(): unregister the port stored in the
+ * device's driver data, if the probe ever attached one. */
+static void parport_pc_pnp_remove(struct pnp_dev *dev)
+{
+	struct parport *port = pnp_get_drvdata(dev);
+
+	if (port)
+		parport_pc_unregister_port(port);
+}
+
+/* we only need the pnp layer to activate the device, at least for now */
+static struct pnp_driver parport_pc_pnp_driver = {
+ .name = "parport_pc",
+ .id_table = parport_pc_pnp_tbl,
+ .probe = parport_pc_pnp_probe,
+ .remove = parport_pc_pnp_remove,
+};
+
+#else
+static struct pnp_driver parport_pc_pnp_driver;
+#endif /* CONFIG_PNP */
+
+/* Platform-bus probe stub: registration of "parport_pc" platform
+ * devices always succeeds here; real detection happens in
+ * parport_pc_probe_port(). */
+static int parport_pc_platform_probe(struct platform_device *pdev)
+{
+ /* Always succeed, the actual probing is done in
+ * parport_pc_probe_port(). */
+ return 0;
+}
+
+static struct platform_driver parport_pc_platform_driver = {
+ .driver = {
+ .name = "parport_pc",
+ },
+ .probe = parport_pc_platform_probe,
+};
+
+/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h).
+ * Probes the three legacy ISA base/ECR address pairs in conventional
+ * order and returns how many ports were found. */
+static int __attribute__((unused))
+parport_pc_find_isa_ports(int autoirq, int autodma)
+{
+	static const struct {
+		unsigned long lo, hi;
+	} isa_bases[] = {
+		{ 0x3bc, 0x7bc },
+		{ 0x378, 0x778 },
+		{ 0x278, 0x678 },
+	};
+	unsigned int i;
+	int found = 0;
+
+	for (i = 0; i < sizeof(isa_bases) / sizeof(isa_bases[0]); i++)
+		if (parport_pc_probe_port(isa_bases[i].lo, isa_bases[i].hi,
+					  autoirq, autodma, NULL, 0))
+			found++;
+
+	return found;
+}
+
+/* This function is called by parport_pc_init if the user didn't
+ * specify any ports to probe. Its job is to find some ports. Order
+ * is important here -- we want ISA ports to be registered first,
+ * followed by PCI cards (for least surprise), but before that we want
+ * to do chipset-specific tests for some onboard ports that we know
+ * about.
+ *
+ * autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY
+ * autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO
+ */
+static void __init parport_pc_find_ports(int autoirq, int autodma)
+{
+ int count = 0, err;
+
+#ifdef CONFIG_PARPORT_PC_SUPERIO
+ /* Legacy Super-I/O chips probed via their I/O config ports. */
+ detect_and_report_it87();
+ detect_and_report_winbond();
+ detect_and_report_smsc();
+#endif
+
+ /* Onboard SuperIO chipsets that show themselves on the PCI bus. */
+ count += parport_pc_init_superio(autoirq, autodma);
+
+ /* PnP ports, skip detection if SuperIO already found them */
+ if (!count) {
+ err = pnp_register_driver(&parport_pc_pnp_driver);
+ if (!err)
+ pnp_registered_parport = 1;
+ }
+
+ /* ISA ports and whatever (see asm/parport.h). */
+ parport_pc_find_nonpci_ports(autoirq, autodma);
+
+ /* Registering the PCI driver triggers probes for add-in cards. */
+ err = pci_register_driver(&parport_pc_pci_driver);
+ if (!err)
+ pci_registered_parport = 1;
+}
+
+/*
+ * Piles of crap below pretend to be a parser for module and kernel
+ * parameters. Say "thank you" to whoever had come up with that
+ * syntax and keep in mind that code below is a cleaned up version.
+ */
+
+/* Per-port parameter storage.  io[] has one extra slot so the list is
+ * always zero-terminated; slot 0 may hold the PARPORT_DISABLE sentinel
+ * in the kernel-parameter case (see parport_setup()). */
+static int __initdata io[PARPORT_PC_MAX_PORTS+1] = {
+ [0 ... PARPORT_PC_MAX_PORTS] = 0
+};
+/* ECR base; PARPORT_IOHI_AUTO is replaced by io + 0x400 at init time. */
+static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] = {
+ [0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO
+};
+static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = {
+ [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE
+};
+static int __initdata irqval[PARPORT_PC_MAX_PORTS] = {
+ [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY
+};
+
+/*
+ * Translate one IRQ/DMA specifier string into *val.  Accepts "auto",
+ * "none", "nofifo" (only when the caller passes a nonzero nofifo
+ * code), or a plain number in any C base.  A NULL string leaves *val
+ * untouched.  Returns 0 on success, -1 on an unparsable specifier.
+ */
+static int __init parport_parse_param(const char *s, int *val,
+					int automatic, int none, int nofifo)
+{
+	char *end;
+	unsigned long num;
+
+	if (!s)
+		return 0;
+
+	/* Keyword forms first. */
+	if (!strncmp(s, "auto", 4)) {
+		*val = automatic;
+		return 0;
+	}
+	if (!strncmp(s, "none", 4)) {
+		*val = none;
+		return 0;
+	}
+	if (nofifo && !strncmp(s, "nofifo", 6)) {
+		*val = nofifo;
+		return 0;
+	}
+
+	/* Otherwise a number is required. */
+	num = simple_strtoul(s, &end, 0);
+	if (end == s) {
+		pr_err("parport: bad specifier `%s'\n", s);
+		return -1;
+	}
+	*val = num;
+	return 0;
+}
+
+/* Parse an IRQ specifier: "auto", "none", or a number. */
+static int __init parport_parse_irq(const char *irqstr, int *val)
+{
+ return parport_parse_param(irqstr, val, PARPORT_IRQ_AUTO,
+ PARPORT_IRQ_NONE, 0);
+}
+
+/* Parse a DMA specifier: "auto", "none", "nofifo", or a number. */
+static int __init parport_parse_dma(const char *dmastr, int *val)
+{
+ return parport_parse_param(dmastr, val, PARPORT_DMA_AUTO,
+ PARPORT_DMA_NONE, PARPORT_DMA_NOFIFO);
+}
+
+#ifdef CONFIG_PCI
+/* Map a "parport_init_mode=" string onto the numeric mode used by the
+ * VIA probe (1=spp .. 5=ecpepp); unknown strings leave the mode as-is.
+ * Always returns 1 so the kernel treats the parameter as consumed. */
+static int __init parport_init_mode_setup(char *str)
+{
+	/* Table index doubles as the mode number; slot 0 is unused. */
+	static const char *const mode_names[] = {
+		NULL, "spp", "ps2", "epp", "ecp", "ecpepp"
+	};
+	unsigned int i;
+
+	printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n",
+	       str);
+
+	for (i = 1; i < sizeof(mode_names) / sizeof(mode_names[0]); i++)
+		if (!strcmp(str, mode_names[i]))
+			parport_init_mode = i;
+	return 1;
+}
+#endif
+
+#ifdef MODULE
+/* irq/dma are taken as strings so "auto"/"none"/"nofifo" are accepted
+ * and parsed later by parport_parse_irq()/parport_parse_dma(). */
+static char *irq[PARPORT_PC_MAX_PORTS];
+static char *dma[PARPORT_PC_MAX_PORTS];
+
+MODULE_PARM_DESC(io, "Base I/O address (SPP regs)");
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io_hi, "Base I/O address (ECR)");
+module_param_hw_array(io_hi, int, ioport, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ line");
+module_param_hw_array(irq, charp, irq, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channel");
+module_param_hw_array(dma, charp, dma, NULL, 0);
+#if defined(CONFIG_PARPORT_PC_SUPERIO) || \
+ (defined(CONFIG_PARPORT_1284) && defined(CONFIG_PARPORT_PC_FIFO))
+MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialisation");
+module_param(verbose_probing, int, 0644);
+#endif
+#ifdef CONFIG_PCI
+static char *init_mode;
+MODULE_PARM_DESC(init_mode,
+ "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
+module_param(init_mode, charp, 0);
+#endif
+
+/*
+ * Module variant: convert the irq[]/dma[] parameter strings into
+ * irqval[]/dmaval[] for every port given an io= base.  Without io=,
+ * only "auto"/"none" are honoured (applied globally via slot 0).
+ * Returns 0 on success, 1 on a parse error (init then fails -EINVAL).
+ */
+static int __init parse_parport_params(void)
+{
+ unsigned int i;
+ int val;
+
+#ifdef CONFIG_PCI
+ if (init_mode)
+ parport_init_mode_setup(init_mode);
+#endif
+
+ for (i = 0; i < PARPORT_PC_MAX_PORTS && io[i]; i++) {
+ if (parport_parse_irq(irq[i], &val))
+ return 1;
+ irqval[i] = val;
+ if (parport_parse_dma(dma[i], &val))
+ return 1;
+ dmaval[i] = val;
+ }
+ if (!io[0]) {
+ /* The user can make us use any IRQs or DMAs we find. */
+ if (irq[0] && !parport_parse_irq(irq[0], &val))
+ switch (val) {
+ case PARPORT_IRQ_NONE:
+ case PARPORT_IRQ_AUTO:
+ irqval[0] = val;
+ break;
+ default:
+ pr_warn("parport_pc: irq specified without base address. Use 'io=' to specify one\n");
+ }
+
+ if (dma[0] && !parport_parse_dma(dma[0], &val))
+ switch (val) {
+ case PARPORT_DMA_NONE:
+ case PARPORT_DMA_AUTO:
+ dmaval[0] = val;
+ break;
+ default:
+ pr_warn("parport_pc: dma specified without base address. Use 'io=' to specify one\n");
+ }
+ }
+ return 0;
+}
+
+#else
+
+/* Next free slot in io[]/irqval[]/dmaval[] for "parport=" arguments. */
+static int parport_setup_ptr __initdata;
+
+/*
+ * Acceptable parameters:
+ *
+ * parport=0
+ * parport=auto
+ * parport=0xBASE[,IRQ[,DMA]]
+ *
+ * IRQ/DMA may be numeric or 'auto' or 'none'
+ */
+static int __init parport_setup(char *str)
+{
+ char *endptr;
+ char *sep;
+ int val;
+
+ if (!str || !*str || (*str == '0' && !*(str+1))) {
+ /* Disable parport if "parport=0" in cmdline */
+ io[0] = PARPORT_DISABLE;
+ return 1;
+ }
+
+ if (!strncmp(str, "auto", 4)) {
+ irqval[0] = PARPORT_IRQ_AUTO;
+ dmaval[0] = PARPORT_DMA_AUTO;
+ return 1;
+ }
+
+ val = simple_strtoul(str, &endptr, 0);
+ if (endptr == str) {
+ pr_warn("parport=%s not understood\n", str);
+ return 1;
+ }
+
+ if (parport_setup_ptr == PARPORT_PC_MAX_PORTS) {
+ pr_err("parport=%s ignored, too many ports\n", str);
+ return 1;
+ }
+
+ io[parport_setup_ptr] = val;
+ irqval[parport_setup_ptr] = PARPORT_IRQ_NONE;
+ dmaval[parport_setup_ptr] = PARPORT_DMA_NONE;
+
+ /* NOTE(review): "if (sep++)" post-increments even when strchr()
+ * returned NULL, forming NULL+1 (technically UB, though the value
+ * is never used) -- confirm this is acceptable here. */
+ sep = strchr(str, ',');
+ if (sep++) {
+ if (parport_parse_irq(sep, &val))
+ return 1;
+ irqval[parport_setup_ptr] = val;
+ sep = strchr(sep, ',');
+ if (sep++) {
+ if (parport_parse_dma(sep, &val))
+ return 1;
+ dmaval[parport_setup_ptr] = val;
+ }
+ }
+ parport_setup_ptr++;
+ return 1;
+}
+
+/* Built-in variant: parsing already happened in parport_setup();
+ * only "parport=0" (PARPORT_DISABLE in slot 0) aborts initialisation. */
+static int __init parse_parport_params(void)
+{
+ return io[0] == PARPORT_DISABLE;
+}
+
+__setup("parport=", parport_setup);
+
+/*
+ * Acceptable parameters:
+ *
+ * parport_init_mode=[spp|ps2|epp|ecp|ecpepp]
+ */
+#ifdef CONFIG_PCI
+__setup("parport_init_mode=", parport_init_mode_setup);
+#endif
+#endif
+
+/* "Parser" ends here */
+
+/*
+ * Module/driver entry point: parse parameters, register the platform
+ * driver, then either probe exactly the user-specified ports or run
+ * full autodetection via parport_pc_find_ports().
+ */
+static int __init parport_pc_init(void)
+{
+ int err;
+
+ if (parse_parport_params())
+ return -EINVAL;
+
+ err = platform_driver_register(&parport_pc_platform_driver);
+ if (err)
+ return err;
+
+ if (io[0]) {
+ int i;
+ /* Only probe the ports we were given. */
+ user_specified = 1;
+ for (i = 0; i < PARPORT_PC_MAX_PORTS; i++) {
+ if (!io[i])
+ break;
+ /* Default ECR base is SPP base + 0x400. */
+ if (io_hi[i] == PARPORT_IOHI_AUTO)
+ io_hi[i] = 0x400 + io[i];
+ parport_pc_probe_port(io[i], io_hi[i],
+ irqval[i], dmaval[i], NULL, 0);
+ }
+ } else
+ parport_pc_find_ports(irqval[0], dmaval[0]);
+
+ return 0;
+}
+
+/*
+ * Module exit: unregister the bus drivers that were registered during
+ * init, then tear down every remaining port on ports_list (including
+ * the platform devices created for legacy/ISA ports).
+ */
+static void __exit parport_pc_exit(void)
+{
+ if (pci_registered_parport)
+ pci_unregister_driver(&parport_pc_pci_driver);
+ if (pnp_registered_parport)
+ pnp_unregister_driver(&parport_pc_pnp_driver);
+ platform_driver_unregister(&parport_pc_platform_driver);
+
+ /* parport_pc_unregister_port() removes the entry from ports_list,
+ * so this loop drains the list. */
+ while (!list_empty(&ports_list)) {
+ struct parport_pc_private *priv;
+ struct parport *port;
+ struct device *dev;
+ priv = list_entry(ports_list.next,
+ struct parport_pc_private, list);
+ port = priv->port;
+ dev = port->dev;
+ parport_pc_unregister_port(port);
+ if (dev && dev->bus == &platform_bus_type)
+ platform_device_unregister(to_platform_device(dev));
+ }
+}
+
+MODULE_AUTHOR("Phil Blundell, Tim Waugh, others");
+MODULE_DESCRIPTION("PC-style parallel port driver");
+MODULE_LICENSE("GPL");
+module_init(parport_pc_init)
+module_exit(parport_pc_exit)
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
new file mode 100644
index 000000000..46f213270
--- /dev/null
+++ b/drivers/parport/parport_serial.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for common PCI multi-I/O cards (which is most of them)
+ *
+ * Copyright (C) 2001 Tim Waugh <twaugh@redhat.com>
+ *
+ * Multi-function PCI cards are supposed to present separate logical
+ * devices on the bus. A common thing to do seems to be to just use
+ * one logical device with lots of base address registers for both
+ * parallel ports and serial ports. This driver is for dealing with
+ * that.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/parport.h>
+#include <linux/parport_pc.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/8250_pci.h>
+
+/*
+ * Known multi-I/O cards.  These values are used as indices into both
+ * cards[] and pci_parport_serial_boards[], so all three tables must be
+ * kept in the same order.
+ */
+enum parport_pc_pci_cards {
+	titan_110l = 0,
+	titan_210l,
+	netmos_9xx5_combo,
+	netmos_9855,
+	netmos_9855_2p,
+	netmos_9900,
+	netmos_9900_2p,
+	netmos_99xx_1p,
+	avlab_1s1p,
+	avlab_1s2p,
+	avlab_2s1p,
+	siig_1s1p_10x,
+	siig_2s1p_10x,
+	siig_2p1s_20x,
+	siig_1s1p_20x,
+	siig_2s1p_20x,
+	timedia_4078a,
+	timedia_4079h,
+	timedia_4085h,
+	timedia_4088a,
+	timedia_4089a,
+	timedia_4095a,
+	timedia_4096a,
+	timedia_4078u,
+	timedia_4079a,
+	timedia_4085u,
+	timedia_4079r,
+	timedia_4079s,
+	timedia_4079d,
+	timedia_4079e,
+	timedia_4079f,
+	timedia_9079a,
+	timedia_9079b,
+	timedia_9079c,
+	wch_ch353_1s1p,
+	wch_ch353_2s1p,
+	wch_ch382_0s1p,
+	wch_ch382_2s1p,
+	brainboxes_5s1p,
+	sunix_4008a,
+	sunix_5069a,
+	sunix_5079a,
+	sunix_5099a,
+	brainboxes_uc257,
+	brainboxes_is300,
+	brainboxes_uc414,
+	brainboxes_px263,
+};
+
+/* each element directly indexed from enum list, above */
+struct parport_pc_pci {
+	int numports;	/* number of parallel ports on the card */
+	struct { /* BAR (base address registers) numbers in the config
+		    space header */
+		int lo;
+		int hi; /* -1 if not there, >6 for offset-method (max
+			   BAR is 6) */
+	} addr[4];
+
+	/* If set, this is called immediately after pci_enable_device.
+	 * If it returns non-zero, no probing will take place and the
+	 * ports will not be used. */
+	int (*preinit_hook) (struct pci_dev *pdev, struct parport_pc_pci *card,
+				int autoirq, int autodma);
+
+	/* If set, this is called after probing for ports.  If 'failed'
+	 * is non-zero we couldn't use any of the ports. */
+	void (*postinit_hook) (struct pci_dev *pdev,
+				struct parport_pc_pci *card, int failed);
+};
+
+/*
+ * preinit_hook for NetMos cards: derive the number of parallel ports
+ * from the subsystem device ID.  Returns -ENODEV for one known
+ * IBM-branded 9835 that does not follow the scheme, which makes the
+ * probe skip the card entirely (see struct parport_pc_pci).
+ */
+static int netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *par,
+		int autoirq, int autodma)
+{
+	/* the rule described below doesn't hold for this device */
+	if (dev->device == PCI_DEVICE_ID_NETMOS_9835 &&
+			dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
+			dev->subsystem_device == 0x0299)
+		return -ENODEV;
+
+	if (dev->device == PCI_DEVICE_ID_NETMOS_9912) {
+		par->numports = 1;
+	} else {
+		/*
+		 * Netmos uses the subdevice ID to indicate the number of parallel
+		 * and serial ports.  The form is 0x00PS, where <P> is the number of
+		 * parallel ports and <S> is the number of serial ports.
+		 */
+		par->numports = (dev->subsystem_device & 0xf0) >> 4;
+		/* clamp to what the addr[] table can describe */
+		if (par->numports > ARRAY_SIZE(par->addr))
+			par->numports = ARRAY_SIZE(par->addr);
+	}
+
+	return 0;
+}
+
+/* Parallel-port BAR layout per card; indexed by enum parport_pc_pci_cards. */
+static struct parport_pc_pci cards[] = {
+	/* titan_110l */		{ 1, { { 3, -1 }, } },
+	/* titan_210l */		{ 1, { { 3, -1 }, } },
+	/* netmos_9xx5_combo */		{ 1, { { 2, -1 }, }, netmos_parallel_init },
+	/* netmos_9855 */		{ 1, { { 0, -1 }, }, netmos_parallel_init },
+	/* netmos_9855_2p */		{ 2, { { 0, -1 }, { 2, -1 }, } },
+	/* netmos_9900 */		{1, { { 3, 4 }, }, netmos_parallel_init },
+	/* netmos_9900_2p */		{2, { { 0, 1 }, { 3, 4 }, } },
+	/* netmos_99xx_1p */		{1, { { 0, 1 }, } },
+	/* avlab_1s1p     */		{ 1, { { 1, 2}, } },
+	/* avlab_1s2p     */		{ 2, { { 1, 2}, { 3, 4 },} },
+	/* avlab_2s1p     */		{ 1, { { 2, 3}, } },
+	/* siig_1s1p_10x */		{ 1, { { 3, 4 }, } },
+	/* siig_2s1p_10x */		{ 1, { { 4, 5 }, } },
+	/* siig_2p1s_20x */		{ 2, { { 1, 2 }, { 3, 4 }, } },
+	/* siig_1s1p_20x */		{ 1, { { 1, 2 }, } },
+	/* siig_2s1p_20x */		{ 1, { { 2, 3 }, } },
+	/* timedia_4078a */		{ 1, { { 2, -1 }, } },
+	/* timedia_4079h */		{ 1, { { 2, 3 }, } },
+	/* timedia_4085h */		{ 2, { { 2, -1 }, { 4, -1 }, } },
+	/* timedia_4088a */		{ 2, { { 2, 3 }, { 4, 5 }, } },
+	/* timedia_4089a */		{ 2, { { 2, 3 }, { 4, 5 }, } },
+	/* timedia_4095a */		{ 2, { { 2, 3 }, { 4, 5 }, } },
+	/* timedia_4096a */		{ 2, { { 2, 3 }, { 4, 5 }, } },
+	/* timedia_4078u */		{ 1, { { 2, -1 }, } },
+	/* timedia_4079a */		{ 1, { { 2, 3 }, } },
+	/* timedia_4085u */		{ 2, { { 2, -1 }, { 4, -1 }, } },
+	/* timedia_4079r */		{ 1, { { 2, 3 }, } },
+	/* timedia_4079s */		{ 1, { { 2, 3 }, } },
+	/* timedia_4079d */		{ 1, { { 2, 3 }, } },
+	/* timedia_4079e */		{ 1, { { 2, 3 }, } },
+	/* timedia_4079f */		{ 1, { { 2, 3 }, } },
+	/* timedia_9079a */		{ 1, { { 2, 3 }, } },
+	/* timedia_9079b */		{ 1, { { 2, 3 }, } },
+	/* timedia_9079c */		{ 1, { { 2, 3 }, } },
+	/* wch_ch353_1s1p*/		{ 1, { { 1, -1}, } },
+	/* wch_ch353_2s1p*/		{ 1, { { 2, -1}, } },
+	/* wch_ch382_0s1p*/		{ 1, { { 2, -1}, } },
+	/* wch_ch382_2s1p*/		{ 1, { { 2, -1}, } },
+	/* brainboxes_5s1p */		{ 1, { { 3, -1 }, } },
+	/* sunix_4008a */		{ 1, { { 1, 2 }, } },
+	/* sunix_5069a */		{ 1, { { 1, 2 }, } },
+	/* sunix_5079a */		{ 1, { { 1, 2 }, } },
+	/* sunix_5099a */		{ 1, { { 1, 2 }, } },
+	/* brainboxes_uc257 */	{ 1, { { 3, -1 }, } },
+	/* brainboxes_is300 */	{ 1, { { 3, -1 }, } },
+	/* brainboxes_uc414 */  { 1, { { 3, -1 }, } },
+	/* brainboxes_px263 */	{ 1, { { 3, -1 }, } },
+};
+
+/*
+ * PCI match table; driver_data is an enum parport_pc_pci_cards value.
+ * Subsystem-specific entries must precede the PCI_ANY_ID catch-alls for
+ * the same device ID (e.g. the NetMos 9855 2p variants).
+ */
+static struct pci_device_id parport_serial_pci_tbl[] = {
+	/* PCI cards */
+	{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_110L,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_110l },
+	{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_210L,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_210l },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9735,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9745,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9845,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
+	  0x1000, 0x0020, 0, 0, netmos_9855_2p },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
+	  0x1000, 0x0022, 0, 0, netmos_9855_2p },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9855 },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+	  0xA000, 0x3011, 0, 0, netmos_9900 },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+	  0xA000, 0x3012, 0, 0, netmos_9900 },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+	  0xA000, 0x3020, 0, 0, netmos_9900_2p },
+	{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912,
+	  0xA000, 0x2000, 0, 0, netmos_99xx_1p },
+	/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2110,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2111,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2112,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2140,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s2p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2141,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s2p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2142,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s2p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2160,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2s1p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2161,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2s1p },
+	{ PCI_VENDOR_ID_AFAVLAB, 0x2162,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2s1p },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_550,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_10x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_650,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_10x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_850,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_10x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_550,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_10x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_650,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_10x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_850,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_10x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_550,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p1s_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_650,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p1s_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_850,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2p1s_20x },
+	/*
+	 * NOTE(review): the 1S1P_20x_550 entry below maps to siig_2s1p_20x
+	 * while its 650/850 siblings map to siig_1s1p_20x.  Looks like a
+	 * typo (different parallel BARs in cards[]), but it is
+	 * long-standing -- confirm against real hardware before changing.
+	 */
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_550,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_650,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_850,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_1s1p_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_550,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_650,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
+	{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_850,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
+	/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
+	{ 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
+	{ 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
+	{ 0x1409, 0x7168, 0x1409, 0x4085, 0, 0, timedia_4085h },
+	{ 0x1409, 0x7168, 0x1409, 0x4088, 0, 0, timedia_4088a },
+	{ 0x1409, 0x7168, 0x1409, 0x4089, 0, 0, timedia_4089a },
+	{ 0x1409, 0x7168, 0x1409, 0x4095, 0, 0, timedia_4095a },
+	{ 0x1409, 0x7168, 0x1409, 0x4096, 0, 0, timedia_4096a },
+	{ 0x1409, 0x7168, 0x1409, 0x5078, 0, 0, timedia_4078u },
+	{ 0x1409, 0x7168, 0x1409, 0x5079, 0, 0, timedia_4079a },
+	{ 0x1409, 0x7168, 0x1409, 0x5085, 0, 0, timedia_4085u },
+	{ 0x1409, 0x7168, 0x1409, 0x6079, 0, 0, timedia_4079r },
+	{ 0x1409, 0x7168, 0x1409, 0x7079, 0, 0, timedia_4079s },
+	{ 0x1409, 0x7168, 0x1409, 0x8079, 0, 0, timedia_4079d },
+	{ 0x1409, 0x7168, 0x1409, 0x9079, 0, 0, timedia_4079e },
+	{ 0x1409, 0x7168, 0x1409, 0xa079, 0, 0, timedia_4079f },
+	{ 0x1409, 0x7168, 0x1409, 0xb079, 0, 0, timedia_9079a },
+	{ 0x1409, 0x7168, 0x1409, 0xc079, 0, 0, timedia_9079b },
+	{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
+
+	/* WCH CARDS */
+	{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
+	{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
+	{ 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
+
+	/* BrainBoxes PX272/PX306 MIO card */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_5s1p },
+
+	/* Sunix boards */
+	{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+	  0x0100, 0, 0, sunix_4008a },
+	{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+	  0x0101, 0, 0, sunix_5069a },
+	{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+	  0x0102, 0, 0, sunix_5079a },
+	{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+	  0x0104, 0, 0, sunix_5099a },
+
+	/* Brainboxes UC-203 */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0bc1,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0bc2,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+	/* Brainboxes UC-257 */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0861,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0862,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0863,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+	/* Brainboxes UC-414 */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0e61,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 },
+
+	/* Brainboxes UC-475 */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0981,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0982,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+	/* Brainboxes IS-300/IS-500 */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x0da0,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 },
+
+	/* Brainboxes PX-263/PX-295 */
+	{ PCI_VENDOR_ID_INTASHIELD, 0x402c,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 },
+
+	{ 0, } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
+
+/*
+ * This table describes the serial "geometry" of these boards. Any
+ * quirks for these can be found in drivers/serial/8250_pci.c
+ *
+ * Cards not tested are marked n/t
+ * If you have one of these cards and it works for you, please tell me..
+ */
+static struct pciserial_board pci_parport_serial_boards[] = {
+	/* Indexed by enum parport_pc_pci_cards.  num_ports == 0 marks a
+	 * parallel-only card, which serial_register() skips entirely. */
+	[titan_110l] = {
+		.flags		= FL_BASE1 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[titan_210l] = {
+		.flags		= FL_BASE1 | FL_BASE_BARS,
+		.num_ports	= 2,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[netmos_9xx5_combo] = {
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[netmos_9855] = {
+		.flags		= FL_BASE2 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[netmos_9855_2p] = {
+		.flags		= FL_BASE4 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[netmos_9900] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[netmos_9900_2p] = { /* parallel only */ /* n/t */
+		.flags		= FL_BASE0,
+		.num_ports	= 0,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[netmos_99xx_1p] = { /* parallel only */ /* n/t */
+		.flags		= FL_BASE0,
+		.num_ports	= 0,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s1p] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_1s2p] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[avlab_2s1p] = { /* n/t */
+		.flags		= FL_BASE0 | FL_BASE_BARS,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[siig_1s1p_10x] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 1,
+		.base_baud	= 460800,
+		.uart_offset	= 8,
+	},
+	[siig_2s1p_10x] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[siig_2p1s_20x] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[siig_1s1p_20x] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[siig_2s1p_20x] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4078a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079h] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4085h] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4088a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4089a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4095a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4096a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4078u] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4085u] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079r] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079s] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079d] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079e] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_4079f] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_9079a] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_9079b] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[timedia_9079c] = {
+		.flags		= FL_BASE0|FL_BASE_BARS,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[wch_ch353_1s1p] = {
+		.flags          = FL_BASE0|FL_BASE_BARS,
+		.num_ports      = 1,
+		.base_baud      = 115200,
+		.uart_offset    = 8,
+	},
+	[wch_ch353_2s1p] = {
+		.flags          = FL_BASE0|FL_BASE_BARS,
+		.num_ports      = 2,
+		.base_baud      = 115200,
+		.uart_offset    = 8,
+	},
+	[wch_ch382_0s1p] = {
+		.flags          = FL_BASE0,
+		.num_ports      = 0,
+		.base_baud      = 115200,
+		.uart_offset    = 8,
+	},
+	[wch_ch382_2s1p] = {
+		.flags          = FL_BASE0,
+		.num_ports      = 2,
+		.base_baud      = 115200,
+		.uart_offset    = 8,
+		.first_offset   = 0xC0,
+	},
+	[brainboxes_5s1p] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 5,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+	[sunix_4008a] = {
+		.num_ports	= 0,
+	},
+	[sunix_5069a] = {
+		.num_ports	= 1,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
+	[sunix_5079a] = {
+		.num_ports	= 2,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
+	[sunix_5099a] = {
+		.num_ports	= 4,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
+	[brainboxes_uc257] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 2,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[brainboxes_is300] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 1,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[brainboxes_uc414] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 4,
+		.base_baud	= 115200,
+		.uart_offset	= 8,
+	},
+	[brainboxes_px263] = {
+		.flags		= FL_BASE2,
+		.num_ports	= 4,
+		.base_baud	= 921600,
+		.uart_offset	= 8,
+	},
+};
+
+/* Per-PCI-device state, stored via pci_set_drvdata(). */
+struct parport_serial_private {
+	struct serial_private	*serial;	/* set by serial_register(); NULL if none */
+	int num_par;				/* number of entries used in port[] */
+	struct parport *port[PARPORT_MAX];	/* parallel ports registered so far */
+	struct parport_pc_pci par;		/* per-device copy of the cards[] template */
+};
+
+/* Register the serial port(s) of a PCI card. */
+static int serial_register(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct parport_serial_private *priv = pci_get_drvdata (dev);
+	struct pciserial_board *board;
+	struct serial_private *serial;
+
+	board = &pci_parport_serial_boards[id->driver_data];
+	/* num_ports == 0 marks a parallel-only card: nothing to do here. */
+	if (board->num_ports == 0)
+		return 0;
+
+	serial = pciserial_init_ports(dev, board);
+	if (IS_ERR(serial))
+		return PTR_ERR(serial);
+
+	/* Remembered so remove/suspend/resume can reach the serial ports. */
+	priv->serial = serial;
+	return 0;
+}
+
+/* Register the parallel port(s) of a PCI card.  Returns -ENODEV only
+ * when the card's preinit_hook vetoes the device; failure to probe an
+ * individual port is not fatal (postinit_hook is merely told whether
+ * any port succeeded). */
+static int parport_register(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct parport_pc_pci *card;
+	struct parport_serial_private *priv = pci_get_drvdata (dev);
+	int n, success = 0;
+
+	/* Copy the template: preinit_hook may patch numports per device
+	 * (see netmos_parallel_init), and cards[] is shared. */
+	priv->par = cards[id->driver_data];
+	card = &priv->par;
+	if (card->preinit_hook &&
+	    card->preinit_hook (dev, card, PARPORT_IRQ_NONE, PARPORT_DMA_NONE))
+		return -ENODEV;
+
+	for (n = 0; n < card->numports; n++) {
+		struct parport *port;
+		int lo = card->addr[n].lo;
+		int hi = card->addr[n].hi;
+		unsigned long io_lo, io_hi;
+		int irq;
+
+		if (priv->num_par == ARRAY_SIZE (priv->port)) {
+			dev_warn(&dev->dev,
+				 "only %zu parallel ports supported (%d reported)\n",
+				 ARRAY_SIZE(priv->port), card->numports);
+			break;
+		}
+
+		io_lo = pci_resource_start (dev, lo);
+		io_hi = 0;
+		if ((hi >= 0) && (hi <= 6))
+			io_hi = pci_resource_start (dev, hi);
+		else if (hi > 6)
+			io_lo += hi; /* Reinterpret the meaning of
+					"hi" as an offset (see SYBA
+					def.) */
+		/* TODO: test if sharing interrupts works */
+		irq = dev->irq;
+		if (irq == IRQ_NONE) {
+			dev_dbg(&dev->dev,
+				"PCI parallel port detected: I/O at %#lx(%#lx)\n",
+				io_lo, io_hi);
+			irq = PARPORT_IRQ_NONE;
+		} else {
+			dev_dbg(&dev->dev,
+				"PCI parallel port detected: I/O at %#lx(%#lx), IRQ %d\n",
+				io_lo, io_hi, irq);
+		}
+		port = parport_pc_probe_port (io_lo, io_hi, irq,
+			      PARPORT_DMA_NONE, &dev->dev, IRQF_SHARED);
+		if (port) {
+			priv->port[priv->num_par++] = port;
+			success = 1;
+		}
+	}
+
+	if (card->postinit_hook)
+		card->postinit_hook (dev, card, !success);
+
+	return 0;
+}
+
+/*
+ * Bind a multi-I/O card: allocate per-device state, enable the device
+ * (managed, so no explicit disable on error), then bring up the
+ * parallel ports and finally the serial ports.  A serial failure
+ * unwinds the parallel ports that were already registered.
+ */
+static int parport_serial_pci_probe(struct pci_dev *dev,
+				    const struct pci_device_id *id)
+{
+	struct parport_serial_private *priv;
+	int err;
+
+	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	pci_set_drvdata (dev, priv);
+
+	err = pcim_enable_device(dev);
+	if (err)
+		return err;
+
+	err = parport_register(dev, id);
+	if (err)
+		return err;
+
+	err = serial_register(dev, id);
+	if (err) {
+		int i;
+		/* Roll back the parallel ports registered above. */
+		for (i = 0; i < priv->num_par; i++)
+			parport_pc_unregister_port (priv->port[i]);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Unbind a card: drop the serial ports first, then every parallel port
+ * registered by parport_register().  priv itself is devm-allocated and
+ * freed automatically.
+ */
+static void parport_serial_pci_remove(struct pci_dev *dev)
+{
+	struct parport_serial_private *priv = pci_get_drvdata(dev);
+	int i;
+
+	/* Serial ports */
+	if (priv->serial)
+		pciserial_remove_ports(priv->serial);
+
+	/* Parallel ports */
+	for (i = 0; i < priv->num_par; i++)
+		parport_pc_unregister_port(priv->port[i]);
+}
+
+/* System suspend: only the serial half is quiesced (see FIXME below). */
+static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
+{
+	struct parport_serial_private *priv = dev_get_drvdata(dev);
+
+	if (priv->serial)
+		pciserial_suspend_ports(priv->serial);
+
+	/* FIXME: What about parport? */
+	return 0;
+}
+
+/* System resume: mirror of parport_serial_pci_suspend(), serial only. */
+static int __maybe_unused parport_serial_pci_resume(struct device *dev)
+{
+	struct parport_serial_private *priv = dev_get_drvdata(dev);
+
+	if (priv->serial)
+		pciserial_resume_ports(priv->serial);
+
+	/* FIXME: What about parport? */
+	return 0;
+}
+
+/* PM ops cover the serial side only; the parport side has no PM hooks
+ * yet (see the FIXMEs in the suspend/resume callbacks above). */
+static SIMPLE_DEV_PM_OPS(parport_serial_pm_ops,
+			 parport_serial_pci_suspend, parport_serial_pci_resume);
+
+static struct pci_driver parport_serial_pci_driver = {
+	.name		= "parport_serial",
+	.id_table	= parport_serial_pci_tbl,
+	.probe		= parport_serial_pci_probe,
+	.remove		= parport_serial_pci_remove,
+	.driver         = {
+		.pm     = &parport_serial_pm_ops,
+	},
+};
+module_pci_driver(parport_serial_pci_driver);
+
+MODULE_AUTHOR("Tim Waugh <twaugh@redhat.com>");
+MODULE_DESCRIPTION("Driver for common parallel+serial multi-I/O PCI cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
new file mode 100644
index 000000000..e840c1b5a
--- /dev/null
+++ b/drivers/parport/parport_sunbpp.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* parport_sunbpp.c: Parallel-port routines for SBUS
+ *
+ * Author: Derrick J. Brashear <shadow@dementia.org>
+ *
+ * based on work by:
+ * Phil Blundell <philb@gnu.org>
+ * Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Jose Renau <renau@acm.org>
+ * David Campbell <campbell@tirian.che.curtin.edu.au>
+ * Grant Guenther <grant@torque.net>
+ * Eddie C. Dost <ecd@skynet.be>
+ * Stephen Williams (steve@icarus.com)
+ * Gus Baldauf (gbaldauf@ix.netcom.com)
+ * Peter Zaitcev
+ * Tom Dyas
+ *
+ * Updated to new SBUS device framework: David S. Miller <davem@davemloft.net>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/parport.h>
+
+#include <asm/ptrace.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/oplib.h> /* OpenProm Library */
+#include <asm/dma.h> /* BPP uses LSI 64854 for DMA */
+#include <asm/irq.h>
+#include <asm/sunbpp.h>
+
+/* Define __SUNBPP_DEBUG to get verbose register traces via dprintk();
+ * by default the macro expands to nothing. */
+#undef __SUNBPP_DEBUG
+#ifdef __SUNBPP_DEBUG
+#define dprintk(x) printk x
+#else
+#define dprintk(x)
+#endif
+
+static void parport_sunbpp_disable_irq(struct parport *p)
+{
+	/* Clear the interrupt-enable bit in the DMA controller CSR. */
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	u32 csr = sbus_readl(&regs->p_csr);
+
+	sbus_writel(csr & ~DMA_INT_ENAB, &regs->p_csr);
+}
+
+static void parport_sunbpp_enable_irq(struct parport *p)
+{
+	/* Set the interrupt-enable bit in the DMA controller CSR. */
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	u32 csr = sbus_readl(&regs->p_csr);
+
+	sbus_writel(csr | DMA_INT_ENAB, &regs->p_csr);
+}
+
+/* Latch one byte onto the data lines via the data register. */
+static void parport_sunbpp_write_data(struct parport *p, unsigned char d)
+{
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+
+	sbus_writeb(d, &regs->p_dr);
+	dprintk((KERN_DEBUG "wrote 0x%x\n", d));
+}
+
+/* Read the current value of the data register. */
+static unsigned char parport_sunbpp_read_data(struct parport *p)
+{
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+
+	return sbus_readb(&regs->p_dr);
+}
+
+/*
+ * Translate the SBUS BPP input/transfer registers into the PC-style
+ * status byte.  ERROR, SELECT, PAPEROUT and BUSY are inverted relative
+ * to the hardware bits; ACK passes through unchanged.
+ */
+static unsigned char status_sunbpp_to_pc(struct parport *p)
+{
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	unsigned char bits = 0;
+	unsigned char value_tcr = sbus_readb(&regs->p_tcr);
+	unsigned char value_ir = sbus_readb(&regs->p_ir);
+
+	if (!(value_ir & P_IR_ERR))
+		bits |= PARPORT_STATUS_ERROR;
+	if (!(value_ir & P_IR_SLCT))
+		bits |= PARPORT_STATUS_SELECT;
+	if (!(value_ir & P_IR_PE))
+		bits |= PARPORT_STATUS_PAPEROUT;
+	if (value_tcr & P_TCR_ACK)
+		bits |= PARPORT_STATUS_ACK;
+	if (!(value_tcr & P_TCR_BUSY))
+		bits |= PARPORT_STATUS_BUSY;
+
+	dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir));
+	dprintk((KERN_DEBUG "read status 0x%x\n", bits));
+	return bits;
+}
+
+/*
+ * Translate the SBUS BPP output/transfer registers into the PC-style
+ * control byte.  STROBE, AUTOFD and INIT are inverted relative to the
+ * hardware bits; SELECT passes through unchanged.
+ */
+static unsigned char control_sunbpp_to_pc(struct parport *p)
+{
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	unsigned char bits = 0;
+	unsigned char value_tcr = sbus_readb(&regs->p_tcr);
+	unsigned char value_or = sbus_readb(&regs->p_or);
+
+	if (!(value_tcr & P_TCR_DS))
+		bits |= PARPORT_CONTROL_STROBE;
+	if (!(value_or & P_OR_AFXN))
+		bits |= PARPORT_CONTROL_AUTOFD;
+	if (!(value_or & P_OR_INIT))
+		bits |= PARPORT_CONTROL_INIT;
+	if (value_or & P_OR_SLCT_IN)
+		bits |= PARPORT_CONTROL_SELECT;
+
+	dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or));
+	dprintk((KERN_DEBUG "read control 0x%x\n", bits));
+	return bits;
+}
+
+/* parport_operations read_control hook; see control_sunbpp_to_pc(). */
+static unsigned char parport_sunbpp_read_control(struct parport *p)
+{
+	return control_sunbpp_to_pc(p);
+}
+
+/*
+ * Read-modify-write the two hardware registers that back the PC-style
+ * control byte.  Each line selected by @mask is set according to @val;
+ * STROBE, AUTOFD and INIT are active-low in hardware, SELECT-IN is
+ * active-high.  Returns the resulting control byte.
+ */
+static unsigned char parport_sunbpp_frob_control(struct parport *p,
+						 unsigned char mask,
+						 unsigned char val)
+{
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	unsigned char tcr = sbus_readb(&regs->p_tcr);
+	unsigned char por = sbus_readb(&regs->p_or);
+
+	dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", tcr, por));
+
+	if (mask & PARPORT_CONTROL_STROBE)
+		tcr = (val & PARPORT_CONTROL_STROBE) ?
+			(tcr & ~P_TCR_DS) : (tcr | P_TCR_DS);
+	if (mask & PARPORT_CONTROL_AUTOFD)
+		por = (val & PARPORT_CONTROL_AUTOFD) ?
+			(por & ~P_OR_AFXN) : (por | P_OR_AFXN);
+	if (mask & PARPORT_CONTROL_INIT)
+		por = (val & PARPORT_CONTROL_INIT) ?
+			(por & ~P_OR_INIT) : (por | P_OR_INIT);
+	if (mask & PARPORT_CONTROL_SELECT)
+		por = (val & PARPORT_CONTROL_SELECT) ?
+			(por | P_OR_SLCT_IN) : (por & ~P_OR_SLCT_IN);
+
+	sbus_writeb(por, &regs->p_or);
+	sbus_writeb(tcr, &regs->p_tcr);
+	dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", tcr, por));
+	return parport_sunbpp_read_control(p);
+}
+
+static void parport_sunbpp_write_control(struct parport *p, unsigned char d)
+{
+	/* Only the four software-controllable lines may be written. */
+	const unsigned char writable = PARPORT_CONTROL_STROBE |
+				       PARPORT_CONTROL_AUTOFD |
+				       PARPORT_CONTROL_INIT |
+				       PARPORT_CONTROL_SELECT;
+
+	parport_sunbpp_frob_control(p, writable, d & writable);
+}
+
+/* parport_operations read_status hook; see status_sunbpp_to_pc(). */
+static unsigned char parport_sunbpp_read_status(struct parport *p)
+{
+	return status_sunbpp_to_pc(p);
+}
+
+static void parport_sunbpp_data_forward(struct parport *p)
+{
+	/* Clear the direction bit: host drives the data lines. */
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	u8 tcr = sbus_readb(&regs->p_tcr);
+
+	dprintk((KERN_DEBUG "forward\n"));
+	sbus_writeb(tcr & ~P_TCR_DIR, &regs->p_tcr);
+}
+
+static void parport_sunbpp_data_reverse(struct parport *p)
+{
+	/* Set the direction bit: data lines become inputs. */
+	struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base;
+	u8 tcr = sbus_readb(&regs->p_tcr);
+
+	dprintk((KERN_DEBUG "reverse\n"));
+	sbus_writeb(tcr | P_TCR_DIR, &regs->p_tcr);
+}
+
+/* Initialise the saved PC-style register state for a new pardevice. */
+static void parport_sunbpp_init_state(struct pardevice *dev, struct parport_state *s)
+{
+	s->u.pc.ctr = 0xc;
+	s->u.pc.ecr = 0x0;
+}
+
+/* Snapshot the current control byte into the saved state. */
+static void parport_sunbpp_save_state(struct parport *p, struct parport_state *s)
+{
+	s->u.pc.ctr = parport_sunbpp_read_control(p);
+}
+
+/* Write the saved control byte back to the hardware. */
+static void parport_sunbpp_restore_state(struct parport *p, struct parport_state *s)
+{
+	parport_sunbpp_write_control(p, s->u.pc.ctr);
+}
+
+/* Low-level register access is implemented locally; all IEEE 1284
+ * transfer modes (EPP/ECP/compat/nibble/byte) are delegated to the
+ * generic parport_ieee1284_* software routines. */
+static struct parport_operations parport_sunbpp_ops =
+{
+	.write_data	= parport_sunbpp_write_data,
+	.read_data	= parport_sunbpp_read_data,
+
+	.write_control	= parport_sunbpp_write_control,
+	.read_control	= parport_sunbpp_read_control,
+	.frob_control	= parport_sunbpp_frob_control,
+
+	.read_status	= parport_sunbpp_read_status,
+
+	.enable_irq	= parport_sunbpp_enable_irq,
+	.disable_irq	= parport_sunbpp_disable_irq,
+
+	.data_forward	= parport_sunbpp_data_forward,
+	.data_reverse	= parport_sunbpp_data_reverse,
+
+	.init_state	= parport_sunbpp_init_state,
+	.save_state	= parport_sunbpp_save_state,
+	.restore_state	= parport_sunbpp_restore_state,
+
+	.epp_write_data	= parport_ieee1284_epp_write_data,
+	.epp_read_data	= parport_ieee1284_epp_read_data,
+	.epp_write_addr	= parport_ieee1284_epp_write_addr,
+	.epp_read_addr	= parport_ieee1284_epp_read_addr,
+
+	.ecp_write_data	= parport_ieee1284_ecp_write_data,
+	.ecp_read_data	= parport_ieee1284_ecp_read_data,
+	.ecp_write_addr	= parport_ieee1284_ecp_write_addr,
+
+	.compat_write_data	= parport_ieee1284_write_compat,
+	.nibble_read_data	= parport_ieee1284_read_nibble,
+	.byte_read_data		= parport_ieee1284_read_byte,
+
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Probe one "SUNW,bpp" SBUS node: map its registers, register a parport
+ * on top of them with a private copy of the ops table, hook up the
+ * (shared) IRQ, and force the data direction to forward (P_TCR_DIR
+ * cleared).  On failure everything acquired so far is released via the
+ * goto cleanup chain.
+ */
+static int bpp_probe(struct platform_device *op)
+{
+	struct parport_operations *ops;
+	struct bpp_regs __iomem *regs;
+	int irq, dma, err = 0, size;
+	unsigned char value_tcr;
+	void __iomem *base;
+	struct parport *p;
+
+	irq = op->archdata.irqs[0];
+	base = of_ioremap(&op->resource[0], 0,
+			  resource_size(&op->resource[0]),
+			  "sunbpp");
+	if (!base)
+		return -ENODEV;
+
+	size = resource_size(&op->resource[0]);
+	dma = PARPORT_DMA_NONE;
+
+	/* Per-port copy, freed again in bpp_remove(). */
+	ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
+		      GFP_KERNEL);
+	if (!ops) {
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	dprintk(("register_port\n"));
+	if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
+		err = -ENOMEM;
+		goto out_free_ops;
+	}
+
+	p->size = size;
+	p->dev = &op->dev;
+
+	if ((err = request_irq(p->irq, parport_irq_handler,
+			       IRQF_SHARED, p->name, p)) != 0) {
+		goto out_put_port;
+	}
+
+	parport_sunbpp_enable_irq(p);
+
+	regs = (struct bpp_regs __iomem *)p->base;
+
+	/* Start out in forward (output) direction. */
+	value_tcr = sbus_readb(&regs->p_tcr);
+	value_tcr &= ~P_TCR_DIR;
+	sbus_writeb(value_tcr, &regs->p_tcr);
+
+	pr_info("%s: sunbpp at 0x%lx\n", p->name, p->base);
+
+	dev_set_drvdata(&op->dev, p);
+
+	parport_announce_port(p);
+
+	return 0;
+
+out_put_port:
+	parport_put_port(p);
+
+out_free_ops:
+	kfree(ops);
+
+out_unmap:
+	of_iounmap(&op->resource[0], base, size);
+
+	return err;
+}
+
+/* Undo bpp_probe(): detach the port, release the IRQ, unmap the
+ * registers and free the private ops copy. */
+static int bpp_remove(struct platform_device *op)
+{
+	struct parport *p = dev_get_drvdata(&op->dev);
+	struct parport_operations *ops = p->ops;
+
+	parport_remove_port(p);
+
+	if (p->irq != PARPORT_IRQ_NONE) {
+		parport_sunbpp_disable_irq(p);
+		free_irq(p->irq, p);
+	}
+
+	of_iounmap(&op->resource[0], (void __iomem *) p->base, p->size);
+	parport_put_port(p);
+	kfree(ops);
+
+	dev_set_drvdata(&op->dev, NULL);
+
+	return 0;
+}
+
+/* Match on the OpenFirmware node name used for the bidirectional port. */
+static const struct of_device_id bpp_match[] = {
+	{
+		.name = "SUNW,bpp",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, bpp_match);
+
+static struct platform_driver bpp_sbus_driver = {
+	.driver = {
+		.name = "bpp",
+		.of_match_table = bpp_match,
+	},
+	.probe		= bpp_probe,
+	.remove		= bpp_remove,
+};
+
+module_platform_driver(bpp_sbus_driver);
+
+MODULE_AUTHOR("Derrick J Brashear");
+MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
+MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
new file mode 100644
index 000000000..7e6d713fa
--- /dev/null
+++ b/drivers/parport/probe.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Parallel port device probing code
+ *
+ * Authors: Carsten Gross, carsten@sol.wohnheim.uni-ulm.de
+ * Philip Blundell <philb@gnu.org>
+ */
+
+#include <linux/module.h>
+#include <linux/parport.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/*
+ * Known IEEE 1284 device-class tokens and their human-readable names.
+ * parse_data() stores the matching index in info->class and
+ * pretty_print() indexes this array with it, so the entry order is
+ * assumed to line up with the PARPORT_CLASS_* constants — do not
+ * reorder without checking <linux/parport.h>.
+ */
+static const struct {
+	const char *token;
+	const char *descr;
+} classes[] = {
+	{ "",            "Legacy device" },
+	{ "PRINTER",     "Printer" },
+	{ "MODEM",       "Modem" },
+	{ "NET",         "Network device" },
+	{ "HDC",         "Hard disk" },
+	{ "PCMCIA",      "PCMCIA" },
+	{ "MEDIA",       "Multimedia device" },
+	{ "FDC",         "Floppy disk" },
+	{ "PORTS",       "Ports" },
+	{ "SCANNER",     "Scanner" },
+	{ "DIGICAM",     "Digital camera" },
+	{ "",            "Unknown device" },
+	{ "",            "Unspecified" },
+	{ "SCSIADAPTER", "SCSI adapter" },
+	{ NULL,          NULL }
+};
+
+/*
+ * Log a one-line summary of a probed device.  @device is the
+ * daisy-chain address, or -1 for the port itself (hence the +1 when
+ * indexing probe_info).
+ */
+static void pretty_print(struct parport *port, int device)
+{
+	struct parport_device_info *info = &port->probe_info[device + 1];
+
+	pr_info("%s", port->name);
+
+	if (device >= 0)
+		pr_cont(" (addr %d)", device);
+
+	pr_cont(": %s", classes[info->class].descr);
+	if (info->class)
+		pr_cont(", %s %s", info->mfr, info->model);
+
+	pr_cont("\n");
+}
+
+/*
+ * Parse an IEEE 1284 Device ID string of semicolon-separated
+ * "KEY:value" pairs and fill in port->probe_info[device + 1].
+ * @device is the daisy-chain address, or -1 for the port itself.
+ */
+static void parse_data(struct parport *port, int device, char *str)
+{
+	char *txt = kmalloc(strlen(str)+1, GFP_KERNEL);
+	char *p = txt, *q;
+	int guessed_class = PARPORT_CLASS_UNSPEC;
+	struct parport_device_info *info = &port->probe_info[device + 1];
+
+	if (!txt) {
+		pr_warn("%s probe: memory squeeze\n", port->name);
+		return;
+	}
+	/* Work on a scratch copy: parsing NUL-splits the string in place. */
+	strcpy(txt, str);
+	while (p) {
+		char *sep;
+		q = strchr(p, ';');
+		if (q) *q = 0;
+		sep = strchr(p, ':');
+		if (sep) {
+			char *u;
+			*(sep++) = 0;
+			/* Get rid of trailing blanks */
+			u = sep + strlen (sep) - 1;
+			while (u >= p && *u == ' ')
+				*u-- = '\0';
+			/* Keys are matched case-insensitively: upcase the key. */
+			u = p;
+			while (*u) {
+				*u = toupper(*u);
+				u++;
+			}
+			/* NOTE(review): the kstrdup() results below are not
+			 * checked; a failed allocation leaves the field NULL. */
+			if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
+				kfree(info->mfr);
+				info->mfr = kstrdup(sep, GFP_KERNEL);
+			} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
+				kfree(info->model);
+				info->model = kstrdup(sep, GFP_KERNEL);
+			} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
+				int i;
+
+				kfree(info->class_name);
+				info->class_name = kstrdup(sep, GFP_KERNEL);
+				for (u = sep; *u; u++)
+					*u = toupper(*u);
+				/* Map the class token to its classes[] index. */
+				for (i = 0; classes[i].token; i++) {
+					if (!strcmp(classes[i].token, sep)) {
+						info->class = i;
+						goto rock_on;
+					}
+				}
+				pr_warn("%s probe: warning, class '%s' not understood\n",
+					port->name, sep);
+				info->class = PARPORT_CLASS_OTHER;
+			} else if (!strcmp(p, "CMD") ||
+				   !strcmp(p, "COMMAND SET")) {
+				kfree(info->cmdset);
+				info->cmdset = kstrdup(sep, GFP_KERNEL);
+				/* if it speaks printer language, it's
+				   probably a printer */
+				if (strstr(sep, "PJL") || strstr(sep, "PCL"))
+					guessed_class = PARPORT_CLASS_PRINTER;
+			} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
+				kfree(info->description);
+				info->description = kstrdup(sep, GFP_KERNEL);
+			}
+		}
+	rock_on:
+		if (q)
+			p = q + 1;
+		else
+			p = NULL;
+	}
+
+	/* If the device didn't tell us its class, maybe we have managed to
+	   guess one from the things it did say. */
+	if (info->class == PARPORT_CLASS_UNSPEC)
+		info->class = guessed_class;
+
+	pretty_print (port, device);
+
+	kfree(txt);
+}
+
+/* Read up to count-1 bytes of device id. Terminate buffer with
+ * '\0'. Buffer begins with two Device ID length bytes as given by
+ * device.  Returns the number of bytes placed in @buffer, or a
+ * negative errno. */
+static ssize_t parport_read_device_id (struct parport *port, char *buffer,
+				       size_t count)
+{
+	unsigned char length[2];
+	unsigned lelen, belen;
+	size_t idlens[4];
+	unsigned numidlens;
+	unsigned current_idlen;
+	ssize_t retval;
+	size_t len;
+
+	/* First two bytes are MSB,LSB of inclusive length. */
+	retval = parport_read (port, length, 2);
+
+	if (retval < 0)
+		return retval;
+	if (retval != 2)
+		return -EIO;
+
+	if (count < 2)
+		return 0;
+	memcpy(buffer, length, 2);
+	len = 2;
+
+	/* Some devices wrongly send LE length, and some send it two
+	 * bytes short. Construct a sorted array of lengths to try. */
+	belen = (length[0] << 8) + length[1];
+	lelen = (length[1] << 8) + length[0];
+	idlens[0] = min(belen, lelen);
+	idlens[1] = idlens[0]+2;
+	if (belen != lelen) {
+		int off = 2;
+		/* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
+		if (idlens[0] <= 2)
+			off = 0;
+		idlens[off] = max(belen, lelen);
+		idlens[off+1] = idlens[off]+2;
+		numidlens = off+2;
+	}
+	else {
+		/* Some devices don't truly implement Device ID, but
+		 * just return constant nibble forever. This catches
+		 * also those cases. */
+		if (idlens[0] == 0 || idlens[0] > 0xFFF) {
+			printk(KERN_DEBUG "%s: reported broken Device ID length of %#zX bytes\n",
+			       port->name, idlens[0]);
+			return -EIO;
+		}
+		numidlens = 2;
+	}
+
+	/* Try to respect the given ID length despite all the bugs in
+	 * the ID length. Read according to shortest possible ID
+	 * first. */
+	for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
+		size_t idlen = idlens[current_idlen];
+		if (idlen+1 >= count)
+			break;
+
+		retval = parport_read (port, buffer+len, idlen-len);
+
+		if (retval < 0)
+			return retval;
+		len += retval;
+
+		/* Once the port leaves the "data available" phase the
+		 * device has sent everything it is going to send. */
+		if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
+			if (belen != len) {
+				printk(KERN_DEBUG "%s: Device ID was %zd bytes while device told it would be %d bytes\n",
+				       port->name, len, belen);
+			}
+			goto done;
+		}
+
+		/* This might end reading the Device ID too
+		 * soon. Hopefully the needed fields were already in
+		 * the first 256 bytes or so that we must have read so
+		 * far. */
+		if (buffer[len-1] == ';') {
+			printk(KERN_DEBUG "%s: Device ID reading stopped before device told data not available. Current idlen %u of %u, len bytes %02X %02X\n",
+			       port->name, current_idlen, numidlens,
+			       length[0], length[1]);
+			goto done;
+		}
+	}
+	if (current_idlen < numidlens) {
+		/* Buffer not large enough, read to end of buffer. */
+		size_t idlen, len2;
+		if (len+1 < count) {
+			retval = parport_read (port, buffer+len, count-len-1);
+			if (retval < 0)
+				return retval;
+			len += retval;
+		}
+		/* Read the whole ID since some devices would not
+		 * otherwise give back the Device ID from beginning
+		 * next time when asked. */
+		idlen = idlens[current_idlen];
+		len2 = len;
+		while(len2 < idlen && retval > 0) {
+			char tmp[4];
+			retval = parport_read (port, tmp,
+					       min(sizeof tmp, idlen-len2));
+			if (retval < 0)
+				return retval;
+			len2 += retval;
+		}
+	}
+	/* In addition, there are broken devices out there that don't
+	   even finish off with a semi-colon. We do not need to care
+	   about those at this time. */
+ done:
+	buffer[len] = '\0';
+	return len;
+}
+
+/*
+ * Get Std 1284 Device ID for device @devnum into @buffer (at most
+ * @count bytes, including the two leading length bytes).  Returns the
+ * number of bytes read, or a negative errno.
+ */
+ssize_t parport_device_id (int devnum, char *buffer, size_t count)
+{
+	ssize_t retval = -ENXIO;
+	struct pardevice *dev = parport_open(devnum, daisy_dev_name);
+	if (!dev)
+		return -ENXIO;
+
+	parport_claim_or_block (dev);
+
+	/* Negotiate to compatibility mode, and then to device ID
+	 * mode. (This so that we start from beginning of device ID if
+	 * already in device ID mode.) */
+	parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
+	retval = parport_negotiate (dev->port,
+				    IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
+
+	if (!retval) {
+		retval = parport_read_device_id (dev->port, buffer, count);
+		parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
+		/* Skip the two length bytes when parsing the ID proper. */
+		if (retval > 2)
+			parse_data (dev->port, dev->daisy, buffer+2);
+	}
+
+	parport_release (dev);
+	parport_close (dev);
+	return retval;
+}
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
new file mode 100644
index 000000000..d740eba3c
--- /dev/null
+++ b/drivers/parport/procfs.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Sysctl interface for parport devices.
+ *
+ * Authors: David Campbell
+ * Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Philip Blundell <philb@gnu.org>
+ * Andrea Arcangeli
+ * Riccardo Facchetti <fizban@tin.it>
+ *
+ * based on work by Grant Guenther <grant@torque.net>
+ * and Philip Blundell
+ *
+ * Cleaned up include files - Russell King <linux@arm.uk.linux.org>
+ */
+
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/parport.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
+#define PARPORT_MIN_TIMESLICE_VALUE 1ul
+#define PARPORT_MAX_TIMESLICE_VALUE ((unsigned long) HZ)
+#define PARPORT_MIN_SPINTIME_VALUE 1
+#define PARPORT_MAX_SPINTIME_VALUE 1000
+
+/*
+ * sysctl read handler for .../active: report the name of the device
+ * currently claiming the port (port->cad), or "none".  At most one
+ * device in the list can be the current active device, so the sprintf
+ * into the start of @buffer runs at most once.
+ */
+static int do_active_device(struct ctl_table *table, int write,
+		      void *result, size_t *lenp, loff_t *ppos)
+{
+	struct parport *port = (struct parport *)table->extra1;
+	char buffer[256];
+	struct pardevice *dev;
+	int len = 0;
+
+	if (write)		/* can't happen anyway */
+		return -EACCES;
+
+	/* One-shot read: any non-zero offset means EOF. */
+	if (*ppos) {
+		*lenp = 0;
+		return 0;
+	}
+
+	for (dev = port->devices; dev ; dev = dev->next) {
+		if(dev == port->cad) {
+			len += sprintf(buffer, "%s\n", dev->name);
+		}
+	}
+
+	if(!len) {
+		len += sprintf(buffer, "%s\n", "none");
+	}
+
+	if (len > *lenp)
+		len = *lenp;
+	else
+		*lenp = len;
+
+	*ppos += len;
+	memcpy(result, buffer, len);
+	return 0;
+}
+
+#ifdef CONFIG_PARPORT_1284
+/*
+ * sysctl read handler for .../autoprobe*: dump the IEEE 1284 probe
+ * results (class, model, manufacturer, description, command set) for
+ * the parport_device_info passed in table->extra2.
+ *
+ * NOTE(review): the sprintf calls into the 256-byte buffer are not
+ * bounded; very long probe strings would overflow — confirm upstream
+ * limits on probe_info field lengths.
+ */
+static int do_autoprobe(struct ctl_table *table, int write,
+			void *result, size_t *lenp, loff_t *ppos)
+{
+	struct parport_device_info *info = table->extra2;
+	const char *str;
+	char buffer[256];
+	int len = 0;
+
+	if (write) /* permissions stop this */
+		return -EACCES;
+
+	/* One-shot read: any non-zero offset means EOF. */
+	if (*ppos) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if ((str = info->class_name) != NULL)
+		len += sprintf (buffer + len, "CLASS:%s;\n", str);
+
+	if ((str = info->model) != NULL)
+		len += sprintf (buffer + len, "MODEL:%s;\n", str);
+
+	if ((str = info->mfr) != NULL)
+		len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str);
+
+	if ((str = info->description) != NULL)
+		len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str);
+
+	if ((str = info->cmdset) != NULL)
+		len += sprintf (buffer + len, "COMMAND SET:%s;\n", str);
+
+	if (len > *lenp)
+		len = *lenp;
+	else
+		*lenp = len;
+
+	*ppos += len;
+
+	memcpy(result, buffer, len);
+	return 0;
+}
+#endif /* IEEE1284.3 support. */
+
+/* sysctl read handler: report the port's base and hi base I/O addresses. */
+static int do_hardware_base_addr(struct ctl_table *table, int write,
+				 void *result, size_t *lenp, loff_t *ppos)
+{
+	struct parport *port = (struct parport *)table->extra1;
+	char buffer[20];
+	int len = 0;
+
+	/* One-shot read: any non-zero offset means EOF. */
+	if (*ppos) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if (write) /* permissions prevent this anyway */
+		return -EACCES;
+
+	len += sprintf (buffer, "%lu\t%lu\n", port->base, port->base_hi);
+
+	if (len > *lenp)
+		len = *lenp;
+	else
+		*lenp = len;
+
+	*ppos += len;
+	memcpy(result, buffer, len);
+	return 0;
+}
+
+/* sysctl read handler: report the port's IRQ line (-1 if none). */
+static int do_hardware_irq(struct ctl_table *table, int write,
+			   void *result, size_t *lenp, loff_t *ppos)
+{
+	struct parport *port = (struct parport *)table->extra1;
+	char buffer[20];
+	int len = 0;
+
+	/* One-shot read: any non-zero offset means EOF. */
+	if (*ppos) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if (write) /* permissions prevent this anyway */
+		return -EACCES;
+
+	len += sprintf (buffer, "%d\n", port->irq);
+
+	if (len > *lenp)
+		len = *lenp;
+	else
+		*lenp = len;
+
+	*ppos += len;
+	memcpy(result, buffer, len);
+	return 0;
+}
+
+/* sysctl read handler: report the port's DMA channel (-1 if none). */
+static int do_hardware_dma(struct ctl_table *table, int write,
+			   void *result, size_t *lenp, loff_t *ppos)
+{
+	struct parport *port = (struct parport *)table->extra1;
+	char buffer[20];
+	int len = 0;
+
+	/* One-shot read: any non-zero offset means EOF. */
+	if (*ppos) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if (write) /* permissions prevent this anyway */
+		return -EACCES;
+
+	len += sprintf (buffer, "%d\n", port->dma);
+
+	if (len > *lenp)
+		len = *lenp;
+	else
+		*lenp = len;
+
+	*ppos += len;
+	memcpy(result, buffer, len);
+	return 0;
+}
+
+/*
+ * sysctl read handler: report the PARPORT_MODE_* capability bits of the
+ * port as a comma-separated list of mode names.
+ */
+static int do_hardware_modes(struct ctl_table *table, int write,
+			     void *result, size_t *lenp, loff_t *ppos)
+{
+	struct parport *port = (struct parport *)table->extra1;
+	char buffer[40];
+	int len = 0;
+
+	/* One-shot read: any non-zero offset means EOF. */
+	if (*ppos) {
+		*lenp = 0;
+		return 0;
+	}
+
+	if (write) /* permissions prevent this anyway */
+		return -EACCES;
+
+	{
+/* Append #x to the buffer if the matching mode bit is set; f counts
+ * emitted entries so a comma precedes all but the first. */
+#define printmode(x) \
+do { \
+	if (port->modes & PARPORT_MODE_##x) \
+		len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
+} while (0)
+		int f = 0;
+		printmode(PCSPP);
+		printmode(TRISTATE);
+		printmode(COMPAT);
+		printmode(EPP);
+		printmode(ECP);
+		printmode(DMA);
+#undef printmode
+	}
+	buffer[len++] = '\n';
+
+	if (len > *lenp)
+		len = *lenp;
+	else
+		*lenp = len;
+
+	*ppos += len;
+	memcpy(result, buffer, len);
+	return 0;
+}
+
+/* Shorthand initializers for the /proc/sys/dev/parport directory skeleton. */
+#define PARPORT_PORT_DIR(CHILD) { .procname = NULL, .mode = 0555, .child = CHILD }
+#define PARPORT_PARPORT_DIR(CHILD) { .procname = "parport", \
+				     .mode = 0555, .child = CHILD }
+#define PARPORT_DEV_DIR(CHILD) { .procname = "dev", .mode = 0555, .child = CHILD }
+#define PARPORT_DEVICES_ROOT_DIR  { .procname = "devices", \
+				    .mode = 0555, .child = NULL }
+
+/* Bounds handed to the proc_do*_minmax handlers via extra1/extra2. */
+static const unsigned long parport_min_timeslice_value =
+PARPORT_MIN_TIMESLICE_VALUE;
+
+static const unsigned long parport_max_timeslice_value =
+PARPORT_MAX_TIMESLICE_VALUE;
+
+static const int parport_min_spintime_value =
+PARPORT_MIN_SPINTIME_VALUE;
+
+static const int parport_max_spintime_value =
+PARPORT_MAX_SPINTIME_VALUE;
+
+
+/* One per registered port: header plus the ctl_table arrays it points into. */
+struct parport_sysctl_table {
+	struct ctl_table_header *sysctl_header;
+	struct ctl_table vars[12];
+	struct ctl_table device_dir[2];
+	struct ctl_table port_dir[2];
+	struct ctl_table parport_dir[2];
+	struct ctl_table dev_dir[2];
+};
+
+/*
+ * Template for each port's dev/parport/<port> sysctl tree, copied and
+ * patched by parport_proc_register().  NOTE: that function pokes this
+ * table by index — vars[0] is "spintime", vars[5] is the "devices"
+ * directory, vars[6..10] are the autoprobe entries — so entry order
+ * here must not change without updating parport_proc_register().
+ */
+static const struct parport_sysctl_table parport_sysctl_template = {
+	.sysctl_header = NULL,
+	{
+		{
+			.procname	= "spintime",
+			.data		= NULL,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec_minmax,
+			.extra1		= (void*) &parport_min_spintime_value,
+			.extra2		= (void*) &parport_max_spintime_value
+		},
+		{
+			.procname	= "base-addr",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_hardware_base_addr
+		},
+		{
+			.procname	= "irq",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_hardware_irq
+		},
+		{
+			.procname	= "dma",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_hardware_dma
+		},
+		{
+			.procname	= "modes",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_hardware_modes
+		},
+		PARPORT_DEVICES_ROOT_DIR,
+#ifdef CONFIG_PARPORT_1284
+		{
+			.procname	= "autoprobe",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_autoprobe
+		},
+		{
+			.procname	= "autoprobe0",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_autoprobe
+		},
+		{
+			.procname	= "autoprobe1",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_autoprobe
+		},
+		{
+			.procname	= "autoprobe2",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_autoprobe
+		},
+		{
+			.procname	= "autoprobe3",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_autoprobe
+		},
+#endif /* IEEE 1284 support */
+		{}
+	},
+	{
+		{
+			.procname	= "active",
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0444,
+			.proc_handler	= do_active_device
+		},
+		{}
+	},
+	{
+		PARPORT_PORT_DIR(NULL),
+		{}
+	},
+	{
+		PARPORT_PARPORT_DIR(NULL),
+		{}
+	},
+	{
+		PARPORT_DEV_DIR(NULL),
+		{}
+	}
+};
+
+/* One per registered pardevice: its timeslice entry plus directory chain. */
+struct parport_device_sysctl_table
+{
+	struct ctl_table_header *sysctl_header;
+	struct ctl_table vars[2];
+	struct ctl_table device_dir[2];
+	struct ctl_table devices_root_dir[2];
+	struct ctl_table port_dir[2];
+	struct ctl_table parport_dir[2];
+	struct ctl_table dev_dir[2];
+};
+
+/*
+ * Template for dev/parport/<port>/devices/<device>/timeslice.  The
+ * procnames, data pointer and child links are filled in by
+ * parport_device_proc_register().
+ */
+static const struct parport_device_sysctl_table
+parport_device_sysctl_template = {
+	.sysctl_header = NULL,
+	{
+		{
+			.procname	= "timeslice",
+			.data		= NULL,
+			.maxlen		= sizeof(unsigned long),
+			.mode		= 0644,
+			.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+			.extra1		= (void*) &parport_min_timeslice_value,
+			.extra2		= (void*) &parport_max_timeslice_value
+		},
+	},
+	{
+		{
+			.procname	= NULL,
+			.data		= NULL,
+			.maxlen		= 0,
+			.mode		= 0555,
+			.child		= NULL
+		},
+		{}
+	},
+	{
+		PARPORT_DEVICES_ROOT_DIR,
+		{}
+	},
+	{
+		PARPORT_PORT_DIR(NULL),
+		{}
+	},
+	{
+		PARPORT_PARPORT_DIR(NULL),
+		{}
+	},
+	{
+		PARPORT_DEV_DIR(NULL),
+		{}
+	}
+};
+
+/* Global defaults tree: dev/parport/default/{timeslice,spintime}. */
+struct parport_default_sysctl_table
+{
+	struct ctl_table_header *sysctl_header;
+	struct ctl_table vars[3];
+	struct ctl_table default_dir[2];
+	struct ctl_table parport_dir[2];
+	struct ctl_table dev_dir[2];
+};
+
+/*
+ * Unlike the per-port templates this instance is fully wired at compile
+ * time: the .child links point back into the structure itself, so it is
+ * registered directly (not copied) by parport_default_proc_register().
+ */
+static struct parport_default_sysctl_table
+parport_default_sysctl_table = {
+	.sysctl_header = NULL,
+	{
+		{
+			.procname	= "timeslice",
+			.data		= &parport_default_timeslice,
+			.maxlen		= sizeof(parport_default_timeslice),
+			.mode		= 0644,
+			.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
+			.extra1		= (void*) &parport_min_timeslice_value,
+			.extra2		= (void*) &parport_max_timeslice_value
+		},
+		{
+			.procname	= "spintime",
+			.data		= &parport_default_spintime,
+			.maxlen		= sizeof(parport_default_spintime),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec_minmax,
+			.extra1		= (void*) &parport_min_spintime_value,
+			.extra2		= (void*) &parport_max_spintime_value
+		},
+		{}
+	},
+	{
+		{
+			.procname	= "default",
+			.mode		= 0555,
+			.child		= parport_default_sysctl_table.vars
+		},
+		{}
+	},
+	{
+		PARPORT_PARPORT_DIR(parport_default_sysctl_table.default_dir),
+		{}
+	},
+	{
+		PARPORT_DEV_DIR(parport_default_sysctl_table.parport_dir),
+		{}
+	}
+};
+
+
+/*
+ * Create /proc/sys/dev/parport/<port> for @port by copying the template
+ * and patching it by index (vars[0] = spintime, vars[5] = devices dir,
+ * vars[6..10] = autoprobe entries) — keep in sync with
+ * parport_sysctl_template.
+ *
+ * NOTE(review): if register_sysctl_table() fails, the table is freed
+ * and port->sysctl_table stays NULL, but 0 is still returned — the
+ * failure is silent.
+ */
+int parport_proc_register(struct parport *port)
+{
+	struct parport_sysctl_table *t;
+	int i;
+
+	t = kmemdup(&parport_sysctl_template, sizeof(*t), GFP_KERNEL);
+	if (t == NULL)
+		return -ENOMEM;
+
+	t->device_dir[0].extra1 = port;
+
+	for (i = 0; i < 5; i++)
+		t->vars[i].extra1 = port;
+
+	t->vars[0].data = &port->spintime;
+	t->vars[5].child = t->device_dir;
+
+	for (i = 0; i < 5; i++)
+		t->vars[6 + i].extra2 = &port->probe_info[i];
+
+	t->port_dir[0].procname = port->name;
+
+	t->port_dir[0].child = t->vars;
+	t->parport_dir[0].child = t->port_dir;
+	t->dev_dir[0].child = t->parport_dir;
+
+	t->sysctl_header = register_sysctl_table(t->dev_dir);
+	if (t->sysctl_header == NULL) {
+		kfree(t);
+		t = NULL;
+	}
+	port->sysctl_table = t;
+	return 0;
+}
+
+/* Tear down the sysctl tree created by parport_proc_register(), if any. */
+int parport_proc_unregister(struct parport *port)
+{
+	if (port->sysctl_table) {
+		struct parport_sysctl_table *t = port->sysctl_table;
+		/* Clear the pointer first so the table cannot be reused. */
+		port->sysctl_table = NULL;
+		unregister_sysctl_table(t->sysctl_header);
+		kfree(t);
+	}
+	return 0;
+}
+
+/*
+ * Create dev/parport/<port>/devices/<device>/timeslice for @device by
+ * copying the per-device template and wiring its directory chain.
+ *
+ * NOTE(review): mirrors parport_proc_register() — a failed
+ * register_sysctl_table() is swallowed and 0 returned.
+ */
+int parport_device_proc_register(struct pardevice *device)
+{
+	struct parport_device_sysctl_table *t;
+	struct parport * port = device->port;
+
+	t = kmemdup(&parport_device_sysctl_template, sizeof(*t), GFP_KERNEL);
+	if (t == NULL)
+		return -ENOMEM;
+
+	t->dev_dir[0].child = t->parport_dir;
+	t->parport_dir[0].child = t->port_dir;
+	t->port_dir[0].procname = port->name;
+	t->port_dir[0].child = t->devices_root_dir;
+	t->devices_root_dir[0].child = t->device_dir;
+
+	t->device_dir[0].procname = device->name;
+	t->device_dir[0].child = t->vars;
+	t->vars[0].data = &device->timeslice;
+
+	t->sysctl_header = register_sysctl_table(t->dev_dir);
+	if (t->sysctl_header == NULL) {
+		kfree(t);
+		t = NULL;
+	}
+	device->sysctl_table = t;
+	return 0;
+}
+
+/* Tear down the sysctl tree created by parport_device_proc_register(). */
+int parport_device_proc_unregister(struct pardevice *device)
+{
+	if (device->sysctl_table) {
+		struct parport_device_sysctl_table *t = device->sysctl_table;
+		/* Clear the pointer first so the table cannot be reused. */
+		device->sysctl_table = NULL;
+		unregister_sysctl_table(t->sysctl_header);
+		kfree(t);
+	}
+	return 0;
+}
+
+/*
+ * Boot-time setup: register the global default sysctls, then the
+ * parport bus.  The sysctl registration is rolled back if the bus
+ * fails to initialise.
+ */
+static int __init parport_default_proc_register(void)
+{
+	int ret;
+
+	parport_default_sysctl_table.sysctl_header =
+		register_sysctl_table(parport_default_sysctl_table.dev_dir);
+	if (!parport_default_sysctl_table.sysctl_header)
+		return -ENOMEM;
+	ret = parport_bus_init();
+	if (ret) {
+		unregister_sysctl_table(parport_default_sysctl_table.
+					sysctl_header);
+		return ret;
+	}
+	return 0;
+}
+
+/* Module exit: undo parport_default_proc_register() in reverse order. */
+static void __exit parport_default_proc_unregister(void)
+{
+	if (parport_default_sysctl_table.sysctl_header) {
+		unregister_sysctl_table(parport_default_sysctl_table.
+					sysctl_header);
+		parport_default_sysctl_table.sysctl_header = NULL;
+	}
+	parport_bus_exit();
+}
+
+#else /* no sysctl or no procfs*/
+
+/* Stubs used when sysctl or procfs support is compiled out: the proc
+ * interface disappears but the bus must still be initialised. */
+int parport_proc_register(struct parport *pp)
+{
+	return 0;
+}
+
+int parport_proc_unregister(struct parport *pp)
+{
+	return 0;
+}
+
+int parport_device_proc_register(struct pardevice *device)
+{
+	return 0;
+}
+
+int parport_device_proc_unregister(struct pardevice *device)
+{
+	return 0;
+}
+
+static int __init parport_default_proc_register (void)
+{
+	return parport_bus_init();
+}
+
+static void __exit parport_default_proc_unregister (void)
+{
+	parport_bus_exit();
+}
+#endif
+
+/* Bring the parport bus (and default sysctls) up early during boot. */
+subsys_initcall(parport_default_proc_register)
+module_exit(parport_default_proc_unregister)
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
new file mode 100644
index 000000000..7fec4fefe
--- /dev/null
+++ b/drivers/parport/share.c
@@ -0,0 +1,1234 @@
+/*
+ * Parallel-port resource manager code.
+ *
+ * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
+ * Tim Waugh <tim@cyberelk.demon.co.uk>
+ * Jose Renau <renau@acm.org>
+ * Philip Blundell <philb@gnu.org>
+ * Andrea Arcangeli
+ *
+ * based on work by Grant Guenther <grant@torque.net>
+ * and Philip Blundell
+ *
+ * Any part of this program may be used in documents licensed under
+ * the GNU Free Documentation License, Version 1.1 or any later version
+ * published by the Free Software Foundation.
+ */
+
+#undef PARPORT_DEBUG_SHARING /* undef for production */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+#include <linux/parport.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <asm/irq.h>
+
+#undef PARPORT_PARANOID
+
+/* Default scheduling parameters; both are overridable via sysctl. */
+#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)
+
+unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
+int parport_default_spintime =  DEFAULT_SPIN_TIME;
+
+static LIST_HEAD(portlist);
+static DEFINE_SPINLOCK(parportlist_lock);
+
+/* list of all allocated ports, sorted by ->number */
+static LIST_HEAD(all_ports);
+static DEFINE_SPINLOCK(full_list_lock);
+
+static LIST_HEAD(drivers);
+
+/* Serialises driver/port (un)registration and attach/detach callbacks. */
+static DEFINE_MUTEX(registration_lock);
+
+/* What you can do to a port that's gone away.. */
+/* These no-op operations are swapped in for a port whose lowlevel
+ * driver has disappeared, so stale calls through port->ops are
+ * harmless instead of crashing. */
+static void dead_write_lines(struct parport *p, unsigned char b){}
+static unsigned char dead_read_lines(struct parport *p) { return 0; }
+static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
+			     unsigned char c) { return 0; }
+static void dead_onearg(struct parport *p){}
+static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
+static void dead_state(struct parport *p, struct parport_state *s) { }
+static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
+{ return 0; }
+static size_t dead_read(struct parport *p, void *b, size_t l, int f)
+{ return 0; }
+static struct parport_operations dead_ops = {
+	.write_data	= dead_write_lines,	/* data */
+	.read_data	= dead_read_lines,
+
+	.write_control	= dead_write_lines,	/* control */
+	.read_control	= dead_read_lines,
+	.frob_control	= dead_frob_lines,
+
+	.read_status	= dead_read_lines,	/* status */
+
+	.enable_irq	= dead_onearg,		/* enable_irq */
+	.disable_irq	= dead_onearg,		/* disable_irq */
+
+	.data_forward	= dead_onearg,		/* data_forward */
+	.data_reverse	= dead_onearg,		/* data_reverse */
+
+	.init_state	= dead_initstate,	/* init_state */
+	.save_state	= dead_state,
+	.restore_state	= dead_state,
+
+	.epp_write_data	= dead_write,		/* epp */
+	.epp_read_data	= dead_read,
+	.epp_write_addr	= dead_write,
+	.epp_read_addr	= dead_read,
+
+	.ecp_write_data	= dead_write,		/* ecp */
+	.ecp_read_data	= dead_read,
+	.ecp_write_addr	= dead_write,
+
+	.compat_write_data	= dead_write,	/* compat */
+	.nibble_read_data	= dead_read,	/* nibble */
+	.byte_read_data		= dead_read,	/* byte */
+
+	.owner		= NULL,
+};
+
+static struct device_type parport_device_type = {
+	.name		= "parport",
+};
+
+/* True if @dev is a port itself rather than a device attached to one. */
+static int is_parport(struct device *dev)
+{
+	return dev->type == &parport_device_type;
+}
+
+/*
+ * Bus probe callback.  Ports themselves never bind to drivers (only
+ * devices attached to a port do); if the driver supplies no probe of
+ * its own, bind purely by matching the device name to the driver name.
+ */
+static int parport_probe(struct device *dev)
+{
+	struct parport_driver *drv;
+
+	if (is_parport(dev))
+		return -ENODEV;
+
+	drv = to_parport_driver(dev->driver);
+	if (!drv->probe) {
+		/* if driver has not defined a custom probe */
+		struct pardevice *par_dev = to_pardevice(dev);
+
+		if (strcmp(par_dev->name, drv->name))
+			return -ENODEV;
+		return 0;
+	}
+	/* if driver defined its own probe */
+	return drv->probe(to_pardevice(dev));
+}
+
+static struct bus_type parport_bus_type = {
+	.name = "parport",
+	.probe = parport_probe,
+};
+
+/* Register the parport bus with the driver core (called once at boot). */
+int parport_bus_init(void)
+{
+	return bus_register(&parport_bus_type);
+}
+
+void parport_bus_exit(void)
+{
+	bus_unregister(&parport_bus_type);
+}
+
+/*
+ * iterates through all the drivers registered with the bus and sends the port
+ * details to the match_port callback of the driver, so that the driver can
+ * know about the new port that just registered with the bus and decide if it
+ * wants to use this new port.
+ */
+static int driver_check(struct device_driver *dev_drv, void *_port)
+{
+	struct parport *port = _port;
+	struct parport_driver *drv = to_parport_driver(dev_drv);
+
+	if (drv->match_port)
+		drv->match_port(port);
+	return 0;
+}
+
+/* Call attach(port) for each registered driver. */
+static void attach_driver_chain(struct parport *port)
+{
+	/* caller has exclusive registration_lock */
+	struct parport_driver *drv;
+
+	/* Legacy drivers on the local list get attach() directly... */
+	list_for_each_entry(drv, &drivers, list)
+		drv->attach(port);
+
+	/*
+	 * call the driver_check function of the drivers registered in
+	 * new device model
+	 */
+
+	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
+}
+
+/* Per-driver callback for detach notification (device-model drivers). */
+static int driver_detach(struct device_driver *_drv, void *_port)
+{
+	struct parport *port = _port;
+	struct parport_driver *drv = to_parport_driver(_drv);
+
+	if (drv->detach)
+		drv->detach(port);
+	return 0;
+}
+
+/* Call detach(port) for each registered driver. */
+static void detach_driver_chain(struct parport *port)
+{
+	struct parport_driver *drv;
+	/* caller has exclusive registration_lock */
+	list_for_each_entry(drv, &drivers, list)
+		drv->detach(port);
+
+	/*
+	 * call the detach function of the drivers registered in
+	 * new device model
+	 */
+
+	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
+}
+
+/* Ask kmod for some lowlevel drivers. */
+static void get_lowlevel_driver(void)
+{
+	/*
+	 * There is no actual module called this: you should set
+	 * up an alias for modutils.
+	 */
+	request_module("parport_lowlevel");
+}
+
+/*
+ * iterates through all the devices connected to the bus and sends the device
+ * details to the match_port callback of the driver, so that the driver can
+ * know what are all the ports that are connected to the bus and choose the
+ * port to which it wants to register its device.
+ */
+static int port_check(struct device *dev, void *dev_drv)
+{
+	struct parport_driver *drv = dev_drv;
+
+	/* only send ports, do not send other devices connected to bus */
+	if (is_parport(dev))
+		drv->match_port(to_parport_dev(dev));
+	return 0;
+}
+
+/*
+ * Iterates through all the devices connected to the bus and return 1
+ * if the device is a parallel port.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+	/* Used with bus_for_each_dev(): a non-zero return stops the walk,
+	 * so the caller learns whether any port is registered at all. */
+	if (is_parport(dev))
+		return 1;
+	return 0;
+}
+
+/**
+ * parport_register_driver - register a parallel port device driver
+ * @drv: structure describing the driver
+ * @owner: owner module of drv
+ * @mod_name: module name string
+ *
+ * This can be called by a parallel port device driver in order
+ * to receive notifications about ports being found in the
+ * system, as well as ports no longer available.
+ *
+ * Registration goes through the driver-model parport bus; the
+ * driver's match_port() callback is invoked for every port already
+ * present.
+ *
+ * The @drv structure is allocated by the caller and must not be
+ * deallocated until after calling parport_unregister_driver().
+ *
+ * The driver's attach() function may block. The port that
+ * attach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so. Calling
+ * parport_register_device() on that port will do this for you.
+ *
+ * The driver's detach() function may block. The port that
+ * detach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so.
+ *
+ *
+ * Returns 0 on success, or the error code from driver_register()
+ * on failure.
+ **/
+
+int __parport_register_driver(struct parport_driver *drv, struct module *owner,
+			      const char *mod_name)
+{
+	/* using device model */
+	int ret;
+
+	/* initialize common driver fields */
+	drv->driver.name = drv->name;
+	drv->driver.bus = &parport_bus_type;
+	drv->driver.owner = owner;
+	drv->driver.mod_name = mod_name;
+	ret = driver_register(&drv->driver);
+	if (ret)
+		return ret;
+
+	/*
+	 * check if bus has any parallel port registered, if
+	 * none is found then load the lowlevel driver.
+	 */
+	ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+			       port_detect);
+	if (!ret)
+		get_lowlevel_driver();
+
+	/* Offer every existing port to the new driver. */
+	mutex_lock(&registration_lock);
+	if (drv->match_port)
+		bus_for_each_dev(&parport_bus_type, NULL, drv,
+				 port_check);
+	mutex_unlock(&registration_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(__parport_register_driver);
+
+/* Notify @_drv that a port is going away (bus_for_each_dev callback). */
+static int port_detach(struct device *dev, void *_drv)
+{
+	struct parport_driver *drv = _drv;
+
+	if (is_parport(dev) && drv->detach)
+		drv->detach(to_parport_dev(dev));
+
+	return 0;
+}
+
+/**
+ * parport_unregister_driver - deregister a parallel port device driver
+ * @drv: structure describing the driver that was given to
+ *       parport_register_driver()
+ *
+ * This should be called by a parallel port device driver that
+ * has registered itself using parport_register_driver() when it
+ * is about to be unloaded.
+ *
+ * When it returns, the driver's attach() routine will no longer
+ * be called, and for each port that attach() was called for, the
+ * detach() routine will have been called.
+ *
+ * All the driver's attach() and detach() calls are guaranteed to have
+ * finished by the time this function returns.
+ **/
+
+void parport_unregister_driver(struct parport_driver *drv)
+{
+	/* The lock excludes concurrent attach/detach notification. */
+	mutex_lock(&registration_lock);
+	bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
+	driver_unregister(&drv->driver);
+	mutex_unlock(&registration_lock);
+}
+EXPORT_SYMBOL(parport_unregister_driver);
+
+/*
+ * Final release of a port: drop it from the global list and free all
+ * probe-info strings, the name, and the structure itself.
+ * NOTE(review): presumably installed as the bus_dev release callback
+ * (runs when the refcount hits zero) — the hook-up is outside this
+ * chunk; confirm in parport_register_port().
+ */
+static void free_port(struct device *dev)
+{
+	int d;
+	struct parport *port = to_parport_dev(dev);
+
+	spin_lock(&full_list_lock);
+	list_del(&port->full_list);
+	spin_unlock(&full_list_lock);
+	for (d = 0; d < 5; d++) {
+		kfree(port->probe_info[d].class_name);
+		kfree(port->probe_info[d].mfr);
+		kfree(port->probe_info[d].model);
+		kfree(port->probe_info[d].cmdset);
+		kfree(port->probe_info[d].description);
+	}
+
+	kfree(port->name);
+	kfree(port);
+}
+
+/**
+ * parport_get_port - increment a port's reference count
+ * @port: the port
+ *
+ * This ensures that a struct parport pointer remains valid
+ * until the matching parport_put_port() call.
+ **/
+
+struct parport *parport_get_port(struct parport *port)
+{
+	/* Reference counting is delegated to the embedded struct device. */
+	struct device *dev = get_device(&port->bus_dev);
+
+	return to_parport_dev(dev);
+}
+EXPORT_SYMBOL(parport_get_port);
+
+/* Remove the port's device from the bus (pairs with its registration). */
+void parport_del_port(struct parport *port)
+{
+	device_unregister(&port->bus_dev);
+}
+EXPORT_SYMBOL(parport_del_port);
+
+/**
+ * parport_put_port - decrement a port's reference count
+ * @port: the port
+ *
+ * This should be called once for each call to parport_get_port(),
+ * once the port is no longer needed. When the reference count reaches
+ * zero (port is no longer used), free_port is called.
+ **/
+
+void parport_put_port(struct parport *port)
+{
+	put_device(&port->bus_dev);
+}
+EXPORT_SYMBOL(parport_put_port);
+
+/**
+ * parport_register_port - register a parallel port
+ * @base: base I/O address
+ * @irq: IRQ line
+ * @dma: DMA channel
+ * @ops: pointer to the port driver's port operations structure
+ *
+ * When a parallel port (lowlevel) driver finds a port that
+ * should be made available to parallel port device drivers, it
+ * should call parport_register_port(). The @base, @irq, and
+ * @dma parameters are for the convenience of port drivers, and
+ * for ports where they aren't meaningful needn't be set to
+ * anything special. They can be altered afterwards by adjusting
+ * the relevant members of the parport structure that is returned
+ * and represents the port. They should not be tampered with
+ * after calling parport_announce_port, however.
+ *
+ * If there are parallel port device drivers in the system that
+ * have registered themselves using parport_register_driver(),
+ * they are not told about the port at this time; that is done by
+ * parport_announce_port().
+ *
+ * The @ops structure is allocated by the caller, and must not be
+ * deallocated before calling parport_remove_port().
+ *
+ * If there is no memory to allocate a new parport structure,
+ * this function will return %NULL.
+ **/
+
+struct parport *parport_register_port(unsigned long base, int irq, int dma,
+				      struct parport_operations *ops)
+{
+	struct list_head *l;
+	struct parport *tmp;
+	int num;
+	int device;
+	char *name;
+	int ret;
+
+	tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
+	if (!tmp)
+		return NULL;
+
+	/* Init our structure */
+	tmp->base = base;
+	tmp->irq = irq;
+	tmp->dma = dma;
+	tmp->muxport = tmp->daisy = tmp->muxsel = -1;
+	tmp->modes = 0;
+	INIT_LIST_HEAD(&tmp->list);
+	tmp->devices = tmp->cad = NULL;
+	tmp->flags = 0;
+	tmp->ops = ops;
+	tmp->physport = tmp;
+	memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
+	rwlock_init(&tmp->cad_lock);
+	spin_lock_init(&tmp->waitlist_lock);
+	spin_lock_init(&tmp->pardevice_lock);
+	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
+	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+	sema_init(&tmp->ieee1284.irq, 0);
+	tmp->spintime = parport_default_spintime;
+	atomic_set(&tmp->ref_count, 1);
+	INIT_LIST_HEAD(&tmp->full_list);
+
+	/*
+	 * "parport" (7) + a decimal int (at most 10 digits) + NUL needs
+	 * 18 bytes; the old 15-byte buffer could overflow for very large
+	 * port numbers.
+	 */
+	name = kmalloc(18, GFP_KERNEL);
+	if (!name) {
+		kfree(tmp);
+		return NULL;
+	}
+	/* Search for the lowest free parport number. */
+
+	spin_lock(&full_list_lock);
+	for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
+		struct parport *p = list_entry(l, struct parport, full_list);
+		if (p->number != num)
+			break;
+	}
+	tmp->portnum = tmp->number = num;
+	list_add_tail(&tmp->full_list, l);
+	spin_unlock(&full_list_lock);
+
+	/*
+	 * Now that the portnum is known finish doing the Init.
+	 * snprintf() bounds the write; portnum was assigned just above.
+	 */
+	snprintf(name, 18, "parport%d", tmp->portnum);
+	tmp->name = name;
+	tmp->bus_dev.bus = &parport_bus_type;
+	tmp->bus_dev.release = free_port;
+	/* dev_set_name() takes a printf-style format: never pass a
+	 * non-literal string as the format argument. */
+	dev_set_name(&tmp->bus_dev, "%s", name);
+	tmp->bus_dev.type = &parport_device_type;
+
+	for (device = 0; device < 5; device++)
+		/* assume the worst */
+		tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
+
+	tmp->waithead = tmp->waittail = NULL;
+
+	ret = device_register(&tmp->bus_dev);
+	if (ret) {
+		/* put_device() triggers free_port(), which frees both
+		 * the name and the parport structure. */
+		put_device(&tmp->bus_dev);
+		return NULL;
+	}
+
+	return tmp;
+}
+EXPORT_SYMBOL(parport_register_port);
+
+/**
+ * parport_announce_port - tell device drivers about a parallel port
+ * @port: parallel port to announce
+ *
+ * After a port driver has registered a parallel port with
+ * parport_register_port, and performed any necessary
+ * initialisation or adjustments, it should call
+ * parport_announce_port() in order to notify all device drivers
+ * that have called parport_register_driver(). Their attach()
+ * functions will be called, with @port as the parameter.
+ **/
+
+void parport_announce_port(struct parport *port)
+{
+	int i;
+
+#ifdef CONFIG_PARPORT_1284
+	/* Analyse the IEEE1284.3 topology of the port. */
+	parport_daisy_init(port);
+#endif
+
+	if (!port->dev)
+		pr_warn("%s: fix this legacy no-device port driver!\n",
+			port->name);
+
+	parport_proc_register(port);
+	/* Hold registration_lock across both list insertion and driver
+	 * notification so no driver sees a half-announced port. */
+	mutex_lock(&registration_lock);
+	spin_lock_irq(&parportlist_lock);
+	/* Make the port visible to parport_find_number()/_find_base(). */
+	list_add_tail(&port->list, &portlist);
+	for (i = 1; i < 3; i++) {
+		/* slaves[0..1]: up to two mux slave ports per real port. */
+		struct parport *slave = port->slaves[i-1];
+		if (slave)
+			list_add_tail(&slave->list, &portlist);
+	}
+	spin_unlock_irq(&parportlist_lock);
+
+	/* Let drivers know that new port(s) has arrived. */
+	attach_driver_chain(port);
+	for (i = 1; i < 3; i++) {
+		struct parport *slave = port->slaves[i-1];
+		if (slave)
+			attach_driver_chain(slave);
+	}
+	mutex_unlock(&registration_lock);
+}
+EXPORT_SYMBOL(parport_announce_port);
+EXPORT_SYMBOL(parport_announce_port);
+
+/**
+ * parport_remove_port - deregister a parallel port
+ * @port: parallel port to deregister
+ *
+ * When a parallel port driver is forcibly unloaded, or a
+ * parallel port becomes inaccessible, the port driver must call
+ * this function in order to deal with device drivers that still
+ * want to use it.
+ *
+ * The parport structure associated with the port has its
+ * operations structure replaced with one containing 'null'
+ * operations that return errors or just don't do anything.
+ *
+ * Any drivers that have registered themselves using
+ * parport_register_driver() are notified that the port is no
+ * longer accessible by having their detach() routines called
+ * with @port as the parameter.
+ **/
+
+void parport_remove_port(struct parport *port)
+{
+	int i;
+
+	mutex_lock(&registration_lock);
+
+	/* Spread the word. */
+	detach_driver_chain(port);
+
+#ifdef CONFIG_PARPORT_1284
+	/* Forget the IEEE1284.3 topology of the port. */
+	parport_daisy_fini(port);
+	for (i = 1; i < 3; i++) {
+		struct parport *slave = port->slaves[i-1];
+		if (!slave)
+			continue;
+		detach_driver_chain(slave);
+		parport_daisy_fini(slave);
+	}
+#endif
+
+	/* Replace the operations with stubs so late callers fail
+	 * gracefully instead of touching vanished hardware. */
+	port->ops = &dead_ops;
+	spin_lock(&parportlist_lock);
+	/* Hide the port (and mux slaves) from parport_find_*(). */
+	list_del_init(&port->list);
+	for (i = 1; i < 3; i++) {
+		struct parport *slave = port->slaves[i-1];
+		if (slave)
+			list_del_init(&slave->list);
+	}
+	spin_unlock(&parportlist_lock);
+
+	mutex_unlock(&registration_lock);
+
+	parport_proc_unregister(port);
+
+	/* Drop the references the mux slaves held on registration. */
+	for (i = 1; i < 3; i++) {
+		struct parport *slave = port->slaves[i-1];
+		if (slave)
+			parport_put_port(slave);
+	}
+}
+EXPORT_SYMBOL(parport_remove_port);
+
+/*
+ * Release callback for a pardevice's embedded struct device; note that
+ * par_dev->state is freed by the callers, not here.
+ */
+static void free_pardevice(struct device *dev)
+{
+	struct pardevice *par_dev = to_pardevice(dev);
+
+	kfree(par_dev->name);
+	kfree(par_dev);
+}
+
+/**
+ * parport_register_dev_model - register a device on a parallel port
+ * @port: port to which the device is attached
+ * @name: a name to refer to the device
+ * @par_dev_cb: struct containing callbacks
+ * @id: device number to be given to the device
+ *
+ * This function, called by parallel port device drivers,
+ * declares that a device is connected to a port, and tells the
+ * system all it needs to know.
+ *
+ * The struct pardev_cb contains pointer to callbacks. preemption
+ * callback function, @preempt, is called when this device driver
+ * has claimed access to the port but another device driver wants
+ * to use it. It is given, @private, as its parameter, and should
+ * return zero if it is willing for the system to release the port
+ * to another driver on its behalf. If it wants to keep control of
+ * the port it should return non-zero, and no action will be taken.
+ * It is good manners for the driver to try to release the port at
+ * the earliest opportunity after its preemption callback rejects a
+ * preemption attempt. Note that if a preemption callback is happy
+ * for preemption to go ahead, there is no need to release the
+ * port; it is done automatically. This function may not block, as
+ * it may be called from interrupt context. If the device driver
+ * does not support preemption, @preempt can be %NULL.
+ *
+ * The wake-up ("kick") callback function, @wakeup, is called when
+ * the port is available to be claimed for exclusive access; that
+ * is, parport_claim() is guaranteed to succeed when called from
+ * inside the wake-up callback function. If the driver wants to
+ * claim the port it should do so; otherwise, it need not take
+ * any action. This function may not block, as it may be called
+ * from interrupt context. If the device driver does not want to
+ * be explicitly invited to claim the port in this way, @wakeup can
+ * be %NULL.
+ *
+ * The interrupt handler, @irq_func, is called when an interrupt
+ * arrives from the parallel port. Note that if a device driver
+ * wants to use interrupts it should use parport_enable_irq(),
+ * and can also check the irq member of the parport structure
+ * representing the port.
+ *
+ * The parallel port (lowlevel) driver is the one that has called
+ * request_irq() and whose interrupt handler is called first.
+ * This handler does whatever needs to be done to the hardware to
+ * acknowledge the interrupt (for PC-style ports there is nothing
+ * special to be done). It then tells the IEEE 1284 code about
+ * the interrupt, which may involve reacting to an IEEE 1284
+ * event depending on the current IEEE 1284 phase. After this,
+ * it calls @irq_func. Needless to say, @irq_func will be called
+ * from interrupt context, and may not block.
+ *
+ * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
+ * so should only be used when sharing the port with other device
+ * drivers is impossible and would lead to incorrect behaviour.
+ * Use it sparingly! Normally, @flags will be zero.
+ *
+ * This function returns a pointer to a structure that represents
+ * the device on the port, or %NULL if there is not enough memory
+ * to allocate space for that structure.
+ **/
+
+struct pardevice *
+parport_register_dev_model(struct parport *port, const char *name,
+			   const struct pardev_cb *par_dev_cb, int id)
+{
+	struct pardevice *par_dev;
+	int ret;
+	char *devname;
+
+	if (port->physport->flags & PARPORT_FLAG_EXCL) {
+		/* An exclusive device is registered. */
+		pr_err("%s: no more devices allowed\n", port->name);
+		return NULL;
+	}
+
+	/* A "lurking" device only ever grabs the port from its wakeup
+	 * callback, so it is useless without both callbacks. */
+	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
+		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
+			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
+				port->name, name);
+			return NULL;
+		}
+	}
+
+	/* Early, unlocked check; re-checked under pardevice_lock below. */
+	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
+		if (port->physport->devices) {
+			/*
+			 * If a device is already registered and this new
+			 * device wants exclusive access, then no need to
+			 * continue as we can not grant exclusive access to
+			 * this device.
+			 */
+			pr_err("%s: cannot grant exclusive access for device %s\n",
+			       port->name, name);
+			return NULL;
+		}
+	}
+
+	/* Keep the lowlevel driver module and the port alive for as long
+	 * as this device exists. */
+	if (!try_module_get(port->ops->owner))
+		return NULL;
+
+	parport_get_port(port);
+
+	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
+	if (!par_dev)
+		goto err_put_port;
+
+	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
+	if (!par_dev->state)
+		goto err_put_par_dev;
+
+	devname = kstrdup(name, GFP_KERNEL);
+	if (!devname)
+		goto err_free_par_dev;
+
+	par_dev->name = devname;
+	par_dev->port = port;
+	par_dev->daisy = -1;
+	par_dev->preempt = par_dev_cb->preempt;
+	par_dev->wakeup = par_dev_cb->wakeup;
+	par_dev->private = par_dev_cb->private;
+	par_dev->flags = par_dev_cb->flags;
+	par_dev->irq_func = par_dev_cb->irq_func;
+	par_dev->waiting = 0;
+	par_dev->timeout = 5 * HZ;
+
+	par_dev->dev.parent = &port->bus_dev;
+	par_dev->dev.bus = &parport_bus_type;
+	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
+	if (ret)
+		goto err_free_devname;
+	par_dev->dev.release = free_pardevice;
+	par_dev->devmodel = true;
+	ret = device_register(&par_dev->dev);
+	if (ret) {
+		/* free_pardevice() (via put_device) frees name+par_dev
+		 * but not state, so free state explicitly. */
+		kfree(par_dev->state);
+		put_device(&par_dev->dev);
+		goto err_put_port;
+	}
+
+	/* Chain this onto the list */
+	par_dev->prev = NULL;
+	/*
+	 * This function must not run from an irq handler so we don' t need
+	 * to clear irq on the local CPU. -arca
+	 */
+	spin_lock(&port->physport->pardevice_lock);
+
+	/* Authoritative EXCL re-check, now under the lock. */
+	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
+		if (port->physport->devices) {
+			spin_unlock(&port->physport->pardevice_lock);
+			pr_debug("%s: cannot grant exclusive access for device %s\n",
+				 port->name, name);
+			kfree(par_dev->state);
+			device_unregister(&par_dev->dev);
+			goto err_put_port;
+		}
+		/* NOTE(review): the flag is checked on port->physport->flags
+		 * above but set on port->flags here; these differ for mux
+		 * ports -- confirm intended. */
+		port->flags |= PARPORT_FLAG_EXCL;
+	}
+
+	par_dev->next = port->physport->devices;
+	wmb();	/*
+		 * Make sure that tmp->next is written before it's
+		 * added to the list; see comments marked 'no locking
+		 * required'
+		 */
+	if (port->physport->devices)
+		port->physport->devices->prev = par_dev;
+	port->physport->devices = par_dev;
+	spin_unlock(&port->physport->pardevice_lock);
+
+	init_waitqueue_head(&par_dev->wait_q);
+	par_dev->timeslice = parport_default_timeslice;
+	par_dev->waitnext = NULL;
+	par_dev->waitprev = NULL;
+
+	/*
+	 * This has to be run as last thing since init_state may need other
+	 * pardevice fields. -arca
+	 */
+	port->ops->init_state(par_dev, par_dev->state);
+	/* Only the first device on the port gets the proc entry. */
+	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+		port->proc_device = par_dev;
+		parport_device_proc_register(par_dev);
+	}
+
+	return par_dev;
+
+err_free_devname:
+	kfree(devname);
+err_free_par_dev:
+	kfree(par_dev->state);
+err_put_par_dev:
+	/* Before device_register(), par_dev is ours to free directly. */
+	if (!par_dev->devmodel)
+		kfree(par_dev);
+err_put_port:
+	parport_put_port(port);
+	module_put(port->ops->owner);
+
+	return NULL;
+}
+EXPORT_SYMBOL(parport_register_dev_model);
+
+/**
+ * parport_unregister_device - deregister a device on a parallel port
+ * @dev: pointer to structure representing device
+ *
+ * This undoes the effect of parport_register_device().
+ **/
+
+void parport_unregister_device(struct pardevice *dev)
+{
+	struct parport *port;
+
+#ifdef PARPORT_PARANOID
+	if (!dev) {
+		pr_err("%s: passed NULL\n", __func__);
+		return;
+	}
+#endif
+
+	/* All bookkeeping lives on the physical port, even for mux/daisy
+	 * devices registered against a logical one. */
+	port = dev->port->physport;
+
+	if (port->proc_device == dev) {
+		port->proc_device = NULL;
+		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
+		parport_device_proc_unregister(dev);
+	}
+
+	/* Forcibly release the port if the driver forgot to. */
+	if (port->cad == dev) {
+		printk(KERN_DEBUG "%s: %s forgot to release port\n",
+		       port->name, dev->name);
+		parport_release(dev);
+	}
+
+	/* Unlink from the port's doubly-linked device list. */
+	spin_lock(&port->pardevice_lock);
+	if (dev->next)
+		dev->next->prev = dev->prev;
+	if (dev->prev)
+		dev->prev->next = dev->next;
+	else
+		port->devices = dev->next;
+
+	if (dev->flags & PARPORT_DEV_EXCL)
+		port->flags &= ~PARPORT_FLAG_EXCL;
+
+	spin_unlock(&port->pardevice_lock);
+
+	/*
+	 * Make sure we haven't left any pointers around in the wait
+	 * list.
+	 */
+	spin_lock_irq(&port->waitlist_lock);
+	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
+		if (dev->waitprev)
+			dev->waitprev->waitnext = dev->waitnext;
+		else
+			port->waithead = dev->waitnext;
+		if (dev->waitnext)
+			dev->waitnext->waitprev = dev->waitprev;
+		else
+			port->waittail = dev->waitprev;
+	}
+	spin_unlock_irq(&port->waitlist_lock);
+
+	/* state is not freed by free_pardevice(), so free it here. */
+	kfree(dev->state);
+	device_unregister(&dev->dev);
+
+	/* Drop the module and port references taken at registration. */
+	module_put(port->ops->owner);
+	parport_put_port(port);
+}
+EXPORT_SYMBOL(parport_unregister_device);
+
+/**
+ * parport_find_number - find a parallel port by number
+ * @number: parallel port number
+ *
+ * This returns the parallel port with the specified number, or
+ * %NULL if there is none.
+ *
+ * There is an implicit parport_get_port() done already; to throw
+ * away the reference to the port that parport_find_number()
+ * gives you, use parport_put_port().
+ */
+
+struct parport *parport_find_number(int number)
+{
+	struct parport *p;
+	struct parport *found = NULL;
+
+	/* If nothing is registered yet, give lowlevel port drivers a
+	 * chance to probe and register their ports first. */
+	if (list_empty(&portlist))
+		get_lowlevel_driver();
+
+	spin_lock(&parportlist_lock);
+	list_for_each_entry(p, &portlist, list) {
+		if (p->number != number)
+			continue;
+		/* Take a reference for the caller; dropped later via
+		 * parport_put_port(). */
+		found = parport_get_port(p);
+		break;
+	}
+	spin_unlock(&parportlist_lock);
+
+	return found;
+}
+EXPORT_SYMBOL(parport_find_number);
+
+/**
+ * parport_find_base - find a parallel port by base address
+ * @base: base I/O address
+ *
+ * This returns the parallel port with the specified base
+ * address, or %NULL if there is none.
+ *
+ * There is an implicit parport_get_port() done already; to throw
+ * away the reference to the port that parport_find_base()
+ * gives you, use parport_put_port().
+ */
+
+struct parport *parport_find_base(unsigned long base)
+{
+	struct parport *p;
+	struct parport *found = NULL;
+
+	/* If nothing is registered yet, give lowlevel port drivers a
+	 * chance to probe and register their ports first. */
+	if (list_empty(&portlist))
+		get_lowlevel_driver();
+
+	spin_lock(&parportlist_lock);
+	list_for_each_entry(p, &portlist, list) {
+		if (p->base != base)
+			continue;
+		/* Take a reference for the caller; dropped later via
+		 * parport_put_port(). */
+		found = parport_get_port(p);
+		break;
+	}
+	spin_unlock(&parportlist_lock);
+
+	return found;
+}
+EXPORT_SYMBOL(parport_find_base);
+
+/**
+ * parport_claim - claim access to a parallel port device
+ * @dev: pointer to structure representing a device on the port
+ *
+ * This function will not block and so can be used from interrupt
+ * context. If parport_claim() succeeds in claiming access to
+ * the port it returns zero and the port is available to use. It
+ * may fail (returning non-zero) if the port is in use by another
+ * driver and that driver is not willing to relinquish control of
+ * the port.
+ **/
+
+int parport_claim(struct pardevice *dev)
+{
+	struct pardevice *oldcad;
+	struct parport *port = dev->port->physport;
+	unsigned long flags;
+
+	/* Already the current owner ("cad"): nothing to do. */
+	if (port->cad == dev) {
+		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
+		return 0;
+	}
+
+	/* Preempt any current device */
+	write_lock_irqsave(&port->cad_lock, flags);
+	oldcad = port->cad;
+	if (oldcad) {
+		if (oldcad->preempt) {
+			/* Non-zero from preempt() means the owner keeps
+			 * the port; we must wait. */
+			if (oldcad->preempt(oldcad->private))
+				goto blocked;
+			port->ops->save_state(port, dev->state);
+		} else
+			goto blocked;
+
+		if (port->cad != oldcad) {
+			/*
+			 * I think we'll actually deadlock rather than
+			 * get here, but just in case..
+			 */
+			pr_warn("%s: %s released port when preempted!\n",
+				port->name, oldcad->name);
+			if (port->cad)
+				goto blocked;
+		}
+	}
+
+	/* Can't fail from now on, so mark ourselves as no longer waiting. */
+	if (dev->waiting & 1) {
+		dev->waiting = 0;
+
+		/* Take ourselves out of the wait list again. */
+		spin_lock_irq(&port->waitlist_lock);
+		if (dev->waitprev)
+			dev->waitprev->waitnext = dev->waitnext;
+		else
+			port->waithead = dev->waitnext;
+		if (dev->waitnext)
+			dev->waitnext->waitprev = dev->waitprev;
+		else
+			port->waittail = dev->waitprev;
+		spin_unlock_irq(&port->waitlist_lock);
+		dev->waitprev = dev->waitnext = NULL;
+	}
+
+	/* Now we do the change of devices */
+	port->cad = dev;
+
+#ifdef CONFIG_PARPORT_1284
+	/* If it's a mux port, select it. */
+	if (dev->port->muxport >= 0) {
+		/* FIXME */
+		port->muxsel = dev->port->muxport;
+	}
+
+	/* If it's a daisy chain device, select it. */
+	if (dev->daisy >= 0) {
+		/* This could be lazier. */
+		if (!parport_daisy_select(port, dev->daisy,
+					  IEEE1284_MODE_COMPAT))
+			port->daisy = dev->daisy;
+	}
+#endif /* IEEE1284.3 support */
+
+	/* Restore control registers */
+	port->ops->restore_state(port, dev->state);
+	write_unlock_irqrestore(&port->cad_lock, flags);
+	dev->time = jiffies;
+	return 0;
+
+blocked:
+	/*
+	 * If this is the first time we tried to claim the port, register an
+	 * interest. This is only allowed for devices sleeping in
+	 * parport_claim_or_block(), or those with a wakeup function.
+	 */
+
+	/* The cad_lock is still held for writing here */
+	if (dev->waiting & 2 || dev->wakeup) {
+		spin_lock(&port->waitlist_lock);
+		/* Bit 0 of dev->waiting doubles as "already queued". */
+		if (test_and_set_bit(0, &dev->waiting) == 0) {
+			/* First add ourselves to the end of the wait list. */
+			dev->waitnext = NULL;
+			dev->waitprev = port->waittail;
+			if (port->waittail) {
+				port->waittail->waitnext = dev;
+				port->waittail = dev;
+			} else
+				port->waithead = port->waittail = dev;
+		}
+		spin_unlock(&port->waitlist_lock);
+	}
+	write_unlock_irqrestore(&port->cad_lock, flags);
+	return -EAGAIN;
+}
+EXPORT_SYMBOL(parport_claim);
+EXPORT_SYMBOL(parport_claim);
+
+/**
+ * parport_claim_or_block - claim access to a parallel port device
+ * @dev: pointer to structure representing a device on the port
+ *
+ * This behaves like parport_claim(), but will block if necessary
+ * to wait for the port to be free. A return value of 1
+ * indicates that it slept; 0 means that it succeeded without
+ * needing to sleep. A negative error code indicates failure.
+ **/
+
+int parport_claim_or_block(struct pardevice *dev)
+{
+	int r;
+
+	/*
+	 * Signal to parport_claim() that we can wait even without a
+	 * wakeup function.
+	 */
+	dev->waiting = 2;
+
+	/* Try to claim the port. If this fails, we need to sleep. */
+	r = parport_claim(dev);
+	if (r == -EAGAIN) {
+#ifdef PARPORT_DEBUG_SHARING
+		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
+		       dev->name);
+#endif
+		/*
+		 * FIXME!!! Use the proper locking for dev->waiting,
+		 * and make this use the "wait_event_interruptible()"
+		 * interfaces. The cli/sti that used to be here
+		 * did nothing.
+		 *
+		 * See also parport_release()
+		 */
+
+		/*
+		 * If dev->waiting is clear now, an interrupt
+		 * gave us the port and we would deadlock if we slept.
+		 */
+		if (dev->waiting) {
+			wait_event_interruptible(dev->wait_q,
+						 !dev->waiting);
+			/* Interrupted by a signal before getting the
+			 * port: bail out with -EINTR. */
+			if (signal_pending(current))
+				return -EINTR;
+			/* 1 == "we had to sleep", per the contract. */
+			r = 1;
+		} else {
+			r = 0;
+#ifdef PARPORT_DEBUG_SHARING
+			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
+			       dev->name);
+#endif
+		}
+
+#ifdef PARPORT_DEBUG_SHARING
+		if (dev->port->physport->cad != dev)
+			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
+			       dev->name, dev->port->physport->cad ?
+			       dev->port->physport->cad->name : "nobody");
+#endif
+	}
+	dev->waiting = 0;
+	return r;
+}
+EXPORT_SYMBOL(parport_claim_or_block);
+EXPORT_SYMBOL(parport_claim_or_block);
+
+/**
+ * parport_release - give up access to a parallel port device
+ * @dev: pointer to structure representing parallel port device
+ *
+ * This function cannot fail, but it should not be called without
+ * the port claimed. Similarly, if the port is already claimed
+ * you should not try claiming it again.
+ **/
+
+void parport_release(struct pardevice *dev)
+{
+	struct parport *port = dev->port->physport;
+	struct pardevice *pd;
+	unsigned long flags;
+
+	/* Make sure that dev is the current device */
+	write_lock_irqsave(&port->cad_lock, flags);
+	if (port->cad != dev) {
+		write_unlock_irqrestore(&port->cad_lock, flags);
+		pr_warn("%s: %s tried to release parport when not owner\n",
+			port->name, dev->name);
+		return;
+	}
+
+#ifdef CONFIG_PARPORT_1284
+	/* If this is on a mux port, deselect it. */
+	if (dev->port->muxport >= 0) {
+		/* FIXME */
+		port->muxsel = -1;
+	}
+
+	/* If this is a daisy device, deselect it. */
+	if (dev->daisy >= 0) {
+		parport_daisy_deselect_all(port);
+		port->daisy = -1;
+	}
+#endif
+
+	port->cad = NULL;
+	write_unlock_irqrestore(&port->cad_lock, flags);
+
+	/* Save control registers */
+	port->ops->save_state(port, dev->state);
+
+	/*
+	 * If anybody is waiting, find out who's been there longest and
+	 * then wake them up. (Note: no locking required)
+	 */
+	/* !!! LOCKING IS NEEDED HERE */
+	for (pd = port->waithead; pd; pd = pd->waitnext) {
+		if (pd->waiting & 2) { /* sleeping in claim_or_block */
+			/* Hand the port over directly, then wake the
+			 * sleeper in parport_claim_or_block(). */
+			parport_claim(pd);
+			if (waitqueue_active(&pd->wait_q))
+				wake_up_interruptible(&pd->wait_q);
+			return;
+		} else if (pd->wakeup) {
+			pd->wakeup(pd->private);
+			if (dev->port->cad) /* racy but no matter */
+				return;
+		} else {
+			pr_err("%s: don't know how to wake %s\n",
+			       port->name, pd->name);
+		}
+	}
+
+	/*
+	 * Nobody was waiting, so walk the list to see if anyone is
+	 * interested in being woken up. (Note: no locking required)
+	 */
+	/* !!! LOCKING IS NEEDED HERE */
+	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
+		if (pd->wakeup && pd != dev)
+			pd->wakeup(pd->private);
+	}
+}
+EXPORT_SYMBOL(parport_release);
+EXPORT_SYMBOL(parport_release);
+
+irqreturn_t parport_irq_handler(int irq, void *dev_id)
+{
+	/* dev_id is the struct parport that was passed to request_irq();
+	 * hand the event straight to the IEEE 1284 core. */
+	parport_generic_irq(dev_id);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(parport_irq_handler);
+
+MODULE_LICENSE("GPL");